From 338a233fbf28f2d71025e5ac6eae1905db0f7a5d Mon Sep 17 00:00:00 2001
From: Ali Maredia
Date: Fri, 11 Dec 2020 16:19:39 -0500
Subject: [PATCH] rgw: add DPP's to logging for most ops

This commit contains the following major changes:

- Most log output lines for major ops now have DoutPrefixProviders.
- Create a new macro, ldpp_subdout, meant as a replacement for lsubdout
  in header files that do not define the rgw subsys (a usage sketch
  follows below).
- Changes to the RGWObjManifest begin and end iterators.
- Make rgw_dencoder.cc contain only the logic related to encoding.

Also add dpp to the ldout and lderr calls that already use req_state,
and replace sync_env->dpp/env->dpp with dpp.

Signed-off-by: Ali Maredia
Signed-off-by: Kalpesh Pandya
Signed-off-by: Casey Bodley
(cherry picked from commit 6cfc2ce2ce669e139acb682898c53c5602075e31)
---
 src/common/dout.h | 5 + src/rgw/cls_fifo_legacy.cc | 558 ++++---- src/rgw/cls_fifo_legacy.h | 67 +- src/rgw/librgw.cc | 41 +- src/rgw/rgw_acl.cc | 19 +- src/rgw/rgw_acl.h | 4 +- src/rgw/rgw_acl_s3.cc | 44 +- src/rgw/rgw_acl_s3.h | 4 +- src/rgw/rgw_acl_swift.cc | 41 +- src/rgw/rgw_acl_swift.h | 11 +- src/rgw/rgw_admin.cc | 662 ++++----- src/rgw/rgw_auth.cc | 31 +- src/rgw/rgw_auth.h | 3 +- src/rgw/rgw_auth_filters.h | 6 +- src/rgw/rgw_auth_s3.cc | 78 +- src/rgw/rgw_auth_s3.h | 20 +- src/rgw/rgw_bucket.cc | 627 +++++---- src/rgw/rgw_bucket.h | 119 +- src/rgw/rgw_bucket_sync.cc | 6 +- src/rgw/rgw_bucket_sync.h | 2 +- src/rgw/rgw_cache.cc | 49 +- src/rgw/rgw_cache.h | 15 +- src/rgw/rgw_common.cc | 10 +- src/rgw/rgw_common.h | 8 +- src/rgw/rgw_coroutine.cc | 28 +- src/rgw/rgw_coroutine.h | 18 +- src/rgw/rgw_cors_s3.cc | 24 +- src/rgw/rgw_cors_s3.h | 10 +- src/rgw/rgw_cr_rados.cc | 164 +-- src/rgw/rgw_cr_rados.h | 210 +-- src/rgw/rgw_cr_rest.cc | 10 +- src/rgw/rgw_cr_rest.h | 26 +- src/rgw/rgw_cr_tools.cc | 51 +- src/rgw/rgw_crypt.cc | 70 +- src/rgw/rgw_crypt.h | 2 +- src/rgw/rgw_data_sync.cc | 257 ++-- src/rgw/rgw_data_sync.h | 57 +- src/rgw/rgw_datalog.cc | 194 +-- src/rgw/rgw_datalog.h | 46 +- src/rgw/rgw_dencoder.cc | 159 --- src/rgw/rgw_etag_verifier.cc | 15 +- src/rgw/rgw_etag_verifier.h | 3 +- src/rgw/rgw_file.cc | 10 +- src/rgw/rgw_file.h | 12 +- src/rgw/rgw_frontend.h | 19 +- src/rgw/rgw_gc.cc | 10 +- src/rgw/rgw_json_enc.cc | 7 +- src/rgw/rgw_lc.cc | 130 +- src/rgw/rgw_lc.h | 3 +- src/rgw/rgw_lib.h | 8 +- src/rgw/rgw_lib_frontend.h | 2 +- src/rgw/rgw_loadgen.cc | 5 +- src/rgw/rgw_loadgen.h | 2 +- src/rgw/rgw_loadgen_process.cc | 4 +- src/rgw/rgw_log.cc | 24 +- src/rgw/rgw_log_backing.cc | 160 ++- src/rgw/rgw_log_backing.h | 109 +- src/rgw/rgw_lua.cc | 22 +- src/rgw/rgw_lua.h | 12 +- src/rgw/rgw_lua_request.cc | 6 +- src/rgw/rgw_main.cc | 9 +- src/rgw/rgw_mdlog.h | 17 +- src/rgw/rgw_metadata.cc | 107 +- src/rgw/rgw_metadata.h | 57 +- src/rgw/rgw_multi.cc | 62 +- src/rgw/rgw_multi.h | 14 +- src/rgw/rgw_notify.cc | 157 +-- src/rgw/rgw_notify.h | 15 +- src/rgw/rgw_obj_manifest.cc | 215 ++- src/rgw/rgw_obj_manifest.h | 88 +- src/rgw/rgw_object_expirer.cc | 3 +- src/rgw/rgw_object_expirer_core.cc | 98 +- src/rgw/rgw_object_expirer_core.h | 34 +- src/rgw/rgw_oidc_provider.cc | 50 +- src/rgw/rgw_oidc_provider.h | 12 +- src/rgw/rgw_op.cc | 534 ++++---- src/rgw/rgw_op.h | 23 +- src/rgw/rgw_orphan.cc | 281 ++-- src/rgw/rgw_orphan.h | 40 +- src/rgw/rgw_os_lib.cc | 2 +- src/rgw/rgw_otp.cc | 25 +- src/rgw/rgw_otp.h | 7 +- src/rgw/rgw_period_history.cc | 12 +- src/rgw/rgw_period_history.h | 5 +- src/rgw/rgw_period_puller.cc | 30 +- src/rgw/rgw_period_puller.h | 2 +-
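A minimal usage sketch for the new ldpp_subdout macro (illustration only, not
part of the patch; "starting worker" is a placeholder message, `dpp` stands
for any DoutPrefixProvider* in scope and `cct` for a CephContext*):

    #include "common/dout.h"  // defines lsubdout, ldpp_dout and, with this
                              // patch, ldpp_subdout

    // Previous fallback in a header with no rgw dout_subsys: the subsystem
    // is passed explicitly, but the line is not prefixed by a
    // DoutPrefixProvider.
    lsubdout(cct, rgw, 10) << "starting worker" << dendl;

    // New form added by this patch: still passes the subsystem explicitly,
    // and also emits the provider's prefix via gen_prefix().
    ldpp_subdout(dpp, rgw, 10) << "starting worker" << dendl;

    // For comparison, the existing ldpp_dout takes the subsystem from the
    // provider itself (dpp->get_subsys()).
    ldpp_dout(dpp, 10) << "starting worker" << dendl;

Like lsubdout, the new macro names the subsystem explicitly instead of relying
on a file-local dout_subsys; like ldpp_dout, it prints the provider's prefix.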
src/rgw/rgw_period_pusher.cc | 33 +- src/rgw/rgw_period_pusher.h | 2 +- src/rgw/rgw_process.cc | 6 +- src/rgw/rgw_process.h | 13 +- src/rgw/rgw_pubsub.cc | 148 +-- src/rgw/rgw_pubsub.h | 42 +- src/rgw/rgw_pubsub_push.cc | 10 +- src/rgw/rgw_putobj_processor.cc | 28 +- src/rgw/rgw_quota.cc | 108 +- src/rgw/rgw_quota.h | 2 +- src/rgw/rgw_rados.cc | 1476 +++++++++++---------- src/rgw/rgw_rados.h | 333 ++--- src/rgw/rgw_realm_reloader.cc | 26 +- src/rgw/rgw_realm_watcher.cc | 22 +- src/rgw/rgw_realm_watcher.h | 4 +- src/rgw/rgw_reshard.cc | 208 +-- src/rgw/rgw_reshard.h | 51 +- src/rgw/rgw_rest.cc | 54 +- src/rgw/rgw_rest_bucket.cc | 26 +- src/rgw/rgw_rest_client.cc | 74 +- src/rgw/rgw_rest_client.h | 26 +- src/rgw/rgw_rest_config.cc | 4 +- src/rgw/rgw_rest_conn.cc | 48 +- src/rgw/rgw_rest_conn.h | 43 +- src/rgw/rgw_rest_iam.cc | 6 +- src/rgw/rgw_rest_log.cc | 165 +-- src/rgw/rgw_rest_metadata.cc | 26 +- src/rgw/rgw_rest_oidc_provider.cc | 14 +- src/rgw/rgw_rest_pubsub.cc | 98 +- src/rgw/rgw_rest_pubsub_common.cc | 64 +- src/rgw/rgw_rest_realm.cc | 70 +- src/rgw/rgw_rest_role.cc | 38 +- src/rgw/rgw_rest_s3.cc | 96 +- src/rgw/rgw_rest_s3.h | 2 +- src/rgw/rgw_rest_s3website.h | 2 +- src/rgw/rgw_rest_sts.cc | 40 +- src/rgw/rgw_rest_sts.h | 2 +- src/rgw/rgw_rest_swift.cc | 34 +- src/rgw/rgw_rest_usage.cc | 4 +- src/rgw/rgw_rest_user.cc | 74 +- src/rgw/rgw_rest_user_policy.cc | 42 +- src/rgw/rgw_role.cc | 109 +- src/rgw/rgw_role.h | 25 +- src/rgw/rgw_sal.h | 91 +- src/rgw/rgw_sal_rados.cc | 254 ++-- src/rgw/rgw_sal_rados.h | 113 +- src/rgw/rgw_service.cc | 102 +- src/rgw/rgw_service.h | 20 +- src/rgw/rgw_sts.cc | 24 +- src/rgw/rgw_sts.h | 6 +- src/rgw/rgw_swift_auth.cc | 14 +- src/rgw/rgw_sync.cc | 246 ++-- src/rgw/rgw_sync.h | 46 +- src/rgw/rgw_sync_checkpoint.cc | 9 +- src/rgw/rgw_sync_error_repo.cc | 8 +- src/rgw/rgw_sync_module.cc | 8 +- src/rgw/rgw_sync_module.h | 2 +- src/rgw/rgw_sync_module_aws.cc | 60 +- src/rgw/rgw_sync_module_es.cc | 30 +- src/rgw/rgw_sync_module_es_rest.cc | 16 +- src/rgw/rgw_sync_module_log.cc | 4 +- src/rgw/rgw_sync_module_pubsub.cc | 143 +- src/rgw/rgw_sync_module_pubsub_rest.cc | 28 +- src/rgw/rgw_sync_trace.cc | 5 +- src/rgw/rgw_tools.cc | 42 +- src/rgw/rgw_tools.h | 23 +- src/rgw/rgw_torrent.cc | 16 +- src/rgw/rgw_trim_bilog.cc | 143 +- src/rgw/rgw_trim_bilog.h | 7 +- src/rgw/rgw_trim_datalog.cc | 53 +- src/rgw/rgw_trim_datalog.h | 6 +- src/rgw/rgw_trim_mdlog.cc | 87 +- src/rgw/rgw_usage.cc | 12 +- src/rgw/rgw_usage.h | 7 +- src/rgw/rgw_user.cc | 437 +++--- src/rgw/rgw_user.h | 164 ++- src/rgw/rgw_worker.h | 13 +- src/rgw/rgw_zone.cc | 505 +++---- src/rgw/rgw_zone.h | 120 +- src/rgw/services/svc_bi.h | 10 +- src/rgw/services/svc_bi_rados.cc | 90 +- src/rgw/services/svc_bi_rados.h | 35 +- src/rgw/services/svc_bilog_rados.cc | 23 +- src/rgw/services/svc_bilog_rados.h | 13 +- src/rgw/services/svc_bucket.h | 23 +- src/rgw/services/svc_bucket_sobj.cc | 105 +- src/rgw/services/svc_bucket_sobj.h | 31 +- src/rgw/services/svc_bucket_sync.h | 12 +- src/rgw/services/svc_bucket_sync_sobj.cc | 137 +- src/rgw/services/svc_bucket_sync_sobj.h | 22 +- src/rgw/services/svc_cls.cc | 121 +- src/rgw/services/svc_cls.h | 49 +- src/rgw/services/svc_config_key_rados.cc | 2 +- src/rgw/services/svc_config_key_rados.h | 2 +- src/rgw/services/svc_finisher.cc | 2 +- src/rgw/services/svc_finisher.h | 2 +- src/rgw/services/svc_mdlog.cc | 129 +- src/rgw/services/svc_mdlog.h | 23 +- src/rgw/services/svc_meta_be.cc | 47 +- src/rgw/services/svc_meta_be.h | 56 +- src/rgw/services/svc_meta_be_otp.cc | 
10 +- src/rgw/services/svc_meta_be_otp.h | 6 +- src/rgw/services/svc_meta_be_sobj.cc | 33 +- src/rgw/services/svc_meta_be_sobj.h | 17 +- src/rgw/services/svc_notify.cc | 69 +- src/rgw/services/svc_notify.h | 15 +- src/rgw/services/svc_otp.cc | 32 +- src/rgw/services/svc_otp.h | 20 +- src/rgw/services/svc_rados.cc | 32 +- src/rgw/services/svc_rados.h | 16 +- src/rgw/services/svc_sync_modules.cc | 4 +- src/rgw/services/svc_sync_modules.h | 2 +- src/rgw/services/svc_sys_obj.cc | 67 +- src/rgw/services/svc_sys_obj.h | 40 +- src/rgw/services/svc_sys_obj_cache.cc | 114 +- src/rgw/services/svc_sys_obj_cache.h | 38 +- src/rgw/services/svc_sys_obj_core.cc | 175 +-- src/rgw/services/svc_sys_obj_core.h | 52 +- src/rgw/services/svc_sys_obj_core_types.h | 3 +- src/rgw/services/svc_user.h | 40 +- src/rgw/services/svc_user_rados.cc | 232 ++-- src/rgw/services/svc_user_rados.h | 71 +- src/rgw/services/svc_zone.cc | 374 +++--- src/rgw/services/svc_zone.h | 38 +- src/rgw/services/svc_zone_utils.cc | 2 +- src/rgw/services/svc_zone_utils.h | 2 +- src/test/rgw/test_cls_fifo_legacy.cc | 216 +-- src/test/rgw/test_log_backing.cc | 53 +- src/test/rgw/test_rgw_lua.cc | 4 +- src/test/rgw/test_rgw_manifest.cc | 33 +- src/test/rgw/test_rgw_period_history.cc | 30 +- src/test/rgw/test_rgw_throttle.cc | 6 +- src/test/test_cors.cc | 3 +- 219 files changed, 8197 insertions(+), 7366 deletions(-) diff --git a/src/common/dout.h b/src/common/dout.h index b8f762991dbae..421222d535f8d 100644 --- a/src/common/dout.h +++ b/src/common/dout.h @@ -175,6 +175,11 @@ struct is_dynamic> : public std::true_type {}; #define ldout(cct, v) dout_impl(cct, dout_subsys, v) dout_prefix #define lderr(cct) dout_impl(cct, ceph_subsys_, -1) dout_prefix +#define ldpp_subdout(dpp, sub, v) \ + if (decltype(auto) pdpp = (dpp); pdpp) /* workaround -Wnonnull-compare for 'this' */ \ + dout_impl(pdpp->get_cct(), ceph_subsys_##sub, v) \ + pdpp->gen_prefix(*_dout) + #define ldpp_dout(dpp, v) \ if (decltype(auto) pdpp = (dpp); pdpp) /* workaround -Wnonnull-compare for 'this' */ \ dout_impl(pdpp->get_cct(), ceph::dout::need_dynamic(pdpp->get_subsys()), v) \ diff --git a/src/rgw/cls_fifo_legacy.cc b/src/rgw/cls_fifo_legacy.cc index 45a3ad505146a..80af90055255a 100644 --- a/src/rgw/cls_fifo_legacy.cc +++ b/src/rgw/cls_fifo_legacy.cc @@ -65,7 +65,7 @@ void create_meta(lr::ObjectWriteOperation* op, op->exec(fifo::op::CLASS, fifo::op::CREATE_META, in); } -int get_meta(lr::IoCtx& ioctx, const std::string& oid, +int get_meta(const DoutPrefixProvider *dpp, lr::IoCtx& ioctx, const std::string& oid, std::optional objv, fifo::info* info, std::uint32_t* part_header_size, std::uint32_t* part_entry_overhead, @@ -81,7 +81,7 @@ int get_meta(lr::IoCtx& ioctx, const std::string& oid, op.exec(fifo::op::CLASS, fifo::op::GET_META, in, &bl, nullptr); - auto r = rgw_rados_operate(ioctx, oid, &op, nullptr, y); + auto r = rgw_rados_operate(dpp, ioctx, oid, &op, nullptr, y); if (r >= 0) try { fifo::op::get_meta_reply reply; auto iter = bl.cbegin(); @@ -91,13 +91,13 @@ int get_meta(lr::IoCtx& ioctx, const std::string& oid, if (part_entry_overhead) *part_entry_overhead = reply.part_entry_overhead; } catch (const cb::error& err) { - lderr(static_cast(ioctx.cct())) + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " decode failed: " << err.what() << " tid=" << tid << dendl; r = from_error_code(err.code()); } else if (!(probe && (r == -ENOENT || r == -ENODATA))) { - lderr(static_cast(ioctx.cct())) + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " 
fifo::op::GET_META failed r=" << r << " tid=" << tid << dendl; @@ -137,7 +137,7 @@ void part_init(lr::ObjectWriteOperation* op, std::string_view tag, op->exec(fifo::op::CLASS, fifo::op::INIT_PART, in); } -int push_part(lr::IoCtx& ioctx, const std::string& oid, std::string_view tag, +int push_part(const DoutPrefixProvider *dpp, lr::IoCtx& ioctx, const std::string& oid, std::string_view tag, std::deque data_bufs, std::uint64_t tid, optional_yield y) { @@ -155,16 +155,16 @@ int push_part(lr::IoCtx& ioctx, const std::string& oid, std::string_view tag, encode(pp, in); auto retval = 0; op.exec(fifo::op::CLASS, fifo::op::PUSH_PART, in, nullptr, &retval); - auto r = rgw_rados_operate(ioctx, oid, &op, y, lr::OPERATION_RETURNVEC); + auto r = rgw_rados_operate(dpp, ioctx, oid, &op, y, lr::OPERATION_RETURNVEC); if (r < 0) { - lderr(static_cast(ioctx.cct())) + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " fifo::op::PUSH_PART failed r=" << r << " tid=" << tid << dendl; return r; } if (retval < 0) { - lderr(static_cast(ioctx.cct())) + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " error handling response retval=" << retval << " tid=" << tid << dendl; @@ -208,7 +208,7 @@ void trim_part(lr::ObjectWriteOperation* op, op->exec(fifo::op::CLASS, fifo::op::TRIM_PART, in); } -int list_part(lr::IoCtx& ioctx, const std::string& oid, +int list_part(const DoutPrefixProvider *dpp, lr::IoCtx& ioctx, const std::string& oid, std::optional tag, std::uint64_t ofs, std::uint64_t max_entries, std::vector* entries, @@ -226,7 +226,7 @@ int list_part(lr::IoCtx& ioctx, const std::string& oid, encode(lp, in); cb::list bl; op.exec(fifo::op::CLASS, fifo::op::LIST_PART, in, &bl, nullptr); - auto r = rgw_rados_operate(ioctx, oid, &op, nullptr, y); + auto r = rgw_rados_operate(dpp, ioctx, oid, &op, nullptr, y); if (r >= 0) try { fifo::op::list_part_reply reply; auto iter = bl.cbegin(); @@ -236,13 +236,13 @@ int list_part(lr::IoCtx& ioctx, const std::string& oid, if (full_part) *full_part = reply.full_part; if (ptag) *ptag = reply.tag; } catch (const cb::error& err) { - lderr(static_cast(ioctx.cct())) + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " decode failed: " << err.what() << " tid=" << tid << dendl; r = from_error_code(err.code()); } else if (r != -ENOENT) { - lderr(static_cast(ioctx.cct())) + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " fifo::op::LIST_PART failed r=" << r << " tid=" << tid << dendl; @@ -314,7 +314,7 @@ lr::ObjectReadOperation list_part(CephContext* cct, return op; } -int get_part_info(lr::IoCtx& ioctx, const std::string& oid, +int get_part_info(const DoutPrefixProvider *dpp, lr::IoCtx& ioctx, const std::string& oid, fifo::part_header* header, std::uint64_t tid, optional_yield y) { @@ -325,20 +325,20 @@ int get_part_info(lr::IoCtx& ioctx, const std::string& oid, cb::list bl; encode(gpi, in); op.exec(fifo::op::CLASS, fifo::op::GET_PART_INFO, in, &bl, nullptr); - auto r = rgw_rados_operate(ioctx, oid, &op, nullptr, y); + auto r = rgw_rados_operate(dpp, ioctx, oid, &op, nullptr, y); if (r >= 0) try { fifo::op::get_part_info_reply reply; auto iter = bl.cbegin(); decode(reply, iter); if (header) *header = std::move(reply.header); } catch (const cb::error& err) { - lderr(static_cast(ioctx.cct())) + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " decode failed: " << err.what() << " tid=" << tid << dendl; r = from_error_code(err.code()); } else { - lderr(static_cast(ioctx.cct())) + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ 
<< ":" << __LINE__ << " fifo::op::GET_PART_INFO failed r=" << r << " tid=" << tid << dendl; @@ -457,16 +457,16 @@ int FIFO::apply_update(fifo::info* info, return {}; } -int FIFO::_update_meta(const fifo::update& update, +int FIFO::_update_meta(const DoutPrefixProvider *dpp, const fifo::update& update, fifo::objv version, bool* pcanceled, std::uint64_t tid, optional_yield y) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; lr::ObjectWriteOperation op; bool canceled = false; update_meta(&op, info.version, update); - auto r = rgw_rados_operate(ioctx, oid, &op, y); + auto r = rgw_rados_operate(dpp, ioctx, oid, &op, y); if (r >= 0 || r == -ECANCELED) { canceled = (r == -ECANCELED); if (!canceled) { @@ -474,17 +474,17 @@ int FIFO::_update_meta(const fifo::update& update, if (r < 0) canceled = true; } if (canceled) { - r = read_meta(tid, y); + r = read_meta(dpp, tid, y); canceled = r < 0 ? false : true; } } if (pcanceled) *pcanceled = canceled; if (canceled) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " canceled: tid=" << tid << dendl; } if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " returning error: r=" << r << " tid=" << tid << dendl; } return r; @@ -497,27 +497,27 @@ struct Updater : public Completion { bool reread = false; bool* pcanceled = nullptr; std::uint64_t tid; - Updater(FIFO* fifo, lr::AioCompletion* super, + Updater(const DoutPrefixProvider *dpp, FIFO* fifo, lr::AioCompletion* super, const fifo::update& update, fifo::objv version, bool* pcanceled, std::uint64_t tid) - : Completion(super), fifo(fifo), update(update), version(version), + : Completion(dpp, super), fifo(fifo), update(update), version(version), pcanceled(pcanceled) {} - void handle(Ptr&& p, int r) { - ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) { + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; if (reread) handle_reread(std::move(p), r); else - handle_update(std::move(p), r); + handle_update(dpp, std::move(p), r); } - void handle_update(Ptr&& p, int r) { - ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + void handle_update(const DoutPrefixProvider *dpp, Ptr&& p, int r) { + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " handling async update_meta: tid=" << tid << dendl; if (r < 0 && r != -ECANCELED) { - lderr(fifo->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " update failed: r=" << r << " tid=" << tid << dendl; complete(std::move(p), r); return; @@ -526,7 +526,7 @@ struct Updater : public Completion { if (!canceled) { int r = fifo->apply_update(&fifo->info, version, update, tid); if (r < 0) { - ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " update failed, marking canceled: r=" << r << " tid=" << tid << dendl; canceled = true; @@ -534,12 +534,12 @@ struct Updater : public Completion { } if (canceled) { reread = true; - fifo->read_meta(tid, call(std::move(p))); + fifo->read_meta(dpp, tid, call(std::move(p))); return; } if (pcanceled) *pcanceled = false; - ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + 
ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " completing: tid=" << tid << dendl; complete(std::move(p), 0); } @@ -565,24 +565,24 @@ struct Updater : public Completion { } }; -void FIFO::_update_meta(const fifo::update& update, +void FIFO::_update_meta(const DoutPrefixProvider *dpp, const fifo::update& update, fifo::objv version, bool* pcanceled, std::uint64_t tid, lr::AioCompletion* c) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; lr::ObjectWriteOperation op; update_meta(&op, info.version, update); - auto updater = std::make_unique(this, c, update, version, pcanceled, + auto updater = std::make_unique(dpp, this, c, update, version, pcanceled, tid); auto r = ioctx.aio_operate(oid, Updater::call(std::move(updater)), &op); assert(r >= 0); } -int FIFO::create_part(int64_t part_num, std::string_view tag, std::uint64_t tid, +int FIFO::create_part(const DoutPrefixProvider *dpp, int64_t part_num, std::string_view tag, std::uint64_t tid, optional_yield y) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; lr::ObjectWriteOperation op; op.create(false); /* We don't need exclusivity, part_init ensures @@ -591,37 +591,37 @@ int FIFO::create_part(int64_t part_num, std::string_view tag, std::uint64_t tid, part_init(&op, tag, info.params); auto oid = info.part_oid(part_num); l.unlock(); - auto r = rgw_rados_operate(ioctx, oid, &op, y); + auto r = rgw_rados_operate(dpp, ioctx, oid, &op, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " part_init failed: r=" << r << " tid=" << tid << dendl; } return r; } -int FIFO::remove_part(int64_t part_num, std::string_view tag, std::uint64_t tid, +int FIFO::remove_part(const DoutPrefixProvider *dpp, int64_t part_num, std::string_view tag, std::uint64_t tid, optional_yield y) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; lr::ObjectWriteOperation op; op.remove(); std::unique_lock l(m); auto oid = info.part_oid(part_num); l.unlock(); - auto r = rgw_rados_operate(ioctx, oid, &op, y); + auto r = rgw_rados_operate(dpp, ioctx, oid, &op, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " remove failed: r=" << r << " tid=" << tid << dendl; } return r; } -int FIFO::process_journal(std::uint64_t tid, optional_yield y) +int FIFO::process_journal(const DoutPrefixProvider *dpp, std::uint64_t tid, optional_yield y) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; std::vector processed; @@ -634,12 +634,12 @@ int FIFO::process_journal(std::uint64_t tid, optional_yield y) int r = 0; for (auto& [n, entry] : tmpjournal) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " processing entry: entry=" << entry << " tid=" << tid << dendl; switch (entry.op) { case fifo::journal_entry::Op::create: - r = create_part(entry.part_num, entry.part_tag, tid, y); + r = create_part(dpp, entry.part_num, entry.part_tag, tid, y); if (entry.part_num > new_max) { new_max = entry.part_num; } 
@@ -651,21 +651,21 @@ int FIFO::process_journal(std::uint64_t tid, optional_yield y) } break; case fifo::journal_entry::Op::remove: - r = remove_part(entry.part_num, entry.part_tag, tid, y); + r = remove_part(dpp, entry.part_num, entry.part_tag, tid, y); if (r == -ENOENT) r = 0; if (entry.part_num >= new_tail) { new_tail = entry.part_num + 1; } break; default: - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " unknown journaled op: entry=" << entry << " tid=" << tid << dendl; return -EIO; } if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " processing entry failed: entry=" << entry << " r=" << r << " tid=" << tid << dendl; return -r; @@ -678,7 +678,7 @@ int FIFO::process_journal(std::uint64_t tid, optional_yield y) bool canceled = true; for (auto i = 0; canceled && i < MAX_RACE_RETRIES; ++i) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " postprocessing: i=" << i << " tid=" << tid << dendl; std::optional tail_part_num; @@ -695,7 +695,7 @@ int FIFO::process_journal(std::uint64_t tid, optional_yield y) if (processed.empty() && !tail_part_num && !max_part_num) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " nothing to update any more: i=" << i << " tid=" << tid << dendl; canceled = false; @@ -704,9 +704,9 @@ int FIFO::process_journal(std::uint64_t tid, optional_yield y) auto u = fifo::update().tail_part_num(tail_part_num) .head_part_num(head_part_num).max_push_part_num(max_part_num) .journal_entries_rm(processed); - r = _update_meta(u, objv, &canceled, tid, y); + r = _update_meta(dpp, u, objv, &canceled, tid, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " _update_meta failed: update=" << u << " r=" << r << " tid=" << tid << dendl; break; @@ -715,7 +715,7 @@ int FIFO::process_journal(std::uint64_t tid, optional_yield y) if (canceled) { std::vector new_processed; std::unique_lock l(m); - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " update canceled, retrying: i=" << i << " tid=" << tid << dendl; for (auto& e : processed) { @@ -731,31 +731,31 @@ int FIFO::process_journal(std::uint64_t tid, optional_yield y) } } if (r == 0 && canceled) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " canceled too many times, giving up: tid=" << tid << dendl; r = -ECANCELED; } if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " failed, r=: " << r << " tid=" << tid << dendl; } return r; } -int FIFO::_prepare_new_part(bool is_head, std::uint64_t tid, optional_yield y) +int FIFO::_prepare_new_part(const DoutPrefixProvider *dpp, bool is_head, std::uint64_t tid, optional_yield y) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; std::unique_lock l(m); std::vector jentries = { info.next_journal_entry(generate_tag()) }; if (info.journal.find(jentries.front().part_num) != info.journal.end()) { l.unlock(); - ldout(cct, 5) << __PRETTY_FUNCTION__ << ":" << __LINE__ + 
ldpp_dout(dpp, 5) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " new part journaled, but not processed: tid=" << tid << dendl; - auto r = process_journal(tid, y); + auto r = process_journal(dpp, tid, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " process_journal failed: r=" << r << " tid=" << tid << dendl; } return r; @@ -764,7 +764,7 @@ int FIFO::_prepare_new_part(bool is_head, std::uint64_t tid, optional_yield y) auto version = info.version; if (is_head) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " needs new head: tid=" << tid << dendl; auto new_head_jentry = jentries.front(); new_head_jentry.op = fifo::journal_entry::Op::set_head; @@ -777,23 +777,23 @@ int FIFO::_prepare_new_part(bool is_head, std::uint64_t tid, optional_yield y) bool canceled = true; for (auto i = 0; canceled && i < MAX_RACE_RETRIES; ++i) { canceled = false; - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " updating metadata: i=" << i << " tid=" << tid << dendl; auto u = fifo::update{}.journal_entries_add(jentries); - r = _update_meta(u, version, &canceled, tid, y); + r = _update_meta(dpp, u, version, &canceled, tid, y); if (r >= 0 && canceled) { std::unique_lock l(m); auto found = (info.journal.find(jentries.front().part_num) != info.journal.end()); if ((info.max_push_part_num >= jentries.front().part_num && info.head_part_num >= new_head_part_num)) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " raced, but journaled and processed: i=" << i << " tid=" << tid << dendl; return 0; } if (found) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " raced, journaled but not processed: i=" << i << " tid=" << tid << dendl; canceled = false; @@ -801,28 +801,28 @@ int FIFO::_prepare_new_part(bool is_head, std::uint64_t tid, optional_yield y) l.unlock(); } if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " _update_meta failed: update=" << u << " r=" << r << " tid=" << tid << dendl; return r; } } if (canceled) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " canceled too many times, giving up: tid=" << tid << dendl; return -ECANCELED; } - r = process_journal(tid, y); + r = process_journal(dpp, tid, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " process_journal failed: r=" << r << " tid=" << tid << dendl; } return r; } -int FIFO::_prepare_new_head(std::uint64_t tid, optional_yield y) +int FIFO::_prepare_new_head(const DoutPrefixProvider *dpp, std::uint64_t tid, optional_yield y) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; std::unique_lock l(m); std::int64_t new_head_num = info.head_part_num + 1; @@ -832,18 +832,18 @@ int FIFO::_prepare_new_head(std::uint64_t tid, optional_yield y) int r = 0; if (max_push_part_num < new_head_num) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " need new 
part: tid=" << tid << dendl; - r = _prepare_new_part(true, tid, y); + r = _prepare_new_part(dpp, true, tid, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " _prepare_new_part failed: r=" << r << " tid=" << tid << dendl; return r; } std::unique_lock l(m); if (info.max_push_part_num < new_head_num) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " inconsistency, push part less than head part: " << " tid=" << tid << dendl; return -EIO; @@ -854,12 +854,12 @@ int FIFO::_prepare_new_head(std::uint64_t tid, optional_yield y) bool canceled = true; for (auto i = 0; canceled && i < MAX_RACE_RETRIES; ++i) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " updating head: i=" << i << " tid=" << tid << dendl; auto u = fifo::update{}.head_part_num(new_head_num); - r = _update_meta(u, version, &canceled, tid, y); + r = _update_meta(dpp, u, version, &canceled, tid, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " _update_meta failed: update=" << u << " r=" << r << " tid=" << tid << dendl; return r; @@ -869,14 +869,14 @@ int FIFO::_prepare_new_head(std::uint64_t tid, optional_yield y) version = info.version; l.unlock(); if (canceled && (head_part_num >= new_head_num)) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " raced, but completed by the other caller: i=" << i << " tid=" << tid << dendl; canceled = false; } } if (canceled) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " canceled too many times, giving up: tid=" << tid << dendl; return -ECANCELED; } @@ -891,18 +891,18 @@ struct NewPartPreparer : public Completion { bool canceled = false; uint64_t tid; - NewPartPreparer(FIFO* f, lr::AioCompletion* super, + NewPartPreparer(const DoutPrefixProvider *dpp, FIFO* f, lr::AioCompletion* super, std::vector jentries, std::int64_t new_head_part_num, std::uint64_t tid) - : Completion(super), f(f), jentries(std::move(jentries)), + : Completion(dpp, super), f(f), jentries(std::move(jentries)), new_head_part_num(new_head_part_num), tid(tid) {} - void handle(Ptr&& p, int r) { - ldout(f->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) { + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; if (r < 0) { - lderr(f->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " _update_meta failed: r=" << r << " tid=" << tid << dendl; complete(std::move(p), r); @@ -919,7 +919,7 @@ struct NewPartPreparer : public Completion { l.unlock(); if ((max_push_part_num >= jentries.front().part_num && head_part_num >= new_head_part_num)) { - ldout(f->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " raced, but journaled and processed: i=" << i << " tid=" << tid << dendl; complete(std::move(p), 0); @@ -931,34 +931,34 @@ struct NewPartPreparer : public Completion { } if (!found) { ++i; - f->_update_meta(fifo::update{} + f->_update_meta(dpp, fifo::update{} .journal_entries_add(jentries), version, &canceled, tid, 
call(std::move(p))); return; } else { - ldout(f->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " raced, journaled but not processed: i=" << i << " tid=" << tid << dendl; canceled = false; } // Fall through. We still need to process the journal. } - f->process_journal(tid, super()); + f->process_journal(dpp, tid, super()); return; } }; -void FIFO::_prepare_new_part(bool is_head, std::uint64_t tid, +void FIFO::_prepare_new_part(const DoutPrefixProvider *dpp, bool is_head, std::uint64_t tid, lr::AioCompletion* c) { std::unique_lock l(m); std::vector jentries = { info.next_journal_entry(generate_tag()) }; if (info.journal.find(jentries.front().part_num) != info.journal.end()) { l.unlock(); - ldout(cct, 5) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 5) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " new part journaled, but not processed: tid=" << tid << dendl; - process_journal(tid, c); + process_journal(dpp, tid, c); return; } std::int64_t new_head_part_num = info.head_part_num; @@ -972,10 +972,10 @@ void FIFO::_prepare_new_part(bool is_head, std::uint64_t tid, } l.unlock(); - auto n = std::make_unique(this, c, jentries, + auto n = std::make_unique(dpp, this, c, jentries, new_head_part_num, tid); auto np = n.get(); - _update_meta(fifo::update{}.journal_entries_add(jentries), version, + _update_meta(dpp, fifo::update{}.journal_entries_add(jentries), version, &np->canceled, tid, NewPartPreparer::call(std::move(n))); } @@ -987,16 +987,16 @@ struct NewHeadPreparer : public Completion { bool canceled = false; std::uint64_t tid; - NewHeadPreparer(FIFO* f, lr::AioCompletion* super, + NewHeadPreparer(const DoutPrefixProvider *dpp, FIFO* f, lr::AioCompletion* super, bool newpart, std::int64_t new_head_num, std::uint64_t tid) - : Completion(super), f(f), newpart(newpart), new_head_num(new_head_num), + : Completion(dpp, super), f(f), newpart(newpart), new_head_num(new_head_num), tid(tid) {} - void handle(Ptr&& p, int r) { + void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) { if (newpart) handle_newpart(std::move(p), r); else - handle_update(std::move(p), r); + handle_update(dpp, std::move(p), r); } void handle_newpart(Ptr&& p, int r) { @@ -1020,14 +1020,14 @@ struct NewHeadPreparer : public Completion { } } - void handle_update(Ptr&& p, int r) { + void handle_update(const DoutPrefixProvider *dpp, Ptr&& p, int r) { std::unique_lock l(f->m); auto head_part_num = f->info.head_part_num; auto version = f->info.version; l.unlock(); if (r < 0) { - lderr(f->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " _update_meta failed: r=" << r << " tid=" << tid << dendl; complete(std::move(p), r); @@ -1035,7 +1035,7 @@ struct NewHeadPreparer : public Completion { } if (canceled) { if (i >= MAX_RACE_RETRIES) { - lderr(f->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " canceled too many times, giving up: tid=" << tid << dendl; complete(std::move(p), -ECANCELED); return; @@ -1045,23 +1045,23 @@ struct NewHeadPreparer : public Completion { if (head_part_num < new_head_num) { canceled = false; ++i; - ldout(f->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " updating head: i=" << i << " tid=" << tid << dendl; - f->_update_meta(fifo::update{}.head_part_num(new_head_num), + f->_update_meta(dpp, fifo::update{}.head_part_num(new_head_num), 
version, &this->canceled, tid, call(std::move(p))); return; } } - ldout(f->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " succeeded : i=" << i << " tid=" << tid << dendl; complete(std::move(p), 0); return; } }; -void FIFO::_prepare_new_head(std::uint64_t tid, lr::AioCompletion* c) +void FIFO::_prepare_new_head(const DoutPrefixProvider *dpp, std::uint64_t tid, lr::AioCompletion* c) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; std::unique_lock l(m); int64_t new_head_num = info.head_part_num + 1; @@ -1070,26 +1070,26 @@ void FIFO::_prepare_new_head(std::uint64_t tid, lr::AioCompletion* c) l.unlock(); if (max_push_part_num < new_head_num) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " need new part: tid=" << tid << dendl; - auto n = std::make_unique(this, c, true, new_head_num, + auto n = std::make_unique(dpp, this, c, true, new_head_num, tid); - _prepare_new_part(true, tid, NewHeadPreparer::call(std::move(n))); + _prepare_new_part(dpp, true, tid, NewHeadPreparer::call(std::move(n))); } else { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " updating head: tid=" << tid << dendl; - auto n = std::make_unique(this, c, false, new_head_num, + auto n = std::make_unique(dpp, this, c, false, new_head_num, tid); auto np = n.get(); - _update_meta(fifo::update{}.head_part_num(new_head_num), version, + _update_meta(dpp, fifo::update{}.head_part_num(new_head_num), version, &np->canceled, tid, NewHeadPreparer::call(std::move(n))); } } -int FIFO::push_entries(const std::deque& data_bufs, +int FIFO::push_entries(const DoutPrefixProvider *dpp, const std::deque& data_bufs, std::uint64_t tid, optional_yield y) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; std::unique_lock l(m); auto head_part_num = info.head_part_num; @@ -1097,9 +1097,9 @@ int FIFO::push_entries(const std::deque& data_bufs, const auto part_oid = info.part_oid(head_part_num); l.unlock(); - auto r = push_part(ioctx, part_oid, tag, data_bufs, tid, y); + auto r = push_part(dpp, ioctx, part_oid, tag, data_bufs, tid, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " push_part failed: r=" << r << " tid=" << tid << dendl; } return r; @@ -1117,21 +1117,21 @@ void FIFO::push_entries(const std::deque& data_bufs, push_part(ioctx, part_oid, tag, data_bufs, tid, c); } -int FIFO::trim_part(int64_t part_num, uint64_t ofs, +int FIFO::trim_part(const DoutPrefixProvider *dpp, int64_t part_num, uint64_t ofs, std::optional tag, bool exclusive, std::uint64_t tid, optional_yield y) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; lr::ObjectWriteOperation op; std::unique_lock l(m); const auto part_oid = info.part_oid(part_num); l.unlock(); rgw::cls::fifo::trim_part(&op, tag, ofs, exclusive); - auto r = rgw_rados_operate(ioctx, part_oid, &op, y); + auto r = rgw_rados_operate(dpp, ioctx, part_oid, &op, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ 
<< ":" << __LINE__ << " trim_part failed: r=" << r << " tid=" << tid << dendl; } return 0; @@ -1153,22 +1153,21 @@ void FIFO::trim_part(int64_t part_num, uint64_t ofs, ceph_assert(r >= 0); } -int FIFO::open(lr::IoCtx ioctx, std::string oid, std::unique_ptr* fifo, +int FIFO::open(const DoutPrefixProvider *dpp, lr::IoCtx ioctx, std::string oid, std::unique_ptr* fifo, optional_yield y, std::optional objv, bool probe) { - auto cct = static_cast(ioctx.cct()); - ldout(cct, 20) + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering" << dendl; fifo::info info; std::uint32_t size; std::uint32_t over; - int r = get_meta(ioctx, std::move(oid), objv, &info, &size, &over, 0, y, + int r = get_meta(dpp, ioctx, std::move(oid), objv, &info, &size, &over, 0, y, probe); if (r < 0) { if (!(probe && (r == -ENOENT || r == -ENODATA))) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " get_meta failed: r=" << r << dendl; } return r; @@ -1180,12 +1179,12 @@ int FIFO::open(lr::IoCtx ioctx, std::string oid, std::unique_ptr* fifo, // If there are journal entries, process them, in case // someone crashed mid-transaction. if (!info.journal.empty()) { - ldout(cct, 20) + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " processing leftover journal" << dendl; - r = f->process_journal(0, y); + r = f->process_journal(dpp, 0, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " process_journal failed: r=" << r << dendl; return r; } @@ -1194,39 +1193,38 @@ int FIFO::open(lr::IoCtx ioctx, std::string oid, std::unique_ptr* fifo, return 0; } -int FIFO::create(lr::IoCtx ioctx, std::string oid, std::unique_ptr* fifo, +int FIFO::create(const DoutPrefixProvider *dpp, lr::IoCtx ioctx, std::string oid, std::unique_ptr* fifo, optional_yield y, std::optional objv, std::optional oid_prefix, bool exclusive, std::uint64_t max_part_size, std::uint64_t max_entry_size) { - auto cct = static_cast(ioctx.cct()); - ldout(cct, 20) + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering" << dendl; lr::ObjectWriteOperation op; create_meta(&op, oid, objv, oid_prefix, exclusive, max_part_size, max_entry_size); - auto r = rgw_rados_operate(ioctx, oid, &op, y); + auto r = rgw_rados_operate(dpp, ioctx, oid, &op, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " create_meta failed: r=" << r << dendl; return r; } - r = open(std::move(ioctx), std::move(oid), fifo, y, objv); + r = open(dpp, std::move(ioctx), std::move(oid), fifo, y, objv); return r; } -int FIFO::read_meta(std::uint64_t tid, optional_yield y) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ +int FIFO::read_meta(const DoutPrefixProvider *dpp, std::uint64_t tid, optional_yield y) { + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; fifo::info _info; std::uint32_t _phs; std::uint32_t _peo; - auto r = get_meta(ioctx, oid, nullopt, &_info, &_phs, &_peo, tid, y); + auto r = get_meta(dpp, ioctx, oid, nullopt, &_info, &_phs, &_peo, tid, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " get_meta failed: r=" << r << " tid=" << tid << dendl; return r; } @@ -1240,23 +1238,22 @@ int FIFO::read_meta(std::uint64_t tid, optional_yield y) { return 0; } 
-int FIFO::read_meta(optional_yield y) { +int FIFO::read_meta(const DoutPrefixProvider *dpp, optional_yield y) { std::unique_lock l(m); auto tid = ++next_tid; l.unlock(); - return read_meta(tid, y); + return read_meta(dpp, tid, y); } struct Reader : public Completion { FIFO* fifo; cb::list bl; std::uint64_t tid; - Reader(FIFO* fifo, lr::AioCompletion* super, std::uint64_t tid) - : Completion(super), fifo(fifo), tid(tid) {} + Reader(const DoutPrefixProvider *dpp, FIFO* fifo, lr::AioCompletion* super, std::uint64_t tid) + : Completion(dpp, super), fifo(fifo), tid(tid) {} - void handle(Ptr&& p, int r) { - auto cct = fifo->cct; - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) { + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; if (r >= 0) try { fifo::op::get_meta_reply reply; @@ -1269,12 +1266,12 @@ struct Reader : public Completion { fifo->part_entry_overhead = reply.part_entry_overhead; } } catch (const cb::error& err) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " failed to decode response err=" << err.what() << " tid=" << tid << dendl; r = from_error_code(err.code()); } else { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " read_meta failed r=" << r << " tid=" << tid << dendl; } @@ -1282,15 +1279,15 @@ struct Reader : public Completion { } }; -void FIFO::read_meta(std::uint64_t tid, lr::AioCompletion* c) +void FIFO::read_meta(const DoutPrefixProvider *dpp, std::uint64_t tid, lr::AioCompletion* c) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; lr::ObjectReadOperation op; fifo::op::get_meta gm; cb::list in; encode(gm, in); - auto reader = std::make_unique(this, c, tid); + auto reader = std::make_unique(dpp, this, c, tid); auto rp = reader.get(); auto r = ioctx.aio_exec(oid, Reader::call(std::move(reader)), fifo::op::CLASS, fifo::op::GET_META, in, &rp->bl); @@ -1305,25 +1302,25 @@ std::pair FIFO::get_part_layout_info() const { return {part_header_size, part_entry_overhead}; } -int FIFO::push(const cb::list& bl, optional_yield y) { - return push(std::vector{ bl }, y); +int FIFO::push(const DoutPrefixProvider *dpp, const cb::list& bl, optional_yield y) { + return push(dpp, std::vector{ bl }, y); } -void FIFO::push(const cb::list& bl, lr::AioCompletion* c) { - push(std::vector{ bl }, c); +void FIFO::push(const DoutPrefixProvider *dpp, const cb::list& bl, lr::AioCompletion* c) { + push(dpp, std::vector{ bl }, c); } -int FIFO::push(const std::vector& data_bufs, optional_yield y) +int FIFO::push(const DoutPrefixProvider *dpp, const std::vector& data_bufs, optional_yield y) { std::unique_lock l(m); auto tid = ++next_tid; auto max_entry_size = info.params.max_entry_size; auto need_new_head = info.need_new_head(); l.unlock(); - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; if (data_bufs.empty()) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " empty push, returning success tid=" << tid << dendl; return 0; } @@ -1331,7 +1328,7 @@ int FIFO::push(const std::vector& data_bufs, optional_yield y) // Validate sizes for (const auto& bl : 
data_bufs) { if (bl.length() > max_entry_size) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entry bigger than max_entry_size tid=" << tid << dendl; return -E2BIG; } @@ -1339,11 +1336,11 @@ int FIFO::push(const std::vector& data_bufs, optional_yield y) int r = 0; if (need_new_head) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " need new head tid=" << tid << dendl; - r = _prepare_new_head(tid, y); + r = _prepare_new_head(dpp, tid, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " _prepare_new_head failed: r=" << r << " tid=" << tid << dendl; return r; @@ -1358,7 +1355,7 @@ int FIFO::push(const std::vector& data_bufs, optional_yield y) bool canceled = true; while ((!remaining.empty() || !batch.empty()) && (retries <= MAX_RACE_RETRIES)) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " preparing push: remaining=" << remaining.size() << " batch=" << batch.size() << " retries=" << retries << " tid=" << tid << dendl; @@ -1377,21 +1374,21 @@ int FIFO::push(const std::vector& data_bufs, optional_yield y) batch.push_back(std::move(remaining.front())); remaining.pop_front(); } - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " prepared push: remaining=" << remaining.size() << " batch=" << batch.size() << " retries=" << retries << " batch_len=" << batch_len << " tid=" << tid << dendl; - auto r = push_entries(batch, tid, y); + auto r = push_entries(dpp, batch, tid, y); if (r == -ERANGE) { canceled = true; ++retries; - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " need new head tid=" << tid << dendl; - r = _prepare_new_head(tid, y); + r = _prepare_new_head(dpp, tid, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " prepare_new_head failed: r=" << r << " tid=" << tid << dendl; return r; @@ -1400,7 +1397,7 @@ int FIFO::push(const std::vector& data_bufs, optional_yield y) continue; } if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " push_entries failed: r=" << r << " tid=" << tid << dendl; return r; @@ -1419,7 +1416,7 @@ int FIFO::push(const std::vector& data_bufs, optional_yield y) } } if (canceled) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " canceled too many times, giving up: tid=" << tid << dendl; return -ECANCELED; } @@ -1485,21 +1482,21 @@ struct Pusher : public Completion { f->push_entries(batch, tid, call(std::move(p))); } - void new_head(Ptr&& p) { + void new_head(const DoutPrefixProvider *dpp, Ptr&& p) { new_heading = true; - f->_prepare_new_head(tid, call(std::move(p))); + f->_prepare_new_head(dpp, tid, call(std::move(p))); } - void handle(Ptr&& p, int r) { + void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) { if (!new_heading) { if (r == -ERANGE) { - ldout(f->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " need new head tid=" << tid << dendl; - new_head(std::move(p)); + new_head(dpp, 
std::move(p)); return; } if (r < 0) { - lderr(f->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " push_entries failed: r=" << r << " tid=" << tid << dendl; complete(std::move(p), r); @@ -1509,7 +1506,7 @@ struct Pusher : public Completion { prep_then_push(std::move(p), r); } else { if (r < 0) { - lderr(f->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " prepare_new_head failed: r=" << r << " tid=" << tid << dendl; complete(std::move(p), r); @@ -1543,13 +1540,13 @@ struct Pusher : public Completion { } } - Pusher(FIFO* f, std::deque&& remaining, + Pusher(const DoutPrefixProvider *dpp, FIFO* f, std::deque&& remaining, std::uint64_t tid, lr::AioCompletion* super) - : Completion(super), f(f), remaining(std::move(remaining)), + : Completion(dpp, super), f(f), remaining(std::move(remaining)), tid(tid) {} }; -void FIFO::push(const std::vector& data_bufs, +void FIFO::push(const DoutPrefixProvider *dpp, const std::vector& data_bufs, lr::AioCompletion* c) { std::unique_lock l(m); @@ -1557,14 +1554,14 @@ void FIFO::push(const std::vector& data_bufs, auto max_entry_size = info.params.max_entry_size; auto need_new_head = info.need_new_head(); l.unlock(); - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; - auto p = std::make_unique(this, std::deque(data_bufs.begin(), data_bufs.end()), + auto p = std::make_unique(dpp, this, std::deque(data_bufs.begin(), data_bufs.end()), tid, c); // Validate sizes for (const auto& bl : data_bufs) { if (bl.length() > max_entry_size) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entry bigger than max_entry_size tid=" << tid << dendl; Pusher::complete(std::move(p), -E2BIG); return; @@ -1572,22 +1569,22 @@ void FIFO::push(const std::vector& data_bufs, } if (data_bufs.empty() ) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " empty push, returning success tid=" << tid << dendl; Pusher::complete(std::move(p), 0); return; } if (need_new_head) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " need new head tid=" << tid << dendl; - p->new_head(std::move(p)); + p->new_head(dpp, std::move(p)); } else { p->prep_then_push(std::move(p), 0); } } -int FIFO::list(int max_entries, +int FIFO::list(const DoutPrefixProvider *dpp, int max_entries, std::optional markstr, std::vector* presult, bool* pmore, optional_yield y) @@ -1596,13 +1593,13 @@ int FIFO::list(int max_entries, auto tid = ++next_tid; std::int64_t part_num = info.tail_part_num; l.unlock(); - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; std::uint64_t ofs = 0; if (markstr) { auto marker = to_marker(*markstr); if (!marker) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " invalid marker string: " << markstr << " tid= "<< tid << dendl; return -EINVAL; @@ -1618,7 +1615,7 @@ int FIFO::list(int max_entries, std::vector entries; int r = 0; while (max_entries > 0) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << 
__LINE__ << " max_entries=" << max_entries << " tid=" << tid << dendl; bool part_more = false; bool part_full = false; @@ -1627,22 +1624,22 @@ int FIFO::list(int max_entries, auto part_oid = info.part_oid(part_num); l.unlock(); - r = list_part(ioctx, part_oid, {}, ofs, max_entries, &entries, + r = list_part(dpp, ioctx, part_oid, {}, ofs, max_entries, &entries, &part_more, &part_full, nullptr, tid, y); if (r == -ENOENT) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " missing part, rereading metadata" << " tid= "<< tid << dendl; - r = read_meta(tid, y); + r = read_meta(dpp, tid, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " read_meta failed: r=" << r << " tid= "<< tid << dendl; return r; } if (part_num < info.tail_part_num) { /* raced with trim? restart */ - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " raced with trim, restarting: tid=" << tid << dendl; max_entries += result.size(); result.clear(); @@ -1652,7 +1649,7 @@ int FIFO::list(int max_entries, ofs = 0; continue; } - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " assuming part was not written yet, so end of data: " << "tid=" << tid << dendl; more = false; @@ -1660,7 +1657,7 @@ int FIFO::list(int max_entries, break; } if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " list_entries failed: r=" << r << " tid= "<< tid << dendl; return r; @@ -1682,7 +1679,7 @@ int FIFO::list(int max_entries, } if (!part_full) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " head part is not full, so we can assume we're done: " << "tid=" << tid << dendl; break; @@ -1699,7 +1696,7 @@ int FIFO::list(int max_entries, return 0; } -int FIFO::trim(std::string_view markstr, bool exclusive, optional_yield y) +int FIFO::trim(const DoutPrefixProvider *dpp, std::string_view markstr, bool exclusive, optional_yield y) { bool overshoot = false; auto marker = to_marker(markstr); @@ -1714,7 +1711,7 @@ int FIFO::trim(std::string_view markstr, bool exclusive, optional_yield y) const auto max_part_size = info.params.max_part_size; if (part_num > hn) { l.unlock(); - auto r = read_meta(tid, y); + auto r = read_meta(dpp, tid, y); if (r < 0) { return r; } @@ -1731,27 +1728,27 @@ int FIFO::trim(std::string_view markstr, bool exclusive, optional_yield y) } auto pn = info.tail_part_num; l.unlock(); - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; int r = 0; while (pn < part_num) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " pn=" << pn << " tid=" << tid << dendl; std::unique_lock l(m); l.unlock(); - r = trim_part(pn, max_part_size, std::nullopt, false, tid, y); + r = trim_part(dpp, pn, max_part_size, std::nullopt, false, tid, y); if (r < 0 && r == -ENOENT) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " trim_part failed: r=" << r << " tid= "<< tid << dendl; return r; } ++pn; } - r = trim_part(part_num, ofs, std::nullopt, 
exclusive, tid, y); + r = trim_part(dpp, part_num, ofs, std::nullopt, exclusive, tid, y); if (r < 0 && r != -ENOENT) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " trim_part failed: r=" << r << " tid= "<< tid << dendl; return r; @@ -1766,16 +1763,16 @@ int FIFO::trim(std::string_view markstr, bool exclusive, optional_yield y) while ((tail_part_num < part_num) && canceled && (retries <= MAX_RACE_RETRIES)) { - r = _update_meta(fifo::update{}.tail_part_num(part_num), objv, &canceled, + r = _update_meta(dpp, fifo::update{}.tail_part_num(part_num), objv, &canceled, tid, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " _update_meta failed: r=" << r << " tid= "<< tid << dendl; return r; } if (canceled) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " canceled: retries=" << retries << " tid=" << tid << dendl; l.lock(); @@ -1786,7 +1783,7 @@ int FIFO::trim(std::string_view markstr, bool exclusive, optional_yield y) } } if (canceled) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " canceled too many times, giving up: tid=" << tid << dendl; return -EIO; } @@ -1806,20 +1803,19 @@ struct Trimmer : public Completion { bool overshoot = false; int retries = 0; - Trimmer(FIFO* fifo, std::int64_t part_num, std::uint64_t ofs, std::int64_t pn, + Trimmer(const DoutPrefixProvider *dpp, FIFO* fifo, std::int64_t part_num, std::uint64_t ofs, std::int64_t pn, bool exclusive, lr::AioCompletion* super, std::uint64_t tid) - : Completion(super), fifo(fifo), part_num(part_num), ofs(ofs), pn(pn), + : Completion(dpp, super), fifo(fifo), part_num(part_num), ofs(ofs), pn(pn), exclusive(exclusive), tid(tid) {} - void handle(Ptr&& p, int r) { - auto cct = fifo->cct; - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) { + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; if (reread) { reread = false; if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " read_meta failed: r=" << r << " tid=" << tid << dendl; complete(std::move(p), r); @@ -1841,7 +1837,7 @@ struct Trimmer : public Completion { } pn = tail_part_num; if (pn < part_num) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " pn=" << pn << " tid=" << tid << dendl; fifo->trim_part(pn++, max_part_size, std::nullopt, false, tid, call(std::move(p))); @@ -1859,7 +1855,7 @@ struct Trimmer : public Completion { } if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << (update ? 
" update_meta " : " trim ") << "failed: r=" << r << " tid=" << tid << dendl; complete(std::move(p), r); @@ -1867,11 +1863,11 @@ struct Trimmer : public Completion { } if (!update) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " handling preceding trim callback: tid=" << tid << dendl; retries = 0; if (pn < part_num) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " pn=" << pn << " tid=" << tid << dendl; std::unique_lock l(fifo->m); const auto max_part_size = fifo->info.params.max_part_size; @@ -1891,7 +1887,7 @@ struct Trimmer : public Completion { return; } - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " handling update-needed callback: tid=" << tid << dendl; std::unique_lock l(fifo->m); auto tail_part_num = fifo->info.tail_part_num; @@ -1900,13 +1896,13 @@ struct Trimmer : public Completion { if ((tail_part_num < part_num) && canceled) { if (retries > MAX_RACE_RETRIES) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " canceled too many times, giving up: tid=" << tid << dendl; complete(std::move(p), -EIO); return; } ++retries; - fifo->_update_meta(fifo::update{} + fifo->_update_meta(dpp, fifo::update{} .tail_part_num(part_num), objv, &canceled, tid, call(std::move(p))); } else { @@ -1915,7 +1911,7 @@ struct Trimmer : public Completion { } }; -void FIFO::trim(std::string_view markstr, bool exclusive, +void FIFO::trim(const DoutPrefixProvider *dpp, std::string_view markstr, bool exclusive, lr::AioCompletion* c) { auto marker = to_marker(markstr); auto realmark = marker.value_or(::rgw::cls::fifo::marker{}); @@ -1926,9 +1922,9 @@ void FIFO::trim(std::string_view markstr, bool exclusive, const auto part_oid = info.part_oid(pn); auto tid = ++next_tid; l.unlock(); - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; - auto trimmer = std::make_unique(this, realmark.num, realmark.ofs, + auto trimmer = std::make_unique(dpp, this, realmark.num, realmark.ofs, pn, exclusive, c, tid); if (!marker) { Trimmer::complete(std::move(trimmer), -EINVAL); @@ -1938,11 +1934,11 @@ void FIFO::trim(std::string_view markstr, bool exclusive, auto ofs = marker->ofs; if (marker->num > hn) { trimmer->reread = true; - read_meta(tid, Trimmer::call(std::move(trimmer))); + read_meta(dpp, tid, Trimmer::call(std::move(trimmer))); return; } if (pn < marker->num) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " pn=" << pn << " tid=" << tid << dendl; ofs = max_part_size; } else { @@ -1952,7 +1948,7 @@ void FIFO::trim(std::string_view markstr, bool exclusive, tid, Trimmer::call(std::move(trimmer))); } -int FIFO::get_part_info(int64_t part_num, +int FIFO::get_part_info(const DoutPrefixProvider *dpp, int64_t part_num, fifo::part_header* header, optional_yield y) { @@ -1960,9 +1956,9 @@ int FIFO::get_part_info(int64_t part_num, const auto part_oid = info.part_oid(part_num); auto tid = ++next_tid; l.unlock(); - auto r = rgw::cls::fifo::get_part_info(ioctx, part_oid, header, tid, y); + auto r = rgw::cls::fifo::get_part_info(dpp, ioctx, part_oid, header, tid, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + 
ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " get_part_info failed: r=" << r << " tid=" << tid << dendl; } @@ -1989,13 +1985,13 @@ struct InfoGetter : Completion { std::uint64_t tid; bool headerread = false; - InfoGetter(FIFO* fifo, fu2::function f, + InfoGetter(const DoutPrefixProvider *dpp, FIFO* fifo, fu2::function f, std::uint64_t tid, lr::AioCompletion* super) - : Completion(super), fifo(fifo), f(std::move(f)), tid(tid) {} - void handle(Ptr&& p, int r) { + : Completion(dpp, super), fifo(fifo), f(std::move(f)), tid(tid) {} + void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) { if (!headerread) { if (r < 0) { - lderr(fifo->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " read_meta failed: r=" << r << " tid=" << tid << dendl; if (f) @@ -2007,7 +2003,7 @@ struct InfoGetter : Completion { auto info = fifo->meta(); auto hpn = info.head_part_num; if (hpn < 0) { - ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " no head, returning empty partinfo r=" << r << " tid=" << tid << dendl; if (f) @@ -2027,7 +2023,7 @@ struct InfoGetter : Completion { } if (r < 0) { - lderr(fifo->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " get_part_info failed: r=" << r << " tid=" << tid << dendl; } @@ -2039,15 +2035,15 @@ struct InfoGetter : Completion { } }; -void FIFO::get_head_info(fu2::unique_function f, lr::AioCompletion* c) { std::unique_lock l(m); auto tid = ++next_tid; l.unlock(); - auto ig = std::make_unique(this, std::move(f), tid, c); - read_meta(tid, InfoGetter::call(std::move(ig))); + auto ig = std::make_unique(dpp, this, std::move(f), tid, c); + read_meta(dpp, tid, InfoGetter::call(std::move(ig))); } struct JournalProcessor : public Completion { @@ -2102,12 +2098,12 @@ private: return; } - void finish_je(Ptr&& p, int r, + void finish_je(const DoutPrefixProvider *dpp, Ptr&& p, int r, const fifo::journal_entry& entry) { - ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; - ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " finishing entry: entry=" << entry << " tid=" << tid << dendl; @@ -2115,7 +2111,7 @@ private: r = 0; if (r < 0) { - lderr(fifo->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " processing entry failed: entry=" << entry << " r=" << r << " tid=" << tid << dendl; complete(std::move(p), r); @@ -2142,26 +2138,26 @@ private: processed.push_back(entry); } ++iter; - process(std::move(p)); + process(dpp, std::move(p)); } - void postprocess(Ptr&& p) { - ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + void postprocess(const DoutPrefixProvider *dpp, Ptr&& p) { + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; if (processed.empty()) { - ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " nothing to update any more: race_retries=" << race_retries << " tid=" << tid << dendl; complete(std::move(p), 0); return; } - pp_run(std::move(p), 0, false); + pp_run(dpp, std::move(p), 0, false); } public: - JournalProcessor(FIFO* fifo, std::uint64_t tid, lr::AioCompletion* 
super) - : Completion(super), fifo(fifo), tid(tid) { + JournalProcessor(const DoutPrefixProvider *dpp, FIFO* fifo, std::uint64_t tid, lr::AioCompletion* super) + : Completion(dpp, super), fifo(fifo), tid(tid) { std::unique_lock l(fifo->m); journal = fifo->info.journal; iter = journal.begin(); @@ -2170,26 +2166,26 @@ public: new_max = fifo->info.max_push_part_num; } - void pp_run(Ptr&& p, int r, bool canceled) { - ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + void pp_run(const DoutPrefixProvider *dpp, Ptr&& p, int r, bool canceled) { + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; std::optional tail_part_num; std::optional head_part_num; std::optional max_part_num; if (r < 0) { - lderr(fifo->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " failed, r=: " << r << " tid=" << tid << dendl; complete(std::move(p), r); } - ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " postprocessing: race_retries=" << race_retries << " tid=" << tid << dendl; if (!first_pp && r == 0 && !canceled) { - ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " nothing to update any more: race_retries=" << race_retries << " tid=" << tid << dendl; complete(std::move(p), 0); @@ -2200,13 +2196,13 @@ public: if (canceled) { if (race_retries >= MAX_RACE_RETRIES) { - lderr(fifo->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " canceled too many times, giving up: tid=" << tid << dendl; complete(std::move(p), -ECANCELED); return; } - ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " update canceled, retrying: race_retries=" << race_retries << " tid=" << tid << dendl; @@ -2245,14 +2241,14 @@ public: !tail_part_num && !max_part_num) { /* nothing to update anymore */ - ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " nothing to update any more: race_retries=" << race_retries << " tid=" << tid << dendl; complete(std::move(p), 0); return; } state = pp_callback; - fifo->_update_meta(fifo::update{} + fifo->_update_meta(dpp, fifo::update{} .tail_part_num(tail_part_num) .head_part_num(head_part_num) .max_push_part_num(max_part_num) @@ -2266,11 +2262,11 @@ public: JournalProcessor(JournalProcessor&&) = delete; JournalProcessor& operator =(JournalProcessor&&) = delete; - void process(Ptr&& p) { - ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + void process(const DoutPrefixProvider *dpp, Ptr&& p) { + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; while (iter != journal.end()) { - ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " processing entry: entry=" << *iter << " tid=" << tid << dendl; const auto entry = iter->second; @@ -2296,21 +2292,21 @@ public: return; } } - postprocess(std::move(p)); + postprocess(dpp, std::move(p)); return; } - void handle(Ptr&& p, int r) { - ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) { + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" 
<< tid << dendl; switch (state) { case entry_callback: - finish_je(std::move(p), r, iter->second); + finish_je(dpp, std::move(p), r, iter->second); return; case pp_callback: auto c = canceled; canceled = false; - pp_run(std::move(p), r, c); + pp_run(dpp, std::move(p), r, c); return; } @@ -2319,9 +2315,9 @@ public: }; -void FIFO::process_journal(std::uint64_t tid, lr::AioCompletion* c) { - auto p = std::make_unique(this, tid, c); - p->process(std::move(p)); +void FIFO::process_journal(const DoutPrefixProvider *dpp, std::uint64_t tid, lr::AioCompletion* c) { + auto p = std::make_unique(dpp, this, tid, c); + p->process(dpp, std::move(p)); } struct Lister : Completion { @@ -2350,10 +2346,10 @@ struct Lister : Completion { } public: - Lister(FIFO* f, std::int64_t part_num, std::uint64_t ofs, int max_entries, + Lister(const DoutPrefixProvider *dpp, FIFO* f, std::int64_t part_num, std::uint64_t ofs, int max_entries, std::vector* entries_out, bool* more_out, std::uint64_t tid, lr::AioCompletion* super) - : Completion(super), f(f), part_num(part_num), ofs(ofs), max_entries(max_entries), + : Completion(dpp, super), f(f), part_num(part_num), ofs(ofs), max_entries(max_entries), entries_out(entries_out), more_out(more_out), tid(tid) { result.reserve(max_entries); } @@ -2363,11 +2359,11 @@ public: Lister(Lister&&) = delete; Lister& operator =(Lister&&) = delete; - void handle(Ptr&& p, int r) { + void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) { if (read) handle_read(std::move(p), r); else - handle_list(std::move(p), r); + handle_list(dpp, std::move(p), r); } void list(Ptr&& p) { @@ -2415,7 +2411,7 @@ public: return; } - void handle_list(Ptr&& p, int r) { + void handle_list(const DoutPrefixProvider *dpp, Ptr&& p, int r) { if (r >= 0) r = r_out; r_out = 0; std::unique_lock l(f->m); @@ -2423,7 +2419,7 @@ public: l.unlock(); if (r == -ENOENT) { read = true; - f->read_meta(tid, call(std::move(p))); + f->read_meta(dpp, tid, call(std::move(p))); return; } if (r < 0) { @@ -2456,7 +2452,7 @@ public: } }; -void FIFO::list(int max_entries, +void FIFO::list(const DoutPrefixProvider *dpp, int max_entries, std::optional markstr, std::vector* out, bool* more, @@ -2476,7 +2472,7 @@ void FIFO::list(int max_entries, } } - auto ls = std::make_unique(this, part_num, ofs, max_entries, out, + auto ls = std::make_unique(dpp, this, part_num, ofs, max_entries, out, more, tid, c); if (markstr && !marker) { auto l = ls.get(); diff --git a/src/rgw/cls_fifo_legacy.h b/src/rgw/cls_fifo_legacy.h index 307abbb198918..93657ecd867b1 100644 --- a/src/rgw/cls_fifo_legacy.h +++ b/src/rgw/cls_fifo_legacy.h @@ -56,13 +56,12 @@ void create_meta(lr::ObjectWriteOperation* op, std::string_view id, bool exclusive = false, std::uint64_t max_part_size = default_max_part_size, std::uint64_t max_entry_size = default_max_entry_size); -int get_meta(lr::IoCtx& ioctx, const std::string& oid, +int get_meta(const DoutPrefixProvider *dpp, lr::IoCtx& ioctx, const std::string& oid, std::optional objv, fifo::info* info, std::uint32_t* part_header_size, std::uint32_t* part_entry_overhead, std::uint64_t tid, optional_yield y, bool probe = false); - struct marker { std::int64_t num = 0; std::uint64_t ofs = 0; @@ -134,27 +133,27 @@ class FIFO { const fifo::objv& objv, const fifo::update& update, std::uint64_t tid); - int _update_meta(const fifo::update& update, + int _update_meta(const DoutPrefixProvider *dpp, const fifo::update& update, fifo::objv version, bool* pcanceled, std::uint64_t tid, optional_yield y); - void _update_meta(const fifo::update& 
update, + void _update_meta(const DoutPrefixProvider *dpp, const fifo::update& update, fifo::objv version, bool* pcanceled, std::uint64_t tid, lr::AioCompletion* c); - int create_part(int64_t part_num, std::string_view tag, std::uint64_t tid, + int create_part(const DoutPrefixProvider *dpp, int64_t part_num, std::string_view tag, std::uint64_t tid, optional_yield y); - int remove_part(int64_t part_num, std::string_view tag, std::uint64_t tid, + int remove_part(const DoutPrefixProvider *dpp, int64_t part_num, std::string_view tag, std::uint64_t tid, optional_yield y); - int process_journal(std::uint64_t tid, optional_yield y); - void process_journal(std::uint64_t tid, lr::AioCompletion* c); - int _prepare_new_part(bool is_head, std::uint64_t tid, optional_yield y); - void _prepare_new_part(bool is_head, std::uint64_t tid, lr::AioCompletion* c); - int _prepare_new_head(std::uint64_t tid, optional_yield y); - void _prepare_new_head(std::uint64_t tid, lr::AioCompletion* c); - int push_entries(const std::deque& data_bufs, + int process_journal(const DoutPrefixProvider *dpp, std::uint64_t tid, optional_yield y); + void process_journal(const DoutPrefixProvider *dpp, std::uint64_t tid, lr::AioCompletion* c); + int _prepare_new_part(const DoutPrefixProvider *dpp, bool is_head, std::uint64_t tid, optional_yield y); + void _prepare_new_part(const DoutPrefixProvider *dpp, bool is_head, std::uint64_t tid, lr::AioCompletion* c); + int _prepare_new_head(const DoutPrefixProvider *dpp, std::uint64_t tid, optional_yield y); + void _prepare_new_head(const DoutPrefixProvider *dpp, std::uint64_t tid, lr::AioCompletion* c); + int push_entries(const DoutPrefixProvider *dpp, const std::deque& data_bufs, std::uint64_t tid, optional_yield y); void push_entries(const std::deque& data_bufs, std::uint64_t tid, lr::AioCompletion* c); - int trim_part(int64_t part_num, uint64_t ofs, + int trim_part(const DoutPrefixProvider *dpp, int64_t part_num, uint64_t ofs, std::optional tag, bool exclusive, std::uint64_t tid, optional_yield y); void trim_part(int64_t part_num, uint64_t ofs, @@ -162,9 +161,9 @@ class FIFO { std::uint64_t tid, lr::AioCompletion* c); /// Force refresh of metadata, yielding/blocking style - int read_meta(std::uint64_t tid, optional_yield y); + int read_meta(const DoutPrefixProvider *dpp, std::uint64_t tid, optional_yield y); /// Force refresh of metadata, with a librados Completion - void read_meta(std::uint64_t tid, lr::AioCompletion* c); + void read_meta(const DoutPrefixProvider *dpp, std::uint64_t tid, lr::AioCompletion* c); public: @@ -174,7 +173,7 @@ public: FIFO& operator =(FIFO&&) = delete; /// Open an existing FIFO. - static int open(lr::IoCtx ioctx, //< IO Context + static int open(const DoutPrefixProvider *dpp, lr::IoCtx ioctx, //< IO Context std::string oid, //< OID for metadata object std::unique_ptr* fifo, //< OUT: Pointer to FIFO object optional_yield y, //< Optional yield context @@ -184,7 +183,7 @@ public: /// can't find it. bool probe = false); /// Create a new or open an existing FIFO. 
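For orientation, the cls_fifo_legacy conversion above is mechanical: each entry point gains a leading const DoutPrefixProvider *dpp argument, ldout(cct, N) becomes ldpp_dout(dpp, N), and lderr(cct) becomes ldpp_dout(dpp, -1). A minimal sketch of that shape follows; log_step_old/log_step_new are illustrative names only, not code from the tree.

#include <cstdint>
#include "common/dout.h"          // DoutPrefixProvider, ldout, lderr, ldpp_dout, dendl
#include "common/ceph_context.h"  // CephContext

#define dout_subsys ceph_subsys_rgw

// Old shape: logging keyed on the CephContext alone.
static void log_step_old(CephContext* cct, std::uint64_t tid, int r)
{
  ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
                 << " entering: tid=" << tid << dendl;
  if (r < 0) {
    lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
               << " failed: r=" << r << " tid=" << tid << dendl;
  }
}

// New shape: the caller's DoutPrefixProvider carries the op/request prefix;
// level -1 takes over the error channel previously served by lderr().
static void log_step_new(const DoutPrefixProvider* dpp, std::uint64_t tid, int r)
{
  ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
                     << " entering: tid=" << tid << dendl;
  if (r < 0) {
    ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
                       << " failed: r=" << r << " tid=" << tid << dendl;
  }
}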
- static int create(lr::IoCtx ioctx, //< IO Context + static int create(const DoutPrefixProvider *dpp, lr::IoCtx ioctx, //< IO Context std::string oid, //< OID for metadata object std::unique_ptr* fifo, //< OUT: Pointer to FIFO object optional_yield y, //< Optional yield context @@ -201,29 +200,31 @@ public: std::uint64_t max_entry_size = default_max_entry_size); /// Force refresh of metadata, yielding/blocking style - int read_meta(optional_yield y); + int read_meta(const DoutPrefixProvider *dpp, optional_yield y); /// Get currently known metadata const fifo::info& meta() const; /// Get partition header and entry overhead size std::pair get_part_layout_info() const; /// Push an entry to the FIFO - int push(const cb::list& bl, //< Entry to push + int push(const DoutPrefixProvider *dpp, + const cb::list& bl, //< Entry to push optional_yield y //< Optional yield ); /// Push an entry to the FIFO - void push(const cb::list& bl, //< Entry to push + void push(const DoutPrefixProvider *dpp, const cb::list& bl, //< Entry to push lr::AioCompletion* c //< Async Completion ); /// Push entries to the FIFO - int push(const std::vector& data_bufs, //< Entries to push + int push(const DoutPrefixProvider *dpp, const std::vector& data_bufs, //< Entries to push optional_yield y //< Optional yield ); /// Push entries to the FIFO - void push(const std::vector& data_bufs, //< Entries to push + void push(const DoutPrefixProvider *dpp, const std::vector& data_bufs, //< Entries to push lr::AioCompletion* c //< Async Completion ); /// List entries - int list(int max_entries, //< Maximum entries to list + int list(const DoutPrefixProvider *dpp, + int max_entries, //< Maximum entries to list /// Point after which to begin listing. Start at tail if null std::optional markstr, std::vector* out, //< OUT: entries @@ -231,7 +232,8 @@ public: bool* more, optional_yield y //< Optional yield ); - void list(int max_entries, //< Maximum entries to list + void list(const DoutPrefixProvider *dpp, + int max_entries, //< Maximum entries to list /// Point after which to begin listing. Start at tail if null std::optional markstr, std::vector* out, //< OUT: entries @@ -240,19 +242,21 @@ public: lr::AioCompletion* c //< Async Completion ); /// Trim entries, coroutine/block style - int trim(std::string_view markstr, //< Position to which to trim, inclusive + int trim(const DoutPrefixProvider *dpp, + std::string_view markstr, //< Position to which to trim, inclusive bool exclusive, //< If true, do not trim the target entry //< itself, just all those before it. optional_yield y //< Optional yield ); /// Trim entries, librados AioCompletion style - void trim(std::string_view markstr, //< Position to which to trim, inclusive + void trim(const DoutPrefixProvider *dpp, + std::string_view markstr, //< Position to which to trim, inclusive bool exclusive, //< If true, do not trim the target entry //< itself, just all those before it. lr::AioCompletion* c //< librados AIO Completion ); /// Get part info - int get_part_info(int64_t part_num, /// Part number + int get_part_info(const DoutPrefixProvider *dpp, int64_t part_num, /// Part number fifo::part_header* header, //< OUT: Information optional_yield y //< Optional yield ); @@ -264,7 +268,7 @@ public: /// A convenience method to fetch the part information for the FIFO /// head, using librados::AioCompletion, since /// libradio::AioCompletions compose lousily. 
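With the public FIFO header reshaped as above, call sites that lack a request-scoped provider follow the convention the patch uses elsewhere (see the librgw.cc hunk further down): construct a local DoutPrefix and pass it as the new leading argument. The sketch below is a hypothetical caller, not part of the patch; the object name, prefix string, and payload are placeholders, and create()'s trailing parameters are assumed to keep their defaults.

#include <memory>
#include "common/dout.h"      // DoutPrefix, ldpp_dout
#include "cls_fifo_legacy.h"  // rgw::cls::fifo::FIFO

// Hypothetical call site illustrating the new leading dpp argument.
int example_fifo_push(librados::IoCtx& ioctx, CephContext* cct, optional_yield y)
{
  const DoutPrefix dp(cct, ceph_subsys_rgw, "fifo example: ");

  std::unique_ptr<rgw::cls::fifo::FIFO> fifo;
  int r = rgw::cls::fifo::FIFO::create(&dp, ioctx, "example.fifo", &fifo, y);
  if (r < 0) {
    ldpp_dout(&dp, -1) << "FIFO::create failed: r=" << r << dendl;
    return r;
  }

  ceph::buffer::list bl;
  bl.append("example payload");
  return fifo->push(&dp, bl, y);  // dpp now threads through to cls calls and logging
}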
- void get_head_info(fu2::unique_function< //< Function to receive info + void get_head_info(const DoutPrefixProvider *dpp, fu2::unique_function< //< Function to receive info void(int r, fifo::part_header&&)>, lr::AioCompletion* c //< AIO Completion ); @@ -273,6 +277,7 @@ public: template struct Completion { private: + const DoutPrefixProvider *_dpp; lr::AioCompletion* _cur = nullptr; lr::AioCompletion* _super; public: @@ -286,7 +291,7 @@ public: return _super; } - Completion(lr::AioCompletion* super) : _super(super) { + Completion(const DoutPrefixProvider *dpp, lr::AioCompletion* super) : _dpp(dpp), _super(super) { super->pc->get(); } @@ -326,7 +331,7 @@ public: auto r = t->_cur->get_return_value(); t->_cur->release(); t->_cur = nullptr; - t->handle(Ptr(t), r); + t->handle(t->_dpp, Ptr(t), r); } }; diff --git a/src/rgw/librgw.cc b/src/rgw/librgw.cc index d9fb3f67a3f59..e626b1ba4f584 100644 --- a/src/rgw/librgw.cc +++ b/src/rgw/librgw.cc @@ -61,7 +61,6 @@ #include #include - #define dout_subsys ceph_subsys_rgw bool global_stop = false; @@ -123,7 +122,8 @@ namespace rgw { RGWLibFS* fs = iter->first->ref(); uniq.unlock(); fs->gc(); - fs->update_user(); + const DoutPrefix dp(cct, dout_subsys, "librgw: "); + fs->update_user(&dp); fs->rele(); uniq.lock(); if (cur_gen != gen) @@ -134,7 +134,7 @@ namespace rgw { } } - void RGWLibProcess::handle_request(RGWRequest* r) + void RGWLibProcess::handle_request(const DoutPrefixProvider *dpp, RGWRequest* r) { /* * invariant: valid requests are derived from RGWLibRequst @@ -205,7 +205,7 @@ namespace rgw { */ RGWOp *op = (req->op) ? req->op : dynamic_cast(req); if (! op) { - dout(1) << "failed to derive cognate RGWOp (invalid op?)" << dendl; + ldpp_dout(op, 1) << "failed to derive cognate RGWOp (invalid op?)" << dendl; return -EINVAL; } @@ -241,7 +241,7 @@ namespace rgw { /* XXX and -then- stash req_state pointers everywhere they are needed */ ret = req->init(rgw_env, &rados_ctx, io, s); if (ret < 0) { - dout(10) << "failed to initialize request" << dendl; + ldpp_dout(op, 10) << "failed to initialize request" << dendl; abort_req(s, op, ret); goto done; } @@ -301,9 +301,9 @@ namespace rgw { ret = op->verify_permission(null_yield); if (ret < 0) { if (s->system_request) { - dout(2) << "overriding permissions due to system operation" << dendl; + ldpp_dout(op, 2) << "overriding permissions due to system operation" << dendl; } else if (s->auth.identity->is_admin_of(s->user->get_id())) { - dout(2) << "overriding permissions due to admin operation" << dendl; + ldpp_dout(op, 2) << "overriding permissions due to admin operation" << dendl; } else { abort_req(s, op, ret); goto done; @@ -343,7 +343,7 @@ namespace rgw { ldpp_dout(s, 2) << "http status=" << http_ret << dendl; - dout(1) << "====== " << __func__ + ldpp_dout(op, 1) << "====== " << __func__ << " req done req=" << hex << req << dec << " http_status=" << http_ret << " ======" << dendl; @@ -366,7 +366,7 @@ namespace rgw { */ RGWOp *op = (req->op) ? req->op : dynamic_cast(req); if (! 
op) { - dout(1) << "failed to derive cognate RGWOp (invalid op?)" << dendl; + ldpp_dout(op, 1) << "failed to derive cognate RGWOp (invalid op?)" << dendl; return -EINVAL; } @@ -379,7 +379,7 @@ namespace rgw { int ret = req->init(rgw_env, &rados_ctx, &io_ctx, s); if (ret < 0) { - dout(10) << "failed to initialize request" << dendl; + ldpp_dout(op, 10) << "failed to initialize request" << dendl; abort_req(s, op, ret); goto done; } @@ -433,9 +433,9 @@ namespace rgw { ret = op->verify_permission(null_yield); if (ret < 0) { if (s->system_request) { - dout(2) << "overriding permissions due to system operation" << dendl; + ldpp_dout(op, 2) << "overriding permissions due to system operation" << dendl; } else if (s->auth.identity->is_admin_of(s->user->get_id())) { - dout(2) << "overriding permissions due to admin operation" << dendl; + ldpp_dout(op, 2) << "overriding permissions due to admin operation" << dendl; } else { abort_req(s, op, ret); goto done; @@ -460,14 +460,14 @@ namespace rgw { { RGWOp *op = (req->op) ? req->op : dynamic_cast(req); if (! op) { - dout(1) << "failed to derive cognate RGWOp (invalid op?)" << dendl; + ldpp_dout(op, 1) << "failed to derive cognate RGWOp (invalid op?)" << dendl; return -EINVAL; } int ret = req->exec_finish(); int op_ret = op->get_ret(); - dout(1) << "====== " << __func__ + ldpp_dout(op, 1) << "====== " << __func__ << " finishing continued request req=" << hex << req << dec << " op status=" << op_ret << " ======" << dendl; @@ -537,7 +537,7 @@ namespace rgw { g_conf()->rgw_run_sync_thread && g_conf()->rgw_nfs_run_sync_thread; - store = RGWStoreManager::get_storage(g_ceph_context, + store = RGWStoreManager::get_storage(this, g_ceph_context, run_gc, run_lc, run_quota, @@ -656,7 +656,8 @@ namespace rgw { int RGWLibIO::set_uid(rgw::sal::RGWRadosStore *store, const rgw_user& uid) { - int ret = store->ctl()->user->get_info_by_uid(uid, &user_info, null_yield); + const DoutPrefix dp(store->ctx(), dout_subsys, "librgw: "); + int ret = store->ctl()->user->get_info_by_uid(&dp, uid, &user_info, null_yield); if (ret < 0) { derr << "ERROR: failed reading user info: uid=" << uid << " ret=" << ret << dendl; @@ -667,9 +668,9 @@ namespace rgw { int RGWLibRequest::read_permissions(RGWOp* op, optional_yield y) { /* bucket and object ops */ int ret = - rgw_build_bucket_policies(rgwlib.get_store(), get_state(), y); + rgw_build_bucket_policies(op, rgwlib.get_store(), get_state(), y); if (ret < 0) { - ldout(get_state()->cct, 10) << "read_permissions (bucket policy) on " + ldpp_dout(op, 10) << "read_permissions (bucket policy) on " << get_state()->bucket << ":" << get_state()->object << " only_bucket=" << only_bucket() @@ -678,10 +679,10 @@ namespace rgw { ret = -EACCES; } else if (! 
only_bucket()) { /* object ops */ - ret = rgw_build_object_policies(rgwlib.get_store(), get_state(), + ret = rgw_build_object_policies(op, rgwlib.get_store(), get_state(), op->prefetch_data(), y); if (ret < 0) { - ldout(get_state()->cct, 10) << "read_permissions (object policy) on" + ldpp_dout(op, 10) << "read_permissions (object policy) on" << get_state()->bucket << ":" << get_state()->object << " ret=" << ret << dendl; diff --git a/src/rgw/rgw_acl.cc b/src/rgw/rgw_acl.cc index caee6d329d979..ec5de88cecb9d 100644 --- a/src/rgw/rgw_acl.cc +++ b/src/rgw/rgw_acl.cc @@ -134,18 +134,19 @@ uint32_t RGWAccessControlList::get_perm(const DoutPrefixProvider* dpp, return perm_mask & auth_identity.get_perms_from_aclspec(dpp, acl_user_map); } -uint32_t RGWAccessControlList::get_group_perm(ACLGroupTypeEnum group, +uint32_t RGWAccessControlList::get_group_perm(const DoutPrefixProvider *dpp, + ACLGroupTypeEnum group, const uint32_t perm_mask) const { - ldout(cct, 5) << "Searching permissions for group=" << (int)group + ldpp_dout(dpp, 5) << "Searching permissions for group=" << (int)group << " mask=" << perm_mask << dendl; const auto iter = acl_group_map.find((uint32_t)group); if (iter != acl_group_map.end()) { - ldout(cct, 5) << "Found permission: " << iter->second << dendl; + ldpp_dout(dpp, 5) << "Found permission: " << iter->second << dendl; return iter->second & perm_mask; } - ldout(cct, 5) << "Permissions for group not found" << dendl; + ldpp_dout(dpp, 5) << "Permissions for group not found" << dendl; return 0; } @@ -192,11 +193,11 @@ uint32_t RGWAccessControlPolicy::get_perm(const DoutPrefixProvider* dpp, /* should we continue looking up? */ if (!ignore_public_acls && ((perm & perm_mask) != perm_mask)) { - perm |= acl.get_group_perm(ACL_GROUP_ALL_USERS, perm_mask); + perm |= acl.get_group_perm(dpp, ACL_GROUP_ALL_USERS, perm_mask); if (false == auth_identity.is_owner_of(rgw_user(RGW_USER_ANON_ID))) { /* this is not the anonymous user */ - perm |= acl.get_group_perm(ACL_GROUP_AUTHENTICATED_USERS, perm_mask); + perm |= acl.get_group_perm(dpp, ACL_GROUP_AUTHENTICATED_USERS, perm_mask); } } @@ -246,14 +247,14 @@ bool RGWAccessControlPolicy::verify_permission(const DoutPrefixProvider* dpp, } -bool RGWAccessControlPolicy::is_public() const +bool RGWAccessControlPolicy::is_public(const DoutPrefixProvider *dpp) const { static constexpr auto public_groups = {ACL_GROUP_ALL_USERS, ACL_GROUP_AUTHENTICATED_USERS}; return std::any_of(public_groups.begin(), public_groups.end(), - [&](ACLGroupTypeEnum g) { - auto p = acl.get_group_perm(g, RGW_PERM_FULL_CONTROL); + [&, dpp](ACLGroupTypeEnum g) { + auto p = acl.get_group_perm(dpp, g, RGW_PERM_FULL_CONTROL); return (p != RGW_PERM_NONE) && (p != RGW_PERM_INVALID); } ); diff --git a/src/rgw/rgw_acl.h b/src/rgw/rgw_acl.h index 06c79e744ce22..dd6db08fbe0ca 100644 --- a/src/rgw/rgw_acl.h +++ b/src/rgw/rgw_acl.h @@ -336,7 +336,7 @@ public: uint32_t get_perm(const DoutPrefixProvider* dpp, const rgw::auth::Identity& auth_identity, uint32_t perm_mask); - uint32_t get_group_perm(ACLGroupTypeEnum group, uint32_t perm_mask) const; + uint32_t get_group_perm(const DoutPrefixProvider *dpp, ACLGroupTypeEnum group, uint32_t perm_mask) const; uint32_t get_referer_perm(uint32_t current_perm, std::string http_referer, uint32_t perm_mask); @@ -502,7 +502,7 @@ public: } virtual bool compare_group_name(string& id, ACLGroupTypeEnum group) { return false; } - bool is_public() const; + bool is_public(const DoutPrefixProvider *dpp) const; friend bool operator==(const RGWAccessControlPolicy& lhs, 
const RGWAccessControlPolicy& rhs); friend bool operator!=(const RGWAccessControlPolicy& lhs, const RGWAccessControlPolicy& rhs); diff --git a/src/rgw/rgw_acl_s3.cc b/src/rgw/rgw_acl_s3.cc index fb1383c4f4e9a..6329bb7fad48d 100644 --- a/src/rgw/rgw_acl_s3.cc +++ b/src/rgw/rgw_acl_s3.cc @@ -290,7 +290,7 @@ static const char *get_acl_header(const RGWEnv *env, return env->get(header, NULL); } -static int parse_grantee_str(RGWUserCtl *user_ctl, string& grantee_str, +static int parse_grantee_str(const DoutPrefixProvider *dpp, RGWUserCtl *user_ctl, string& grantee_str, const struct s3_acl_header *perm, ACLGrant& grant) { string id_type, id_val_quoted; @@ -306,14 +306,14 @@ static int parse_grantee_str(RGWUserCtl *user_ctl, string& grantee_str, string id_val = rgw_trim_quotes(id_val_quoted); if (strcasecmp(id_type.c_str(), "emailAddress") == 0) { - ret = user_ctl->get_info_by_email(id_val, &info, null_yield); + ret = user_ctl->get_info_by_email(dpp, id_val, &info, null_yield); if (ret < 0) return ret; grant.set_canon(info.user_id, info.display_name, rgw_perm); } else if (strcasecmp(id_type.c_str(), "id") == 0) { rgw_user user(id_val); - ret = user_ctl->get_info_by_uid(user, &info, null_yield); + ret = user_ctl->get_info_by_uid(dpp, user, &info, null_yield); if (ret < 0) return ret; @@ -331,7 +331,7 @@ static int parse_grantee_str(RGWUserCtl *user_ctl, string& grantee_str, return 0; } -static int parse_acl_header(RGWUserCtl *user_ctl, const RGWEnv *env, +static int parse_acl_header(const DoutPrefixProvider *dpp, RGWUserCtl *user_ctl, const RGWEnv *env, const struct s3_acl_header *perm, std::list& _grants) { std::list grantees; @@ -346,7 +346,7 @@ static int parse_acl_header(RGWUserCtl *user_ctl, const RGWEnv *env, for (list::iterator it = grantees.begin(); it != grantees.end(); ++it) { ACLGrant grant; - int ret = parse_grantee_str(user_ctl, *it, perm, grant); + int ret = parse_grantee_str(dpp, user_ctl, *it, perm, grant); if (ret < 0) return ret; @@ -451,13 +451,13 @@ static const s3_acl_header acl_header_perms[] = { {0, NULL} }; -int RGWAccessControlPolicy_S3::create_from_headers(RGWUserCtl *user_ctl, const RGWEnv *env, ACLOwner& _owner) +int RGWAccessControlPolicy_S3::create_from_headers(const DoutPrefixProvider *dpp, RGWUserCtl *user_ctl, const RGWEnv *env, ACLOwner& _owner) { std::list grants; int r = 0; for (const struct s3_acl_header *p = acl_header_perms; p->rgw_perm; p++) { - r = parse_acl_header(user_ctl, env, p, grants); + r = parse_acl_header(dpp, user_ctl, env, p, grants); if (r < 0) { return r; } @@ -474,7 +474,7 @@ int RGWAccessControlPolicy_S3::create_from_headers(RGWUserCtl *user_ctl, const R /* can only be called on object that was parsed */ -int RGWAccessControlPolicy_S3::rebuild(RGWUserCtl *user_ctl, ACLOwner *owner, RGWAccessControlPolicy& dest, +int RGWAccessControlPolicy_S3::rebuild(const DoutPrefixProvider *dpp, RGWUserCtl *user_ctl, ACLOwner *owner, RGWAccessControlPolicy& dest, std::string &err_msg) { if (!owner) @@ -488,8 +488,8 @@ int RGWAccessControlPolicy_S3::rebuild(RGWUserCtl *user_ctl, ACLOwner *owner, RG } RGWUserInfo owner_info; - if (user_ctl->get_info_by_uid(owner->get_id(), &owner_info, null_yield) < 0) { - ldout(cct, 10) << "owner info does not exist" << dendl; + if (user_ctl->get_info_by_uid(dpp, owner->get_id(), &owner_info, null_yield) < 0) { + ldpp_dout(dpp, 10) << "owner info does not exist" << dendl; err_msg = "Invalid id"; return -EINVAL; } @@ -497,8 +497,8 @@ int RGWAccessControlPolicy_S3::rebuild(RGWUserCtl *user_ctl, ACLOwner *owner, RG 
dest_owner.set_id(owner->get_id()); dest_owner.set_name(owner_info.display_name); - ldout(cct, 20) << "owner id=" << owner->get_id() << dendl; - ldout(cct, 20) << "dest owner id=" << dest.get_owner().get_id() << dendl; + ldpp_dout(dpp, 20) << "owner id=" << owner->get_id() << dendl; + ldpp_dout(dpp, 20) << "dest owner id=" << dest.get_owner().get_id() << dendl; RGWAccessControlList& dst_acl = dest.get_acl(); @@ -517,13 +517,13 @@ int RGWAccessControlPolicy_S3::rebuild(RGWUserCtl *user_ctl, ACLOwner *owner, RG string email; rgw_user u; if (!src_grant.get_id(u)) { - ldout(cct, 0) << "ERROR: src_grant.get_id() failed" << dendl; + ldpp_dout(dpp, 0) << "ERROR: src_grant.get_id() failed" << dendl; return -EINVAL; } email = u.id; - ldout(cct, 10) << "grant user email=" << email << dendl; - if (user_ctl->get_info_by_email(email, &grant_user, null_yield) < 0) { - ldout(cct, 10) << "grant user email not found or other error" << dendl; + ldpp_dout(dpp, 10) << "grant user email=" << email << dendl; + if (user_ctl->get_info_by_email(dpp, email, &grant_user, null_yield) < 0) { + ldpp_dout(dpp, 10) << "grant user email not found or other error" << dendl; err_msg = "The e-mail address you provided does not match any account on record."; return -ERR_UNRESOLVABLE_EMAIL; } @@ -533,14 +533,14 @@ int RGWAccessControlPolicy_S3::rebuild(RGWUserCtl *user_ctl, ACLOwner *owner, RG { if (type.get_type() == ACL_TYPE_CANON_USER) { if (!src_grant.get_id(uid)) { - ldout(cct, 0) << "ERROR: src_grant.get_id() failed" << dendl; + ldpp_dout(dpp, 0) << "ERROR: src_grant.get_id() failed" << dendl; err_msg = "Invalid id"; return -EINVAL; } } - if (grant_user.user_id.empty() && user_ctl->get_info_by_uid(uid, &grant_user, null_yield) < 0) { - ldout(cct, 10) << "grant user does not exist:" << uid << dendl; + if (grant_user.user_id.empty() && user_ctl->get_info_by_uid(dpp, uid, &grant_user, null_yield) < 0) { + ldpp_dout(dpp, 10) << "grant user does not exist:" << uid << dendl; err_msg = "Invalid id"; return -EINVAL; } else { @@ -549,7 +549,7 @@ int RGWAccessControlPolicy_S3::rebuild(RGWUserCtl *user_ctl, ACLOwner *owner, RG grant_ok = true; rgw_user new_id; new_grant.get_id(new_id); - ldout(cct, 10) << "new grant: " << new_id << ":" << grant_user.display_name << dendl; + ldpp_dout(dpp, 10) << "new grant: " << new_id << ":" << grant_user.display_name << dendl; } } break; @@ -559,9 +559,9 @@ int RGWAccessControlPolicy_S3::rebuild(RGWUserCtl *user_ctl, ACLOwner *owner, RG if (ACLGrant_S3::group_to_uri(src_grant.get_group(), uri)) { new_grant = src_grant; grant_ok = true; - ldout(cct, 10) << "new grant: " << uri << dendl; + ldpp_dout(dpp, 10) << "new grant: " << uri << dendl; } else { - ldout(cct, 10) << "bad grant group:" << (int)src_grant.get_group() << dendl; + ldpp_dout(dpp, 10) << "bad grant group:" << (int)src_grant.get_group() << dendl; err_msg = "Invalid group uri"; return -EINVAL; } diff --git a/src/rgw/rgw_acl_s3.h b/src/rgw/rgw_acl_s3.h index eac1412474039..60613a8a1c14b 100644 --- a/src/rgw/rgw_acl_s3.h +++ b/src/rgw/rgw_acl_s3.h @@ -83,7 +83,7 @@ public: bool xml_end(const char *el) override; void to_xml(ostream& out); - int rebuild(RGWUserCtl *user_ctl, ACLOwner *owner, RGWAccessControlPolicy& dest, + int rebuild(const DoutPrefixProvider *dpp, RGWUserCtl *user_ctl, ACLOwner *owner, RGWAccessControlPolicy& dest, std::string &err_msg); bool compare_group_name(string& id, ACLGroupTypeEnum group) override; @@ -97,7 +97,7 @@ public: int ret = _acl.create_canned(owner, bucket_owner, canned_acl); return ret; } - int 
create_from_headers(RGWUserCtl *user_ctl, const RGWEnv *env, ACLOwner& _owner); + int create_from_headers(const DoutPrefixProvider *dpp, RGWUserCtl *user_ctl, const RGWEnv *env, ACLOwner& _owner); }; /** diff --git a/src/rgw/rgw_acl_swift.cc b/src/rgw/rgw_acl_swift.cc index 36f00c9173eea..896a38bf9fbfb 100644 --- a/src/rgw/rgw_acl_swift.cc +++ b/src/rgw/rgw_acl_swift.cc @@ -111,7 +111,8 @@ static boost::optional referrer_to_grant(std::string url_spec, } } -static ACLGrant user_to_grant(CephContext* const cct, +static ACLGrant user_to_grant(const DoutPrefixProvider *dpp, + CephContext* const cct, RGWUserCtl* const user_ctl, const std::string& uid, const uint32_t perm) @@ -120,7 +121,7 @@ static ACLGrant user_to_grant(CephContext* const cct, RGWUserInfo grant_user; ACLGrant grant; - if (user_ctl->get_info_by_uid(user, &grant_user, null_yield) < 0) { + if (user_ctl->get_info_by_uid(dpp, user, &grant_user, null_yield) < 0) { ldout(cct, 10) << "grant user does not exist: " << uid << dendl; /* skipping silently */ grant.set_canon(user, std::string(), perm); @@ -131,20 +132,21 @@ static ACLGrant user_to_grant(CephContext* const cct, return grant; } -int RGWAccessControlPolicy_SWIFT::add_grants(RGWUserCtl* const user_ctl, +int RGWAccessControlPolicy_SWIFT::add_grants(const DoutPrefixProvider *dpp, + RGWUserCtl* const user_ctl, const std::vector& uids, const uint32_t perm) { for (const auto& uid : uids) { boost::optional grant; - ldout(cct, 20) << "trying to add grant for ACL uid=" << uid << dendl; + ldpp_dout(dpp, 20) << "trying to add grant for ACL uid=" << uid << dendl; /* Let's check whether the item has a separator potentially indicating * a special meaning (like an HTTP referral-based grant). */ const size_t pos = uid.find(':'); if (std::string::npos == pos) { /* No, it don't have -- we've got just a regular user identifier. */ - grant = user_to_grant(cct, user_ctl, uid, perm); + grant = user_to_grant(dpp, cct, user_ctl, uid, perm); } else { /* Yes, *potentially* an HTTP referral. */ auto designator = uid.substr(0, pos); @@ -155,7 +157,7 @@ int RGWAccessControlPolicy_SWIFT::add_grants(RGWUserCtl* const user_ctl, boost::algorithm::trim(designatee); if (! boost::algorithm::starts_with(designator, ".")) { - grant = user_to_grant(cct, user_ctl, uid, perm); + grant = user_to_grant(dpp, cct, user_ctl, uid, perm); } else if ((perm & SWIFT_PERM_WRITE) == 0 && is_referrer(designator)) { /* HTTP referrer-based ACLs aren't acceptable for writes. 
*/ grant = referrer_to_grant(designatee, perm); @@ -173,7 +175,8 @@ int RGWAccessControlPolicy_SWIFT::add_grants(RGWUserCtl* const user_ctl, } -int RGWAccessControlPolicy_SWIFT::create(RGWUserCtl* const user_ctl, +int RGWAccessControlPolicy_SWIFT::create(const DoutPrefixProvider *dpp, + RGWUserCtl* const user_ctl, const rgw_user& id, const std::string& name, const char* read_list, @@ -189,12 +192,12 @@ int RGWAccessControlPolicy_SWIFT::create(RGWUserCtl* const user_ctl, std::vector uids; int r = parse_list(read_list, uids); if (r < 0) { - ldout(cct, 0) << "ERROR: parse_list for read returned r=" + ldpp_dout(dpp, 0) << "ERROR: parse_list for read returned r=" << r << dendl; return r; } - r = add_grants(user_ctl, uids, SWIFT_PERM_READ); + r = add_grants(dpp, user_ctl, uids, SWIFT_PERM_READ); if (r < 0) { ldout(cct, 0) << "ERROR: add_grants for read returned r=" << r << dendl; @@ -206,12 +209,12 @@ int RGWAccessControlPolicy_SWIFT::create(RGWUserCtl* const user_ctl, std::vector uids; int r = parse_list(write_list, uids); if (r < 0) { - ldout(cct, 0) << "ERROR: parse_list for write returned r=" + ldpp_dout(dpp, 0) << "ERROR: parse_list for write returned r=" << r << dendl; return r; } - r = add_grants(user_ctl, uids, SWIFT_PERM_WRITE); + r = add_grants(dpp, user_ctl, uids, SWIFT_PERM_WRITE); if (r < 0) { ldout(cct, 0) << "ERROR: add_grants for write returned r=" << r << dendl; @@ -297,7 +300,8 @@ void RGWAccessControlPolicy_SWIFT::to_str(string& read, string& write) } } -void RGWAccessControlPolicy_SWIFTAcct::add_grants(RGWUserCtl * const user_ctl, +void RGWAccessControlPolicy_SWIFTAcct::add_grants(const DoutPrefixProvider *dpp, + RGWUserCtl * const user_ctl, const std::vector& uids, const uint32_t perm) { @@ -311,7 +315,7 @@ void RGWAccessControlPolicy_SWIFTAcct::add_grants(RGWUserCtl * const user_ctl, } else { rgw_user user(uid); - if (user_ctl->get_info_by_uid(user, &grant_user, null_yield) < 0) { + if (user_ctl->get_info_by_uid(dpp, user, &grant_user, null_yield) < 0) { ldout(cct, 10) << "grant user does not exist:" << uid << dendl; /* skipping silently */ grant.set_canon(user, std::string(), perm); @@ -324,7 +328,8 @@ void RGWAccessControlPolicy_SWIFTAcct::add_grants(RGWUserCtl * const user_ctl, } } -bool RGWAccessControlPolicy_SWIFTAcct::create(RGWUserCtl * const user_ctl, +bool RGWAccessControlPolicy_SWIFTAcct::create(const DoutPrefixProvider *dpp, + RGWUserCtl * const user_ctl, const rgw_user& id, const std::string& name, const std::string& acl_str) @@ -336,7 +341,7 @@ bool RGWAccessControlPolicy_SWIFTAcct::create(RGWUserCtl * const user_ctl, JSONParser parser; if (!parser.parse(acl_str.c_str(), acl_str.length())) { - ldout(cct, 0) << "ERROR: JSONParser::parse returned error=" << dendl; + ldpp_dout(dpp, 0) << "ERROR: JSONParser::parse returned error=" << dendl; return false; } @@ -346,7 +351,7 @@ bool RGWAccessControlPolicy_SWIFTAcct::create(RGWUserCtl * const user_ctl, decode_json_obj(admin, *iter); ldout(cct, 0) << "admins: " << admin << dendl; - add_grants(user_ctl, admin, SWIFT_PERM_ADMIN); + add_grants(dpp, user_ctl, admin, SWIFT_PERM_ADMIN); } iter = parser.find_first("read-write"); @@ -355,7 +360,7 @@ bool RGWAccessControlPolicy_SWIFTAcct::create(RGWUserCtl * const user_ctl, decode_json_obj(readwrite, *iter); ldout(cct, 0) << "read-write: " << readwrite << dendl; - add_grants(user_ctl, readwrite, SWIFT_PERM_RWRT); + add_grants(dpp, user_ctl, readwrite, SWIFT_PERM_RWRT); } iter = parser.find_first("read-only"); @@ -364,7 +369,7 @@ bool 
RGWAccessControlPolicy_SWIFTAcct::create(RGWUserCtl * const user_ctl, decode_json_obj(readonly, *iter); ldout(cct, 0) << "read-only: " << readonly << dendl; - add_grants(user_ctl, readonly, SWIFT_PERM_READ); + add_grants(dpp, user_ctl, readonly, SWIFT_PERM_READ); } return true; diff --git a/src/rgw/rgw_acl_swift.h b/src/rgw/rgw_acl_swift.h index f5f540241ccd5..68104bd36fd81 100644 --- a/src/rgw/rgw_acl_swift.h +++ b/src/rgw/rgw_acl_swift.h @@ -17,7 +17,7 @@ class RGWUserCtl; class RGWAccessControlPolicy_SWIFT : public RGWAccessControlPolicy { - int add_grants(RGWUserCtl *user_ctl, + int add_grants(const DoutPrefixProvider *dpp, RGWUserCtl *user_ctl, const std::vector& uids, uint32_t perm); @@ -27,7 +27,8 @@ public: } ~RGWAccessControlPolicy_SWIFT() override = default; - int create(RGWUserCtl *user_ctl, + int create(const DoutPrefixProvider *dpp, + RGWUserCtl *user_ctl, const rgw_user& id, const std::string& name, const char* read_list, @@ -45,10 +46,12 @@ public: } ~RGWAccessControlPolicy_SWIFTAcct() override {} - void add_grants(RGWUserCtl *user_ctl, + void add_grants(const DoutPrefixProvider *dpp, + RGWUserCtl *user_ctl, const std::vector& uids, uint32_t perm); - bool create(RGWUserCtl *user_ctl, + bool create(const DoutPrefixProvider *dpp, + RGWUserCtl *user_ctl, const rgw_user& id, const std::string& name, const std::string& acl_str); diff --git a/src/rgw/rgw_admin.cc b/src/rgw/rgw_admin.cc index 33c8eae5725be..cc0c53ab78b31 100644 --- a/src/rgw/rgw_admin.cc +++ b/src/rgw/rgw_admin.cc @@ -78,7 +78,7 @@ static rgw::sal::RGWRadosStore *store = NULL; static const DoutPrefixProvider* dpp() { struct GlobalPrefix : public DoutPrefixProvider { - CephContext *get_cct() const override { return store->ctx(); } + CephContext *get_cct() const override { return dout_context; } unsigned get_subsys() const override { return dout_subsys; } std::ostream& gen_prefix(std::ostream& out) const override { return out; } }; @@ -1141,10 +1141,10 @@ static int init_bucket(const string& tenant_name, auto obj_ctx = store->svc()->sysobj->init_obj_ctx(); int r; if (bucket_id.empty()) { - r = store->getRados()->get_bucket_info(store->svc(), tenant_name, bucket_name, bucket_info, nullptr, null_yield, pattrs); + r = store->getRados()->get_bucket_info(store->svc(), tenant_name, bucket_name, bucket_info, nullptr, null_yield, dpp(), pattrs); } else { string bucket_instance_id = bucket_name + ":" + bucket_id; - r = store->getRados()->get_bucket_instance_info(obj_ctx, bucket_instance_id, bucket_info, NULL, pattrs, null_yield); + r = store->getRados()->get_bucket_instance_info(obj_ctx, bucket_instance_id, bucket_info, NULL, pattrs, null_yield, dpp()); } if (r < 0) { cerr << "could not get bucket info for bucket=" << bucket_name << std::endl; @@ -1317,7 +1317,7 @@ int set_bucket_quota(rgw::sal::RGWRadosStore *store, OPT opt_cmd, { RGWBucketInfo bucket_info; map attrs; - int r = store->getRados()->get_bucket_info(store->svc(), tenant_name, bucket_name, bucket_info, NULL, null_yield, &attrs); + int r = store->getRados()->get_bucket_info(store->svc(), tenant_name, bucket_name, bucket_info, NULL, null_yield, dpp(), &attrs); if (r < 0) { cerr << "could not get bucket info for bucket=" << bucket_name << ": " << cpp_strerror(-r) << std::endl; return -r; @@ -1325,7 +1325,7 @@ int set_bucket_quota(rgw::sal::RGWRadosStore *store, OPT opt_cmd, set_quota_info(bucket_info.quota, opt_cmd, max_size, max_objects, have_max_size, have_max_objects); - r = store->getRados()->put_bucket_instance_info(bucket_info, false, real_time(), &attrs); + r 
= store->getRados()->put_bucket_instance_info(bucket_info, false, real_time(), &attrs, dpp()); if (r < 0) { cerr << "ERROR: failed writing bucket instance info: " << cpp_strerror(-r) << std::endl; return -r; @@ -1343,7 +1343,7 @@ int set_user_bucket_quota(OPT opt_cmd, RGWUser& user, RGWUserAdminOpState& op_st op_state.set_bucket_quota(user_info.bucket_quota); string err; - int r = user.modify(op_state, null_yield, &err); + int r = user.modify(dpp(), op_state, null_yield, &err); if (r < 0) { cerr << "ERROR: failed updating user info: " << cpp_strerror(-r) << ": " << err << std::endl; return -r; @@ -1361,7 +1361,7 @@ int set_user_quota(OPT opt_cmd, RGWUser& user, RGWUserAdminOpState& op_state, in op_state.set_user_quota(user_info.user_quota); string err; - int r = user.modify(op_state, null_yield, &err); + int r = user.modify(dpp(), op_state, null_yield, &err); if (r < 0) { cerr << "ERROR: failed updating user info: " << cpp_strerror(-r) << ": " << err << std::endl; return -r; @@ -1372,9 +1372,9 @@ int set_user_quota(OPT opt_cmd, RGWUser& user, RGWUserAdminOpState& op_state, in int check_min_obj_stripe_size(rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket_info, rgw::sal::RGWObject* obj, uint64_t min_stripe_size, bool *need_rewrite) { RGWObjectCtx obj_ctx(store); - int ret = obj->get_obj_attrs(&obj_ctx, null_yield); + int ret = obj->get_obj_attrs(&obj_ctx, null_yield, dpp()); if (ret < 0) { - lderr(store->ctx()) << "ERROR: failed to stat object, returned error: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp(), -1) << "ERROR: failed to stat object, returned error: " << cpp_strerror(-ret) << dendl; return ret; } @@ -1392,7 +1392,7 @@ int check_min_obj_stripe_size(rgw::sal::RGWRadosStore *store, RGWBucketInfo& buc auto biter = bl.cbegin(); decode(manifest, biter); } catch (buffer::error& err) { - ldout(store->ctx(), 0) << "ERROR: failed to decode manifest" << dendl; + ldpp_dout(dpp(), 0) << "ERROR: failed to decode manifest" << dendl; return -EIO; } @@ -1434,7 +1434,7 @@ int check_obj_locator_underscore(RGWBucketInfo& bucket_info, rgw_obj& obj, rgw_o RGWRados::Object op_target(store->getRados(), bucket_info, obj_ctx, obj); RGWRados::Object::Read read_op(&op_target); - int ret = read_op.prepare(null_yield); + int ret = read_op.prepare(null_yield, dpp()); bool needs_fixing = (ret == -ENOENT); f->dump_bool("needs_fixing", needs_fixing); @@ -1442,7 +1442,7 @@ int check_obj_locator_underscore(RGWBucketInfo& bucket_info, rgw_obj& obj, rgw_o string status = (needs_fixing ? 
"needs_fixing" : "ok"); if ((needs_fixing || remove_bad) && fix) { - ret = store->getRados()->fix_head_obj_locator(bucket_info, needs_fixing, remove_bad, key); + ret = store->getRados()->fix_head_obj_locator(dpp(), bucket_info, needs_fixing, remove_bad, key); if (ret < 0) { cerr << "ERROR: fix_head_object_locator() returned ret=" << ret << std::endl; goto done; @@ -1469,7 +1469,7 @@ int check_obj_tail_locator_underscore(RGWBucketInfo& bucket_info, rgw_obj& obj, bool needs_fixing; string status; - int ret = store->getRados()->fix_tail_obj_locator(bucket_info, key, fix, &needs_fixing, null_yield); + int ret = store->getRados()->fix_tail_obj_locator(dpp(), bucket_info, key, fix, &needs_fixing, null_yield); if (ret < 0) { cerr << "ERROR: fix_tail_object_locator_underscore() returned ret=" << ret << std::endl; status = "failed"; @@ -1529,7 +1529,7 @@ int do_check_object_locator(const string& tenant_name, const string& bucket_name f->open_array_section("check_objects"); do { - ret = list_op.list_objects(max_entries - count, &result, &common_prefixes, &truncated, null_yield); + ret = list_op.list_objects(dpp(), max_entries - count, &result, &common_prefixes, &truncated, null_yield); if (ret < 0) { cerr << "ERROR: store->list_objects(): " << cpp_strerror(-ret) << std::endl; return -ret; @@ -1610,7 +1610,7 @@ static int send_to_remote_gateway(RGWRESTConn* conn, req_info& info, ceph::bufferlist response; rgw_user user; - int ret = conn->forward(user, info, nullptr, MAX_REST_RESPONSE, &in_data, &response, null_yield); + int ret = conn->forward(dpp(), user, info, nullptr, MAX_REST_RESPONSE, &in_data, &response, null_yield); int parse_ret = parser.parse(response.c_str(), response.length()); if (parse_ret < 0) { @@ -1636,7 +1636,7 @@ static int send_to_url(const string& url, const string& access, RGWRESTSimpleRequest req(g_ceph_context, info.method, url, NULL, ¶ms); bufferlist response; - int ret = req.forward_request(key, info, MAX_REST_RESPONSE, &in_data, &response, null_yield); + int ret = req.forward_request(dpp(), key, info, MAX_REST_RESPONSE, &in_data, &response, null_yield); int parse_ret = parser.parse(response.c_str(), response.length()); if (parse_ret < 0) { @@ -1671,7 +1671,7 @@ static int commit_period(RGWRealm& realm, RGWPeriod& period, if (store->svc()->zone->zone_id() == master_zone) { // read the current period RGWPeriod current_period; - int ret = current_period.init(g_ceph_context, + int ret = current_period.init(dpp(), g_ceph_context, store->svc()->sysobj, realm.get_id(), null_yield); if (ret < 0) { @@ -1680,7 +1680,7 @@ static int commit_period(RGWRealm& realm, RGWPeriod& period, return ret; } // the master zone can commit locally - ret = period.commit(store, realm, current_period, cerr, null_yield, force); + ret = period.commit(dpp(), store, realm, current_period, cerr, null_yield, force); if (ret < 0) { cerr << "failed to commit period: " << cpp_strerror(-ret) << std::endl; } @@ -1744,23 +1744,23 @@ static int commit_period(RGWRealm& realm, RGWPeriod& period, } // the master zone gave us back the period that it committed, so it's // safe to save it as our latest epoch - ret = period.store_info(false, null_yield); + ret = period.store_info(dpp(), false, null_yield); if (ret < 0) { cerr << "Error storing committed period " << period.get_id() << ": " << cpp_strerror(ret) << std::endl; return ret; } - ret = period.set_latest_epoch(null_yield, period.get_epoch()); + ret = period.set_latest_epoch(dpp(), null_yield, period.get_epoch()); if (ret < 0) { cerr << "Error updating period epoch: 
" << cpp_strerror(ret) << std::endl; return ret; } - ret = period.reflect(null_yield); + ret = period.reflect(dpp(), null_yield); if (ret < 0) { cerr << "Error updating local objects: " << cpp_strerror(ret) << std::endl; return ret; } - realm.notify_new_period(period, null_yield); + realm.notify_new_period(dpp(), period, null_yield); return ret; } @@ -1771,7 +1771,7 @@ static int update_period(const string& realm_id, const string& realm_name, Formatter *formatter, bool force) { RGWRealm realm(realm_id, realm_name); - int ret = realm.init(g_ceph_context, store->svc()->sysobj, null_yield); + int ret = realm.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0 ) { cerr << "Error initializing realm " << cpp_strerror(-ret) << std::endl; return ret; @@ -1781,19 +1781,19 @@ static int update_period(const string& realm_id, const string& realm_name, epoch = atoi(period_epoch.c_str()); } RGWPeriod period(period_id, epoch); - ret = period.init(g_ceph_context, store->svc()->sysobj, realm.get_id(), null_yield); + ret = period.init(dpp(), g_ceph_context, store->svc()->sysobj, realm.get_id(), null_yield); if (ret < 0) { cerr << "period init failed: " << cpp_strerror(-ret) << std::endl; return ret; } period.fork(); - ret = period.update(null_yield); + ret = period.update(dpp(), null_yield); if(ret < 0) { // Dropping the error message here, as both the ret codes were handled in // period.update() return ret; } - ret = period.store_info(false, null_yield); + ret = period.store_info(dpp(), false, null_yield); if (ret < 0) { cerr << "failed to store period: " << cpp_strerror(-ret) << std::endl; return ret; @@ -1853,7 +1853,7 @@ static int do_period_pull(RGWRESTConn *remote_conn, const string& url, cerr << "request failed: " << cpp_strerror(-ret) << std::endl; return ret; } - ret = period->init(g_ceph_context, store->svc()->sysobj, null_yield, false); + ret = period->init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield, false); if (ret < 0) { cerr << "faile to init period " << cpp_strerror(-ret) << std::endl; return ret; @@ -1864,12 +1864,12 @@ static int do_period_pull(RGWRESTConn *remote_conn, const string& url, cout << "failed to decode JSON input: " << e.what() << std::endl; return -EINVAL; } - ret = period->store_info(false, null_yield); + ret = period->store_info(dpp(), false, null_yield); if (ret < 0) { cerr << "Error storing period " << period->get_id() << ": " << cpp_strerror(ret) << std::endl; } // store latest epoch (ignore errors) - period->update_latest_epoch(period->get_epoch(), null_yield); + period->update_latest_epoch(dpp(), period->get_epoch(), null_yield); return 0; } @@ -1878,7 +1878,7 @@ static int read_current_period_id(rgw::sal::RGWRadosStore* store, const std::str std::string* period_id) { RGWRealm realm(realm_id, realm_name); - int ret = realm.init(g_ceph_context, store->svc()->sysobj, null_yield); + int ret = realm.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { std::cerr << "failed to read realm: " << cpp_strerror(-ret) << std::endl; return ret; @@ -1908,14 +1908,14 @@ static void get_md_sync_status(list& status) { RGWMetaSyncStatusManager sync(store, store->svc()->rados->get_async_processor()); - int ret = sync.init(); + int ret = sync.init(dpp()); if (ret < 0) { status.push_back(string("failed to retrieve sync info: sync.init() failed: ") + cpp_strerror(-ret)); return; } rgw_meta_sync_status sync_status; - ret = sync.read_sync_status(&sync_status); + ret = sync.read_sync_status(dpp(), &sync_status); if (ret < 0) { 
status.push_back(string("failed to read sync status: ") + cpp_strerror(-ret)); return; @@ -1974,7 +1974,7 @@ static void get_md_sync_status(list& status) map master_shards_info; string master_period = store->svc()->zone->get_current_period_id(); - ret = sync.read_master_log_shards_info(master_period, &master_shards_info); + ret = sync.read_master_log_shards_info(dpp(), master_period, &master_shards_info); if (ret < 0) { status.push_back(string("failed to fetch master sync status: ") + cpp_strerror(-ret)); return; @@ -2012,7 +2012,7 @@ static void get_md_sync_status(list& status) push_ss(ss, status) << "behind shards: " << "[" << shards_behind_set << "]"; map master_pos; - ret = sync.read_master_log_shards_next(sync_status.sync_info.period, shards_behind, &master_pos); + ret = sync.read_master_log_shards_next(dpp(), sync_status.sync_info.period, shards_behind, &master_pos); if (ret < 0) { derr << "ERROR: failed to fetch master next positions (" << cpp_strerror(-ret) << ")" << dendl; } else { @@ -2060,7 +2060,7 @@ static void get_data_sync_status(const rgw_zone_id& source_zone, list& s } RGWDataSyncStatusManager sync(store, store->svc()->rados->get_async_processor(), source_zone, nullptr); - int ret = sync.init(); + int ret = sync.init(dpp()); if (ret < 0) { push_ss(ss, status, tab) << string("failed to retrieve sync info: ") + cpp_strerror(-ret); flush_ss(ss, status); @@ -2068,14 +2068,14 @@ static void get_data_sync_status(const rgw_zone_id& source_zone, list& s } rgw_data_sync_status sync_status; - ret = sync.read_sync_status(&sync_status); + ret = sync.read_sync_status(dpp(), &sync_status); if (ret < 0 && ret != -ENOENT) { push_ss(ss, status, tab) << string("failed read sync status: ") + cpp_strerror(-ret); return; } set recovering_shards; - ret = sync.read_recovering_shards(sync_status.sync_info.num_shards, recovering_shards); + ret = sync.read_recovering_shards(dpp(), sync_status.sync_info.num_shards, recovering_shards); if (ret < 0 && ret != ENOENT) { push_ss(ss, status, tab) << string("failed read recovering shards: ") + cpp_strerror(-ret); return; @@ -2132,7 +2132,7 @@ static void get_data_sync_status(const rgw_zone_id& source_zone, list& s map source_shards_info; - ret = sync.read_source_log_shards_info(&source_shards_info); + ret = sync.read_source_log_shards_info(dpp(), &source_shards_info); if (ret < 0) { push_ss(ss, status, tab) << string("failed to fetch source sync status: ") + cpp_strerror(-ret); return; @@ -2167,7 +2167,7 @@ static void get_data_sync_status(const rgw_zone_id& source_zone, list& s push_ss(ss, status, tab) << "behind shards: " << "[" << shards_behind_set << "]" ; map master_pos; - ret = sync.read_source_log_shards_next(shards_behind, &master_pos); + ret = sync.read_source_log_shards_next(dpp(), shards_behind, &master_pos); if (ret < 0) { derr << "ERROR: failed to fetch next positions (" << cpp_strerror(-ret) << ")" << dendl; } else { @@ -2262,7 +2262,7 @@ std::ostream& operator<<(std::ostream& out, const indented& h) { return out << std::setw(h.w) << h.header << std::setw(1) << ' '; } -static int bucket_source_sync_status(rgw::sal::RGWRadosStore *store, const RGWZone& zone, +static int bucket_source_sync_status(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, const RGWZone& zone, const RGWZone& source, RGWRESTConn *conn, const RGWBucketInfo& bucket_info, rgw_sync_bucket_pipe pipe, @@ -2277,7 +2277,7 @@ static int bucket_source_sync_status(rgw::sal::RGWRadosStore *store, const RGWZo } if (!pipe.source.bucket) { - lderr(store->ctx()) << __func__ 
<< "(): missing source bucket" << dendl; + ldpp_dout(dpp, -1) << __func__ << "(): missing source bucket" << dendl; return -EINVAL; } @@ -2285,7 +2285,7 @@ static int bucket_source_sync_status(rgw::sal::RGWRadosStore *store, const RGWZo rgw_bucket source_bucket; int r = init_bucket(*pipe.source.bucket, source_bucket_info, source_bucket); if (r < 0) { - lderr(store->ctx()) << "failed to read source bucket info: " << cpp_strerror(r) << dendl; + ldpp_dout(dpp, -1) << "failed to read source bucket info: " << cpp_strerror(r) << dendl; return r; } @@ -2293,9 +2293,9 @@ static int bucket_source_sync_status(rgw::sal::RGWRadosStore *store, const RGWZo pipe.dest.bucket = bucket_info.bucket; std::vector status; - r = rgw_bucket_sync_status(dpp(), store, pipe, bucket_info, &source_bucket_info, &status); + r = rgw_bucket_sync_status(dpp, store, pipe, bucket_info, &source_bucket_info, &status); if (r < 0) { - lderr(store->ctx()) << "failed to read bucket sync status: " << cpp_strerror(r) << dendl; + ldpp_dout(dpp, -1) << "failed to read bucket sync status: " << cpp_strerror(r) << dendl; return r; } @@ -2324,9 +2324,9 @@ static int bucket_source_sync_status(rgw::sal::RGWRadosStore *store, const RGWZo out << indented{width} << "incremental sync: " << num_inc << "/" << total_shards << " shards\n"; BucketIndexShardsManager remote_markers; - r = rgw_read_remote_bilog_info(conn, source_bucket, remote_markers, null_yield); + r = rgw_read_remote_bilog_info(dpp, conn, source_bucket, remote_markers, null_yield); if (r < 0) { - lderr(store->ctx()) << "failed to read remote log: " << cpp_strerror(r) << dendl; + ldpp_dout(dpp, -1) << "failed to read remote log: " << cpp_strerror(r) << dendl; return r; } @@ -2382,7 +2382,7 @@ static void get_hint_entities(const std::set& zones, const std::set rgw_bucket hint_bucket; int ret = init_bucket(b, hint_bucket_info, hint_bucket); if (ret < 0) { - ldout(store->ctx(), 20) << "could not init bucket info for hint bucket=" << b << " ... skipping" << dendl; + ldpp_dout(dpp(), 20) << "could not init bucket info for hint bucket=" << b << " ... skipping" << dendl; continue; } @@ -2440,7 +2440,7 @@ static int sync_info(std::optional opt_target_zone, std::optionalalloc_child(*eff_bucket, nullopt)); } - ret = bucket_handler->init(null_yield); + ret = bucket_handler->init(dpp(), null_yield); if (ret < 0) { cerr << "ERROR: failed to init bucket sync policy handler: " << cpp_strerror(-ret) << " (ret=" << ret << ")" << std::endl; return ret; @@ -2486,9 +2486,9 @@ static int sync_info(std::optional opt_target_zone, std::optionalctl()->bucket->get_sync_policy_handler(zid, hint_bucket, &hint_bucket_handler, null_yield); + int r = store->ctl()->bucket->get_sync_policy_handler(zid, hint_bucket, &hint_bucket_handler, null_yield, dpp()); if (r < 0) { - ldout(store->ctx(), 20) << "could not get bucket sync policy handler for hint bucket=" << hint_bucket << " ... skipping" << dendl; + ldpp_dout(dpp(), 20) << "could not get bucket sync policy handler for hint bucket=" << hint_bucket << " ... 
skipping" << dendl; continue; } @@ -2537,16 +2537,16 @@ static int bucket_sync_info(rgw::sal::RGWRadosStore *store, const RGWBucketInfo& out << indented{width, "zone"} << zone.id << " (" << zone.name << ")\n"; out << indented{width, "bucket"} << info.bucket << "\n\n"; - if (!store->ctl()->bucket->bucket_imports_data(info.bucket, null_yield)) { + if (!store->ctl()->bucket->bucket_imports_data(info.bucket, null_yield, dpp())) { out << "Sync is disabled for bucket " << info.bucket.name << '\n'; return 0; } RGWBucketSyncPolicyHandlerRef handler; - int r = store->ctl()->bucket->get_sync_policy_handler(std::nullopt, info.bucket, &handler, null_yield); + int r = store->ctl()->bucket->get_sync_policy_handler(std::nullopt, info.bucket, &handler, null_yield, dpp()); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed to get policy handler for bucket (" << info.bucket << "): r=" << r << ": " << cpp_strerror(-r) << dendl; + ldpp_dout(dpp(), -1) << "ERROR: failed to get policy handler for bucket (" << info.bucket << "): r=" << r << ": " << cpp_strerror(-r) << dendl; return r; } @@ -2578,16 +2578,16 @@ static int bucket_sync_status(rgw::sal::RGWRadosStore *store, const RGWBucketInf out << indented{width, "zone"} << zone.id << " (" << zone.name << ")\n"; out << indented{width, "bucket"} << info.bucket << "\n\n"; - if (!store->ctl()->bucket->bucket_imports_data(info.bucket, null_yield)) { + if (!store->ctl()->bucket->bucket_imports_data(info.bucket, null_yield, dpp())) { out << "Sync is disabled for bucket " << info.bucket.name << " or bucket has no sync sources" << std::endl; return 0; } RGWBucketSyncPolicyHandlerRef handler; - int r = store->ctl()->bucket->get_sync_policy_handler(std::nullopt, info.bucket, &handler, null_yield); + int r = store->ctl()->bucket->get_sync_policy_handler(std::nullopt, info.bucket, &handler, null_yield, dpp()); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed to get policy handler for bucket (" << info.bucket << "): r=" << r << ": " << cpp_strerror(-r) << dendl; + ldpp_dout(dpp(), -1) << "ERROR: failed to get policy handler for bucket (" << info.bucket << "): r=" << r << ": " << cpp_strerror(-r) << dendl; return r; } @@ -2599,13 +2599,13 @@ static int bucket_sync_status(rgw::sal::RGWRadosStore *store, const RGWBucketInf if (!source_zone_id.empty()) { auto z = zonegroup.zones.find(source_zone_id); if (z == zonegroup.zones.end()) { - lderr(store->ctx()) << "Source zone not found in zonegroup " + ldpp_dout(dpp(), -1) << "Source zone not found in zonegroup " << zonegroup.get_name() << dendl; return -EINVAL; } auto c = zone_conn_map.find(source_zone_id); if (c == zone_conn_map.end()) { - lderr(store->ctx()) << "No connection to zone " << z->second.name << dendl; + ldpp_dout(dpp(), -1) << "No connection to zone " << z->second.name << dendl; return -EINVAL; } zone_ids.insert(source_zone_id); @@ -2632,7 +2632,7 @@ static int bucket_sync_status(rgw::sal::RGWRadosStore *store, const RGWBucketInf continue; } if (pipe.source.zone.value_or(rgw_zone_id()) == z->second.id) { - bucket_source_sync_status(store, zone, z->second, + bucket_source_sync_status(dpp(), store, zone, z->second, c->second, info, pipe, width, out); @@ -2784,7 +2784,7 @@ static int scan_totp(CephContext *cct, ceph::real_time& now, rados::cls::otp::ot pins[1].c_str()); if (rc != OATH_INVALID_OTP) { *pofs = time_ofs - step_size + step_size * totp.window / 2; - ldout(cct, 20) << "found at time=" << start_time - time_ofs << " time_ofs=" << time_ofs << dendl; + ldpp_dout(dpp(), 20) << "found at time=" << start_time - 
time_ofs << " time_ofs=" << time_ofs << dendl; return 0; } } @@ -2802,7 +2802,7 @@ static int trim_sync_error_log(int shard_id, const string& marker, int delay_ms) shard_id); // call cls_log_trim() until it returns -ENODATA for (;;) { - int ret = store->svc()->cls->timelog.trim(oid, {}, {}, {}, marker, nullptr, + int ret = store->svc()->cls->timelog.trim(dpp(), oid, {}, {}, {}, marker, nullptr, null_yield); if (ret == -ENODATA) { return 0; @@ -2897,7 +2897,7 @@ public: bucket(_bucket) {} int init() { - int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj, null_yield); + int ret = zonegroup.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl; return ret; @@ -2928,7 +2928,7 @@ public: int write_policy() { if (!bucket) { - int ret = zonegroup.update(null_yield); + int ret = zonegroup.update(dpp(), null_yield); if (ret < 0) { cerr << "failed to update zonegroup: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -2936,7 +2936,7 @@ public: return 0; } - int ret = store->getRados()->put_bucket_instance_info(bucket_info, false, real_time(), &bucket_attrs); + int ret = store->getRados()->put_bucket_instance_info(bucket_info, false, real_time(), &bucket_attrs, dpp()); if (ret < 0) { cerr << "failed to store bucket info: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -3909,9 +3909,9 @@ int main(int argc, const char **argv) bool need_cache = readonly_ops_list.find(opt_cmd) == readonly_ops_list.end(); if (raw_storage_op) { - store = RGWStoreManager::get_raw_storage(g_ceph_context); + store = RGWStoreManager::get_raw_storage(dpp(), g_ceph_context); } else { - store = RGWStoreManager::get_storage(g_ceph_context, false, false, false, false, false, + store = RGWStoreManager::get_storage(dpp(), g_ceph_context, false, false, false, false, false, need_cache && g_conf()->rgw_cache_enabled); } if (!store) { @@ -3950,12 +3950,12 @@ int main(int argc, const char **argv) return EINVAL; } RGWPeriod period(period_id); - int ret = period.init(g_ceph_context, store->svc()->sysobj, null_yield); + int ret = period.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "period.init failed: " << cpp_strerror(-ret) << std::endl; return -ret; } - ret = period.delete_obj(null_yield); + ret = period.delete_obj(dpp(), null_yield); if (ret < 0) { cerr << "ERROR: couldn't delete period: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -3971,7 +3971,7 @@ int main(int argc, const char **argv) } if (staging) { RGWRealm realm(realm_id, realm_name); - int ret = realm.init(g_ceph_context, store->svc()->sysobj, null_yield); + int ret = realm.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0 ) { cerr << "Error initializing realm " << cpp_strerror(-ret) << std::endl; return -ret; @@ -3982,7 +3982,7 @@ int main(int argc, const char **argv) epoch = 1; } RGWPeriod period(period_id, epoch); - int ret = period.init(g_ceph_context, store->svc()->sysobj, realm_id, + int ret = period.init(dpp(), g_ceph_context, store->svc()->sysobj, realm_id, null_yield, realm_name); if (ret < 0) { cerr << "period init failed: " << cpp_strerror(-ret) << std::endl; @@ -4007,7 +4007,7 @@ int main(int argc, const char **argv) case OPT::PERIOD_LIST: { list periods; - int ret = store->svc()->zone->list_periods(periods); + int ret = store->svc()->zone->list_periods(dpp(), periods); if (ret < 0) { cerr << "failed to list periods: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4035,13 
+4035,13 @@ int main(int argc, const char **argv) if (url.empty()) { // load current period for endpoints RGWRealm realm(realm_id, realm_name); - int ret = realm.init(g_ceph_context, store->svc()->sysobj, null_yield); + int ret = realm.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init realm: " << cpp_strerror(-ret) << std::endl; return -ret; } RGWPeriod current_period(realm.get_current_period()); - ret = current_period.init(g_ceph_context, store->svc()->sysobj, null_yield); + ret = current_period.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init current period: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4081,7 +4081,7 @@ int main(int argc, const char **argv) RGWRealm realm(g_ceph_context, store->svc()->sysobj); if (!realm_name.empty()) { // look up realm_id for the given realm_name - int ret = realm.read_id(realm_name, realm_id, null_yield); + int ret = realm.read_id(dpp(), realm_name, realm_id, null_yield); if (ret < 0) { cerr << "ERROR: failed to read realm for " << realm_name << ": " << cpp_strerror(-ret) << std::endl; @@ -4089,7 +4089,7 @@ int main(int argc, const char **argv) } } else { // use default realm_id when none is given - int ret = realm.read_default_id(realm_id, null_yield); + int ret = realm.read_default_id(dpp(), realm_id, null_yield); if (ret < 0 && ret != -ENOENT) { // on ENOENT, use empty realm_id cerr << "ERROR: failed to read default realm: " << cpp_strerror(-ret) << std::endl; @@ -4099,7 +4099,7 @@ int main(int argc, const char **argv) } RGWPeriodConfig period_config; - int ret = period_config.read(store->svc()->sysobj, realm_id, null_yield); + int ret = period_config.read(dpp(), store->svc()->sysobj, realm_id, null_yield); if (ret < 0 && ret != -ENOENT) { cerr << "ERROR: failed to read period config: " << cpp_strerror(-ret) << std::endl; @@ -4130,7 +4130,7 @@ int main(int argc, const char **argv) if (opt_cmd != OPT::GLOBAL_QUOTA_GET) { // write the modified period config - ret = period_config.write(store->svc()->sysobj, realm_id, null_yield); + ret = period_config.write(dpp(), store->svc()->sysobj, realm_id, null_yield); if (ret < 0) { cerr << "ERROR: failed to write period config: " << cpp_strerror(-ret) << std::endl; @@ -4157,14 +4157,14 @@ int main(int argc, const char **argv) } RGWRealm realm(realm_name, g_ceph_context, store->svc()->sysobj); - int ret = realm.create(null_yield); + int ret = realm.create(dpp(), null_yield); if (ret < 0) { cerr << "ERROR: couldn't create realm " << realm_name << ": " << cpp_strerror(-ret) << std::endl; return -ret; } if (set_default) { - ret = realm.set_as_default(null_yield); + ret = realm.set_as_default(dpp(), null_yield); if (ret < 0) { cerr << "failed to set realm " << realm_name << " as default: " << cpp_strerror(-ret) << std::endl; } @@ -4181,12 +4181,12 @@ int main(int argc, const char **argv) cerr << "missing realm name or id" << std::endl; return EINVAL; } - int ret = realm.init(g_ceph_context, store->svc()->sysobj, null_yield); + int ret = realm.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "realm.init failed: " << cpp_strerror(-ret) << std::endl; return -ret; } - ret = realm.delete_obj(null_yield); + ret = realm.delete_obj(dpp(), null_yield); if (ret < 0) { cerr << "ERROR: couldn't : " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4197,7 +4197,7 @@ int main(int argc, const char **argv) case OPT::REALM_GET: { RGWRealm realm(realm_id, realm_name); - int ret = 
realm.init(g_ceph_context, store->svc()->sysobj, null_yield); + int ret = realm.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { if (ret == -ENOENT && realm_name.empty() && realm_id.empty()) { cerr << "missing realm name or id, or default realm not found" << std::endl; @@ -4214,7 +4214,7 @@ int main(int argc, const char **argv) { RGWRealm realm(g_ceph_context, store->svc()->sysobj); string default_id; - int ret = realm.read_default_id(default_id, null_yield); + int ret = realm.read_default_id(dpp(), default_id, null_yield); if (ret == -ENOENT) { cout << "No default realm is set" << std::endl; return -ret; @@ -4229,12 +4229,12 @@ int main(int argc, const char **argv) { RGWRealm realm(g_ceph_context, store->svc()->sysobj); string default_id; - int ret = realm.read_default_id(default_id, null_yield); + int ret = realm.read_default_id(dpp(), default_id, null_yield); if (ret < 0 && ret != -ENOENT) { cerr << "could not determine default realm: " << cpp_strerror(-ret) << std::endl; } list<string> realms; - ret = store->svc()->zone->list_realms(realms); + ret = store->svc()->zone->list_realms(dpp(), realms); if (ret < 0) { cerr << "failed to list realms: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4253,7 +4253,7 @@ int main(int argc, const char **argv) return -ret; } list<string> periods; - ret = store->svc()->zone->list_periods(period_id, periods, null_yield); + ret = store->svc()->zone->list_periods(dpp(), period_id, periods, null_yield); if (ret < 0) { cerr << "list periods failed: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4277,12 +4277,12 @@ int main(int argc, const char **argv) cerr << "missing realm name or id" << std::endl; return EINVAL; } - int ret = realm.init(g_ceph_context, store->svc()->sysobj, null_yield); + int ret = realm.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "realm.init failed: " << cpp_strerror(-ret) << std::endl; return -ret; } - ret = realm.rename(realm_new_name, null_yield); + ret = realm.rename(dpp(), realm_new_name, null_yield); if (ret < 0) { cerr << "realm.rename failed: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4300,7 +4300,7 @@ int main(int argc, const char **argv) } RGWRealm realm(realm_id, realm_name); bool new_realm = false; - int ret = realm.init(g_ceph_context, store->svc()->sysobj, null_yield); + int ret = realm.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0 && ret != -ENOENT) { cerr << "failed to init realm: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4320,13 +4320,13 @@ int main(int argc, const char **argv) if (new_realm) { cout << "clearing period and epoch for new realm" << std::endl; realm.clear_current_period_and_epoch(); - ret = realm.create(null_yield); + ret = realm.create(dpp(), null_yield); if (ret < 0) { cerr << "ERROR: couldn't create new realm: " << cpp_strerror(-ret) << std::endl; return 1; } } else { - ret = realm.update(null_yield); + ret = realm.update(dpp(), null_yield); if (ret < 0) { cerr << "ERROR: couldn't store realm info: " << cpp_strerror(-ret) << std::endl; return 1; @@ -4334,7 +4334,7 @@ int main(int argc, const char **argv) } if (set_default) { - ret = realm.set_as_default(null_yield); + ret = realm.set_as_default(dpp(), null_yield); if (ret < 0) { cerr << "failed to set realm " << realm_name << " as default: " << cpp_strerror(-ret) << std::endl; } @@ -4347,12 +4347,12 @@ int main(int argc, const char **argv) case OPT::REALM_DEFAULT: { RGWRealm realm(realm_id, realm_name); - int ret =
realm.init(g_ceph_context, store->svc()->sysobj, null_yield); + int ret = realm.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init realm: " << cpp_strerror(-ret) << std::endl; return -ret; } - ret = realm.set_as_default(null_yield); + ret = realm.set_as_default(dpp(), null_yield); if (ret < 0) { cerr << "failed to set realm as default: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4389,7 +4389,7 @@ int main(int argc, const char **argv) return -ret; } RGWRealm realm; - realm.init(g_ceph_context, store->svc()->sysobj, null_yield, false); + realm.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield, false); try { decode_json_obj(realm, &p); } catch (const JSONDecoder::err& e) { @@ -4408,13 +4408,13 @@ int main(int argc, const char **argv) return -ret; } } - ret = realm.create(null_yield, false); + ret = realm.create(dpp(), null_yield, false); if (ret < 0 && ret != -EEXIST) { cerr << "Error storing realm " << realm.get_id() << ": " << cpp_strerror(ret) << std::endl; return -ret; } else if (ret ==-EEXIST) { - ret = realm.update(null_yield); + ret = realm.update(dpp(), null_yield); if (ret < 0) { cerr << "Error storing realm " << realm.get_id() << ": " << cpp_strerror(ret) << std::endl; @@ -4422,7 +4422,7 @@ int main(int argc, const char **argv) } if (set_default) { - ret = realm.set_as_default(null_yield); + ret = realm.set_as_default(dpp(), null_yield); if (ret < 0) { cerr << "failed to set realm " << realm_name << " as default: " << cpp_strerror(-ret) << std::endl; } @@ -4441,21 +4441,21 @@ int main(int argc, const char **argv) } RGWZoneGroup zonegroup(zonegroup_id,zonegroup_name); - int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj, null_yield); + int ret = zonegroup.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to initialize zonegroup " << zonegroup_name << " id " << zonegroup_id << " :" << cpp_strerror(-ret) << std::endl; return -ret; } RGWZoneParams zone(zone_id, zone_name); - ret = zone.init(g_ceph_context, store->svc()->sysobj, null_yield); + ret = zone.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl; return -ret; } if (zone.realm_id != zonegroup.realm_id) { zone.realm_id = zonegroup.realm_id; - ret = zone.update(null_yield); + ret = zone.update(dpp(), null_yield); if (ret < 0) { cerr << "failed to save zone info: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4475,7 +4475,7 @@ int main(int argc, const char **argv) bool *psync_from_all = (sync_from_all_specified ? &sync_from_all : nullptr); string *predirect_zone = (redirect_zone_set ? &redirect_zone : nullptr); - ret = zonegroup.add_zone(zone, + ret = zonegroup.add_zone(dpp(), zone, (is_master_set ? &is_master : NULL), (is_read_only_set ? &read_only : NULL), endpoints, ptier_type, @@ -4500,7 +4500,7 @@ int main(int argc, const char **argv) return EINVAL; } RGWRealm realm(realm_id, realm_name); - int ret = realm.init(g_ceph_context, store->svc()->sysobj, null_yield); + int ret = realm.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init realm: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4508,14 +4508,14 @@ int main(int argc, const char **argv) RGWZoneGroup zonegroup(zonegroup_name, is_master, g_ceph_context, store->svc()->sysobj, realm.get_id(), endpoints); zonegroup.api_name = (api_name.empty() ? 
zonegroup_name : api_name); - ret = zonegroup.create(null_yield); + ret = zonegroup.create(dpp(), null_yield); if (ret < 0) { cerr << "failed to create zonegroup " << zonegroup_name << ": " << cpp_strerror(-ret) << std::endl; return -ret; } if (set_default) { - ret = zonegroup.set_as_default(null_yield); + ret = zonegroup.set_as_default(dpp(), null_yield); if (ret < 0) { cerr << "failed to set zonegroup " << zonegroup_name << " as default: " << cpp_strerror(-ret) << std::endl; } @@ -4533,13 +4533,13 @@ int main(int argc, const char **argv) } RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name); - int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj, null_yield); + int ret = zonegroup.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl; return -ret; } - ret = zonegroup.set_as_default(null_yield); + ret = zonegroup.set_as_default(dpp(), null_yield); if (ret < 0) { cerr << "failed to set zonegroup as default: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4553,13 +4553,13 @@ int main(int argc, const char **argv) return EINVAL; } RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name); - int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj, + int ret = zonegroup.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl; return -ret; } - ret = zonegroup.delete_obj(null_yield); + ret = zonegroup.delete_obj(dpp(), null_yield); if (ret < 0) { cerr << "ERROR: couldn't delete zonegroup: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4569,7 +4569,7 @@ int main(int argc, const char **argv) case OPT::ZONEGROUP_GET: { RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name); - int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj, null_yield); + int ret = zonegroup.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4582,7 +4582,7 @@ int main(int argc, const char **argv) case OPT::ZONEGROUP_LIST: { RGWZoneGroup zonegroup; - int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj, + int ret = zonegroup.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield, false); if (ret < 0) { cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl; @@ -4590,13 +4590,13 @@ int main(int argc, const char **argv) } list zonegroups; - ret = store->svc()->zone->list_zonegroups(zonegroups); + ret = store->svc()->zone->list_zonegroups(dpp(), zonegroups); if (ret < 0) { cerr << "failed to list zonegroups: " << cpp_strerror(-ret) << std::endl; return -ret; } string default_zonegroup; - ret = zonegroup.read_default_id(default_zonegroup, null_yield); + ret = zonegroup.read_default_id(dpp(), default_zonegroup, null_yield); if (ret < 0 && ret != -ENOENT) { cerr << "could not determine default zonegroup: " << cpp_strerror(-ret) << std::endl; } @@ -4610,7 +4610,7 @@ int main(int argc, const char **argv) case OPT::ZONEGROUP_MODIFY: { RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name); - int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj, null_yield); + int ret = zonegroup.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4624,7 +4624,7 @@ int main(int argc, const char **argv) } if (is_master_set) { - zonegroup.update_master(is_master, 
null_yield); + zonegroup.update_master(dpp(), is_master, null_yield); need_update = true; } @@ -4644,7 +4644,7 @@ int main(int argc, const char **argv) } else if (!realm_name.empty()) { // get realm id from name RGWRealm realm{g_ceph_context, store->svc()->sysobj}; - ret = realm.read_id(realm_name, zonegroup.realm_id, null_yield); + ret = realm.read_id(dpp(), realm_name, zonegroup.realm_id, null_yield); if (ret < 0) { cerr << "failed to find realm by name " << realm_name << std::endl; return -ret; @@ -4660,7 +4660,7 @@ int main(int argc, const char **argv) } if (need_update) { - ret = zonegroup.update(null_yield); + ret = zonegroup.update(dpp(), null_yield); if (ret < 0) { cerr << "failed to update zonegroup: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4668,7 +4668,7 @@ int main(int argc, const char **argv) } if (set_default) { - ret = zonegroup.set_as_default(null_yield); + ret = zonegroup.set_as_default(dpp(), null_yield); if (ret < 0) { cerr << "failed to set zonegroup " << zonegroup_name << " as default: " << cpp_strerror(-ret) << std::endl; } @@ -4681,7 +4681,7 @@ int main(int argc, const char **argv) case OPT::ZONEGROUP_SET: { RGWRealm realm(realm_id, realm_name); - int ret = realm.init(g_ceph_context, store->svc()->sysobj, null_yield); + int ret = realm.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); bool default_realm_not_exist = (ret == -ENOENT && realm_id.empty() && realm_name.empty()); if (ret < 0 && !default_realm_not_exist ) { @@ -4690,7 +4690,7 @@ int main(int argc, const char **argv) } RGWZoneGroup zonegroup; - ret = zonegroup.init(g_ceph_context, store->svc()->sysobj, + ret = zonegroup.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield, false); if (ret < 0) { cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl; @@ -4703,12 +4703,12 @@ int main(int argc, const char **argv) if (zonegroup.realm_id.empty() && !default_realm_not_exist) { zonegroup.realm_id = realm.get_id(); } - ret = zonegroup.create(null_yield); + ret = zonegroup.create(dpp(), null_yield); if (ret < 0 && ret != -EEXIST) { cerr << "ERROR: couldn't create zonegroup info: " << cpp_strerror(-ret) << std::endl; return 1; } else if (ret == -EEXIST) { - ret = zonegroup.update(null_yield); + ret = zonegroup.update(dpp(), null_yield); if (ret < 0) { cerr << "ERROR: couldn't store zonegroup info: " << cpp_strerror(-ret) << std::endl; return 1; @@ -4716,7 +4716,7 @@ int main(int argc, const char **argv) } if (set_default) { - ret = zonegroup.set_as_default(null_yield); + ret = zonegroup.set_as_default(dpp(), null_yield); if (ret < 0) { cerr << "failed to set zonegroup " << zonegroup_name << " as default: " << cpp_strerror(-ret) << std::endl; } @@ -4729,7 +4729,7 @@ int main(int argc, const char **argv) case OPT::ZONEGROUP_REMOVE: { RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name); - int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj, null_yield); + int ret = zonegroup.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4754,7 +4754,7 @@ int main(int argc, const char **argv) } } - ret = zonegroup.remove_zone(zone_id, null_yield); + ret = zonegroup.remove_zone(dpp(), zone_id, null_yield); if (ret < 0) { cerr << "failed to remove zone: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4775,12 +4775,12 @@ int main(int argc, const char **argv) return EINVAL; } RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name); - int ret = 
zonegroup.init(g_ceph_context, store->svc()->sysobj, null_yield); + int ret = zonegroup.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl; return -ret; } - ret = zonegroup.rename(zonegroup_new_name, null_yield); + ret = zonegroup.rename(dpp(), zonegroup_new_name, null_yield); if (ret < 0) { cerr << "failed to rename zonegroup: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4790,7 +4790,7 @@ int main(int argc, const char **argv) case OPT::ZONEGROUP_PLACEMENT_LIST: { RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name); - int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj, + int ret = zonegroup.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl; @@ -4809,7 +4809,7 @@ int main(int argc, const char **argv) } RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name); - int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj, null_yield); + int ret = zonegroup.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4846,7 +4846,7 @@ int main(int argc, const char **argv) } RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name); - int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj, null_yield); + int ret = zonegroup.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4889,8 +4889,8 @@ int main(int argc, const char **argv) zonegroup.default_placement = rule; } - zonegroup.post_process_params(null_yield); - ret = zonegroup.update(null_yield); + zonegroup.post_process_params(dpp(), null_yield); + ret = zonegroup.update(dpp(), null_yield); if (ret < 0) { cerr << "failed to update zonegroup: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4910,7 +4910,7 @@ int main(int argc, const char **argv) RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name); /* if the user didn't provide zonegroup info , create stand alone zone */ if (!zonegroup_id.empty() || !zonegroup_name.empty()) { - ret = zonegroup.init(g_ceph_context, store->svc()->sysobj, null_yield); + ret = zonegroup.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "unable to initialize zonegroup " << zonegroup_name << ": " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4921,7 +4921,7 @@ int main(int argc, const char **argv) } RGWZoneParams zone(zone_id, zone_name); - ret = zone.init(g_ceph_context, store->svc()->sysobj, null_yield, false); + ret = zone.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield, false); if (ret < 0) { cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4938,7 +4938,7 @@ int main(int argc, const char **argv) } } - ret = zone.create(null_yield); + ret = zone.create(dpp(), null_yield); if (ret < 0) { cerr << "failed to create zone " << zone_name << ": " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4948,7 +4948,7 @@ int main(int argc, const char **argv) string *ptier_type = (tier_type_specified ? &tier_type : nullptr); bool *psync_from_all = (sync_from_all_specified ? &sync_from_all : nullptr); string *predirect_zone = (redirect_zone_set ? &redirect_zone : nullptr); - ret = zonegroup.add_zone(zone, + ret = zonegroup.add_zone(dpp(), zone, (is_master_set ? 
&is_master : NULL), (is_read_only_set ? &read_only : NULL), endpoints, @@ -4966,7 +4966,7 @@ int main(int argc, const char **argv) } if (set_default) { - ret = zone.set_as_default(null_yield); + ret = zone.set_as_default(dpp(), null_yield); if (ret < 0) { cerr << "failed to set zone " << zone_name << " as default: " << cpp_strerror(-ret) << std::endl; } @@ -4979,7 +4979,7 @@ int main(int argc, const char **argv) case OPT::ZONE_DEFAULT: { RGWZoneGroup zonegroup(zonegroup_id,zonegroup_name); - int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj, null_yield); + int ret = zonegroup.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "WARNING: failed to initialize zonegroup " << zonegroup_name << std::endl; } @@ -4988,12 +4988,12 @@ int main(int argc, const char **argv) return EINVAL; } RGWZoneParams zone(zone_id, zone_name); - ret = zone.init(g_ceph_context, store->svc()->sysobj, null_yield); + ret = zone.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl; return -ret; } - ret = zone.set_as_default(null_yield); + ret = zone.set_as_default(dpp(), null_yield); if (ret < 0) { cerr << "failed to set zone as default: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -5007,14 +5007,14 @@ int main(int argc, const char **argv) return EINVAL; } RGWZoneParams zone(zone_id, zone_name); - int ret = zone.init(g_ceph_context, store->svc()->sysobj, null_yield); + int ret = zone.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl; return -ret; } list<string> zonegroups; - ret = store->svc()->zone->list_zonegroups(zonegroups); + ret = store->svc()->zone->list_zonegroups(dpp(), zonegroups); if (ret < 0) { cerr << "failed to list zonegroups: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -5022,19 +5022,19 @@ int main(int argc, const char **argv) for (list<string>::iterator iter = zonegroups.begin(); iter != zonegroups.end(); ++iter) { RGWZoneGroup zonegroup(string(), *iter); - int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj, null_yield); + int ret = zonegroup.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "WARNING: failed to initialize zonegroup " << zonegroup_name << std::endl; continue; } - ret = zonegroup.remove_zone(zone.get_id(), null_yield); + ret = zonegroup.remove_zone(dpp(), zone.get_id(), null_yield); if (ret < 0 && ret != -ENOENT) { cerr << "failed to remove zone " << zone_name << " from zonegroup " << zonegroup.get_name() << ": " << cpp_strerror(-ret) << std::endl; } } - ret = zone.delete_obj(null_yield); + ret = zone.delete_obj(dpp(), null_yield); if (ret < 0) { cerr << "failed to delete zone " << zone_name << ": " << cpp_strerror(-ret) << std::endl; return -ret; @@ -5044,7 +5044,7 @@ int main(int argc, const char **argv) case OPT::ZONE_GET: { RGWZoneParams zone(zone_id, zone_name); - int ret = zone.init(g_ceph_context, store->svc()->sysobj, null_yield); + int ret = zone.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -5056,13 +5056,13 @@ int main(int argc, const char **argv) case OPT::ZONE_SET: { RGWZoneParams zone(zone_name); - int ret = zone.init(g_ceph_context, store->svc()->sysobj, null_yield, + int ret = zone.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield, false); if (ret < 0)
{ return -ret; } - ret = zone.read(null_yield); + ret = zone.read(dpp(), null_yield); if (ret < 0 && ret != -ENOENT) { cerr << "zone.read() returned ret=" << ret << std::endl; return -ret; @@ -5077,7 +5077,7 @@ int main(int argc, const char **argv) if(zone.realm_id.empty()) { RGWRealm realm(realm_id, realm_name); - int ret = realm.init(g_ceph_context, store->svc()->sysobj, null_yield); + int ret = realm.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0 && ret != -ENOENT) { cerr << "failed to init realm: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -5115,19 +5115,19 @@ int main(int argc, const char **argv) } cerr << "zone id " << zone.get_id(); - ret = zone.fix_pool_names(null_yield); + ret = zone.fix_pool_names(dpp(), null_yield); if (ret < 0) { cerr << "ERROR: couldn't fix zone: " << cpp_strerror(-ret) << std::endl; return -ret; } - ret = zone.write(false, null_yield); + ret = zone.write(dpp(), false, null_yield); if (ret < 0) { cerr << "ERROR: couldn't create zone: " << cpp_strerror(-ret) << std::endl; return 1; } if (set_default) { - ret = zone.set_as_default(null_yield); + ret = zone.set_as_default(dpp(), null_yield); if (ret < 0) { cerr << "failed to set zone " << zone_name << " as default: " << cpp_strerror(-ret) << std::endl; } @@ -5140,20 +5140,20 @@ int main(int argc, const char **argv) case OPT::ZONE_LIST: { list zones; - int ret = store->svc()->zone->list_zones(zones); + int ret = store->svc()->zone->list_zones(dpp(), zones); if (ret < 0) { cerr << "failed to list zones: " << cpp_strerror(-ret) << std::endl; return -ret; } RGWZoneParams zone; - ret = zone.init(g_ceph_context, store->svc()->sysobj, null_yield, false); + ret = zone.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield, false); if (ret < 0) { cerr << "failed to init zone: " << cpp_strerror(-ret) << std::endl; return -ret; } string default_zone; - ret = zone.read_default_id(default_zone, null_yield); + ret = zone.read_default_id(dpp(), default_zone, null_yield); if (ret < 0 && ret != -ENOENT) { cerr << "could not determine default zone: " << cpp_strerror(-ret) << std::endl; } @@ -5167,7 +5167,7 @@ int main(int argc, const char **argv) case OPT::ZONE_MODIFY: { RGWZoneParams zone(zone_id, zone_name); - int ret = zone.init(g_ceph_context, store->svc()->sysobj, null_yield); + int ret = zone.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init zone: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -5190,7 +5190,7 @@ int main(int argc, const char **argv) } else if (!realm_name.empty()) { // get realm id from name RGWRealm realm{g_ceph_context, store->svc()->sysobj}; - ret = realm.read_id(realm_name, zone.realm_id, null_yield); + ret = realm.read_id(dpp(), realm_name, zone.realm_id, null_yield); if (ret < 0) { cerr << "failed to find realm by name " << realm_name << std::endl; return -ret; @@ -5217,7 +5217,7 @@ int main(int argc, const char **argv) } if (need_zone_update) { - ret = zone.update(null_yield); + ret = zone.update(dpp(), null_yield); if (ret < 0) { cerr << "failed to save zone info: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -5225,7 +5225,7 @@ int main(int argc, const char **argv) } RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name); - ret = zonegroup.init(g_ceph_context, store->svc()->sysobj, null_yield); + ret = zonegroup.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -5235,7 +5235,7 
@@ int main(int argc, const char **argv) bool *psync_from_all = (sync_from_all_specified ? &sync_from_all : nullptr); string *predirect_zone = (redirect_zone_set ? &redirect_zone : nullptr); - ret = zonegroup.add_zone(zone, + ret = zonegroup.add_zone(dpp(), zone, (is_master_set ? &is_master : NULL), (is_read_only_set ? &read_only : NULL), endpoints, ptier_type, @@ -5248,14 +5248,14 @@ int main(int argc, const char **argv) return -ret; } - ret = zonegroup.update(null_yield); + ret = zonegroup.update(dpp(), null_yield); if (ret < 0) { cerr << "failed to update zonegroup: " << cpp_strerror(-ret) << std::endl; return -ret; } if (set_default) { - ret = zone.set_as_default(null_yield); + ret = zone.set_as_default(dpp(), null_yield); if (ret < 0) { cerr << "failed to set zone " << zone_name << " as default: " << cpp_strerror(-ret) << std::endl; } @@ -5276,23 +5276,23 @@ int main(int argc, const char **argv) return EINVAL; } RGWZoneParams zone(zone_id,zone_name); - int ret = zone.init(g_ceph_context, store->svc()->sysobj, null_yield); + int ret = zone.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl; return -ret; } - ret = zone.rename(zone_new_name, null_yield); + ret = zone.rename(dpp(), zone_new_name, null_yield); if (ret < 0) { cerr << "failed to rename zone " << zone_name << " to " << zone_new_name << ": " << cpp_strerror(-ret) << std::endl; return -ret; } RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name); - ret = zonegroup.init(g_ceph_context, store->svc()->sysobj, null_yield); + ret = zonegroup.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "WARNING: failed to initialize zonegroup " << zonegroup_name << std::endl; } else { - ret = zonegroup.rename_zone(zone, null_yield); + ret = zonegroup.rename_zone(dpp(), zone, null_yield); if (ret < 0) { cerr << "Error in zonegroup rename for " << zone_name << ": " << cpp_strerror(-ret) << std::endl; return -ret; @@ -5316,7 +5316,7 @@ int main(int argc, const char **argv) } RGWZoneParams zone(zone_id, zone_name); - int ret = zone.init(g_ceph_context, store->svc()->sysobj, null_yield); + int ret = zone.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init zone: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -5325,7 +5325,7 @@ int main(int argc, const char **argv) if (opt_cmd == OPT::ZONE_PLACEMENT_ADD || opt_cmd == OPT::ZONE_PLACEMENT_MODIFY) { RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name); - ret = zonegroup.init(g_ceph_context, store->svc()->sysobj, null_yield); + ret = zonegroup.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -5399,7 +5399,7 @@ int main(int argc, const char **argv) } } - ret = zone.update(null_yield); + ret = zone.update(dpp(), null_yield); if (ret < 0) { cerr << "failed to save zone info: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -5412,7 +5412,7 @@ int main(int argc, const char **argv) case OPT::ZONE_PLACEMENT_LIST: { RGWZoneParams zone(zone_id, zone_name); - int ret = zone.init(g_ceph_context, store->svc()->sysobj, null_yield); + int ret = zone.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -5429,7 +5429,7 @@ int main(int argc, const char **argv) } RGWZoneParams zone(zone_id, 
zone_name); - int ret = zone.init(g_ceph_context, store->svc()->sysobj, null_yield); + int ret = zone.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -5574,7 +5574,7 @@ int main(int argc, const char **argv) RGWUser user; int ret = 0; if (!(user_id.empty() && access_key.empty()) || !subuser.empty()) { - ret = user.init(store, user_op, null_yield); + ret = user.init(dpp(), store, user_op, null_yield); if (ret < 0) { cerr << "user.init failed: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -5605,7 +5605,7 @@ int main(int argc, const char **argv) if (!user_op.has_existing_user()) { user_op.set_generate_key(); // generate a new key by default } - ret = user.add(user_op, null_yield, &err_msg); + ret = user.add(dpp(), user_op, null_yield, &err_msg); if (ret < 0) { cerr << "could not create user: " << err_msg << std::endl; if (ret == -ERR_INVALID_TENANT_NAME) @@ -5614,7 +5614,7 @@ int main(int argc, const char **argv) return -ret; } if (!subuser.empty()) { - ret = user.subusers.add(user_op, null_yield, &err_msg); + ret = user.subusers.add(dpp(), user_op, null_yield, &err_msg); if (ret < 0) { cerr << "could not create subuser: " << err_msg << std::endl; return -ret; @@ -5622,7 +5622,7 @@ int main(int argc, const char **argv) } break; case OPT::USER_RM: - ret = user.remove(user_op, null_yield, &err_msg); + ret = user.remove(dpp(), user_op, null_yield, &err_msg); if (ret < 0) { cerr << "could not remove user: " << err_msg << std::endl; return -ret; @@ -5634,7 +5634,7 @@ int main(int argc, const char **argv) if (yes_i_really_mean_it) { user_op.set_overwrite_new_user(true); } - ret = user.rename(user_op, null_yield, &err_msg); + ret = user.rename(user_op, null_yield, dpp(), &err_msg); if (ret < 0) { if (ret == -EEXIST) { err_msg += ". 
to overwrite this user, add --yes-i-really-mean-it"; @@ -5647,7 +5647,7 @@ int main(int argc, const char **argv) case OPT::USER_ENABLE: case OPT::USER_SUSPEND: case OPT::USER_MODIFY: - ret = user.modify(user_op, null_yield, &err_msg); + ret = user.modify(dpp(), user_op, null_yield, &err_msg); if (ret < 0) { cerr << "could not modify user: " << err_msg << std::endl; return -ret; @@ -5655,7 +5655,7 @@ int main(int argc, const char **argv) break; case OPT::SUBUSER_CREATE: - ret = user.subusers.add(user_op, null_yield, &err_msg); + ret = user.subusers.add(dpp(), user_op, null_yield, &err_msg); if (ret < 0) { cerr << "could not create subuser: " << err_msg << std::endl; return -ret; @@ -5663,7 +5663,7 @@ int main(int argc, const char **argv) break; case OPT::SUBUSER_MODIFY: - ret = user.subusers.modify(user_op, null_yield, &err_msg); + ret = user.subusers.modify(dpp(), user_op, null_yield, &err_msg); if (ret < 0) { cerr << "could not modify subuser: " << err_msg << std::endl; return -ret; @@ -5671,7 +5671,7 @@ int main(int argc, const char **argv) break; case OPT::SUBUSER_RM: - ret = user.subusers.remove(user_op, null_yield, &err_msg); + ret = user.subusers.remove(dpp(), user_op, null_yield, &err_msg); if (ret < 0) { cerr << "could not remove subuser: " << err_msg << std::endl; return -ret; @@ -5679,7 +5679,7 @@ int main(int argc, const char **argv) break; case OPT::CAPS_ADD: - ret = user.caps.add(user_op, null_yield, &err_msg); + ret = user.caps.add(dpp(), user_op, null_yield, &err_msg); if (ret < 0) { cerr << "could not add caps: " << err_msg << std::endl; return -ret; @@ -5687,7 +5687,7 @@ int main(int argc, const char **argv) break; case OPT::CAPS_RM: - ret = user.caps.remove(user_op, null_yield, &err_msg); + ret = user.caps.remove(dpp(), user_op, null_yield, &err_msg); if (ret < 0) { cerr << "could not remove caps: " << err_msg << std::endl; return -ret; @@ -5695,7 +5695,7 @@ int main(int argc, const char **argv) break; case OPT::KEY_CREATE: - ret = user.keys.add(user_op, null_yield, &err_msg); + ret = user.keys.add(dpp(), user_op, null_yield, &err_msg); if (ret < 0) { cerr << "could not create key: " << err_msg << std::endl; return -ret; @@ -5703,7 +5703,7 @@ int main(int argc, const char **argv) break; case OPT::KEY_RM: - ret = user.keys.remove(user_op, null_yield, &err_msg); + ret = user.keys.remove(dpp(), user_op, null_yield, &err_msg); if (ret < 0) { cerr << "could not remove key: " << err_msg << std::endl; return -ret; @@ -5728,7 +5728,7 @@ int main(int argc, const char **argv) // load the period RGWPeriod period(period_id); - int ret = period.init(g_ceph_context, store->svc()->sysobj, null_yield); + int ret = period.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "period init failed: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -5762,13 +5762,13 @@ int main(int argc, const char **argv) { // read realm and staging period RGWRealm realm(realm_id, realm_name); - int ret = realm.init(g_ceph_context, store->svc()->sysobj, null_yield); + int ret = realm.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { cerr << "Error initializing realm: " << cpp_strerror(-ret) << std::endl; return -ret; } RGWPeriod period(RGWPeriod::get_staging_id(realm.get_id()), 1); - ret = period.init(g_ceph_context, store->svc()->sysobj, realm.get_id(), null_yield); + ret = period.init(dpp(), g_ceph_context, store->svc()->sysobj, realm.get_id(), null_yield); if (ret < 0) { cerr << "period init failed: " << cpp_strerror(-ret) << std::endl; 
return -ret; @@ -5803,7 +5803,7 @@ int main(int argc, const char **argv) return -EINVAL; } RGWRole role(g_ceph_context, store->getRados()->pctl, role_name, path, assume_role_doc, tenant); - ret = role.create(true, null_yield); + ret = role.create(dpp(), true, null_yield); if (ret < 0) { return -ret; } @@ -5817,7 +5817,7 @@ int main(int argc, const char **argv) return -EINVAL; } RGWRole role(g_ceph_context, store->getRados()->pctl, role_name, tenant); - ret = role.delete_obj(null_yield); + ret = role.delete_obj(dpp(), null_yield); if (ret < 0) { return -ret; } @@ -5831,7 +5831,7 @@ int main(int argc, const char **argv) return -EINVAL; } RGWRole role(g_ceph_context, store->getRados()->pctl, role_name, tenant); - ret = role.get(null_yield); + ret = role.get(dpp(), null_yield); if (ret < 0) { return -ret; } @@ -5859,12 +5859,12 @@ int main(int argc, const char **argv) } RGWRole role(g_ceph_context, store->getRados()->pctl, role_name, tenant); - ret = role.get(null_yield); + ret = role.get(dpp(), null_yield); if (ret < 0) { return -ret; } role.update_trust_policy(assume_role_doc); - ret = role.update(null_yield); + ret = role.update(dpp(), null_yield); if (ret < 0) { return -ret; } @@ -5874,7 +5874,7 @@ int main(int argc, const char **argv) case OPT::ROLE_LIST: { vector result; - ret = RGWRole::get_roles_by_path_prefix(store->getRados(), g_ceph_context, path_prefix, tenant, result, null_yield); + ret = RGWRole::get_roles_by_path_prefix(dpp(), store->getRados(), g_ceph_context, path_prefix, tenant, result, null_yield); if (ret < 0) { return -ret; } @@ -5907,12 +5907,12 @@ int main(int argc, const char **argv) } RGWRole role(g_ceph_context, store->getRados()->pctl, role_name, tenant); - ret = role.get(null_yield); + ret = role.get(dpp(), null_yield); if (ret < 0) { return -ret; } role.set_perm_policy(policy_name, perm_policy_doc); - ret = role.update(null_yield); + ret = role.update(dpp(), null_yield); if (ret < 0) { return -ret; } @@ -5926,7 +5926,7 @@ int main(int argc, const char **argv) return -EINVAL; } RGWRole role(g_ceph_context, store->getRados()->pctl, role_name, tenant); - ret = role.get(null_yield); + ret = role.get(dpp(), null_yield); if (ret < 0) { return -ret; } @@ -5946,7 +5946,7 @@ int main(int argc, const char **argv) return -EINVAL; } RGWRole role(g_ceph_context, store->getRados()->pctl, role_name, tenant); - int ret = role.get(null_yield); + int ret = role.get(dpp(), null_yield); if (ret < 0) { return -ret; } @@ -5970,7 +5970,7 @@ int main(int argc, const char **argv) return -EINVAL; } RGWRole role(g_ceph_context, store->getRados()->pctl, role_name, tenant); - ret = role.get(null_yield); + ret = role.get(dpp(), null_yield); if (ret < 0) { return -ret; } @@ -5978,7 +5978,7 @@ int main(int argc, const char **argv) if (ret < 0) { return -ret; } - ret = role.update(null_yield); + ret = role.update(dpp(), null_yield); if (ret < 0) { return -ret; } @@ -6002,13 +6002,13 @@ int main(int argc, const char **argv) if (opt_cmd == OPT::POLICY) { if (format == "xml") { - int ret = RGWBucketAdminOp::dump_s3_policy(store, bucket_op, cout); + int ret = RGWBucketAdminOp::dump_s3_policy(store, bucket_op, cout, dpp()); if (ret < 0) { cerr << "ERROR: failed to get policy: " << cpp_strerror(-ret) << std::endl; return -ret; } } else { - int ret = RGWBucketAdminOp::get_policy(store, bucket_op, f); + int ret = RGWBucketAdminOp::get_policy(store, bucket_op, f, dpp()); if (ret < 0) { cerr << "ERROR: failed to get policy: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -6028,11 +6028,11 @@ int 
main(int argc, const char **argv) user_ids.push_back(user_id.id); ret = RGWBucketAdminOp::limit_check(store, bucket_op, user_ids, f, - null_yield, warnings_only); + null_yield, dpp(), warnings_only); } else { /* list users in groups of max-keys, then perform user-bucket * limit-check on each group */ - ret = store->ctl()->meta.mgr->list_keys_init(metadata_key, &handle); + ret = store->ctl()->meta.mgr->list_keys_init(dpp(), metadata_key, &handle); if (ret < 0) { cerr << "ERROR: buckets limit check can't get user metadata_key: " << cpp_strerror(-ret) << std::endl; @@ -6050,7 +6050,7 @@ int main(int argc, const char **argv) /* ok, do the limit checks for this group */ ret = RGWBucketAdminOp::limit_check(store, bucket_op, user_ids, f, - null_yield, warnings_only); + null_yield, dpp(), warnings_only); if (ret < 0) break; } @@ -6069,7 +6069,7 @@ int main(int argc, const char **argv) return -ENOENT; } } - RGWBucketAdminOp::info(store, bucket_op, f, null_yield); + RGWBucketAdminOp::info(store, bucket_op, f, null_yield, dpp()); } else { RGWBucketInfo bucket_info; int ret = init_bucket(tenant, bucket_name, bucket_id, bucket_info, bucket); @@ -6109,7 +6109,7 @@ int main(int argc, const char **argv) do { const int remaining = max_entries - count; - ret = list_op.list_objects(std::min(remaining, paginate_size), + ret = list_op.list_objects(dpp(), std::min(remaining, paginate_size), &result, &common_prefixes, &truncated, null_yield); if (ret < 0) { @@ -6138,9 +6138,9 @@ int main(int argc, const char **argv) } if (bucket_name.empty()) { - ret = lister.run(); + ret = lister.run(dpp()); } else { - ret = lister.run(bucket_name); + ret = lister.run(dpp(), bucket_name); } if (ret < 0) { @@ -6160,7 +6160,7 @@ int main(int argc, const char **argv) if (opt_cmd == OPT::BUCKET_STATS) { if (bucket_name.empty() && !bucket_id.empty()) { rgw_bucket bucket; - if (!rgw_find_bucket_by_id(store->ctx(), store->ctl()->meta.mgr, marker, bucket_id, &bucket)) { + if (!rgw_find_bucket_by_id(dpp(), store->ctx(), store->ctl()->meta.mgr, marker, bucket_id, &bucket)) { cerr << "failure: no such bucket id" << std::endl; return -ENOENT; } @@ -6169,7 +6169,7 @@ int main(int argc, const char **argv) } bucket_op.set_fetch_stats(true); - int r = RGWBucketAdminOp::info(store, bucket_op, f, null_yield); + int r = RGWBucketAdminOp::info(store, bucket_op, f, null_yield, dpp()); if (r < 0) { cerr << "failure: " << cpp_strerror(-r) << ": " << err << std::endl; return -r; @@ -6180,7 +6180,7 @@ int main(int argc, const char **argv) bucket_op.set_bucket_id(bucket_id); bucket_op.set_new_bucket_name(new_bucket_name); string err; - int r = RGWBucketAdminOp::link(store, bucket_op, &err); + int r = RGWBucketAdminOp::link(store, bucket_op, dpp(), &err); if (r < 0) { cerr << "failure: " << cpp_strerror(-r) << ": " << err << std::endl; return -r; @@ -6188,7 +6188,7 @@ int main(int argc, const char **argv) } if (opt_cmd == OPT::BUCKET_UNLINK) { - int r = RGWBucketAdminOp::unlink(store, bucket_op); + int r = RGWBucketAdminOp::unlink(store, bucket_op, dpp()); if (r < 0) { cerr << "failure: " << cpp_strerror(-r) << std::endl; return -r; @@ -6202,7 +6202,7 @@ int main(int argc, const char **argv) string err; string marker; - int r = RGWBucketAdminOp::chown(store, bucket_op, marker, &err); + int r = RGWBucketAdminOp::chown(store, bucket_op, marker, dpp(), &err); if (r < 0) { cerr << "failure: " << cpp_strerror(-r) << ": " << err << std::endl; return -r; @@ -6219,7 +6219,7 @@ int main(int argc, const char **argv) formatter->reset(); 
formatter->open_array_section("logs"); RGWAccessHandle h; - int r = store->getRados()->log_list_init(date, &h); + int r = store->getRados()->log_list_init(dpp(), date, &h); if (r == -ENOENT) { // no logs. } else { @@ -6264,7 +6264,7 @@ int main(int argc, const char **argv) if (opt_cmd == OPT::LOG_SHOW) { RGWAccessHandle h; - int r = store->getRados()->log_show_init(oid, &h); + int r = store->getRados()->log_show_init(dpp(), oid, &h); if (r < 0) { cerr << "error opening log " << oid << ": " << cpp_strerror(-r) << std::endl; return -r; @@ -6335,7 +6335,7 @@ next: cout << std::endl; } if (opt_cmd == OPT::LOG_RM) { - int r = store->getRados()->log_remove(oid); + int r = store->getRados()->log_remove(dpp(), oid); if (r < 0) { cerr << "error removing log " << oid << ": " << cpp_strerror(-r) << std::endl; return -r; @@ -6349,7 +6349,7 @@ next: exit(1); } - int ret = store->svc()->zone->add_bucket_placement(pool, null_yield); + int ret = store->svc()->zone->add_bucket_placement(dpp(), pool, null_yield); if (ret < 0) cerr << "failed to add bucket placement: " << cpp_strerror(-ret) << std::endl; } @@ -6360,14 +6360,14 @@ next: exit(1); } - int ret = store->svc()->zone->remove_bucket_placement(pool, null_yield); + int ret = store->svc()->zone->remove_bucket_placement(dpp(), pool, null_yield); if (ret < 0) cerr << "failed to remove bucket placement: " << cpp_strerror(-ret) << std::endl; } if (opt_cmd == OPT::POOLS_LIST) { set pools; - int ret = store->svc()->zone->list_placement_set(pools, null_yield); + int ret = store->svc()->zone->list_placement_set(dpp(), pools, null_yield); if (ret < 0) { cerr << "could not list placement set: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -6406,7 +6406,7 @@ next: } - ret = RGWUsage::show(store->getRados(), user_id, bucket_name, start_epoch, end_epoch, + ret = RGWUsage::show(dpp(), store->getRados(), user_id, bucket_name, start_epoch, end_epoch, show_log_entries, show_log_sum, &categories, f); if (ret < 0) { @@ -6443,7 +6443,7 @@ next: } } - ret = RGWUsage::trim(store->getRados(), user_id, bucket_name, start_epoch, end_epoch); + ret = RGWUsage::trim(dpp(), store->getRados(), user_id, bucket_name, start_epoch, end_epoch); if (ret < 0) { cerr << "ERROR: read_usage() returned ret=" << ret << std::endl; return 1; @@ -6457,7 +6457,7 @@ next: return 1; } - ret = RGWUsage::clear(store->getRados()); + ret = RGWUsage::clear(dpp(), store->getRados()); if (ret < 0) { return ret; } @@ -6484,7 +6484,7 @@ next: } RGWOLHInfo olh; rgw_obj obj(bucket, object); - ret = store->getRados()->get_olh(bucket_info, obj, &olh); + ret = store->getRados()->get_olh(dpp(), bucket_info, obj, &olh); if (ret < 0) { cerr << "ERROR: failed reading olh: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -6508,12 +6508,12 @@ next: RGWObjState *state; - ret = store->getRados()->get_obj_state(&rctx, bucket_info, obj, &state, false, null_yield); /* don't follow olh */ + ret = store->getRados()->get_obj_state(dpp(), &rctx, bucket_info, obj, &state, false, null_yield); /* don't follow olh */ if (ret < 0) { return -ret; } - ret = store->getRados()->bucket_index_read_olh_log(bucket_info, *state, obj, 0, &log, &is_truncated); + ret = store->getRados()->bucket_index_read_olh_log(dpp(), bucket_info, *state, obj, 0, &log, &is_truncated); if (ret < 0) { cerr << "ERROR: failed reading olh: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -6547,7 +6547,7 @@ next: rgw_cls_bi_entry entry; - ret = store->getRados()->bi_get(bucket_info, obj, bi_index_type, &entry); + ret = 
store->getRados()->bi_get(dpp(), bucket_info, obj, bi_index_type, &entry); if (ret < 0) { cerr << "ERROR: bi_get(): " << cpp_strerror(-ret) << std::endl; return -ret; @@ -6578,7 +6578,7 @@ next: rgw_obj obj(bucket, key); - ret = store->getRados()->bi_put(bucket, obj, entry); + ret = store->getRados()->bi_put(dpp(), bucket, obj, entry); if (ret < 0) { cerr << "ERROR: bi_put(): " << cpp_strerror(-ret) << std::endl; return -ret; @@ -6612,7 +6612,7 @@ next: RGWRados::BucketShard bs(store->getRados()); int shard_id = (bucket_info.layout.current_index.layout.normal.num_shards > 0 ? i : -1); - int ret = bs.init(bucket, shard_id, bucket_info.layout.current_index, nullptr /* no RGWBucketInfo */); + int ret = bs.init(bucket, shard_id, bucket_info.layout.current_index, nullptr /* no RGWBucketInfo */, dpp()); marker.clear(); if (ret < 0) { @@ -6676,7 +6676,7 @@ next: for (int i = 0; i < max_shards; i++) { RGWRados::BucketShard bs(store->getRados()); int shard_id = (bucket_info.layout.current_index.layout.normal.num_shards > 0 ? i : -1); - int ret = bs.init(bucket, shard_id, bucket_info.layout.current_index, nullptr /* no RGWBucketInfo */); + int ret = bs.init(bucket, shard_id, bucket_info.layout.current_index, nullptr /* no RGWBucketInfo */, dpp()); if (ret < 0) { cerr << "ERROR: bs.init(bucket=" << bucket << ", shard=" << shard_id << "): " << cpp_strerror(-ret) << std::endl; return -ret; @@ -6706,7 +6706,7 @@ next: RGWDataAccess::BucketRef b; RGWDataAccess::ObjectRef obj; - int ret = data_access.get_bucket(tenant, bucket_name, bucket_id, &b, null_yield); + int ret = data_access.get_bucket(dpp(), tenant, bucket_name, bucket_id, &b, null_yield); if (ret < 0) { cerr << "ERROR: failed to init bucket: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -6739,7 +6739,7 @@ next: return -ret; } rgw_obj_key key(object, object_version); - ret = rgw_remove_object(store, bucket_info, bucket, key); + ret = rgw_remove_object(dpp(), store, bucket_info, bucket, key); if (ret < 0) { cerr << "ERROR: object remove returned: " << cpp_strerror(-ret) << std::endl; @@ -6771,7 +6771,7 @@ next: if (min_rewrite_stripe_size > 0) { ret = check_min_obj_stripe_size(store, bucket_info, &obj, min_rewrite_stripe_size, &need_rewrite); if (ret < 0) { - ldout(store->ctx(), 0) << "WARNING: check_min_obj_stripe_size failed, r=" << ret << dendl; + ldpp_dout(dpp(), 0) << "WARNING: check_min_obj_stripe_size failed, r=" << ret << dendl; } } if (need_rewrite) { @@ -6781,19 +6781,19 @@ next: return -ret; } } else { - ldout(store->ctx(), 20) << "skipped object" << dendl; + ldpp_dout(dpp(), 20) << "skipped object" << dendl; } } if (opt_cmd == OPT::OBJECTS_EXPIRE) { - if (!store->getRados()->process_expire_objects()) { + if (!store->getRados()->process_expire_objects(dpp())) { cerr << "ERROR: process_expire_objects() processing returned error." 
<< std::endl; return 1; } } if (opt_cmd == OPT::OBJECTS_EXPIRE_STALE_LIST) { - ret = RGWBucketAdminOp::fix_obj_expiry(store, bucket_op, f, true); + ret = RGWBucketAdminOp::fix_obj_expiry(store, bucket_op, f, dpp(), true); if (ret < 0) { cerr << "ERROR: listing returned " << cpp_strerror(-ret) << std::endl; return -ret; @@ -6801,7 +6801,7 @@ next: } if (opt_cmd == OPT::OBJECTS_EXPIRE_STALE_RM) { - ret = RGWBucketAdminOp::fix_obj_expiry(store, bucket_op, f, false); + ret = RGWBucketAdminOp::fix_obj_expiry(store, bucket_op, f, dpp(), false); if (ret < 0) { cerr << "ERROR: removing returned " << cpp_strerror(-ret) << std::endl; return -ret; @@ -6857,7 +6857,7 @@ next: result.reserve(NUM_ENTRIES); int r = store->getRados()->cls_bucket_list_ordered( - bucket_info, RGW_NO_SHARD, + dpp(), bucket_info, RGW_NO_SHARD, marker, empty_prefix, empty_delimiter, NUM_ENTRIES, true, expansion_factor, result, &is_truncated, &cls_filtered, &marker, @@ -6900,7 +6900,7 @@ next: if (min_rewrite_stripe_size > 0) { r = check_min_obj_stripe_size(store, bucket_info, &obj, min_rewrite_stripe_size, &need_rewrite); if (r < 0) { - ldout(store->ctx(), 0) << "WARNING: check_min_obj_stripe_size failed, r=" << r << dendl; + ldpp_dout(dpp(), 0) << "WARNING: check_min_obj_stripe_size failed, r=" << r << dendl; } } if (!need_rewrite) { @@ -6951,7 +6951,7 @@ next: max_entries = DEFAULT_RESHARD_MAX_ENTRIES; } - return br.execute(num_shards, max_entries, + return br.execute(num_shards, max_entries, dpp(), verbose, &cout, formatter.get()); } @@ -6976,7 +6976,7 @@ next: int num_source_shards = (bucket_info.layout.current_index.layout.normal.num_shards > 0 ? bucket_info.layout.current_index.layout.normal.num_shards : 1); - RGWReshard reshard(store); + RGWReshard reshard(store, dpp()); cls_rgw_reshard_entry entry; entry.time = real_clock::now(); entry.tenant = tenant; @@ -6985,7 +6985,7 @@ next: entry.old_num_shards = num_source_shards; entry.new_num_shards = num_shards; - return reshard.add(entry); + return reshard.add(dpp(), entry); } if (opt_cmd == OPT::RESHARD_LIST) { @@ -6999,7 +6999,7 @@ next: int num_logshards = store->ctx()->_conf.get_val("rgw_reshard_num_logs"); - RGWReshard reshard(store); + RGWReshard reshard(store, dpp()); formatter->open_array_section("reshard"); for (int i = 0; i < num_logshards; i++) { @@ -7048,7 +7048,7 @@ next: RGWBucketReshard br(store, bucket_info, attrs, nullptr /* no callback */); list status; - int r = br.get_status(&status); + int r = br.get_status(dpp(), &status); if (r < 0) { cerr << "ERROR: could not get resharding status for bucket " << bucket_name << std::endl; @@ -7061,7 +7061,7 @@ next: if (opt_cmd == OPT::RESHARD_PROCESS) { RGWReshard reshard(store, true, &cout); - int ret = reshard.process_all_logshards(); + int ret = reshard.process_all_logshards(dpp()); if (ret < 0) { cerr << "ERROR: failed to process reshard logs, error=" << cpp_strerror(-ret) << std::endl; return -ret; @@ -7095,7 +7095,7 @@ next: // we did not encounter an error, so let's work with the bucket RGWBucketReshard br(store, bucket_info, attrs, nullptr /* no callback */); - int ret = br.cancel(); + int ret = br.cancel(dpp()); if (ret < 0) { if (ret == -EBUSY) { cerr << "There is ongoing resharding, please retry after " << @@ -7110,14 +7110,14 @@ next: } } - RGWReshard reshard(store); + RGWReshard reshard(store, dpp()); cls_rgw_reshard_entry entry; entry.tenant = tenant; entry.bucket_name = bucket_name; //entry.bucket_id = bucket_id; - ret = reshard.remove(entry); + ret = reshard.remove(dpp(), entry); if (ret < 0 && ret != 
-ENOENT) { cerr << "Error in updating reshard log with bucket " << bucket_name << ": " << cpp_strerror(-ret) << std::endl; @@ -7137,7 +7137,7 @@ next: rgw_obj_index_key index_key; key.get_index_key(&index_key); oid_list.push_back(index_key); - ret = store->getRados()->remove_objs_from_index(bucket_info, oid_list); + ret = store->getRados()->remove_objs_from_index(dpp(), bucket_info, oid_list); if (ret < 0) { cerr << "ERROR: remove_obj_from_index() returned error: " << cpp_strerror(-ret) << std::endl; return 1; @@ -7163,7 +7163,7 @@ next: read_op.params.attrs = &attrs; read_op.params.obj_size = &obj_size; - ret = read_op.prepare(null_yield); + ret = read_op.prepare(null_yield, dpp()); if (ret < 0) { cerr << "ERROR: failed to stat object, returned error: " << cpp_strerror(-ret) << std::endl; return 1; @@ -7212,20 +7212,20 @@ next: } do_check_object_locator(tenant, bucket_name, fix, remove_bad, formatter.get()); } else { - RGWBucketAdminOp::check_index(store, bucket_op, f, null_yield); + RGWBucketAdminOp::check_index(store, bucket_op, f, null_yield, dpp()); } } if (opt_cmd == OPT::BUCKET_RM) { if (!inconsistent_index) { - RGWBucketAdminOp::remove_bucket(store, bucket_op, null_yield, bypass_gc, true); + RGWBucketAdminOp::remove_bucket(store, bucket_op, null_yield, dpp(), bypass_gc, true); } else { if (!yes_i_really_mean_it) { cerr << "using --inconsistent_index can corrupt the bucket index " << std::endl << "do you really mean it? (requires --yes-i-really-mean-it)" << std::endl; return 1; } - RGWBucketAdminOp::remove_bucket(store, bucket_op, null_yield, bypass_gc, false); + RGWBucketAdminOp::remove_bucket(store, bucket_op, null_yield, dpp(), bypass_gc, false); } } @@ -7356,7 +7356,7 @@ next: if (opt_cmd == OPT::LC_RESHARD_FIX) { - ret = RGWBucketAdminOp::fix_lc_shards(store, bucket_op,f); + ret = RGWBucketAdminOp::fix_lc_shards(store, bucket_op, f, dpp()); if (ret < 0) { cerr << "ERROR: listing stale instances" << cpp_strerror(-ret) << std::endl; } @@ -7392,12 +7392,12 @@ next: info.job_name = job_id; info.num_shards = num_shards; - int ret = search.init(job_id, &info, detail); + int ret = search.init(dpp(), job_id, &info, detail); if (ret < 0) { cerr << "could not init search, ret=" << ret << std::endl; return -ret; } - ret = search.run(); + ret = search.run(dpp()); if (ret < 0) { return -ret; } @@ -7421,7 +7421,7 @@ next: cerr << "ERROR: --job-id not specified" << std::endl; return EINVAL; } - int ret = search.init(job_id, NULL); + int ret = search.init(dpp(), job_id, NULL); if (ret < 0) { if (ret == -ENOENT) { cerr << "job not found" << std::endl; @@ -7446,7 +7446,7 @@ next: } RGWOrphanStore orphan_store(store); - int ret = orphan_store.init(); + int ret = orphan_store.init(dpp()); if (ret < 0){ cerr << "connection to cluster failed!" 
<< std::endl; return -ret; @@ -7471,7 +7471,7 @@ next: } if (opt_cmd == OPT::USER_CHECK) { - check_bad_user_bucket_mapping(store, user_id, fix, null_yield); + check_bad_user_bucket_mapping(store, user_id, fix, null_yield, dpp()); } if (opt_cmd == OPT::USER_STATS) { @@ -7491,7 +7491,7 @@ next: "so at most one of the two should be specified" << std::endl; return EINVAL; } - ret = store->ctl()->user->reset_stats(user_id, null_yield); + ret = store->ctl()->user->reset_stats(dpp(), user_id, null_yield); if (ret < 0) { cerr << "ERROR: could not reset user stats: " << cpp_strerror(-ret) << std::endl; @@ -7507,14 +7507,14 @@ next: cerr << "ERROR: could not init bucket: " << cpp_strerror(-ret) << std::endl; return -ret; } - ret = store->ctl()->bucket->sync_user_stats(user_id, bucket_info, null_yield); + ret = store->ctl()->bucket->sync_user_stats(dpp(), user_id, bucket_info, null_yield); if (ret < 0) { cerr << "ERROR: could not sync bucket stats: " << cpp_strerror(-ret) << std::endl; return -ret; } } else { - int ret = rgw_user_sync_all_stats(store, user_id, null_yield); + int ret = rgw_user_sync_all_stats(dpp(), store, user_id, null_yield); if (ret < 0) { cerr << "ERROR: could not sync user stats: " << cpp_strerror(-ret) << std::endl; @@ -7526,7 +7526,7 @@ next: RGWStorageStats stats; ceph::real_time last_stats_sync; ceph::real_time last_stats_update; - int ret = store->ctl()->user->read_stats(user_id, &stats, null_yield, + int ret = store->ctl()->user->read_stats(dpp(), user_id, &stats, null_yield, &last_stats_sync, &last_stats_update); if (ret < 0) { @@ -7551,7 +7551,7 @@ next: } if (opt_cmd == OPT::METADATA_GET) { - int ret = store->ctl()->meta.mgr->get(metadata_key, formatter.get(), null_yield); + int ret = store->ctl()->meta.mgr->get(metadata_key, formatter.get(), null_yield, dpp()); if (ret < 0) { cerr << "ERROR: can't get key: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -7567,7 +7567,7 @@ next: cerr << "ERROR: failed to read input: " << cpp_strerror(-ret) << std::endl; return -ret; } - ret = store->ctl()->meta.mgr->put(metadata_key, bl, null_yield, RGWMDLogSyncType::APPLY_ALWAYS, false); + ret = store->ctl()->meta.mgr->put(metadata_key, bl, null_yield, dpp(), RGWMDLogSyncType::APPLY_ALWAYS, false); if (ret < 0) { cerr << "ERROR: can't put key: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -7575,7 +7575,7 @@ next: } if (opt_cmd == OPT::METADATA_RM) { - int ret = store->ctl()->meta.mgr->remove(metadata_key, null_yield); + int ret = store->ctl()->meta.mgr->remove(metadata_key, null_yield, dpp()); if (ret < 0) { cerr << "ERROR: can't remove key: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -7588,7 +7588,7 @@ next: } void *handle; int max = 1000; - int ret = store->ctl()->meta.mgr->list_keys_init(metadata_key, marker, &handle); + int ret = store->ctl()->meta.mgr->list_keys_init(dpp(), metadata_key, marker, &handle); if (ret < 0) { cerr << "ERROR: can't get key: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -7676,7 +7676,7 @@ next: meta_log->init_list_entries(i, {}, {}, marker, &handle); bool truncated; do { - int ret = meta_log->list_entries(handle, 1000, entries, NULL, &truncated); + int ret = meta_log->list_entries(dpp(), handle, 1000, entries, NULL, &truncated); if (ret < 0) { cerr << "ERROR: meta_log->list_entries(): " << cpp_strerror(-ret) << std::endl; return -ret; @@ -7717,7 +7717,7 @@ next: for (; i < g_ceph_context->_conf->rgw_md_log_max_shards; i++) { RGWMetadataLogInfo info; - meta_log->get_info(i, &info); + meta_log->get_info(dpp(), i, &info); 
::encode_json("info", info, formatter.get()); @@ -7732,7 +7732,7 @@ next: if (opt_cmd == OPT::MDLOG_AUTOTRIM) { // need a full history for purging old mdlog periods - store->svc()->mdlog->init_oldest_log_period(null_yield); + store->svc()->mdlog->init_oldest_log_period(null_yield, dpp()); RGWCoroutinesManager crs(store->ctx(), store->getRados()->get_cr_registry()); RGWHTTPManager http(store->ctx(), crs.get_completion_mgr()); @@ -7743,7 +7743,7 @@ next: } auto num_shards = g_conf()->rgw_md_log_max_shards; - ret = crs.run(create_admin_meta_log_trim_cr(dpp(), store, &http, num_shards)); + ret = crs.run(dpp(), create_admin_meta_log_trim_cr(dpp(), store, &http, num_shards)); if (ret < 0) { cerr << "automated mdlog trim failed with " << cpp_strerror(ret) << std::endl; return -ret; @@ -7785,7 +7785,7 @@ next: // trim until -ENODATA do { - ret = meta_log->trim(shard_id, {}, {}, {}, marker); + ret = meta_log->trim(dpp(), shard_id, {}, {}, {}, marker); } while (ret == 0); if (ret < 0 && ret != -ENODATA) { cerr << "ERROR: meta_log->trim(): " << cpp_strerror(-ret) << std::endl; @@ -7804,14 +7804,14 @@ next: if (opt_cmd == OPT::METADATA_SYNC_STATUS) { RGWMetaSyncStatusManager sync(store, store->svc()->rados->get_async_processor()); - int ret = sync.init(); + int ret = sync.init(dpp()); if (ret < 0) { cerr << "ERROR: sync.init() returned ret=" << ret << std::endl; return -ret; } rgw_meta_sync_status sync_status; - ret = sync.read_sync_status(&sync_status); + ret = sync.read_sync_status(dpp(), &sync_status); if (ret < 0) { cerr << "ERROR: sync.read_sync_status() returned ret=" << ret << std::endl; return -ret; @@ -7845,12 +7845,12 @@ next: if (opt_cmd == OPT::METADATA_SYNC_INIT) { RGWMetaSyncStatusManager sync(store, store->svc()->rados->get_async_processor()); - int ret = sync.init(); + int ret = sync.init(dpp()); if (ret < 0) { cerr << "ERROR: sync.init() returned ret=" << ret << std::endl; return -ret; } - ret = sync.init_sync_status(); + ret = sync.init_sync_status(dpp()); if (ret < 0) { cerr << "ERROR: sync.init_sync_status() returned ret=" << ret << std::endl; return -ret; @@ -7861,13 +7861,13 @@ next: if (opt_cmd == OPT::METADATA_SYNC_RUN) { RGWMetaSyncStatusManager sync(store, store->svc()->rados->get_async_processor()); - int ret = sync.init(); + int ret = sync.init(dpp()); if (ret < 0) { cerr << "ERROR: sync.init() returned ret=" << ret << std::endl; return -ret; } - ret = sync.run(null_yield); + ret = sync.run(dpp(), null_yield); if (ret < 0) { cerr << "ERROR: sync.run() returned ret=" << ret << std::endl; return -ret; @@ -7881,7 +7881,7 @@ next: } RGWDataSyncStatusManager sync(store, store->svc()->rados->get_async_processor(), source_zone, nullptr); - int ret = sync.init(); + int ret = sync.init(dpp()); if (ret < 0) { cerr << "ERROR: sync.init() returned ret=" << ret << std::endl; return -ret; @@ -7892,7 +7892,7 @@ next: set pending_buckets; set recovering_buckets; rgw_data_sync_marker sync_marker; - ret = sync.read_shard_status(shard_id, pending_buckets, recovering_buckets, &sync_marker, + ret = sync.read_shard_status(dpp(), shard_id, pending_buckets, recovering_buckets, &sync_marker, max_entries_specified ? 
max_entries : 20); if (ret < 0 && ret != -ENOENT) { cerr << "ERROR: sync.read_shard_status() returned ret=" << ret << std::endl; @@ -7906,7 +7906,7 @@ next: formatter->close_section(); formatter->flush(cout); } else { - ret = sync.read_sync_status(&sync_status); + ret = sync.read_sync_status(dpp(), &sync_status); if (ret < 0 && ret != -ENOENT) { cerr << "ERROR: sync.read_sync_status() returned ret=" << ret << std::endl; return -ret; @@ -7945,13 +7945,13 @@ next: RGWDataSyncStatusManager sync(store, store->svc()->rados->get_async_processor(), source_zone, nullptr); - int ret = sync.init(); + int ret = sync.init(dpp()); if (ret < 0) { cerr << "ERROR: sync.init() returned ret=" << ret << std::endl; return -ret; } - ret = sync.init_sync_status(); + ret = sync.init_sync_status(dpp()); if (ret < 0) { cerr << "ERROR: sync.init_sync_status() returned ret=" << ret << std::endl; return -ret; @@ -7968,19 +7968,19 @@ next: int ret = store->svc()->sync_modules->get_manager()->create_instance(g_ceph_context, store->svc()->zone->get_zone().tier_type, store->svc()->zone->get_zone_params().tier_config, &sync_module); if (ret < 0) { - lderr(cct) << "ERROR: failed to init sync module instance, ret=" << ret << dendl; + ldpp_dout(dpp(), -1) << "ERROR: failed to init sync module instance, ret=" << ret << dendl; return ret; } RGWDataSyncStatusManager sync(store, store->svc()->rados->get_async_processor(), source_zone, nullptr, sync_module); - ret = sync.init(); + ret = sync.init(dpp()); if (ret < 0) { cerr << "ERROR: sync.init() returned ret=" << ret << std::endl; return -ret; } - ret = sync.run(); + ret = sync.run(dpp()); if (ret < 0) { cerr << "ERROR: sync.run() returned ret=" << ret << std::endl; return -ret; @@ -8014,12 +8014,12 @@ next: RGWBucketPipeSyncStatusManager sync(store, source_zone, opt_sb, bucket); - ret = sync.init(); + ret = sync.init(dpp()); if (ret < 0) { cerr << "ERROR: sync.init() returned ret=" << ret << std::endl; return -ret; } - ret = sync.init_sync_status(); + ret = sync.init_sync_status(dpp()); if (ret < 0) { cerr << "ERROR: sync.init_sync_status() returned ret=" << ret << std::endl; return -ret; @@ -8042,13 +8042,13 @@ next: return -ret; } - if (!store->ctl()->bucket->bucket_imports_data(bucket_info.bucket, null_yield)) { + if (!store->ctl()->bucket->bucket_imports_data(bucket_info.bucket, null_yield, dpp())) { std::cout << "Sync is disabled for bucket " << bucket_name << std::endl; return 0; } RGWBucketSyncPolicyHandlerRef handler; - ret = store->ctl()->bucket->get_sync_policy_handler(std::nullopt, bucket, &handler, null_yield); + ret = store->ctl()->bucket->get_sync_policy_handler(std::nullopt, bucket, &handler, null_yield, dpp()); if (ret < 0) { std::cerr << "ERROR: failed to get policy handler for bucket (" << bucket_info.bucket << "): r=" << ret << ": " << cpp_strerror(-ret) << std::endl; @@ -8060,7 +8060,7 @@ next: opt_source_zone, opt_source_bucket, opt_retry_delay_ms, timeout_at); if (ret < 0) { - lderr(store->ctx()) << "bucket sync checkpoint failed: " << cpp_strerror(ret) << dendl; + ldpp_dout(dpp(), -1) << "bucket sync checkpoint failed: " << cpp_strerror(ret) << dendl; return -ret; } } @@ -8077,7 +8077,7 @@ next: } bucket_op.set_tenant(tenant); string err_msg; - ret = RGWBucketAdminOp::sync_bucket(store, bucket_op, &err_msg); + ret = RGWBucketAdminOp::sync_bucket(store, bucket_op, dpp(), &err_msg); if (ret < 0) { cerr << err_msg << std::endl; return -ret; @@ -8128,12 +8128,12 @@ next: } RGWBucketPipeSyncStatusManager sync(store, source_zone, opt_source_bucket, bucket); - 
ret = sync.init(); + ret = sync.init(dpp()); if (ret < 0) { cerr << "ERROR: sync.init() returned ret=" << ret << std::endl; return -ret; } - ret = sync.read_sync_status(); + ret = sync.read_sync_status(dpp()); if (ret < 0) { cerr << "ERROR: sync.read_sync_status() returned ret=" << ret << std::endl; return -ret; @@ -8161,13 +8161,13 @@ next: } RGWBucketPipeSyncStatusManager sync(store, source_zone, opt_source_bucket, bucket); - ret = sync.init(); + ret = sync.init(dpp()); if (ret < 0) { cerr << "ERROR: sync.init() returned ret=" << ret << std::endl; return -ret; } - ret = sync.run(); + ret = sync.run(dpp()); if (ret < 0) { cerr << "ERROR: sync.run() returned ret=" << ret << std::endl; return -ret; @@ -8193,7 +8193,7 @@ next: do { list entries; - ret = store->svc()->bilog_rados->log_list(bucket_info, shard_id, marker, max_entries - count, entries, &truncated); + ret = store->svc()->bilog_rados->log_list(dpp(), bucket_info, shard_id, marker, max_entries - count, entries, &truncated); if (ret < 0) { cerr << "ERROR: list_bi_log_entries(): " << cpp_strerror(-ret) << std::endl; return -ret; @@ -8257,7 +8257,7 @@ next: do { list entries; - ret = store->svc()->cls->timelog.list(oid, {}, {}, max_entries - count, entries, marker, &marker, &truncated, + ret = store->svc()->cls->timelog.list(dpp(), oid, {}, {}, max_entries - count, entries, marker, &marker, &truncated, null_yield); if (ret == -ENOENT) { break; @@ -8676,7 +8676,7 @@ next: cerr << "ERROR: could not init bucket: " << cpp_strerror(-ret) << std::endl; return -ret; } - ret = store->svc()->bilog_rados->log_trim(bucket_info, shard_id, start_marker, end_marker); + ret = store->svc()->bilog_rados->log_trim(dpp(), bucket_info, shard_id, start_marker, end_marker); if (ret < 0) { cerr << "ERROR: trim_bi_log_entries(): " << cpp_strerror(-ret) << std::endl; return -ret; @@ -8695,7 +8695,7 @@ next: return -ret; } map markers; - ret = store->svc()->bilog_rados->get_log_status(bucket_info, shard_id, + ret = store->svc()->bilog_rados->get_log_status(dpp(), bucket_info, shard_id, &markers, null_yield); if (ret < 0) { cerr << "ERROR: get_bi_log_status(): " << cpp_strerror(-ret) << std::endl; @@ -8725,7 +8725,7 @@ next: cerr << "trim manager init failed with " << cpp_strerror(ret) << std::endl; return -ret; } - ret = crs.run(trim.create_admin_bucket_trim_cr(&http)); + ret = crs.run(dpp(), trim.create_admin_bucket_trim_cr(&http)); if (ret < 0) { cerr << "automated bilog trim failed with " << cpp_strerror(ret) << std::endl; return -ret; @@ -8765,14 +8765,14 @@ next: do { std::vector entries; if (specified_shard_id) { - ret = datalog_svc->list_entries(shard_id, max_entries - count, + ret = datalog_svc->list_entries(dpp(), shard_id, max_entries - count, entries, marker.empty() ? 
std::nullopt : std::make_optional(marker), &marker, &truncated); } else { - ret = datalog_svc->list_entries(max_entries - count, entries, + ret = datalog_svc->list_entries(dpp(), max_entries - count, entries, log_marker, &truncated); } if (ret < 0) { @@ -8804,7 +8804,7 @@ next: list entries; RGWDataChangesLogInfo info; - store->svc()->datalog_rados->get_info(i, &info); + store->svc()->datalog_rados->get_info(dpp(), i, &info); ::encode_json("info", info, formatter.get()); @@ -8827,7 +8827,7 @@ next: auto num_shards = g_conf()->rgw_data_log_num_shards; std::vector markers(num_shards); - ret = crs.run(create_admin_data_log_trim_cr(store, &http, num_shards, markers)); + ret = crs.run(dpp(), create_admin_data_log_trim_cr(dpp(), store, &http, num_shards, markers)); if (ret < 0) { cerr << "automated datalog trim failed with " << cpp_strerror(ret) << std::endl; return -ret; @@ -8864,7 +8864,7 @@ next: // loop until -ENODATA do { auto datalog = store->svc()->datalog_rados; - ret = datalog->trim_entries(shard_id, marker); + ret = datalog->trim_entries(dpp(), shard_id, marker); } while (ret == 0); if (ret < 0 && ret != -ENODATA) { @@ -8879,7 +8879,7 @@ next: return -EINVAL; } auto datalog = static_cast(store)->svc()->datalog_rados; - ret = datalog->change_format(*opt_log_type, null_yield); + ret = datalog->change_format(dpp(), *opt_log_type, null_yield); if (ret < 0) { cerr << "ERROR: change_format(): " << cpp_strerror(-ret) << std::endl; return -ret; @@ -8889,7 +8889,7 @@ next: if (opt_cmd == OPT::DATALOG_PRUNE) { auto datalog = static_cast(store)->svc()->datalog_rados; std::optional through; - ret = datalog->trim_generations(through); + ret = datalog->trim_generations(dpp(), through); if (ret < 0) { cerr << "ERROR: trim_generations(): " << cpp_strerror(-ret) << std::endl; @@ -8976,10 +8976,10 @@ next: int ret = store->ctl()->meta.mgr->mutate(RGWSI_MetaBackend_OTP::get_meta_key(user_id), mtime, &objv_tracker, - null_yield, + null_yield, dpp(), MDLOG_STATUS_WRITE, [&] { - return store->svc()->cls->mfa.create_mfa(user_id, config, &objv_tracker, mtime, null_yield); + return store->svc()->cls->mfa.create_mfa(dpp(), user_id, config, &objv_tracker, mtime, null_yield); }); if (ret < 0) { cerr << "MFA creation failed, error: " << cpp_strerror(-ret) << std::endl; @@ -8990,7 +8990,7 @@ next: user_info.mfa_ids.insert(totp_serial); user_op.set_mfa_ids(user_info.mfa_ids); string err; - ret = user.modify(user_op, null_yield, &err); + ret = user.modify(dpp(), user_op, null_yield, &err); if (ret < 0) { cerr << "ERROR: failed storing user info, error: " << err << std::endl; return -ret; @@ -9012,10 +9012,10 @@ next: int ret = store->ctl()->meta.mgr->mutate(RGWSI_MetaBackend_OTP::get_meta_key(user_id), mtime, &objv_tracker, - null_yield, + null_yield, dpp(), MDLOG_STATUS_WRITE, [&] { - return store->svc()->cls->mfa.remove_mfa(user_id, totp_serial, &objv_tracker, mtime, null_yield); + return store->svc()->cls->mfa.remove_mfa(dpp(), user_id, totp_serial, &objv_tracker, mtime, null_yield); }); if (ret < 0) { cerr << "MFA removal failed, error: " << cpp_strerror(-ret) << std::endl; @@ -9026,7 +9026,7 @@ next: user_info.mfa_ids.erase(totp_serial); user_op.set_mfa_ids(user_info.mfa_ids); string err; - ret = user.modify(user_op, null_yield, &err); + ret = user.modify(dpp(), user_op, null_yield, &err); if (ret < 0) { cerr << "ERROR: failed storing user info, error: " << err << std::endl; return -ret; @@ -9045,7 +9045,7 @@ next: } rados::cls::otp::otp_info_t result; - int ret = store->svc()->cls->mfa.get_mfa(user_id, 
totp_serial, &result, null_yield); + int ret = store->svc()->cls->mfa.get_mfa(dpp(), user_id, totp_serial, &result, null_yield); if (ret < 0) { if (ret == -ENOENT || ret == -ENODATA) { cerr << "MFA serial id not found" << std::endl; @@ -9067,7 +9067,7 @@ next: } list result; - int ret = store->svc()->cls->mfa.list_mfa(user_id, &result, null_yield); + int ret = store->svc()->cls->mfa.list_mfa(dpp(), user_id, &result, null_yield); if (ret < 0) { cerr << "MFA listing failed, error: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -9095,7 +9095,7 @@ next: } list result; - int ret = store->svc()->cls->mfa.check_mfa(user_id, totp_serial, totp_pin.front(), null_yield); + int ret = store->svc()->cls->mfa.check_mfa(dpp(), user_id, totp_serial, totp_pin.front(), null_yield); if (ret < 0) { cerr << "MFA check failed, error: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -9120,7 +9120,7 @@ next: } rados::cls::otp::otp_info_t config; - int ret = store->svc()->cls->mfa.get_mfa(user_id, totp_serial, &config, null_yield); + int ret = store->svc()->cls->mfa.get_mfa(dpp(), user_id, totp_serial, &config, null_yield); if (ret < 0) { if (ret == -ENOENT || ret == -ENODATA) { cerr << "MFA serial id not found" << std::endl; @@ -9132,7 +9132,7 @@ next: ceph::real_time now; - ret = store->svc()->cls->mfa.otp_get_current_time(user_id, &now, null_yield); + ret = store->svc()->cls->mfa.otp_get_current_time(dpp(), user_id, &now, null_yield); if (ret < 0) { cerr << "ERROR: failed to fetch current time from osd: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -9156,10 +9156,10 @@ next: ret = store->ctl()->meta.mgr->mutate(RGWSI_MetaBackend_OTP::get_meta_key(user_id), mtime, &objv_tracker, - null_yield, + null_yield, dpp(), MDLOG_STATUS_WRITE, [&] { - return store->svc()->cls->mfa.create_mfa(user_id, config, &objv_tracker, mtime, null_yield); + return store->svc()->cls->mfa.create_mfa(dpp(), user_id, config, &objv_tracker, mtime, null_yield); }); if (ret < 0) { cerr << "MFA update failed, error: " << cpp_strerror(-ret) << std::endl; @@ -9176,7 +9176,7 @@ next: return EINVAL; } - ret = RGWBucketAdminOp::list_stale_instances(store, bucket_op,f); + ret = RGWBucketAdminOp::list_stale_instances(store, bucket_op, f, dpp()); if (ret < 0) { cerr << "ERROR: listing stale instances" << cpp_strerror(-ret) << std::endl; } @@ -9188,7 +9188,7 @@ next: return EINVAL; } - ret = RGWBucketAdminOp::clear_stale_instances(store, bucket_op,f); + ret = RGWBucketAdminOp::clear_stale_instances(store, bucket_op,f, dpp()); if (ret < 0) { cerr << "ERROR: deleting stale instances" << cpp_strerror(-ret) << std::endl; } @@ -9254,7 +9254,7 @@ next: RGWPubSub ps(store, tenant); - ret = ps.remove_topic(topic_name, null_yield); + ret = ps.remove_topic(dpp(), topic_name, null_yield); if (ret < 0) { cerr << "ERROR: could not remove topic: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -9298,7 +9298,7 @@ next: RGWPubSub ps(store, tenant); auto sub = ps.get_sub(sub_name); - ret = sub->unsubscribe(topic_name, null_yield); + ret = sub->unsubscribe(dpp(), topic_name, null_yield); if (ret < 0) { cerr << "ERROR: could not get subscription info: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -9321,7 +9321,7 @@ next: max_entries = RGWPubSub::Sub::DEFAULT_MAX_EVENTS; } auto sub = ps.get_sub_with_events(sub_name); - ret = sub->list_events(marker, max_entries); + ret = sub->list_events(dpp(), marker, max_entries); if (ret < 0) { cerr << "ERROR: could not list events: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -9347,7 +9347,7 @@ 
next:
RGWPubSub ps(store, tenant);
auto sub = ps.get_sub_with_events(sub_name);
- ret = sub->remove_event(event_id);
+ ret = sub->remove_event(dpp(), event_id);
if (ret < 0) {
cerr << "ERROR: could not remove event: " << cpp_strerror(-ret) << std::endl;
return -ret;
@@ -9380,7 +9380,7 @@ next:
cerr << "ERROR: invalid script context: " << *str_script_ctx << ". must be one of: preRequest, postRequest" << std::endl;
return EINVAL;
}
- rc = rgw::lua::write_script(store, tenant, null_yield, script_ctx, script);
+ rc = rgw::lua::write_script(dpp(), store, tenant, null_yield, script_ctx, script);
if (rc < 0) {
cerr << "ERROR: failed to put script. error: " << rc << std::endl;
return -rc;
@@ -9420,7 +9420,7 @@ next:
cerr << "ERROR: invalid script context: " << *str_script_ctx << ". must be one of: preRequest, postRequest" << std::endl;
return EINVAL;
}
- const auto rc = rgw::lua::delete_script(store, tenant, null_yield, script_ctx);
+ const auto rc = rgw::lua::delete_script(dpp(), store, tenant, null_yield, script_ctx);
if (rc < 0) {
cerr << "ERROR: failed to remove script. error: " << rc << std::endl;
return -rc;
@@ -9433,7 +9433,7 @@ next:
cerr << "ERROR: lua package name was not provided (via --package)" << std::endl;
return EINVAL;
}
- const auto rc = rgw::lua::add_package(store, null_yield, *script_package, bool(allow_compilation));
+ const auto rc = rgw::lua::add_package(dpp(), store, null_yield, *script_package, bool(allow_compilation));
if (rc < 0) {
cerr << "ERROR: failed to add lua package: " << script_package << " .error: " << rc << std::endl;
return -rc;
@@ -9450,7 +9450,7 @@ next:
cerr << "ERROR: lua package name was not provided (via --package)" << std::endl;
return EINVAL;
}
- const auto rc = rgw::lua::remove_package(store, null_yield, *script_package);
+ const auto rc = rgw::lua::remove_package(dpp(), store, null_yield, *script_package);
if (rc == -ENOENT) {
cerr << "WARNING: package " << script_package << " did not exists or already removed" << std::endl;
return 0;
@@ -9468,7 +9468,7 @@ next:
if (opt_cmd == OPT::SCRIPT_PACKAGE_LIST) {
#ifdef WITH_RADOSGW_LUA_PACKAGES
rgw::lua::packages_t packages;
- const auto rc = rgw::lua::list_packages(store, null_yield, packages);
+ const auto rc = rgw::lua::list_packages(dpp(), store, null_yield, packages);
if (rc == -ENOENT) {
std::cout << "no lua packages in allowlist" << std::endl;
} else if (rc < 0) {
diff --git a/src/rgw/rgw_auth.cc b/src/rgw/rgw_auth.cc
index c23eaee0e3429..9d0d3ea42b334 100644
--- a/src/rgw/rgw_auth.cc
+++ b/src/rgw/rgw_auth.cc
@@ -55,7 +55,7 @@ transform_old_authinfo(CephContext* const cct,
}
uint32_t get_perms_from_aclspec(const DoutPrefixProvider* dpp, const aclspec_t& aclspec) const override {
- return rgw_perms_from_aclspec_default_strategy(id, aclspec);
+ return rgw_perms_from_aclspec_default_strategy(id, aclspec, dpp);
}
bool is_admin_of(const rgw_user& acct_id) const override {
@@ -130,17 +130,18 @@ transform_old_authinfo(const req_state* const s)
uint32_t rgw_perms_from_aclspec_default_strategy(
const rgw_user& uid,
- const rgw::auth::Identity::aclspec_t& aclspec)
+ const rgw::auth::Identity::aclspec_t& aclspec,
+ const DoutPrefixProvider *dpp)
{
- dout(5) << "Searching permissions for uid=" << uid << dendl;
+ ldpp_dout(dpp, 5) << "Searching permissions for uid=" << uid << dendl;
const auto iter = aclspec.find(uid.to_str());
if (std::end(aclspec) != iter) {
- dout(5) << "Found permission: " << iter->second << dendl;
+ ldpp_dout(dpp, 5) << "Found permission: " << iter->second << dendl;
return iter->second;
}
- dout(5) << "Permissions for user not found" << dendl;
+ ldpp_dout(dpp, 5) << "Permissions for user not found" << dendl;
return 0;
}
@@ -376,7 +377,7 @@ void rgw::auth::WebIdentityApplier::create_account(const DoutPrefixProvider* dpp
rgw_apply_default_bucket_quota(user_info.bucket_quota, cct->_conf);
rgw_apply_default_user_quota(user_info.user_quota, cct->_conf);
- int ret = ctl->user->store_info(user_info, null_yield,
+ int ret = ctl->user->store_info(dpp, user_info, null_yield,
RGWUserCtl::PutParams().set_exclusive(true));
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to store new user info: user="
@@ -392,21 +393,21 @@ void rgw::auth::WebIdentityApplier::load_acct_info(const DoutPrefixProvider* dpp
federated_user.ns = "oidc";
//Check in oidc namespace
- if (ctl->user->get_info_by_uid(federated_user, &user_info, null_yield) >= 0) {
+ if (ctl->user->get_info_by_uid(dpp, federated_user, &user_info, null_yield) >= 0) {
/* Succeeded. */
return;
}
federated_user.ns.clear();
//Check for old users which wouldn't have been created in oidc namespace
- if (ctl->user->get_info_by_uid(federated_user, &user_info, null_yield) >= 0) {
+ if (ctl->user->get_info_by_uid(dpp, federated_user, &user_info, null_yield) >= 0) {
/* Succeeded. */
return;
}
//Check if user_id.buckets already exists, may have been from the time, when shadow users didnt exist
RGWStorageStats stats;
- int ret = ctl->user->read_stats(federated_user, &stats, null_yield);
+ int ret = ctl->user->read_stats(dpp, federated_user, &stats, null_yield);
if (ret < 0 && ret != -ENOENT) {
ldpp_dout(dpp, 0) << "ERROR: reading stats for the user returned error " << ret << dendl;
return;
@@ -464,7 +465,7 @@ uint32_t rgw::auth::RemoteApplier::get_perms_from_aclspec(const DoutPrefixProvid
/* For backward compatibility with ACLOwner. */
perm |= rgw_perms_from_aclspec_default_strategy(info.acct_user,
- aclspec);
+ aclspec, dpp);
/* We also need to cover cases where rgw_keystone_implicit_tenants
* was enabled. */
@@ -472,7 +473,7 @@ uint32_t rgw::auth::RemoteApplier::get_perms_from_aclspec(const DoutPrefixProvid
const rgw_user tenanted_acct_user(info.acct_user.id, info.acct_user.id);
perm |= rgw_perms_from_aclspec_default_strategy(tenanted_acct_user,
- aclspec);
+ aclspec, dpp);
}
/* Now it's a time for invoking additional strategy that was supplied by
@@ -599,7 +600,7 @@ void rgw::auth::RemoteApplier::create_account(const DoutPrefixProvider* dpp,
rgw_apply_default_bucket_quota(user_info.bucket_quota, cct->_conf);
rgw_apply_default_user_quota(user_info.user_quota, cct->_conf);
- int ret = ctl->user->store_info(user_info, null_yield,
+ int ret = ctl->user->store_info(dpp, user_info, null_yield,
RGWUserCtl::PutParams().set_exclusive(true));
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to store new user info: user="
@@ -641,7 +642,7 @@ void rgw::auth::RemoteApplier::load_acct_info(const DoutPrefixProvider* dpp, RGW
else if (acct_user.tenant.empty()) {
const rgw_user tenanted_uid(acct_user.id, acct_user.id);
- if (ctl->user->get_info_by_uid(tenanted_uid, &user_info, null_yield) >= 0) {
+ if (ctl->user->get_info_by_uid(dpp, tenanted_uid, &user_info, null_yield) >= 0) {
/* Succeeded.
*/ return; } @@ -649,7 +650,7 @@ void rgw::auth::RemoteApplier::load_acct_info(const DoutPrefixProvider* dpp, RGW if (split_mode && implicit_tenant) ; /* suppress lookup for id used by "other" protocol */ - else if (ctl->user->get_info_by_uid(acct_user, &user_info, null_yield) >= 0) { + else if (ctl->user->get_info_by_uid(dpp, acct_user, &user_info, null_yield) >= 0) { /* Succeeded. */ return; } @@ -666,7 +667,7 @@ const std::string rgw::auth::LocalApplier::NO_SUBUSER; uint32_t rgw::auth::LocalApplier::get_perms_from_aclspec(const DoutPrefixProvider* dpp, const aclspec_t& aclspec) const { - return rgw_perms_from_aclspec_default_strategy(user_info.user_id, aclspec); + return rgw_perms_from_aclspec_default_strategy(user_info.user_id, aclspec, dpp); } bool rgw::auth::LocalApplier::is_admin_of(const rgw_user& uid) const diff --git a/src/rgw/rgw_auth.h b/src/rgw/rgw_auth.h index 1bfccbfe8ddd7..c65f0b335b105 100644 --- a/src/rgw/rgw_auth.h +++ b/src/rgw/rgw_auth.h @@ -731,6 +731,7 @@ protected: uint32_t rgw_perms_from_aclspec_default_strategy( const rgw_user& uid, - const rgw::auth::Identity::aclspec_t& aclspec); + const rgw::auth::Identity::aclspec_t& aclspec, + const DoutPrefixProvider *dpp); #endif /* CEPH_RGW_AUTH_H */ diff --git a/src/rgw/rgw_auth_filters.h b/src/rgw/rgw_auth_filters.h index fbd80e3b90be5..bea9665e4167d 100644 --- a/src/rgw/rgw_auth_filters.h +++ b/src/rgw/rgw_auth_filters.h @@ -182,13 +182,13 @@ void ThirdPartyAccountApplier::load_acct_info(const DoutPrefixProvider* dpp, if (acct_user_override.tenant.empty()) { const rgw_user tenanted_uid(acct_user_override.id, acct_user_override.id); - if (ctl->user->get_info_by_uid(tenanted_uid, &user_info, null_yield) >= 0) { + if (ctl->user->get_info_by_uid(dpp, tenanted_uid, &user_info, null_yield) >= 0) { /* Succeeded. */ return; } } - const int ret = ctl->user->get_info_by_uid(acct_user_override, &user_info, null_yield); + const int ret = ctl->user->get_info_by_uid(dpp, acct_user_override, &user_info, null_yield); if (ret < 0) { /* We aren't trying to recover from ENOENT here. It's supposed that creating * someone else's account isn't a thing we want to support in this filter. */ @@ -258,7 +258,7 @@ void SysReqApplier::load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo * reasons. rgw_get_user_info_by_uid doesn't trigger the operator=() but * calls ::decode instead. */ RGWUserInfo euser_info; - if (ctl->user->get_info_by_uid(effective_uid, &euser_info, null_yield) < 0) { + if (ctl->user->get_info_by_uid(dpp, effective_uid, &euser_info, null_yield) < 0) { //ldpp_dout(dpp, 0) << "User lookup failed!" 
<< dendl; throw -EACCES; } diff --git a/src/rgw/rgw_auth_s3.cc b/src/rgw/rgw_auth_s3.cc index 670313b603c1e..6fa6e651e50ca 100644 --- a/src/rgw/rgw_auth_s3.cc +++ b/src/rgw/rgw_auth_s3.cc @@ -77,7 +77,7 @@ get_canon_amz_hdr(const meta_map_t& meta_map) * ?get the canonical representation of the object's location */ static std::string -get_canon_resource(const char* const request_uri, +get_canon_resource(const DoutPrefixProvider *dpp, const char* const request_uri, const std::map& sub_resources) { std::string dest; @@ -107,7 +107,7 @@ get_canon_resource(const char* const request_uri, } } - dout(10) << "get_canon_resource(): dest=" << dest << dendl; + ldpp_dout(dpp, 10) << "get_canon_resource(): dest=" << dest << dendl; return dest; } @@ -116,6 +116,7 @@ get_canon_resource(const char* const request_uri, * compute a request's signature */ void rgw_create_s3_canonical_header( + const DoutPrefixProvider *dpp, const char* const method, const char* const content_md5, const char* const content_type, @@ -150,7 +151,7 @@ void rgw_create_s3_canonical_header( dest.append(get_canon_amz_hdr(meta_map)); dest.append(get_canon_amz_hdr(qs_map)); - dest.append(get_canon_resource(request_uri, sub_resources)); + dest.append(get_canon_resource(dpp, request_uri, sub_resources)); dest_str = dest; } @@ -177,7 +178,8 @@ static inline void get_v2_qs_map(const req_info& info, * get the header authentication information required to * compute a request's signature */ -bool rgw_create_s3_canonical_header(const req_info& info, +bool rgw_create_s3_canonical_header(const DoutPrefixProvider *dpp, + const req_info& info, utime_t* const header_time, std::string& dest, const bool qsr) @@ -186,7 +188,7 @@ bool rgw_create_s3_canonical_header(const req_info& info, if (content_md5) { for (const char *p = content_md5; *p; p++) { if (!is_base64_for_content_md5(*p)) { - dout(0) << "NOTICE: bad content-md5 provided (not base64)," + ldpp_dout(dpp, 0) << "NOTICE: bad content-md5 provided (not base64)," << " aborting request p=" << *p << " " << (int)*p << dendl; return false; } @@ -207,7 +209,7 @@ bool rgw_create_s3_canonical_header(const req_info& info, if (str == NULL) { req_date = info.env->get("HTTP_DATE"); if (!req_date) { - dout(0) << "NOTICE: missing date for auth header" << dendl; + ldpp_dout(dpp, 0) << "NOTICE: missing date for auth header" << dendl; return false; } date = req_date; @@ -216,11 +218,11 @@ bool rgw_create_s3_canonical_header(const req_info& info, if (header_time) { struct tm t; if (!parse_rfc2616(req_date, &t)) { - dout(0) << "NOTICE: failed to parse date for auth header" << dendl; + ldpp_dout(dpp, 0) << "NOTICE: failed to parse date for auth header" << dendl; return false; } if (t.tm_year < 70) { - dout(0) << "NOTICE: bad date (predates epoch): " << req_date << dendl; + ldpp_dout(dpp, 0) << "NOTICE: bad date (predates epoch): " << req_date << dendl; return false; } *header_time = utime_t(internal_timegm(&t), 0); @@ -238,7 +240,7 @@ bool rgw_create_s3_canonical_header(const req_info& info, request_uri = info.effective_uri; } - rgw_create_s3_canonical_header(info.method, content_md5, content_type, + rgw_create_s3_canonical_header(dpp, info.method, content_md5, content_type, date.c_str(), meta_map, qs_map, request_uri.c_str(), sub_resources, dest); return true; @@ -377,7 +379,8 @@ static inline int parse_v4_auth_header(const req_info& info, /* in std::string_view& signedheaders, /* out */ std::string_view& signature, /* out */ std::string_view& date, /* out */ - std::string_view& sessiontoken) /* out */ + 
std::string_view& sessiontoken, /* out */ + const DoutPrefixProvider *dpp) { std::string_view input(info.env->get("HTTP_AUTHORIZATION", "")); try { @@ -385,7 +388,7 @@ static inline int parse_v4_auth_header(const req_info& info, /* in } catch (std::out_of_range&) { /* We should never ever run into this situation as the presence of * AWS4_HMAC_SHA256_STR had been verified earlier. */ - dout(10) << "credentials string is too short" << dendl; + ldpp_dout(dpp, 10) << "credentials string is too short" << dendl; return -EINVAL; } @@ -395,7 +398,7 @@ static inline int parse_v4_auth_header(const req_info& info, /* in if (parsed_pair) { kv[parsed_pair->first] = parsed_pair->second; } else { - dout(10) << "NOTICE: failed to parse auth header (s=" << s << ")" + ldpp_dout(dpp, 10) << "NOTICE: failed to parse auth header (s=" << s << ")" << dendl; return -EINVAL; } @@ -410,7 +413,7 @@ static inline int parse_v4_auth_header(const req_info& info, /* in /* Ensure that the presigned required keys are really there. */ for (const auto& k : required_keys) { if (kv.find(k) == std::end(kv)) { - dout(10) << "NOTICE: auth header missing key: " << k << dendl; + ldpp_dout(dpp, 10) << "NOTICE: auth header missing key: " << k << dendl; return -EINVAL; } } @@ -420,7 +423,7 @@ static inline int parse_v4_auth_header(const req_info& info, /* in signature = kv["Signature"]; /* sig hex str */ - dout(10) << "v4 signature format = " << signature << dendl; + ldpp_dout(dpp, 10) << "v4 signature format = " << signature << dendl; /* ------------------------- handle x-amz-date header */ @@ -429,7 +432,7 @@ static inline int parse_v4_auth_header(const req_info& info, /* in const char *d = info.env->get("HTTP_X_AMZ_DATE"); struct tm t; if (!parse_iso8601(d, &t, NULL, false)) { - dout(10) << "error reading date via http_x_amz_date" << dendl; + ldpp_dout(dpp, 10) << "error reading date via http_x_amz_date" << dendl; return -EACCES; } date = d; @@ -452,7 +455,8 @@ int parse_v4_credentials(const req_info& info, /* in */ std::string_view& signature, /* out */ std::string_view& date, /* out */ std::string_view& session_token, /* out */ - const bool using_qs) /* in */ + const bool using_qs, /* in */ + const DoutPrefixProvider *dpp) { std::string_view credential; int ret; @@ -461,7 +465,7 @@ int parse_v4_credentials(const req_info& info, /* in */ signature, date, session_token); } else { ret = parse_v4_auth_header(info, credential, signedheaders, - signature, date, session_token); + signature, date, session_token, dpp); } if (ret < 0) { @@ -469,7 +473,7 @@ int parse_v4_credentials(const req_info& info, /* in */ } /* access_key/YYYYMMDD/region/service/aws4_request */ - dout(10) << "v4 credential format = " << credential << dendl; + ldpp_dout(dpp, 10) << "v4 credential format = " << credential << dendl; if (std::count(credential.begin(), credential.end(), '/') != 4) { return -EINVAL; @@ -483,11 +487,11 @@ int parse_v4_credentials(const req_info& info, /* in */ /* grab access key id */ const size_t pos = credential.find("/"); access_key_id = credential.substr(0, pos); - dout(10) << "access key id = " << access_key_id << dendl; + ldpp_dout(dpp, 10) << "access key id = " << access_key_id << dendl; /* grab credential scope */ credential_scope = credential.substr(pos + 1); - dout(10) << "credential scope = " << credential_scope << dendl; + ldpp_dout(dpp, 10) << "credential scope = " << credential_scope << dendl; return 0; } @@ -633,9 +637,10 @@ get_v4_canon_req_hash(CephContext* cct, const std::string& canonical_qs, const std::string& 
canonical_hdrs, const std::string_view& signed_hdrs, - const std::string_view& request_payload_hash) + const std::string_view& request_payload_hash, + const DoutPrefixProvider *dpp) { - ldout(cct, 10) << "payload request hash = " << request_payload_hash << dendl; + ldpp_dout(dpp, 10) << "payload request hash = " << request_payload_hash << dendl; const auto canonical_req = string_join_reserve("\n", http_verb, @@ -648,8 +653,8 @@ get_v4_canon_req_hash(CephContext* cct, const auto canonical_req_hash = calc_hash_sha256(canonical_req); using sanitize = rgw::crypt_sanitize::log_content; - ldout(cct, 10) << "canonical request = " << sanitize{canonical_req} << dendl; - ldout(cct, 10) << "canonical request hash = " + ldpp_dout(dpp, 10) << "canonical request = " << sanitize{canonical_req} << dendl; + ldpp_dout(dpp, 10) << "canonical request hash = " << canonical_req_hash << dendl; return canonical_req_hash; @@ -665,7 +670,8 @@ get_v4_string_to_sign(CephContext* const cct, const std::string_view& algorithm, const std::string_view& request_date, const std::string_view& credential_scope, - const sha256_digest_t& canonreq_hash) + const sha256_digest_t& canonreq_hash, + const DoutPrefixProvider *dpp) { const auto hexed_cr_hash = canonreq_hash.to_str(); const std::string_view hexed_cr_hash_str(hexed_cr_hash); @@ -676,7 +682,7 @@ get_v4_string_to_sign(CephContext* const cct, credential_scope, hexed_cr_hash_str); - ldout(cct, 10) << "string to sign = " + ldpp_dout(dpp, 10) << "string to sign = " << rgw::crypt_sanitize::log_content{string_to_sign} << dendl; @@ -734,7 +740,8 @@ transform_secret_key(const std::string_view& secret_access_key) static sha256_digest_t get_v4_signing_key(CephContext* const cct, const std::string_view& credential_scope, - const std::string_view& secret_access_key) + const std::string_view& secret_access_key, + const DoutPrefixProvider *dpp) { std::string_view date, region, service; std::tie(date, region, service) = parse_cred_scope(credential_scope); @@ -748,10 +755,10 @@ get_v4_signing_key(CephContext* const cct, const auto signing_key = calc_hmac_sha256(service_k, std::string_view("aws4_request")); - ldout(cct, 10) << "date_k = " << date_k << dendl; - ldout(cct, 10) << "region_k = " << region_k << dendl; - ldout(cct, 10) << "service_k = " << service_k << dendl; - ldout(cct, 10) << "signing_k = " << signing_key << dendl; + ldpp_dout(dpp, 10) << "date_k = " << date_k << dendl; + ldpp_dout(dpp, 10) << "region_k = " << region_k << dendl; + ldpp_dout(dpp, 10) << "service_k = " << service_k << dendl; + ldpp_dout(dpp, 10) << "signing_k = " << signing_key << dendl; return signing_key; } @@ -769,9 +776,10 @@ AWSEngine::VersionAbstractor::server_signature_t get_v4_signature(const std::string_view& credential_scope, CephContext* const cct, const std::string_view& secret_key, - const AWSEngine::VersionAbstractor::string_to_sign_t& string_to_sign) + const AWSEngine::VersionAbstractor::string_to_sign_t& string_to_sign, + const DoutPrefixProvider *dpp) { - auto signing_key = get_v4_signing_key(cct, credential_scope, secret_key); + auto signing_key = get_v4_signing_key(cct, credential_scope, secret_key, dpp); /* The server-side generated digest for comparison. 
*/ const auto digest = calc_hmac_sha256(signing_key, string_to_sign); @@ -783,7 +791,7 @@ get_v4_signature(const std::string_view& credential_scope, digest.SIZE * 2); buf_to_hex(digest.v, digest.SIZE, signature.begin()); - ldout(cct, 10) << "generated signature = " << signature << dendl; + ldpp_dout(dpp, 10) << "generated signature = " << signature << dendl; return signature; } @@ -1076,7 +1084,7 @@ AWSv4ComplMulti::create(const req_state* const s, } const auto signing_key = \ - rgw::auth::s3::get_v4_signing_key(s->cct, credential_scope, *secret_key); + rgw::auth::s3::get_v4_signing_key(s->cct, credential_scope, *secret_key, s); return std::make_shared(s, std::move(date), diff --git a/src/rgw/rgw_auth_s3.h b/src/rgw/rgw_auth_s3.h index 210e48a67ffab..4288bd2f0d4e0 100644 --- a/src/rgw/rgw_auth_s3.h +++ b/src/rgw/rgw_auth_s3.h @@ -419,6 +419,7 @@ public: } /* namespace rgw */ void rgw_create_s3_canonical_header( + const DoutPrefixProvider *dpp, const char *method, const char *content_md5, const char *content_type, @@ -428,16 +429,17 @@ void rgw_create_s3_canonical_header( const char *request_uri, const std::map& sub_resources, std::string& dest_str); -bool rgw_create_s3_canonical_header(const req_info& info, +bool rgw_create_s3_canonical_header(const DoutPrefixProvider *dpp, + const req_info& info, utime_t *header_time, /* out */ std::string& dest, /* out */ bool qsr); static inline std::tuple -rgw_create_s3_canonical_header(const req_info& info, const bool qsr) { +rgw_create_s3_canonical_header(const DoutPrefixProvider *dpp, const req_info& info, const bool qsr) { std::string dest; utime_t header_time; - const bool ok = rgw_create_s3_canonical_header(info, &header_time, dest, qsr); + const bool ok = rgw_create_s3_canonical_header(dpp, info, &header_time, dest, qsr); return std::make_tuple(ok, dest, header_time); } @@ -463,7 +465,8 @@ int parse_v4_credentials(const req_info& info, /* in */ std::string_view& signature, /* out */ std::string_view& date, /* out */ std::string_view& session_token, /* out */ - const bool using_qs); /* in */ + const bool using_qs, /* in */ + const DoutPrefixProvider *dpp); /* in */ static inline bool char_needs_aws4_escaping(const char c, bool encode_slash) { @@ -593,20 +596,23 @@ get_v4_canon_req_hash(CephContext* cct, const std::string& canonical_qs, const std::string& canonical_hdrs, const std::string_view& signed_hdrs, - const std::string_view& request_payload_hash); + const std::string_view& request_payload_hash, + const DoutPrefixProvider *dpp); AWSEngine::VersionAbstractor::string_to_sign_t get_v4_string_to_sign(CephContext* cct, const std::string_view& algorithm, const std::string_view& request_date, const std::string_view& credential_scope, - const sha256_digest_t& canonreq_hash); + const sha256_digest_t& canonreq_hash, + const DoutPrefixProvider *dpp); extern AWSEngine::VersionAbstractor::server_signature_t get_v4_signature(const std::string_view& credential_scope, CephContext* const cct, const std::string_view& secret_key, - const AWSEngine::VersionAbstractor::string_to_sign_t& string_to_sign); + const AWSEngine::VersionAbstractor::string_to_sign_t& string_to_sign, + const DoutPrefixProvider *dpp); extern AWSEngine::VersionAbstractor::server_signature_t get_v2_signature(CephContext*, diff --git a/src/rgw/rgw_bucket.cc b/src/rgw/rgw_bucket.cc index 50b4d9e413287..7d836883fcacd 100644 --- a/src/rgw/rgw_bucket.cc +++ b/src/rgw/rgw_bucket.cc @@ -152,7 +152,8 @@ void rgw_parse_url_bucket(const string &bucket, const string& auth_tenant, * Get all the buckets 
owned by a user and fill up an RGWUserBuckets with them. * Returns: 0 on success, -ERR# on failure. */ -int rgw_read_user_buckets(rgw::sal::RGWRadosStore * store, +int rgw_read_user_buckets(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore * store, const rgw_user& user_id, rgw::sal::RGWBucketList& buckets, const string& marker, @@ -162,7 +163,7 @@ int rgw_read_user_buckets(rgw::sal::RGWRadosStore * store, optional_yield y) { rgw::sal::RGWRadosUser user(store, user_id); - return user.list_buckets(marker, end_marker, max, need_stats, buckets, y); + return user.list_buckets(dpp, marker, end_marker, max, need_stats, buckets, y); } int rgw_bucket_parse_bucket_instance(const string& bucket_instance, string *bucket_name, string *bucket_id, int *shard_id) @@ -261,7 +262,8 @@ static void dump_mulipart_index_results(list& objs_to_unlink, void check_bad_user_bucket_mapping(rgw::sal::RGWRadosStore *store, const rgw_user& user_id, bool fix, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { rgw::sal::RGWBucketList user_buckets; rgw::sal::RGWRadosUser user(store, user_id); @@ -272,7 +274,7 @@ void check_bad_user_bucket_mapping(rgw::sal::RGWRadosStore *store, const rgw_use size_t max_entries = cct->_conf->rgw_list_buckets_max_chunk; do { - int ret = user.list_buckets(marker, string(), max_entries, false, user_buckets, y); + int ret = user.list_buckets(dpp, marker, string(), max_entries, false, user_buckets, y); if (ret < 0) { ldout(store->ctx(), 0) << "failed to read user buckets: " << cpp_strerror(-ret) << dendl; @@ -289,7 +291,7 @@ void check_bad_user_bucket_mapping(rgw::sal::RGWRadosStore *store, const rgw_use RGWBucketInfo bucket_info; real_time mtime; - int r = store->getRados()->get_bucket_info(store->svc(), user_id.tenant, bucket->get_name(), bucket_info, &mtime, null_yield); + int r = store->getRados()->get_bucket_info(store->svc(), user_id.tenant, bucket->get_name(), bucket_info, &mtime, null_yield, dpp); if (r < 0) { ldout(store->ctx(), 0) << "could not get bucket info for bucket=" << bucket << dendl; continue; @@ -306,7 +308,7 @@ void check_bad_user_bucket_mapping(rgw::sal::RGWRadosStore *store, const rgw_use cout << "fixing" << std::endl; r = store->ctl()->bucket->link_bucket(user_id, actual_bucket, bucket_info.creation_time, - null_yield); + null_yield, dpp); if (r < 0) { cerr << "failed to fix bucket: " << cpp_strerror(-r) << std::endl; } @@ -324,7 +326,7 @@ bool rgw_bucket_object_check_filter(const string& oid) return rgw_obj_key::oid_to_key_in_ns(oid, &key, ns); } -int rgw_remove_object(rgw::sal::RGWRadosStore *store, const RGWBucketInfo& bucket_info, const rgw_bucket& bucket, rgw_obj_key& key) +int rgw_remove_object(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, const RGWBucketInfo& bucket_info, const rgw_bucket& bucket, rgw_obj_key& key) { RGWObjectCtx rctx(store); @@ -334,7 +336,7 @@ int rgw_remove_object(rgw::sal::RGWRadosStore *store, const RGWBucketInfo& bucke rgw_obj obj(bucket, key); - return store->getRados()->delete_obj(rctx, bucket_info, obj, bucket_info.versioning_status()); + return store->getRados()->delete_obj(dpp, rctx, bucket_info, obj, bucket_info.versioning_status()); } static int aio_wait(librados::AioCompletion *handle) @@ -362,7 +364,8 @@ static int drain_handles(list& pending) int rgw_remove_bucket_bypass_gc(rgw::sal::RGWRadosStore *store, rgw_bucket& bucket, int concurrent_max, bool keep_index_consistent, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { int ret; map stats; @@ -374,17 +377,17 
@@ int rgw_remove_bucket_bypass_gc(rgw::sal::RGWRadosStore *store, rgw_bucket& buck string bucket_ver, master_ver; - ret = store->getRados()->get_bucket_info(store->svc(), bucket.tenant, bucket.name, info, NULL, null_yield); + ret = store->getRados()->get_bucket_info(store->svc(), bucket.tenant, bucket.name, info, NULL, null_yield, dpp); if (ret < 0) return ret; - ret = store->getRados()->get_bucket_stats(info, RGW_NO_SHARD, &bucket_ver, &master_ver, stats, NULL); + ret = store->getRados()->get_bucket_stats(dpp, info, RGW_NO_SHARD, &bucket_ver, &master_ver, stats, NULL); if (ret < 0) return ret; string prefix, delimiter; - ret = abort_bucket_multiparts(store, cct, info, prefix, delimiter); + ret = abort_bucket_multiparts(dpp, store, cct, info, prefix, delimiter); if (ret < 0) { return ret; } @@ -402,7 +405,7 @@ int rgw_remove_bucket_bypass_gc(rgw::sal::RGWRadosStore *store, rgw_bucket& buck while (is_truncated) { objs.clear(); - ret = list_op.list_objects(listing_max_entries, &objs, &common_prefixes, + ret = list_op.list_objects(dpp, listing_max_entries, &objs, &common_prefixes, &is_truncated, null_yield); if (ret < 0) return ret; @@ -412,29 +415,29 @@ int rgw_remove_bucket_bypass_gc(rgw::sal::RGWRadosStore *store, rgw_bucket& buck RGWObjState *astate = NULL; rgw_obj obj(bucket, (*it).key); - ret = store->getRados()->get_obj_state(&obj_ctx, info, obj, &astate, false, y); + ret = store->getRados()->get_obj_state(dpp, &obj_ctx, info, obj, &astate, false, y); if (ret == -ENOENT) { - dout(1) << "WARNING: cannot find obj state for obj " << obj.get_oid() << dendl; + ldpp_dout(dpp, 1) << "WARNING: cannot find obj state for obj " << obj.get_oid() << dendl; continue; } if (ret < 0) { - lderr(store->ctx()) << "ERROR: get obj state returned with error " << ret << dendl; + ldpp_dout(dpp, -1) << "ERROR: get obj state returned with error " << ret << dendl; return ret; } if (astate->manifest) { RGWObjManifest& manifest = *astate->manifest; - RGWObjManifest::obj_iterator miter = manifest.obj_begin(); + RGWObjManifest::obj_iterator miter = manifest.obj_begin(dpp); rgw_obj head_obj = manifest.get_obj(); rgw_raw_obj raw_head_obj; store->getRados()->obj_to_raw(info.placement_rule, head_obj, &raw_head_obj); - for (; miter != manifest.obj_end() && max_aio--; ++miter) { + for (; miter != manifest.obj_end(dpp) && max_aio--; ++miter) { if (!max_aio) { ret = drain_handles(handles); if (ret < 0) { - lderr(store->ctx()) << "ERROR: could not drain handles as aio completion returned with " << ret << dendl; + ldpp_dout(dpp, -1) << "ERROR: could not drain handles as aio completion returned with " << ret << dendl; return ret; } max_aio = concurrent_max; @@ -446,16 +449,16 @@ int rgw_remove_bucket_bypass_gc(rgw::sal::RGWRadosStore *store, rgw_bucket& buck continue; } - ret = store->getRados()->delete_raw_obj_aio(last_obj, handles); + ret = store->getRados()->delete_raw_obj_aio(dpp, last_obj, handles); if (ret < 0) { - lderr(store->ctx()) << "ERROR: delete obj aio failed with " << ret << dendl; + ldpp_dout(dpp, -1) << "ERROR: delete obj aio failed with " << ret << dendl; return ret; } } // for all shadow objs - ret = store->getRados()->delete_obj_aio(head_obj, info, astate, handles, keep_index_consistent, null_yield); + ret = store->getRados()->delete_obj_aio(dpp, head_obj, info, astate, handles, keep_index_consistent, null_yield); if (ret < 0) { - lderr(store->ctx()) << "ERROR: delete obj aio failed with " << ret << dendl; + ldpp_dout(dpp, -1) << "ERROR: delete obj aio failed with " << ret << dendl; return ret; } } @@ 
-463,7 +466,7 @@ int rgw_remove_bucket_bypass_gc(rgw::sal::RGWRadosStore *store, rgw_bucket& buck if (!max_aio) { ret = drain_handles(handles); if (ret < 0) { - lderr(store->ctx()) << "ERROR: could not drain handles as aio completion returned with " << ret << dendl; + ldpp_dout(dpp, -1) << "ERROR: could not drain handles as aio completion returned with " << ret << dendl; return ret; } max_aio = concurrent_max; @@ -474,13 +477,13 @@ int rgw_remove_bucket_bypass_gc(rgw::sal::RGWRadosStore *store, rgw_bucket& buck ret = drain_handles(handles); if (ret < 0) { - lderr(store->ctx()) << "ERROR: could not drain handles as aio completion returned with " << ret << dendl; + ldpp_dout(dpp, -1) << "ERROR: could not drain handles as aio completion returned with " << ret << dendl; return ret; } - ret = store->ctl()->bucket->sync_user_stats(info.owner, info, y); + ret = store->ctl()->bucket->sync_user_stats(dpp, info.owner, info, y); if (ret < 0) { - dout(1) << "WARNING: failed sync user stats before bucket delete. ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "WARNING: failed sync user stats before bucket delete. ret=" << ret << dendl; } RGWObjVersionTracker objv_tracker; @@ -488,15 +491,15 @@ int rgw_remove_bucket_bypass_gc(rgw::sal::RGWRadosStore *store, rgw_bucket& buck // this function can only be run if caller wanted children to be // deleted, so we can ignore the check for children as any that // remain are detritus from a prior bug - ret = store->getRados()->delete_bucket(info, objv_tracker, y, false); + ret = store->getRados()->delete_bucket(info, objv_tracker, y, dpp, false); if (ret < 0) { - lderr(store->ctx()) << "ERROR: could not remove bucket " << bucket.name << dendl; + ldpp_dout(dpp, -1) << "ERROR: could not remove bucket " << bucket.name << dendl; return ret; } - ret = store->ctl()->bucket->unlink_bucket(info.owner, bucket, null_yield, false); + ret = store->ctl()->bucket->unlink_bucket(info.owner, bucket, null_yield, dpp, false); if (ret < 0) { - lderr(store->ctx()) << "ERROR: unable to remove user bucket information" << dendl; + ldpp_dout(dpp, -1) << "ERROR: unable to remove user bucket information" << dendl; } return ret; @@ -509,7 +512,7 @@ static void set_err_msg(std::string *sink, std::string msg) } int RGWBucket::init(rgw::sal::RGWRadosStore *storage, RGWBucketAdminOpState& op_state, - optional_yield y, std::string *err_msg, + optional_yield y, const DoutPrefixProvider *dpp, std::string *err_msg, map *pattrs) { if (!storage) { @@ -535,7 +538,7 @@ int RGWBucket::init(rgw::sal::RGWRadosStore *storage, RGWBucketAdminOpState& op_ if (!bucket.name.empty()) { int r = store->ctl()->bucket->read_bucket_info( - bucket, &bucket_info, y, + bucket, &bucket_info, y, dpp, RGWBucketCtl::BucketInstance::GetParams().set_attrs(pattrs), &ep_objv); if (r < 0) { @@ -547,7 +550,7 @@ int RGWBucket::init(rgw::sal::RGWRadosStore *storage, RGWBucketAdminOpState& op_ } if (!user_id.empty()) { - int r = store->ctl()->user->get_info_by_uid(user_id, &user_info, y); + int r = store->ctl()->user->get_info_by_uid(dpp, user_id, &user_info, y); if (r < 0) { set_err_msg(err_msg, "failed to fetch user info"); return r; @@ -560,14 +563,14 @@ int RGWBucket::init(rgw::sal::RGWRadosStore *storage, RGWBucketAdminOpState& op_ return 0; } -bool rgw_find_bucket_by_id(CephContext *cct, RGWMetadataManager *mgr, +bool rgw_find_bucket_by_id(const DoutPrefixProvider *dpp, CephContext *cct, RGWMetadataManager *mgr, const string& marker, const string& bucket_id, rgw_bucket* bucket_out) { void *handle = NULL; bool truncated = false; 
string s; - int ret = mgr->list_keys_init("bucket.instance", marker, &handle); + int ret = mgr->list_keys_init(dpp, "bucket.instance", marker, &handle); if (ret < 0) { cerr << "ERROR: can't get key: " << cpp_strerror(-ret) << std::endl; mgr->list_keys_complete(handle); @@ -597,7 +600,7 @@ bool rgw_find_bucket_by_id(CephContext *cct, RGWMetadataManager *mgr, return false; } -int RGWBucket::link(RGWBucketAdminOpState& op_state, optional_yield y, +int RGWBucket::link(RGWBucketAdminOpState& op_state, optional_yield y, const DoutPrefixProvider *dpp, map& attrs, std::string *err_msg) { if (!op_state.is_user_op()) { @@ -633,7 +636,7 @@ int RGWBucket::link(RGWBucketAdminOpState& op_state, optional_yield y, map::iterator aiter = attrs.find(RGW_ATTR_ACL); if (aiter == attrs.end()) { // should never happen; only pre-argonaut buckets lacked this. - ldout(store->ctx(), 0) << "WARNING: can't bucket link because no acl on bucket=" << old_bucket.name << dendl; + ldpp_dout(dpp, 0) << "WARNING: can't bucket link because no acl on bucket=" << old_bucket.name << dendl; set_err_msg(err_msg, "While crossing the Anavros you have displeased the goddess Hera." " You must sacrifice your ancient bucket " + bucket.bucket_id); @@ -652,7 +655,7 @@ int RGWBucket::link(RGWBucketAdminOpState& op_state, optional_yield y, } auto bucket_ctl = store->ctl()->bucket; - int r = bucket_ctl->unlink_bucket(owner.get_id(), old_bucket, y, false); + int r = bucket_ctl->unlink_bucket(owner.get_id(), old_bucket, y, dpp, false); if (r < 0) { set_err_msg(err_msg, "could not unlink policy from user " + owner.get_id().to_str()); return r; @@ -660,7 +663,7 @@ int RGWBucket::link(RGWBucketAdminOpState& op_state, optional_yield y, // now update the user for the bucket... if (display_name.empty()) { - ldout(store->ctx(), 0) << "WARNING: user " << user_info.user_id << " has no display name set" << dendl; + ldpp_dout(dpp, 0) << "WARNING: user " << user_info.user_id << " has no display name set" << dendl; } RGWAccessControlPolicy policy_instance; @@ -679,7 +682,7 @@ int RGWBucket::link(RGWBucketAdminOpState& op_state, optional_yield y, instance_params.set_exclusive(true); } - r = bucket_ctl->store_bucket_instance_info(bucket, bucket_info, y, instance_params); + r = bucket_ctl->store_bucket_instance_info(bucket, bucket_info, y, dpp, instance_params); if (r < 0) { set_err_msg(err_msg, "ERROR: failed writing bucket instance info: " + cpp_strerror(-r)); return r; @@ -697,7 +700,7 @@ int RGWBucket::link(RGWBucketAdminOpState& op_state, optional_yield y, r = store->ctl()->bucket->link_bucket(user_info.user_id, bucket_info.bucket, ep.creation_time, - y, true, &ep_data); + y, dpp, true, &ep_data); if (r < 0) { set_err_msg(err_msg, "failed to relink bucket"); return r; @@ -705,7 +708,7 @@ int RGWBucket::link(RGWBucketAdminOpState& op_state, optional_yield y, if (bucket != old_bucket) { // like RGWRados::delete_bucket -- excepting no bucket_index work. 
- r = bucket_ctl->remove_bucket_entrypoint_info(old_bucket, y, + r = bucket_ctl->remove_bucket_entrypoint_info(old_bucket, y, dpp, RGWBucketCtl::Bucket::RemoveParams() .set_objv_tracker(&ep_data.ep_objv)); if (r < 0) { @@ -713,7 +716,7 @@ int RGWBucket::link(RGWBucketAdminOpState& op_state, optional_yield y, return r; } - r = bucket_ctl->remove_bucket_instance_info(old_bucket, bucket_info, y, + r = bucket_ctl->remove_bucket_instance_info(old_bucket, bucket_info, y, dpp, RGWBucketCtl::BucketInstance::RemoveParams() .set_objv_tracker(&old_version)); if (r < 0) { @@ -726,10 +729,10 @@ int RGWBucket::link(RGWBucketAdminOpState& op_state, optional_yield y, } int RGWBucket::chown(RGWBucketAdminOpState& op_state, const string& marker, - optional_yield y, std::string *err_msg) + optional_yield y, const DoutPrefixProvider *dpp, std::string *err_msg) { int ret = store->ctl()->bucket->chown(store, bucket_info, user_info.user_id, - user_info.display_name, marker, y); + user_info.display_name, marker, y, dpp); if (ret < 0) { set_err_msg(err_msg, "Failed to change object ownership: " + cpp_strerror(-ret)); } @@ -737,7 +740,7 @@ int RGWBucket::chown(RGWBucketAdminOpState& op_state, const string& marker, return ret; } -int RGWBucket::unlink(RGWBucketAdminOpState& op_state, optional_yield y, std::string *err_msg) +int RGWBucket::unlink(RGWBucketAdminOpState& op_state, optional_yield y, const DoutPrefixProvider *dpp, std::string *err_msg) { rgw_bucket bucket = op_state.get_bucket(); @@ -746,7 +749,7 @@ int RGWBucket::unlink(RGWBucketAdminOpState& op_state, optional_yield y, std::st return -EINVAL; } - int r = store->ctl()->bucket->unlink_bucket(user_info.user_id, bucket, y); + int r = store->ctl()->bucket->unlink_bucket(user_info.user_id, bucket, y, dpp); if (r < 0) { set_err_msg(err_msg, "error unlinking bucket" + cpp_strerror(-r)); } @@ -754,19 +757,19 @@ int RGWBucket::unlink(RGWBucketAdminOpState& op_state, optional_yield y, std::st return r; } -int RGWBucket::set_quota(RGWBucketAdminOpState& op_state, std::string *err_msg) +int RGWBucket::set_quota(RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, std::string *err_msg) { rgw_bucket bucket = op_state.get_bucket(); RGWBucketInfo bucket_info; map attrs; - int r = store->getRados()->get_bucket_info(store->svc(), bucket.tenant, bucket.name, bucket_info, NULL, null_yield, &attrs); + int r = store->getRados()->get_bucket_info(store->svc(), bucket.tenant, bucket.name, bucket_info, NULL, null_yield, dpp, &attrs); if (r < 0) { set_err_msg(err_msg, "could not get bucket info for bucket=" + bucket.name + ": " + cpp_strerror(-r)); return r; } bucket_info.quota = op_state.quota; - r = store->getRados()->put_bucket_instance_info(bucket_info, false, real_time(), &attrs); + r = store->getRados()->put_bucket_instance_info(bucket_info, false, real_time(), &attrs, dpp); if (r < 0) { set_err_msg(err_msg, "ERROR: failed writing bucket instance info: " + cpp_strerror(-r)); return r; @@ -774,14 +777,14 @@ int RGWBucket::set_quota(RGWBucketAdminOpState& op_state, std::string *err_msg) return r; } -int RGWBucket::remove_object(RGWBucketAdminOpState& op_state, std::string *err_msg) +int RGWBucket::remove_object(const DoutPrefixProvider *dpp, RGWBucketAdminOpState& op_state, std::string *err_msg) { rgw_bucket bucket = op_state.get_bucket(); std::string object_name = op_state.get_object_name(); rgw_obj_key key(object_name); - int ret = rgw_remove_object(store, bucket_info, bucket, key); + int ret = rgw_remove_object(dpp, store, bucket_info, bucket, key); if (ret < 0) 
{ set_err_msg(err_msg, "unable to remove object" + cpp_strerror(-ret)); return ret; @@ -827,7 +830,8 @@ static void dump_index_check(map existing_stats } int RGWBucket::check_bad_index_multipart(RGWBucketAdminOpState& op_state, - RGWFormatterFlusher& flusher ,std::string *err_msg) + RGWFormatterFlusher& flusher, + const DoutPrefixProvider *dpp, std::string *err_msg) { bool fix_index = op_state.will_fix_index(); rgw_bucket bucket = op_state.get_bucket(); @@ -840,9 +844,9 @@ int RGWBucket::check_bad_index_multipart(RGWBucketAdminOpState& op_state, RGWBucketInfo bucket_info; auto obj_ctx = store->svc()->sysobj->init_obj_ctx(); - int r = store->getRados()->get_bucket_instance_info(obj_ctx, bucket, bucket_info, nullptr, nullptr, null_yield); + int r = store->getRados()->get_bucket_instance_info(obj_ctx, bucket, bucket_info, nullptr, nullptr, null_yield, dpp); if (r < 0) { - ldout(store->ctx(), 0) << "ERROR: " << __func__ << "(): get_bucket_instance_info(bucket=" << bucket << ") returned r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): get_bucket_instance_info(bucket=" << bucket << ") returned r=" << r << dendl; return r; } @@ -854,7 +858,7 @@ int RGWBucket::check_bad_index_multipart(RGWBucketAdminOpState& op_state, do { vector result; - int r = list_op.list_objects(listing_max_entries, &result, + int r = list_op.list_objects(dpp, listing_max_entries, &result, &common_prefixes, &is_truncated, null_yield); if (r < 0) { set_err_msg(err_msg, "failed to list objects in bucket=" + bucket.name + @@ -901,7 +905,7 @@ int RGWBucket::check_bad_index_multipart(RGWBucketAdminOpState& op_state, if (objs_to_unlink.size() > listing_max_entries) { if (fix_index) { - int r = store->getRados()->remove_objs_from_index(bucket_info, objs_to_unlink); + int r = store->getRados()->remove_objs_from_index(dpp, bucket_info, objs_to_unlink); if (r < 0) { set_err_msg(err_msg, "ERROR: remove_obj_from_index() returned error: " + cpp_strerror(-r)); @@ -916,7 +920,7 @@ int RGWBucket::check_bad_index_multipart(RGWBucketAdminOpState& op_state, } if (fix_index) { - int r = store->getRados()->remove_objs_from_index(bucket_info, objs_to_unlink); + int r = store->getRados()->remove_objs_from_index(dpp, bucket_info, objs_to_unlink); if (r < 0) { set_err_msg(err_msg, "ERROR: remove_obj_from_index() returned error: " + cpp_strerror(-r)); @@ -932,7 +936,8 @@ int RGWBucket::check_bad_index_multipart(RGWBucketAdminOpState& op_state, return 0; } -int RGWBucket::check_object_index(RGWBucketAdminOpState& op_state, +int RGWBucket::check_object_index(const DoutPrefixProvider *dpp, + RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y, std::string *err_msg) @@ -945,7 +950,7 @@ int RGWBucket::check_object_index(RGWBucketAdminOpState& op_state, return -EINVAL; } - store->getRados()->cls_obj_set_bucket_tag_timeout(bucket_info, BUCKET_TAG_TIMEOUT); + store->getRados()->cls_obj_set_bucket_tag_timeout(dpp, bucket_info, BUCKET_TAG_TIMEOUT); string prefix; string empty_delimiter; @@ -961,7 +966,7 @@ int RGWBucket::check_object_index(RGWBucketAdminOpState& op_state, result.reserve(listing_max_entries); int r = store->getRados()->cls_bucket_list_ordered( - bucket_info, RGW_NO_SHARD, marker, prefix, empty_delimiter, + dpp, bucket_info, RGW_NO_SHARD, marker, prefix, empty_delimiter, listing_max_entries, true, expansion_factor, result, &is_truncated, &cls_filtered, &marker, y, rgw_bucket_object_check_filter); @@ -984,27 +989,28 @@ int RGWBucket::check_object_index(RGWBucketAdminOpState& op_state, 
formatter->close_section(); - store->getRados()->cls_obj_set_bucket_tag_timeout(bucket_info, 0); + store->getRados()->cls_obj_set_bucket_tag_timeout(dpp, bucket_info, 0); return 0; } -int RGWBucket::check_index(RGWBucketAdminOpState& op_state, +int RGWBucket::check_index(const DoutPrefixProvider *dpp, + RGWBucketAdminOpState& op_state, map& existing_stats, map& calculated_stats, std::string *err_msg) { bool fix_index = op_state.will_fix_index(); - int r = store->getRados()->bucket_check_index(bucket_info, &existing_stats, &calculated_stats); + int r = store->getRados()->bucket_check_index(dpp, bucket_info, &existing_stats, &calculated_stats); if (r < 0) { set_err_msg(err_msg, "failed to check index error=" + cpp_strerror(-r)); return r; } if (fix_index) { - r = store->getRados()->bucket_rebuild_index(bucket_info); + r = store->getRados()->bucket_rebuild_index(dpp, bucket_info); if (r < 0) { set_err_msg(err_msg, "failed to rebuild index err=" + cpp_strerror(-r)); return r; @@ -1014,7 +1020,7 @@ int RGWBucket::check_index(RGWBucketAdminOpState& op_state, return 0; } -int RGWBucket::sync(RGWBucketAdminOpState& op_state, map *attrs, std::string *err_msg) +int RGWBucket::sync(RGWBucketAdminOpState& op_state, map *attrs, const DoutPrefixProvider *dpp, std::string *err_msg) { if (!store->svc()->zone->is_meta_master()) { set_err_msg(err_msg, "ERROR: failed to update bucket sync: only allowed on meta master zone"); @@ -1027,7 +1033,7 @@ int RGWBucket::sync(RGWBucketAdminOpState& op_state, map *at bucket_info.flags |= BUCKET_DATASYNC_DISABLED; } - int r = store->getRados()->put_bucket_instance_info(bucket_info, false, real_time(), attrs); + int r = store->getRados()->put_bucket_instance_info(bucket_info, false, real_time(), attrs, dpp); if (r < 0) { set_err_msg(err_msg, "ERROR: failed writing bucket instance info:" + cpp_strerror(-r)); return r; @@ -1037,13 +1043,13 @@ int RGWBucket::sync(RGWBucketAdminOpState& op_state, map *at int shard_id = bucket_info.layout.current_index.layout.normal.num_shards? 
0 : -1; if (!sync) { - r = store->svc()->bilog_rados->log_stop(bucket_info, -1); + r = store->svc()->bilog_rados->log_stop(dpp, bucket_info, -1); if (r < 0) { set_err_msg(err_msg, "ERROR: failed writing stop bilog:" + cpp_strerror(-r)); return r; } } else { - r = store->svc()->bilog_rados->log_start(bucket_info, -1); + r = store->svc()->bilog_rados->log_start(dpp, bucket_info, -1); if (r < 0) { set_err_msg(err_msg, "ERROR: failed writing resync bilog:" + cpp_strerror(-r)); return r; @@ -1051,7 +1057,7 @@ int RGWBucket::sync(RGWBucketAdminOpState& op_state, map *at } for (int i = 0; i < shards_num; ++i, ++shard_id) { - r = store->svc()->datalog_rados->add_entry(bucket_info, shard_id); + r = store->svc()->datalog_rados->add_entry(dpp, bucket_info, shard_id); if (r < 0) { set_err_msg(err_msg, "ERROR: failed writing data log:" + cpp_strerror(-r)); return r; @@ -1073,7 +1079,8 @@ int RGWBucket::policy_bl_to_stream(bufferlist& bl, ostream& o) return 0; } -int rgw_object_get_attr(rgw::sal::RGWRadosStore* store, const RGWBucketInfo& bucket_info, +int rgw_object_get_attr(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore* store, const RGWBucketInfo& bucket_info, const rgw_obj& obj, const char* attr_name, bufferlist& out_bl, optional_yield y) { @@ -1081,17 +1088,17 @@ int rgw_object_get_attr(rgw::sal::RGWRadosStore* store, const RGWBucketInfo& buc RGWRados::Object op_target(store->getRados(), bucket_info, obj_ctx, obj); RGWRados::Object::Read rop(&op_target); - return rop.get_attr(attr_name, out_bl, y); + return rop.get_attr(dpp, attr_name, out_bl, y); } -int RGWBucket::get_policy(RGWBucketAdminOpState& op_state, RGWAccessControlPolicy& policy, optional_yield y) +int RGWBucket::get_policy(RGWBucketAdminOpState& op_state, RGWAccessControlPolicy& policy, optional_yield y, const DoutPrefixProvider *dpp) { std::string object_name = op_state.get_object_name(); rgw_bucket bucket = op_state.get_bucket(); RGWBucketInfo bucket_info; map attrs; - int ret = store->getRados()->get_bucket_info(store->svc(), bucket.tenant, bucket.name, bucket_info, NULL, null_yield, &attrs); + int ret = store->getRados()->get_bucket_info(store->svc(), bucket.tenant, bucket.name, bucket_info, NULL, null_yield, dpp, &attrs); if (ret < 0) { return ret; } @@ -1100,7 +1107,7 @@ int RGWBucket::get_policy(RGWBucketAdminOpState& op_state, RGWAccessControlPolic bufferlist bl; rgw_obj obj(bucket, object_name); - ret = rgw_object_get_attr(store, bucket_info, obj, RGW_ATTR_ACL, bl, y); + ret = rgw_object_get_attr(dpp, store, bucket_info, obj, RGW_ATTR_ACL, bl, y); if (ret < 0){ return ret; } @@ -1127,15 +1134,15 @@ int RGWBucket::get_policy(RGWBucketAdminOpState& op_state, RGWAccessControlPolic int RGWBucketAdminOp::get_policy(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, - RGWAccessControlPolicy& policy) + RGWAccessControlPolicy& policy, const DoutPrefixProvider *dpp) { RGWBucket bucket; - int ret = bucket.init(store, op_state, null_yield); + int ret = bucket.init(store, op_state, null_yield, dpp); if (ret < 0) return ret; - ret = bucket.get_policy(op_state, policy, null_yield); + ret = bucket.get_policy(op_state, policy, null_yield, dpp); if (ret < 0) return ret; @@ -1146,11 +1153,11 @@ int RGWBucketAdminOp::get_policy(rgw::sal::RGWRadosStore *store, RGWBucketAdminO int RGWBucketAdminOp::get_policy(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, - RGWFormatterFlusher& flusher) + RGWFormatterFlusher& flusher, const DoutPrefixProvider *dpp) { RGWAccessControlPolicy policy(store->ctx()); - int ret = 
get_policy(store, op_state, policy); + int ret = get_policy(store, op_state, policy, dpp); if (ret < 0) return ret; @@ -1168,11 +1175,11 @@ int RGWBucketAdminOp::get_policy(rgw::sal::RGWRadosStore *store, RGWBucketAdminO } int RGWBucketAdminOp::dump_s3_policy(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, - ostream& os) + ostream& os, const DoutPrefixProvider *dpp) { RGWAccessControlPolicy_S3 policy(store->ctx()); - int ret = get_policy(store, op_state, policy); + int ret = get_policy(store, op_state, policy, dpp); if (ret < 0) return ret; @@ -1181,49 +1188,49 @@ int RGWBucketAdminOp::dump_s3_policy(rgw::sal::RGWRadosStore *store, RGWBucketAd return 0; } -int RGWBucketAdminOp::unlink(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state) +int RGWBucketAdminOp::unlink(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp) { RGWBucket bucket; - int ret = bucket.init(store, op_state, null_yield); + int ret = bucket.init(store, op_state, null_yield, dpp); if (ret < 0) return ret; - return bucket.unlink(op_state, null_yield); + return bucket.unlink(op_state, null_yield, dpp); } -int RGWBucketAdminOp::link(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, string *err) +int RGWBucketAdminOp::link(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, string *err) { RGWBucket bucket; map attrs; - int ret = bucket.init(store, op_state, null_yield, err, &attrs); + int ret = bucket.init(store, op_state, null_yield, dpp, err, &attrs); if (ret < 0) return ret; - return bucket.link(op_state, null_yield, attrs, err); + return bucket.link(op_state, null_yield, dpp, attrs, err); } -int RGWBucketAdminOp::chown(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, const string& marker, string *err) +int RGWBucketAdminOp::chown(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, const string& marker, const DoutPrefixProvider *dpp, string *err) { RGWBucket bucket; map attrs; - int ret = bucket.init(store, op_state, null_yield, err, &attrs); + int ret = bucket.init(store, op_state, null_yield, dpp, err, &attrs); if (ret < 0) return ret; - ret = bucket.link(op_state, null_yield, attrs, err); + ret = bucket.link(op_state, null_yield, dpp, attrs, err); if (ret < 0) return ret; - return bucket.chown(op_state, marker, null_yield, err); + return bucket.chown(op_state, marker, null_yield, dpp, err); } int RGWBucketAdminOp::check_index(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, - RGWFormatterFlusher& flusher, optional_yield y) + RGWFormatterFlusher& flusher, optional_yield y, const DoutPrefixProvider *dpp) { int ret; map existing_stats; @@ -1232,22 +1239,22 @@ int RGWBucketAdminOp::check_index(rgw::sal::RGWRadosStore *store, RGWBucketAdmin RGWBucket bucket; - ret = bucket.init(store, op_state, null_yield); + ret = bucket.init(store, op_state, null_yield, dpp); if (ret < 0) return ret; Formatter *formatter = flusher.get_formatter(); flusher.start(0); - ret = bucket.check_bad_index_multipart(op_state, flusher); + ret = bucket.check_bad_index_multipart(op_state, flusher, dpp); if (ret < 0) return ret; - ret = bucket.check_object_index(op_state, flusher, y); + ret = bucket.check_object_index(dpp, op_state, flusher, y); if (ret < 0) return ret; - ret = bucket.check_index(op_state, existing_stats, calculated_stats); + ret = bucket.check_index(dpp, op_state, existing_stats, calculated_stats); if (ret < 0) return ret; @@ -1258,52 +1265,54 @@ int 
RGWBucketAdminOp::check_index(rgw::sal::RGWRadosStore *store, RGWBucketAdmin } int RGWBucketAdminOp::remove_bucket(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, - optional_yield y, bool bypass_gc, bool keep_index_consistent) + optional_yield y, const DoutPrefixProvider *dpp, + bool bypass_gc, bool keep_index_consistent) { std::unique_ptr bucket; std::unique_ptr user = store->get_user(op_state.get_user_id()); - int ret = store->get_bucket(user.get(), user->get_tenant(), op_state.get_bucket_name(), + int ret = store->get_bucket(dpp, user.get(), user->get_tenant(), op_state.get_bucket_name(), &bucket, y); if (ret < 0) return ret; if (bypass_gc) - ret = rgw_remove_bucket_bypass_gc(store, bucket->get_key(), op_state.get_max_aio(), keep_index_consistent, y); + ret = rgw_remove_bucket_bypass_gc(store, bucket->get_key(), op_state.get_max_aio(), keep_index_consistent, y, dpp); else - ret = bucket->remove_bucket(op_state.will_delete_children(), string(), string(), + ret = bucket->remove_bucket(dpp, op_state.will_delete_children(), string(), string(), false, nullptr, y); return ret; } -int RGWBucketAdminOp::remove_object(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state) +int RGWBucketAdminOp::remove_object(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp) { RGWBucket bucket; - int ret = bucket.init(store, op_state, null_yield); + int ret = bucket.init(store, op_state, null_yield, dpp); if (ret < 0) return ret; - return bucket.remove_object(op_state); + return bucket.remove_object(dpp, op_state); } -int RGWBucketAdminOp::sync_bucket(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, string *err_msg) +int RGWBucketAdminOp::sync_bucket(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, string *err_msg) { RGWBucket bucket; map attrs; - int ret = bucket.init(store, op_state, null_yield, err_msg, &attrs); + int ret = bucket.init(store, op_state, null_yield, dpp, err_msg, &attrs); if (ret < 0) { return ret; } - return bucket.sync(op_state, &attrs, err_msg); + return bucket.sync(op_state, &attrs, dpp, err_msg); } static int bucket_stats(rgw::sal::RGWRadosStore *store, const std::string& tenant_name, const std::string& bucket_name, - Formatter *formatter) + Formatter *formatter, + const DoutPrefixProvider *dpp) { RGWBucketInfo bucket_info; map stats; @@ -1312,7 +1321,7 @@ static int bucket_stats(rgw::sal::RGWRadosStore *store, real_time mtime; int r = store->getRados()->get_bucket_info(store->svc(), tenant_name, bucket_name, bucket_info, - &mtime, null_yield, &attrs); + &mtime, null_yield, dpp, &attrs); if (r < 0) { return r; } @@ -1321,7 +1330,7 @@ static int bucket_stats(rgw::sal::RGWRadosStore *store, string bucket_ver, master_ver; string max_marker; - int ret = store->getRados()->get_bucket_stats(bucket_info, RGW_NO_SHARD, + int ret = store->getRados()->get_bucket_stats(dpp, bucket_info, RGW_NO_SHARD, &bucket_ver, &master_ver, stats, &max_marker); if (ret < 0) { @@ -1376,6 +1385,7 @@ int RGWBucketAdminOp::limit_check(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, const std::list& user_ids, RGWFormatterFlusher& flusher, optional_yield y, + const DoutPrefixProvider *dpp, bool warnings_only) { int ret = 0; @@ -1406,7 +1416,7 @@ int RGWBucketAdminOp::limit_check(rgw::sal::RGWRadosStore *store, do { rgw::sal::RGWRadosUser user(store, rgw_user(user_id)); - ret = user.list_buckets(marker, string(), max_entries, false, buckets, y); + ret = 
user.list_buckets(dpp, marker, string(), max_entries, false, buckets, y); if (ret < 0) return ret; @@ -1427,14 +1437,14 @@ int RGWBucketAdminOp::limit_check(rgw::sal::RGWRadosStore *store, ret = store->getRados()->get_bucket_info(store->svc(), bucket->get_tenant(), bucket->get_name(), info, nullptr, - null_yield); + null_yield, dpp); if (ret < 0) continue; /* need stats for num_entries */ string bucket_ver, master_ver; std::map stats; - ret = store->getRados()->get_bucket_stats(info, RGW_NO_SHARD, &bucket_ver, + ret = store->getRados()->get_bucket_stats(dpp, info, RGW_NO_SHARD, &bucket_ver, &master_ver, stats, nullptr); if (ret < 0) @@ -1492,13 +1502,14 @@ int RGWBucketAdminOp::limit_check(rgw::sal::RGWRadosStore *store, int RGWBucketAdminOp::info(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { RGWBucket bucket; int ret = 0; const std::string& bucket_name = op_state.get_bucket_name(); if (!bucket_name.empty()) { - ret = bucket.init(store, op_state, null_yield); + ret = bucket.init(store, op_state, null_yield, dpp); if (-ENOENT == ret) return -ERR_NO_SUCH_BUCKET; else if (ret < 0) @@ -1524,7 +1535,7 @@ int RGWBucketAdminOp::info(rgw::sal::RGWRadosStore *store, constexpr bool no_need_stats = false; // set need_stats to false do { - ret = user.list_buckets(marker, empty_end_marker, max_entries, + ret = user.list_buckets(dpp, marker, empty_end_marker, max_entries, no_need_stats, buckets, y); if (ret < 0) { return ret; @@ -1540,7 +1551,7 @@ int RGWBucketAdminOp::info(rgw::sal::RGWRadosStore *store, } if (show_stats) { - bucket_stats(store, user_id.tenant, obj_name, formatter); + bucket_stats(store, user_id.tenant, obj_name, formatter, dpp); } else { formatter->dump_string("bucket", obj_name); } @@ -1556,7 +1567,7 @@ int RGWBucketAdminOp::info(rgw::sal::RGWRadosStore *store, formatter->close_section(); } else if (!bucket_name.empty()) { - ret = bucket_stats(store, user_id.tenant, bucket_name, formatter); + ret = bucket_stats(store, user_id.tenant, bucket_name, formatter, dpp); if (ret < 0) { return ret; } @@ -1565,7 +1576,7 @@ int RGWBucketAdminOp::info(rgw::sal::RGWRadosStore *store, bool truncated = true; formatter->open_array_section("buckets"); - ret = store->ctl()->meta.mgr->list_keys_init("bucket", &handle); + ret = store->ctl()->meta.mgr->list_keys_init(dpp, "bucket", &handle); while (ret == 0 && truncated) { std::list buckets; constexpr int max_keys = 1000; @@ -1573,7 +1584,7 @@ int RGWBucketAdminOp::info(rgw::sal::RGWRadosStore *store, &truncated); for (auto& bucket_name : buckets) { if (show_stats) { - bucket_stats(store, user_id.tenant, bucket_name, formatter); + bucket_stats(store, user_id.tenant, bucket_name, formatter, dpp); } else { formatter->dump_string("bucket", bucket_name); } @@ -1589,23 +1600,23 @@ int RGWBucketAdminOp::info(rgw::sal::RGWRadosStore *store, return 0; } -int RGWBucketAdminOp::set_quota(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state) +int RGWBucketAdminOp::set_quota(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp) { RGWBucket bucket; - int ret = bucket.init(store, op_state, null_yield); + int ret = bucket.init(store, op_state, null_yield, dpp); if (ret < 0) return ret; - return bucket.set_quota(op_state); + return bucket.set_quota(op_state, dpp); } -static int purge_bucket_instance(rgw::sal::RGWRadosStore *store, const RGWBucketInfo& bucket_info) +static int 
purge_bucket_instance(rgw::sal::RGWRadosStore *store, const RGWBucketInfo& bucket_info, const DoutPrefixProvider *dpp) { int max_shards = (bucket_info.layout.current_index.layout.normal.num_shards > 0 ? bucket_info.layout.current_index.layout.normal.num_shards : 1); for (int i = 0; i < max_shards; i++) { RGWRados::BucketShard bs(store->getRados()); int shard_id = (bucket_info.layout.current_index.layout.normal.num_shards > 0 ? i : -1); - int ret = bs.init(bucket_info.bucket, shard_id, bucket_info.layout.current_index, nullptr); + int ret = bs.init(bucket_info.bucket, shard_id, bucket_info.layout.current_index, nullptr, dpp); if (ret < 0) { cerr << "ERROR: bs.init(bucket=" << bucket_info.bucket << ", shard=" << shard_id << "): " << cpp_strerror(-ret) << std::endl; @@ -1632,7 +1643,8 @@ inline auto split_tenant(const std::string& bucket_name){ using bucket_instance_ls = std::vector; void get_stale_instances(rgw::sal::RGWRadosStore *store, const std::string& bucket_name, const vector& lst, - bucket_instance_ls& stale_instances) + bucket_instance_ls& stale_instances, + const DoutPrefixProvider *dpp) { auto obj_ctx = store->svc()->sysobj->init_obj_ctx(); @@ -1643,10 +1655,10 @@ void get_stale_instances(rgw::sal::RGWRadosStore *store, const std::string& buck for (const auto& bucket_instance : lst){ RGWBucketInfo binfo; int r = store->getRados()->get_bucket_instance_info(obj_ctx, bucket_instance, - binfo, nullptr,nullptr, null_yield); + binfo, nullptr,nullptr, null_yield, dpp); if (r < 0){ // this can only happen if someone deletes us right when we're processing - lderr(store->ctx()) << "Bucket instance is invalid: " << bucket_instance + ldpp_dout(dpp, -1) << "Bucket instance is invalid: " << bucket_instance << cpp_strerror(-r) << dendl; continue; } @@ -1661,7 +1673,7 @@ void get_stale_instances(rgw::sal::RGWRadosStore *store, const std::string& buck // all the instances auto [tenant, bucket] = split_tenant(bucket_name); RGWBucketInfo cur_bucket_info; - int r = store->getRados()->get_bucket_info(store->svc(), tenant, bucket, cur_bucket_info, nullptr, null_yield); + int r = store->getRados()->get_bucket_info(store->svc(), tenant, bucket, cur_bucket_info, nullptr, null_yield, dpp); if (r < 0) { if (r == -ENOENT) { // bucket doesn't exist, everything is stale then @@ -1670,7 +1682,7 @@ void get_stale_instances(rgw::sal::RGWRadosStore *store, const std::string& buck std::make_move_iterator(other_instances.end())); } else { // all bets are off if we can't read the bucket, just return the sureshot stale instances - lderr(store->ctx()) << "error: reading bucket info for bucket: " + ldpp_dout(dpp, -1) << "error: reading bucket info for bucket: " << bucket << cpp_strerror(-r) << dendl; } return; @@ -1701,7 +1713,7 @@ void get_stale_instances(rgw::sal::RGWRadosStore *store, const std::string& buck r = reshard_lock.lock(); if (r < 0) { // most likely bucket is under reshard, return the sureshot stale instances - ldout(store->ctx(), 5) << __func__ + ldpp_dout(dpp, 5) << __func__ << "failed to take reshard lock; reshard underway likey" << dendl; return; } @@ -1718,6 +1730,7 @@ void get_stale_instances(rgw::sal::RGWRadosStore *store, const std::string& buck static int process_stale_instances(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, + const DoutPrefixProvider *dpp, std::function process_f) @@ -1727,7 +1740,7 @@ static int process_stale_instances(rgw::sal::RGWRadosStore *store, RGWBucketAdmi Formatter *formatter = flusher.get_formatter(); static constexpr 
auto default_max_keys = 1000; - int ret = store->ctl()->meta.mgr->list_keys_init("bucket.instance", marker, &handle); + int ret = store->ctl()->meta.mgr->list_keys_init(dpp, "bucket.instance", marker, &handle); if (ret < 0) { cerr << "ERROR: can't get key: " << cpp_strerror(-ret) << std::endl; return ret; @@ -1760,7 +1773,7 @@ static int process_stale_instances(rgw::sal::RGWRadosStore *store, RGWBucketAdmi } for (const auto& kv: bucket_instance_map) { bucket_instance_ls stale_lst; - get_stale_instances(store, kv.first, kv.second, stale_lst); + get_stale_instances(store, kv.first, kv.second, stale_lst, dpp); process_f(stale_lst, formatter, store); } } @@ -1771,7 +1784,8 @@ static int process_stale_instances(rgw::sal::RGWRadosStore *store, RGWBucketAdmi int RGWBucketAdminOp::list_stale_instances(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, - RGWFormatterFlusher& flusher) + RGWFormatterFlusher& flusher, + const DoutPrefixProvider *dpp) { auto process_f = [](const bucket_instance_ls& lst, Formatter *formatter, @@ -1779,22 +1793,23 @@ int RGWBucketAdminOp::list_stale_instances(rgw::sal::RGWRadosStore *store, for (const auto& binfo: lst) formatter->dump_string("key", binfo.bucket.get_key()); }; - return process_stale_instances(store, op_state, flusher, process_f); + return process_stale_instances(store, op_state, flusher, dpp, process_f); } int RGWBucketAdminOp::clear_stale_instances(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, - RGWFormatterFlusher& flusher) + RGWFormatterFlusher& flusher, + const DoutPrefixProvider *dpp) { - auto process_f = [](const bucket_instance_ls& lst, + auto process_f = [dpp](const bucket_instance_ls& lst, Formatter *formatter, - rgw::sal::RGWRadosStore *store){ + rgw::sal::RGWRadosStore *store) { for (const auto &binfo: lst) { - int ret = purge_bucket_instance(store, binfo); + int ret = purge_bucket_instance(store, binfo, dpp); if (ret == 0){ auto md_key = "bucket.instance:" + binfo.bucket.get_key(); - ret = store->ctl()->meta.mgr->remove(md_key, null_yield); + ret = store->ctl()->meta.mgr->remove(md_key, null_yield, dpp); } formatter->open_object_section("delete_status"); formatter->dump_string("bucket_instance", binfo.bucket.get_key()); @@ -1803,24 +1818,25 @@ int RGWBucketAdminOp::clear_stale_instances(rgw::sal::RGWRadosStore *store, } }; - return process_stale_instances(store, op_state, flusher, process_f); + return process_stale_instances(store, op_state, flusher, dpp, process_f); } static int fix_single_bucket_lc(rgw::sal::RGWRadosStore *store, const std::string& tenant_name, - const std::string& bucket_name) + const std::string& bucket_name, + const DoutPrefixProvider *dpp) { RGWBucketInfo bucket_info; map bucket_attrs; int ret = store->getRados()->get_bucket_info(store->svc(), tenant_name, bucket_name, - bucket_info, nullptr, null_yield, &bucket_attrs); + bucket_info, nullptr, null_yield, dpp, &bucket_attrs); if (ret < 0) { // TODO: Should we handle the case where the bucket could've been removed between // listing and fetching? 
return ret; } - return rgw::lc::fix_lc_shard_entry(store, store->get_rgwlc()->get_lc(), bucket_info, + return rgw::lc::fix_lc_shard_entry(dpp, store, store->get_rgwlc()->get_lc(), bucket_info, bucket_attrs); } @@ -1839,15 +1855,17 @@ static void format_lc_status(Formatter* formatter, static void process_single_lc_entry(rgw::sal::RGWRadosStore *store, Formatter *formatter, const std::string& tenant_name, - const std::string& bucket_name) + const std::string& bucket_name, + const DoutPrefixProvider *dpp) { - int ret = fix_single_bucket_lc(store, tenant_name, bucket_name); + int ret = fix_single_bucket_lc(store, tenant_name, bucket_name, dpp); format_lc_status(formatter, tenant_name, bucket_name, -ret); } int RGWBucketAdminOp::fix_lc_shards(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, - RGWFormatterFlusher& flusher) + RGWFormatterFlusher& flusher, + const DoutPrefixProvider *dpp) { std::string marker; void *handle; @@ -1858,10 +1876,10 @@ int RGWBucketAdminOp::fix_lc_shards(rgw::sal::RGWRadosStore *store, if (const std::string& bucket_name = op_state.get_bucket_name(); ! bucket_name.empty()) { const rgw_user user_id = op_state.get_user_id(); - process_single_lc_entry(store, formatter, user_id.tenant, bucket_name); + process_single_lc_entry(store, formatter, user_id.tenant, bucket_name, dpp); formatter->flush(cout); } else { - int ret = store->ctl()->meta.mgr->list_keys_init("bucket", marker, &handle); + int ret = store->ctl()->meta.mgr->list_keys_init(dpp, "bucket", marker, &handle); if (ret < 0) { std::cerr << "ERROR: can't get key: " << cpp_strerror(-ret) << std::endl; return ret; @@ -1883,7 +1901,7 @@ int RGWBucketAdminOp::fix_lc_shards(rgw::sal::RGWRadosStore *store, } if (ret != -ENOENT) { for (const auto &key:keys) { auto [tenant_name, bucket_name] = split_tenant(key); - process_single_lc_entry(store, formatter, tenant_name, bucket_name); + process_single_lc_entry(store, formatter, tenant_name, bucket_name, dpp); } } formatter->flush(cout); // regularly flush every 1k entries @@ -1895,14 +1913,15 @@ int RGWBucketAdminOp::fix_lc_shards(rgw::sal::RGWRadosStore *store, } -static bool has_object_expired(rgw::sal::RGWRadosStore *store, +static bool has_object_expired(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, const RGWBucketInfo& bucket_info, const rgw_obj_key& key, utime_t& delete_at) { rgw_obj obj(bucket_info.bucket, key); bufferlist delete_at_bl; - int ret = rgw_object_get_attr(store, bucket_info, obj, RGW_ATTR_DELETE_AT, delete_at_bl, null_yield); + int ret = rgw_object_get_attr(dpp, store, bucket_info, obj, RGW_ATTR_DELETE_AT, delete_at_bl, null_yield); if (ret < 0) { return false; // no delete at attr, proceed } @@ -1919,12 +1938,13 @@ static bool has_object_expired(rgw::sal::RGWRadosStore *store, return false; } -static int fix_bucket_obj_expiry(rgw::sal::RGWRadosStore *store, +static int fix_bucket_obj_expiry(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, const RGWBucketInfo& bucket_info, RGWFormatterFlusher& flusher, bool dry_run) { if (bucket_info.bucket.bucket_id == bucket_info.bucket.marker) { - lderr(store->ctx()) << "Not a resharded bucket skipping" << dendl; + ldpp_dout(dpp, -1) << "Not a resharded bucket skipping" << dendl; return 0; // not a resharded bucket, move along } @@ -1945,22 +1965,22 @@ static int fix_bucket_obj_expiry(rgw::sal::RGWRadosStore *store, do { std::vector objs; - int ret = list_op.list_objects(listing_max_entries, &objs, nullptr, + int ret = list_op.list_objects(dpp, listing_max_entries, &objs, 
nullptr, &is_truncated, null_yield); if (ret < 0) { - lderr(store->ctx()) << "ERROR failed to list objects in the bucket" << dendl; + ldpp_dout(dpp, -1) << "ERROR failed to list objects in the bucket" << dendl; return ret; } for (const auto& obj : objs) { rgw_obj_key key(obj.key); utime_t delete_at; - if (has_object_expired(store, bucket_info, key, delete_at)) { + if (has_object_expired(dpp, store, bucket_info, key, delete_at)) { formatter->open_object_section("object_status"); formatter->dump_string("object", key.name); formatter->dump_stream("delete_at") << delete_at; if (!dry_run) { - ret = rgw_remove_object(store, bucket_info, bucket_info.bucket, key); + ret = rgw_remove_object(dpp, store, bucket_info, bucket_info.bucket, key); formatter->dump_int("status", ret); } @@ -1975,16 +1995,17 @@ static int fix_bucket_obj_expiry(rgw::sal::RGWRadosStore *store, int RGWBucketAdminOp::fix_obj_expiry(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, - RGWFormatterFlusher& flusher, bool dry_run) + RGWFormatterFlusher& flusher, + const DoutPrefixProvider *dpp, bool dry_run) { RGWBucket admin_bucket; - int ret = admin_bucket.init(store, op_state, null_yield); + int ret = admin_bucket.init(store, op_state, null_yield, dpp); if (ret < 0) { - lderr(store->ctx()) << "failed to initialize bucket" << dendl; + ldpp_dout(dpp, -1) << "failed to initialize bucket" << dendl; return ret; } - return fix_bucket_obj_expiry(store, admin_bucket.get_bucket_info(), flusher, dry_run); + return fix_bucket_obj_expiry(dpp, store, admin_bucket.get_bucket_info(), flusher, dry_run); } void RGWBucketCompleteInfo::dump(Formatter *f) const { @@ -2031,7 +2052,7 @@ public: return new RGWBucketEntryMetadataObject(be, objv, mtime); } - int do_get(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWMetadataObject **obj, optional_yield y) override { + int do_get(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWMetadataObject **obj, optional_yield y, const DoutPrefixProvider *dpp) override { RGWObjVersionTracker ot; RGWBucketEntryPoint be; @@ -2040,7 +2061,7 @@ public: RGWSI_Bucket_EP_Ctx ctx(op->ctx()); - int ret = svc.bucket->read_bucket_entrypoint_info(ctx, entry, &be, &ot, &mtime, &attrs, y); + int ret = svc.bucket->read_bucket_entrypoint_info(ctx, entry, &be, &ot, &mtime, &attrs, y, dpp); if (ret < 0) return ret; @@ -2055,17 +2076,18 @@ public: RGWMetadataObject *obj, RGWObjVersionTracker& objv_tracker, optional_yield y, + const DoutPrefixProvider *dpp, RGWMDLogSyncType type, bool from_remote_zone) override; int do_remove(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWObjVersionTracker& objv_tracker, - optional_yield y) override { + optional_yield y, const DoutPrefixProvider *dpp) override { RGWBucketEntryPoint be; real_time orig_mtime; RGWSI_Bucket_EP_Ctx ctx(op->ctx()); - int ret = svc.bucket->read_bucket_entrypoint_info(ctx, entry, &be, &objv_tracker, &orig_mtime, nullptr, y); + int ret = svc.bucket->read_bucket_entrypoint_info(ctx, entry, &be, &objv_tracker, &orig_mtime, nullptr, y, dpp); if (ret < 0) return ret; @@ -2074,14 +2096,14 @@ public: * it immediately and don't want to invalidate our cached objv_version or the bucket obj removal * will incorrectly fail. 
*/ - ret = ctl.bucket->unlink_bucket(be.owner, be.bucket, y, false); + ret = ctl.bucket->unlink_bucket(be.owner, be.bucket, y, dpp, false); if (ret < 0) { - lderr(svc.bucket->ctx()) << "could not unlink bucket=" << entry << " owner=" << be.owner << dendl; + ldpp_dout(dpp, -1) << "could not unlink bucket=" << entry << " owner=" << be.owner << dendl; } - ret = svc.bucket->remove_bucket_entrypoint_info(ctx, entry, &objv_tracker, y); + ret = svc.bucket->remove_bucket_entrypoint_info(ctx, entry, &objv_tracker, y, dpp); if (ret < 0) { - lderr(svc.bucket->ctx()) << "could not delete bucket=" << entry << dendl; + ldpp_dout(dpp, -1) << "could not delete bucket=" << entry << dendl; } /* idempotent */ return 0; @@ -2119,21 +2141,22 @@ public: obj->get_ep().encode(*bl); } - int put_checked() override; - int put_post() override; + int put_checked(const DoutPrefixProvider *dpp) override; + int put_post(const DoutPrefixProvider *dpp) override; }; int RGWBucketMetadataHandler::do_put(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWMetadataObject *obj, RGWObjVersionTracker& objv_tracker, optional_yield y, + const DoutPrefixProvider *dpp, RGWMDLogSyncType type, bool from_remote_zone) { RGWMetadataHandlerPut_Bucket put_op(this, op, entry, obj, objv_tracker, y, type, from_remote_zone); - return do_put_operate(&put_op); + return do_put_operate(&put_op, dpp); } -int RGWMetadataHandlerPut_Bucket::put_checked() +int RGWMetadataHandlerPut_Bucket::put_checked(const DoutPrefixProvider *dpp) { RGWBucketEntryMetadataObject *orig_obj = static_cast(old_obj); @@ -2153,10 +2176,11 @@ int RGWMetadataHandlerPut_Bucket::put_checked() mtime, pattrs, &objv_tracker, - y); + y, + dpp); } -int RGWMetadataHandlerPut_Bucket::put_post() +int RGWMetadataHandlerPut_Bucket::put_post(const DoutPrefixProvider *dpp) { auto& be = obj->get_ep(); @@ -2164,9 +2188,9 @@ int RGWMetadataHandlerPut_Bucket::put_post() /* link bucket */ if (be.linked) { - ret = bhandler->ctl.bucket->link_bucket(be.owner, be.bucket, be.creation_time, y, false); + ret = bhandler->ctl.bucket->link_bucket(be.owner, be.bucket, be.creation_time, y, dpp, false); } else { - ret = bhandler->ctl.bucket->unlink_bucket(be.owner, be.bucket, y, false); + ret = bhandler->ctl.bucket->unlink_bucket(be.owner, be.bucket, y, dpp, false); } return ret; @@ -2238,12 +2262,12 @@ public: RGWArchiveBucketMetadataHandler() {} int do_remove(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWObjVersionTracker& objv_tracker, - optional_yield y) override { + optional_yield y, const DoutPrefixProvider *dpp) override { auto cct = svc.bucket->ctx(); RGWSI_Bucket_EP_Ctx ctx(op->ctx()); - ldout(cct, 5) << "SKIP: bucket removal is not allowed on archive zone: bucket:" << entry << " ... proceeding to rename" << dendl; + ldpp_dout(dpp, 5) << "SKIP: bucket removal is not allowed on archive zone: bucket:" << entry << " ... 
proceeding to rename" << dendl; string tenant_name, bucket_name; parse_bucket(entry, &tenant_name, &bucket_name); @@ -2257,7 +2281,7 @@ public: RGWBucketEntryPoint be; map attrs; - int ret = svc.bucket->read_bucket_entrypoint_info(ctx, entry, &be, &objv_tracker, &mtime, &attrs, y); + int ret = svc.bucket->read_bucket_entrypoint_info(ctx, entry, &be, &objv_tracker, &mtime, &attrs, y, dpp); if (ret < 0) { return ret; } @@ -2270,7 +2294,7 @@ public: ceph::real_time orig_mtime; RGWBucketInfo old_bi; - ret = ctl.bucket->read_bucket_instance_info(be.bucket, &old_bi, y, RGWBucketCtl::BucketInstance::GetParams() + ret = ctl.bucket->read_bucket_instance_info(be.bucket, &old_bi, y, dpp, RGWBucketCtl::BucketInstance::GetParams() .set_mtime(&orig_mtime) .set_attrs(&attrs_m)); if (ret < 0) { @@ -2306,13 +2330,13 @@ public: new_be.bucket.name = new_bucket_name; - ret = ctl.bucket->store_bucket_instance_info(be.bucket, new_bi, y, RGWBucketCtl::BucketInstance::PutParams() + ret = ctl.bucket->store_bucket_instance_info(be.bucket, new_bi, y, dpp, RGWBucketCtl::BucketInstance::PutParams() .set_exclusive(false) .set_mtime(orig_mtime) .set_attrs(&attrs_m) .set_orig_info(&old_bi)); if (ret < 0) { - ldout(cct, 0) << "ERROR: failed to put new bucket instance info for bucket=" << new_bi.bucket << " ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to put new bucket instance info for bucket=" << new_bi.bucket << " ret=" << ret << dendl; return ret; } @@ -2322,25 +2346,25 @@ public: ot.generate_new_write_ver(cct); ret = svc.bucket->store_bucket_entrypoint_info(ctx, RGWSI_Bucket::get_entrypoint_meta_key(new_be.bucket), - new_be, true, mtime, &attrs, nullptr, y); + new_be, true, mtime, &attrs, nullptr, y, dpp); if (ret < 0) { - ldout(cct, 0) << "ERROR: failed to put new bucket entrypoint for bucket=" << new_be.bucket << " ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to put new bucket entrypoint for bucket=" << new_be.bucket << " ret=" << ret << dendl; return ret; } /* link new bucket */ - ret = ctl.bucket->link_bucket(new_be.owner, new_be.bucket, new_be.creation_time, y, false); + ret = ctl.bucket->link_bucket(new_be.owner, new_be.bucket, new_be.creation_time, y, dpp, false); if (ret < 0) { - ldout(cct, 0) << "ERROR: failed to link new bucket for bucket=" << new_be.bucket << " ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to link new bucket for bucket=" << new_be.bucket << " ret=" << ret << dendl; return ret; } /* clean up old stuff */ - ret = ctl.bucket->unlink_bucket(be.owner, entry_bucket, y, false); + ret = ctl.bucket->unlink_bucket(be.owner, entry_bucket, y, dpp, false); if (ret < 0) { - lderr(cct) << "could not unlink bucket=" << entry << " owner=" << be.owner << dendl; + ldpp_dout(dpp, -1) << "could not unlink bucket=" << entry << " owner=" << be.owner << dendl; } // if (ret == -ECANCELED) it means that there was a race here, and someone @@ -2351,15 +2375,16 @@ public: ret = svc.bucket->remove_bucket_entrypoint_info(ctx, RGWSI_Bucket::get_entrypoint_meta_key(be.bucket), &objv_tracker, - y); + y, + dpp); if (ret < 0) { - ldout(cct, 0) << "ERROR: failed to put new bucket entrypoint for bucket=" << new_be.bucket << " ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to put new bucket entrypoint for bucket=" << new_be.bucket << " ret=" << ret << dendl; return ret; } - ret = ctl.bucket->remove_bucket_instance_info(be.bucket, old_bi, y); + ret = ctl.bucket->remove_bucket_instance_info(be.bucket, old_bi, y, dpp); if (ret < 0) { - lderr(cct) << "could not delete 
bucket=" << entry << dendl; + ldpp_dout(dpp, -1) << "could not delete bucket=" << entry << dendl; } @@ -2371,12 +2396,12 @@ public: int do_put(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWMetadataObject *obj, RGWObjVersionTracker& objv_tracker, - optional_yield y, + optional_yield y, const DoutPrefixProvider *dpp, RGWMDLogSyncType type, bool from_remote_zone) override { if (entry.find("-deleted-") != string::npos) { RGWObjVersionTracker ot; RGWMetadataObject *robj; - int ret = do_get(op, entry, &robj, y); + int ret = do_get(op, entry, &robj, y, dpp); if (ret != -ENOENT) { if (ret < 0) { return ret; @@ -2384,7 +2409,7 @@ public: ot.read_version = robj->get_version(); delete robj; - ret = do_remove(op, entry, ot, y); + ret = do_remove(op, entry, ot, y, dpp); if (ret < 0) { return ret; } @@ -2392,7 +2417,7 @@ public: } return RGWBucketMetadataHandler::do_put(op, entry, obj, - objv_tracker, y, type, from_remote_zone); + objv_tracker, y, dpp, type, from_remote_zone); } }; @@ -2402,12 +2427,14 @@ class RGWBucketInstanceMetadataHandler : public RGWBucketInstanceMetadataHandler const string& entry, RGWBucketCompleteInfo *bi, ceph::real_time *pmtime, - optional_yield y) { + optional_yield y, + const DoutPrefixProvider *dpp) { return svc.bucket->read_bucket_instance_info(ctx, entry, &bi->info, pmtime, &bi->attrs, - y); + y, + dpp); } public: @@ -2443,13 +2470,13 @@ public: return new RGWBucketInstanceMetadataObject(bci, objv, mtime); } - int do_get(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWMetadataObject **obj, optional_yield y) override { + int do_get(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWMetadataObject **obj, optional_yield y, const DoutPrefixProvider *dpp) override { RGWBucketCompleteInfo bci; real_time mtime; RGWSI_Bucket_BI_Ctx ctx(op->ctx()); - int ret = svc.bucket->read_bucket_instance_info(ctx, entry, &bci.info, &mtime, &bci.attrs, y); + int ret = svc.bucket->read_bucket_instance_info(ctx, entry, &bci.info, &mtime, &bci.attrs, y, dpp); if (ret < 0) return ret; @@ -2462,20 +2489,20 @@ public: int do_put(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWMetadataObject *_obj, RGWObjVersionTracker& objv_tracker, - optional_yield y, + optional_yield y, const DoutPrefixProvider *dpp, RGWMDLogSyncType sync_type, bool from_remote_zone) override; int do_remove(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWObjVersionTracker& objv_tracker, - optional_yield y) override { + optional_yield y, const DoutPrefixProvider *dpp) override { RGWBucketCompleteInfo bci; RGWSI_Bucket_BI_Ctx ctx(op->ctx()); - int ret = read_bucket_instance_entry(ctx, entry, &bci, nullptr, y); + int ret = read_bucket_instance_entry(ctx, entry, &bci, nullptr, y, dpp); if (ret < 0 && ret != -ENOENT) return ret; - return svc.bucket->remove_bucket_instance_info(ctx, entry, bci.info, &bci.info.objv_tracker, y); + return svc.bucket->remove_bucket_instance_info(ctx, entry, bci.info, &bci.info.objv_tracker, y, dpp); } int call(std::function f) { @@ -2514,9 +2541,9 @@ public: obj->get_bucket_info().encode(*bl); } - int put_check() override; - int put_checked() override; - int put_post() override; + int put_check(const DoutPrefixProvider *dpp) override; + int put_checked(const DoutPrefixProvider *dpp) override; + int put_post(const DoutPrefixProvider *dpp) override; }; int RGWBucketInstanceMetadataHandler::do_put(RGWSI_MetaBackend_Handler::Op *op, @@ -2524,11 +2551,12 @@ int RGWBucketInstanceMetadataHandler::do_put(RGWSI_MetaBackend_Handler::Op *op, RGWMetadataObject *obj, RGWObjVersionTracker& 
objv_tracker, optional_yield y, + const DoutPrefixProvider *dpp, RGWMDLogSyncType type, bool from_remote_zone) { RGWMetadataHandlerPut_BucketInstance put_op(svc.bucket->ctx(), this, op, entry, obj, objv_tracker, y, type, from_remote_zone); - return do_put_operate(&put_op); + return do_put_operate(&put_op, dpp); } void init_default_bucket_layout(CephContext *cct, rgw::BucketLayout& layout, @@ -2558,7 +2586,7 @@ void init_default_bucket_layout(CephContext *cct, rgw::BucketLayout& layout, } } -int RGWMetadataHandlerPut_BucketInstance::put_check() +int RGWMetadataHandlerPut_BucketInstance::put_check(const DoutPrefixProvider *dpp) { int ret; @@ -2597,9 +2625,9 @@ int RGWMetadataHandlerPut_BucketInstance::put_check() bci.info.bucket.tenant = tenant_name; // if the sync module never writes data, don't require the zone to specify all placement targets if (bihandler->svc.zone->sync_module_supports_writes()) { - ret = bihandler->svc.zone->select_bucket_location_by_rule(bci.info.placement_rule, &rule_info, y); + ret = bihandler->svc.zone->select_bucket_location_by_rule(dpp, bci.info.placement_rule, &rule_info, y); if (ret < 0) { - ldout(cct, 0) << "ERROR: select_bucket_placement() returned " << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: select_bucket_placement() returned " << ret << dendl; return ret; } } @@ -2617,7 +2645,7 @@ int RGWMetadataHandlerPut_BucketInstance::put_check() return 0; } -int RGWMetadataHandlerPut_BucketInstance::put_checked() +int RGWMetadataHandlerPut_BucketInstance::put_checked(const DoutPrefixProvider *dpp) { RGWBucketInstanceMetadataObject *orig_obj = static_cast(old_obj); @@ -2636,16 +2664,17 @@ int RGWMetadataHandlerPut_BucketInstance::put_checked() false, mtime, pattrs, - y); + y, + dpp); } -int RGWMetadataHandlerPut_BucketInstance::put_post() +int RGWMetadataHandlerPut_BucketInstance::put_post(const DoutPrefixProvider *dpp) { RGWBucketCompleteInfo& bci = obj->get_bci(); objv_tracker = bci.info.objv_tracker; - int ret = bihandler->svc.bi->init_index(bci.info); + int ret = bihandler->svc.bi->init_index(dpp, bci.info); if (ret < 0) { return ret; } @@ -2657,8 +2686,8 @@ class RGWArchiveBucketInstanceMetadataHandler : public RGWBucketInstanceMetadata public: RGWArchiveBucketInstanceMetadataHandler() {} - int do_remove(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWObjVersionTracker& objv_tracker, optional_yield y) override { - ldout(cct, 0) << "SKIP: bucket instance removal is not allowed on archive zone: bucket.instance:" << entry << dendl; + int do_remove(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWObjVersionTracker& objv_tracker, optional_yield y, const DoutPrefixProvider *dpp) override { + ldpp_dout(dpp, 0) << "SKIP: bucket instance removal is not allowed on archive zone: bucket.instance:" << entry << dendl; return 0; } }; @@ -2677,7 +2706,8 @@ RGWBucketCtl::RGWBucketCtl(RGWSI_Zone *zone_svc, void RGWBucketCtl::init(RGWUserCtl *user_ctl, RGWBucketMetadataHandler *_bm_handler, RGWBucketInstanceMetadataHandler *_bmi_handler, - RGWDataChangesLog *datalog) + RGWDataChangesLog *datalog, + const DoutPrefixProvider *dpp) { ctl.user = user_ctl; @@ -2688,8 +2718,8 @@ void RGWBucketCtl::init(RGWUserCtl *user_ctl, bi_be_handler = bmi_handler->get_be_handler(); datalog->set_bucket_filter( - [this](const rgw_bucket& bucket, optional_yield y) { - return bucket_exports_data(bucket, y); + [this](const rgw_bucket& bucket, optional_yield y, const DoutPrefixProvider *dpp) { + return bucket_exports_data(bucket, y, dpp); }); } @@ -2704,7 +2734,7 @@ int 
RGWBucketCtl::call(std::function f) { int RGWBucketCtl::read_bucket_entrypoint_info(const rgw_bucket& bucket, RGWBucketEntryPoint *info, - optional_yield y, + optional_yield y, const DoutPrefixProvider *dpp, const Bucket::GetParams& params) { return bm_handler->call(params.bectx_params, [&](RGWSI_Bucket_EP_Ctx& ctx) { @@ -2715,6 +2745,7 @@ int RGWBucketCtl::read_bucket_entrypoint_info(const rgw_bucket& bucket, params.mtime, params.attrs, y, + dpp, params.cache_info, params.refresh_version); }); @@ -2723,6 +2754,7 @@ int RGWBucketCtl::read_bucket_entrypoint_info(const rgw_bucket& bucket, int RGWBucketCtl::store_bucket_entrypoint_info(const rgw_bucket& bucket, RGWBucketEntryPoint& info, optional_yield y, + const DoutPrefixProvider *dpp, const Bucket::PutParams& params) { return bm_handler->call([&](RGWSI_Bucket_EP_Ctx& ctx) { @@ -2733,25 +2765,29 @@ int RGWBucketCtl::store_bucket_entrypoint_info(const rgw_bucket& bucket, params.mtime, params.attrs, params.objv_tracker, - y); + y, + dpp); }); } int RGWBucketCtl::remove_bucket_entrypoint_info(const rgw_bucket& bucket, optional_yield y, + const DoutPrefixProvider *dpp, const Bucket::RemoveParams& params) { return bm_handler->call([&](RGWSI_Bucket_EP_Ctx& ctx) { return svc.bucket->remove_bucket_entrypoint_info(ctx, RGWSI_Bucket::get_entrypoint_meta_key(bucket), params.objv_tracker, - y); + y, + dpp); }); } int RGWBucketCtl::read_bucket_instance_info(const rgw_bucket& bucket, RGWBucketInfo *info, optional_yield y, + const DoutPrefixProvider *dpp, const BucketInstance::GetParams& params) { int ret = bmi_handler->call(params.bectx_params, [&](RGWSI_Bucket_BI_Ctx& ctx) { @@ -2761,6 +2797,7 @@ int RGWBucketCtl::read_bucket_instance_info(const rgw_bucket& bucket, params.mtime, params.attrs, y, + dpp, params.cache_info, params.refresh_version); }); @@ -2779,6 +2816,7 @@ int RGWBucketCtl::read_bucket_instance_info(const rgw_bucket& bucket, int RGWBucketCtl::read_bucket_info(const rgw_bucket& bucket, RGWBucketInfo *info, optional_yield y, + const DoutPrefixProvider *dpp, const BucketInstance::GetParams& params, RGWObjVersionTracker *ep_objv_tracker) { @@ -2789,7 +2827,7 @@ int RGWBucketCtl::read_bucket_info(const rgw_bucket& bucket, if (b->bucket_id.empty()) { ep.emplace(); - int r = read_bucket_entrypoint_info(*b, &(*ep), y, RGWBucketCtl::Bucket::GetParams() + int r = read_bucket_entrypoint_info(*b, &(*ep), y, dpp, RGWBucketCtl::Bucket::GetParams() .set_bectx_params(params.bectx_params) .set_objv_tracker(ep_objv_tracker)); if (r < 0) { @@ -2805,7 +2843,7 @@ int RGWBucketCtl::read_bucket_info(const rgw_bucket& bucket, info, params.mtime, params.attrs, - y, + y, dpp, params.cache_info, params.refresh_version); }); @@ -2825,6 +2863,7 @@ int RGWBucketCtl::do_store_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx, const rgw_bucket& bucket, RGWBucketInfo& info, optional_yield y, + const DoutPrefixProvider *dpp, const BucketInstance::PutParams& params) { if (params.objv_tracker) { @@ -2838,22 +2877,25 @@ int RGWBucketCtl::do_store_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx, params.exclusive, params.mtime, params.attrs, - y); + y, + dpp); } int RGWBucketCtl::store_bucket_instance_info(const rgw_bucket& bucket, RGWBucketInfo& info, optional_yield y, + const DoutPrefixProvider *dpp, const BucketInstance::PutParams& params) { return bmi_handler->call([&](RGWSI_Bucket_BI_Ctx& ctx) { - return do_store_bucket_instance_info(ctx, bucket, info, y, params); + return do_store_bucket_instance_info(ctx, bucket, info, y, dpp, params); }); } int 
RGWBucketCtl::remove_bucket_instance_info(const rgw_bucket& bucket, RGWBucketInfo& info, optional_yield y, + const DoutPrefixProvider *dpp, const BucketInstance::RemoveParams& params) { if (params.objv_tracker) { @@ -2865,7 +2907,8 @@ int RGWBucketCtl::remove_bucket_instance_info(const rgw_bucket& bucket, RGWSI_Bucket::get_bi_meta_key(bucket), info, &info.objv_tracker, - y); + y, + dpp); }); } @@ -2876,7 +2919,7 @@ int RGWBucketCtl::do_store_linked_bucket_info(RGWSI_Bucket_X_Ctx& ctx, obj_version *pep_objv, map *pattrs, bool create_entry_point, - optional_yield y) + optional_yield y, const DoutPrefixProvider *dpp) { bool create_head = !info.has_instance_obj || create_entry_point; @@ -2886,7 +2929,7 @@ int RGWBucketCtl::do_store_linked_bucket_info(RGWSI_Bucket_X_Ctx& ctx, orig_info, exclusive, mtime, pattrs, - y); + y, dpp); if (ret < 0) { return ret; } @@ -2915,7 +2958,8 @@ int RGWBucketCtl::do_store_linked_bucket_info(RGWSI_Bucket_X_Ctx& ctx, mtime, pattrs, &ot, - y); + y, + dpp); if (ret < 0) return ret; @@ -2923,7 +2967,8 @@ int RGWBucketCtl::do_store_linked_bucket_info(RGWSI_Bucket_X_Ctx& ctx, } int RGWBucketCtl::convert_old_bucket_info(RGWSI_Bucket_X_Ctx& ctx, const rgw_bucket& bucket, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { RGWBucketEntryPoint entry_point; real_time ep_mtime; @@ -2932,13 +2977,13 @@ int RGWBucketCtl::convert_old_bucket_info(RGWSI_Bucket_X_Ctx& ctx, RGWBucketInfo info; auto cct = svc.bucket->ctx(); - ldout(cct, 10) << "RGWRados::convert_old_bucket_info(): bucket=" << bucket << dendl; + ldpp_dout(dpp, 10) << "RGWRados::convert_old_bucket_info(): bucket=" << bucket << dendl; int ret = svc.bucket->read_bucket_entrypoint_info(ctx.ep, RGWSI_Bucket::get_entrypoint_meta_key(bucket), - &entry_point, &ot, &ep_mtime, &attrs, y); + &entry_point, &ot, &ep_mtime, &attrs, y, dpp); if (ret < 0) { - ldout(cct, 0) << "ERROR: get_bucket_entrypoint_info() returned " << ret << " bucket=" << bucket << dendl; + ldpp_dout(dpp, 0) << "ERROR: get_bucket_entrypoint_info() returned " << ret << " bucket=" << bucket << dendl; return ret; } @@ -2951,9 +2996,9 @@ int RGWBucketCtl::convert_old_bucket_info(RGWSI_Bucket_X_Ctx& ctx, ot.generate_new_write_ver(cct); - ret = do_store_linked_bucket_info(ctx, info, nullptr, false, ep_mtime, &ot.write_version, &attrs, true, y); + ret = do_store_linked_bucket_info(ctx, info, nullptr, false, ep_mtime, &ot.write_version, &attrs, true, y, dpp); if (ret < 0) { - ldout(cct, 0) << "ERROR: failed to put_linked_bucket_info(): " << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to put_linked_bucket_info(): " << ret << dendl; return ret; } @@ -2963,16 +3008,17 @@ int RGWBucketCtl::convert_old_bucket_info(RGWSI_Bucket_X_Ctx& ctx, int RGWBucketCtl::set_bucket_instance_attrs(RGWBucketInfo& bucket_info, map& attrs, RGWObjVersionTracker *objv_tracker, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { return call([&](RGWSI_Bucket_X_Ctx& ctx) { rgw_bucket& bucket = bucket_info.bucket; if (!bucket_info.has_instance_obj) { /* an old bucket object, need to convert it */ - int ret = convert_old_bucket_info(ctx, bucket, y); + int ret = convert_old_bucket_info(ctx, bucket, y, dpp); if (ret < 0) { - ldout(cct, 0) << "ERROR: failed converting old bucket info: " << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed converting old bucket info: " << ret << dendl; return ret; } } @@ -2981,6 +3027,7 @@ int RGWBucketCtl::set_bucket_instance_attrs(RGWBucketInfo& bucket_info, bucket, bucket_info, y, + dpp, 
BucketInstance::PutParams().set_attrs(&attrs) .set_objv_tracker(objv_tracker) .set_orig_info(&bucket_info)); @@ -2992,12 +3039,13 @@ int RGWBucketCtl::link_bucket(const rgw_user& user_id, const rgw_bucket& bucket, ceph::real_time creation_time, optional_yield y, + const DoutPrefixProvider *dpp, bool update_entrypoint, rgw_ep_info *pinfo) { return bm_handler->call([&](RGWSI_Bucket_EP_Ctx& ctx) { return do_link_bucket(ctx, user_id, bucket, creation_time, - update_entrypoint, pinfo, y); + update_entrypoint, pinfo, y, dpp); }); } @@ -3007,7 +3055,8 @@ int RGWBucketCtl::do_link_bucket(RGWSI_Bucket_EP_Ctx& ctx, ceph::real_time creation_time, bool update_entrypoint, rgw_ep_info *pinfo, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { int ret; @@ -3027,18 +3076,18 @@ int RGWBucketCtl::do_link_bucket(RGWSI_Bucket_EP_Ctx& ctx, meta_key, &ep, &rot, nullptr, &attrs, - y); + y, dpp); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << "ERROR: store->get_bucket_entrypoint_info() returned: " + ldpp_dout(dpp, 0) << "ERROR: store->get_bucket_entrypoint_info() returned: " << cpp_strerror(-ret) << dendl; } pattrs = &attrs; } } - ret = ctl.user->add_bucket(user_id, bucket, creation_time, y); + ret = ctl.user->add_bucket(dpp, user_id, bucket, creation_time, y); if (ret < 0) { - ldout(cct, 0) << "ERROR: error adding bucket to user directory:" + ldpp_dout(dpp, 0) << "ERROR: error adding bucket to user directory:" << " user=" << user_id << " bucket=" << bucket << " err=" << cpp_strerror(-ret) @@ -3053,25 +3102,25 @@ int RGWBucketCtl::do_link_bucket(RGWSI_Bucket_EP_Ctx& ctx, ep.owner = user_id; ep.bucket = bucket; ret = svc.bucket->store_bucket_entrypoint_info( - ctx, meta_key, ep, false, real_time(), pattrs, &rot, y); + ctx, meta_key, ep, false, real_time(), pattrs, &rot, y, dpp); if (ret < 0) goto done_err; return 0; done_err: - int r = do_unlink_bucket(ctx, user_id, bucket, true, y); + int r = do_unlink_bucket(ctx, user_id, bucket, true, y, dpp); if (r < 0) { - ldout(cct, 0) << "ERROR: failed unlinking bucket on error cleanup: " + ldpp_dout(dpp, 0) << "ERROR: failed unlinking bucket on error cleanup: " << cpp_strerror(-r) << dendl; } return ret; } -int RGWBucketCtl::unlink_bucket(const rgw_user& user_id, const rgw_bucket& bucket, optional_yield y, bool update_entrypoint) +int RGWBucketCtl::unlink_bucket(const rgw_user& user_id, const rgw_bucket& bucket, optional_yield y, const DoutPrefixProvider *dpp, bool update_entrypoint) { return bm_handler->call([&](RGWSI_Bucket_EP_Ctx& ctx) { - return do_unlink_bucket(ctx, user_id, bucket, update_entrypoint, y); + return do_unlink_bucket(ctx, user_id, bucket, update_entrypoint, y, dpp); }); } @@ -3079,11 +3128,12 @@ int RGWBucketCtl::do_unlink_bucket(RGWSI_Bucket_EP_Ctx& ctx, const rgw_user& user_id, const rgw_bucket& bucket, bool update_entrypoint, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { - int ret = ctl.user->remove_bucket(user_id, bucket, y); + int ret = ctl.user->remove_bucket(dpp, user_id, bucket, y); if (ret < 0) { - ldout(cct, 0) << "ERROR: error removing bucket from directory: " + ldpp_dout(dpp, 0) << "ERROR: error removing bucket from directory: " << cpp_strerror(-ret)<< dendl; } @@ -3094,7 +3144,7 @@ int RGWBucketCtl::do_unlink_bucket(RGWSI_Bucket_EP_Ctx& ctx, RGWObjVersionTracker ot; map attrs; string meta_key = RGWSI_Bucket::get_entrypoint_meta_key(bucket); - ret = svc.bucket->read_bucket_entrypoint_info(ctx, meta_key, &ep, &ot, nullptr, &attrs, y); + ret = svc.bucket->read_bucket_entrypoint_info(ctx, 
meta_key, &ep, &ot, nullptr, &attrs, y, dpp); if (ret == -ENOENT) return 0; if (ret < 0) @@ -3104,23 +3154,24 @@ int RGWBucketCtl::do_unlink_bucket(RGWSI_Bucket_EP_Ctx& ctx, return 0; if (ep.owner != user_id) { - ldout(cct, 0) << "bucket entry point user mismatch, can't unlink bucket: " << ep.owner << " != " << user_id << dendl; + ldpp_dout(dpp, 0) << "bucket entry point user mismatch, can't unlink bucket: " << ep.owner << " != " << user_id << dendl; return -EINVAL; } ep.linked = false; - return svc.bucket->store_bucket_entrypoint_info(ctx, meta_key, ep, false, real_time(), &attrs, &ot, y); + return svc.bucket->store_bucket_entrypoint_info(ctx, meta_key, ep, false, real_time(), &attrs, &ot, y, dpp); } int RGWBucketCtl::set_acl(ACLOwner& owner, rgw_bucket& bucket, RGWBucketInfo& bucket_info, bufferlist& bl, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { // set owner and acl bucket_info.owner = owner.get_id(); std::map attrs{{RGW_ATTR_ACL, bl}}; - int r = store_bucket_instance_info(bucket, bucket_info, y, + int r = store_bucket_instance_info(bucket, bucket_info, y, dpp, BucketInstance::PutParams().set_attrs(&attrs)); if (r < 0) { cerr << "ERROR: failed to set bucket owner: " << cpp_strerror(-r) << std::endl; @@ -3133,7 +3184,7 @@ int RGWBucketCtl::set_acl(ACLOwner& owner, rgw_bucket& bucket, // TODO: remove RGWRados dependency for bucket listing int RGWBucketCtl::chown(rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket_info, const rgw_user& user_id, const std::string& display_name, - const std::string& marker, optional_yield y) + const std::string& marker, optional_yield y, const DoutPrefixProvider *dpp) { RGWObjectCtx obj_ctx(store); std::vector objs; @@ -3154,9 +3205,9 @@ int RGWBucketCtl::chown(rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket_in do { objs.clear(); - int ret = list_op.list_objects(max_entries, &objs, &common_prefixes, &is_truncated, y); + int ret = list_op.list_objects(dpp, max_entries, &objs, &common_prefixes, &is_truncated, y); if (ret < 0) { - ldout(store->ctx(), 0) << "ERROR: list objects failed: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "ERROR: list objects failed: " << cpp_strerror(-ret) << dendl; return ret; } @@ -3171,14 +3222,14 @@ int RGWBucketCtl::chown(rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket_in map attrs; read_op.params.attrs = &attrs; - ret = read_op.prepare(y); + ret = read_op.prepare(y, dpp); if (ret < 0){ - ldout(store->ctx(), 0) << "ERROR: failed to read object " << obj.key.name << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to read object " << obj.key.name << cpp_strerror(-ret) << dendl; continue; } const auto& aiter = attrs.find(RGW_ATTR_ACL); if (aiter == attrs.end()) { - ldout(store->ctx(), 0) << "ERROR: no acls found for object " << obj.key.name << " .Continuing with next object." << dendl; + ldpp_dout(dpp, 0) << "ERROR: no acls found for object " << obj.key.name << " .Continuing with next object." 
<< dendl; continue; } else { bufferlist& bl = aiter->second; @@ -3188,7 +3239,7 @@ int RGWBucketCtl::chown(rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket_in decode(policy, bl); owner = policy.get_owner(); } catch (buffer::error& err) { - ldout(store->ctx(), 0) << "ERROR: decode policy failed" << err.what() + ldpp_dout(dpp, 0) << "ERROR: decode policy failed" << err.what() << dendl; return -EIO; } @@ -3213,9 +3264,9 @@ int RGWBucketCtl::chown(rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket_in encode(policy, bl); obj_ctx.set_atomic(r_obj); - ret = store->getRados()->set_attr(&obj_ctx, bucket_info, r_obj, RGW_ATTR_ACL, bl); + ret = store->getRados()->set_attr(dpp, &obj_ctx, bucket_info, r_obj, RGW_ATTR_ACL, bl); if (ret < 0) { - ldout(store->ctx(), 0) << "ERROR: modify attr failed " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "ERROR: modify attr failed " << cpp_strerror(-ret) << dendl; return ret; } } @@ -3228,22 +3279,24 @@ int RGWBucketCtl::chown(rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket_in int RGWBucketCtl::read_bucket_stats(const rgw_bucket& bucket, RGWBucketEnt *result, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { return call([&](RGWSI_Bucket_X_Ctx& ctx) { - return svc.bucket->read_bucket_stats(ctx, bucket, result, y); + return svc.bucket->read_bucket_stats(ctx, bucket, result, y, dpp); }); } int RGWBucketCtl::read_buckets_stats(map& m, - optional_yield y) + optional_yield y, const DoutPrefixProvider *dpp) { return call([&](RGWSI_Bucket_X_Ctx& ctx) { - return svc.bucket->read_buckets_stats(ctx, m, y); + return svc.bucket->read_buckets_stats(ctx, m, y, dpp); }); } -int RGWBucketCtl::sync_user_stats(const rgw_user& user_id, +int RGWBucketCtl::sync_user_stats(const DoutPrefixProvider *dpp, + const rgw_user& user_id, const RGWBucketInfo& bucket_info, optional_yield y, RGWBucketEnt* pent) @@ -3252,37 +3305,39 @@ int RGWBucketCtl::sync_user_stats(const rgw_user& user_id, if (!pent) { pent = &ent; } - int r = svc.bi->read_stats(bucket_info, pent, null_yield); + int r = svc.bi->read_stats(dpp, bucket_info, pent, null_yield); if (r < 0) { - ldout(cct, 20) << __func__ << "(): failed to read bucket stats (r=" << r << ")" << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): failed to read bucket stats (r=" << r << ")" << dendl; return r; } - return ctl.user->flush_bucket_stats(user_id, *pent, y); + return ctl.user->flush_bucket_stats(dpp, user_id, *pent, y); } int RGWBucketCtl::get_sync_policy_handler(std::optional zone, std::optional bucket, RGWBucketSyncPolicyHandlerRef *phandler, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { int r = call([&](RGWSI_Bucket_X_Ctx& ctx) { - return svc.bucket_sync->get_policy_handler(ctx, zone, bucket, phandler, y); + return svc.bucket_sync->get_policy_handler(ctx, zone, bucket, phandler, y, dpp); }); if (r < 0) { - ldout(cct, 20) << __func__ << "(): failed to get policy handler for bucket=" << bucket << " (r=" << r << ")" << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): failed to get policy handler for bucket=" << bucket << " (r=" << r << ")" << dendl; return r; } return 0; } int RGWBucketCtl::bucket_exports_data(const rgw_bucket& bucket, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { RGWBucketSyncPolicyHandlerRef handler; - int r = get_sync_policy_handler(std::nullopt, bucket, &handler, y); + int r = get_sync_policy_handler(std::nullopt, bucket, &handler, y, dpp); if (r < 0) { return r; } @@ -3291,12 +3346,12 @@ int 
RGWBucketCtl::bucket_exports_data(const rgw_bucket& bucket, } int RGWBucketCtl::bucket_imports_data(const rgw_bucket& bucket, - optional_yield y) + optional_yield y, const DoutPrefixProvider *dpp) { RGWBucketSyncPolicyHandlerRef handler; - int r = get_sync_policy_handler(std::nullopt, bucket, &handler, y); + int r = get_sync_policy_handler(std::nullopt, bucket, &handler, y, dpp); if (r < 0) { return r; } diff --git a/src/rgw/rgw_bucket.h b/src/rgw/rgw_bucket.h index 81d076bce5d91..7ab2dfc81bdcd 100644 --- a/src/rgw/rgw_bucket.h +++ b/src/rgw/rgw_bucket.h @@ -226,7 +226,8 @@ public: * Get all the buckets owned by a user and fill up an RGWUserBuckets with them. * Returns: 0 on success, -ERR# on failure. */ -extern int rgw_read_user_buckets(rgw::sal::RGWRadosStore *store, +extern int rgw_read_user_buckets(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, const rgw_user& user_id, rgw::sal::RGWBucketList& buckets, const string& marker, @@ -235,14 +236,14 @@ extern int rgw_read_user_buckets(rgw::sal::RGWRadosStore *store, bool need_stats, optional_yield y); -extern int rgw_remove_object(rgw::sal::RGWRadosStore *store, const RGWBucketInfo& bucket_info, const rgw_bucket& bucket, rgw_obj_key& key); +extern int rgw_remove_object(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, const RGWBucketInfo& bucket_info, const rgw_bucket& bucket, rgw_obj_key& key); extern int rgw_remove_bucket_bypass_gc(rgw::sal::RGWRadosStore *store, rgw_bucket& bucket, int concurrent_max, optional_yield y); extern int rgw_object_get_attr(rgw::sal::RGWRadosStore* store, const RGWBucketInfo& bucket_info, const rgw_obj& obj, const char* attr_name, bufferlist& out_bl, optional_yield y); -extern void check_bad_user_bucket_mapping(rgw::sal::RGWRadosStore *store, const rgw_user& user_id, bool fix, optional_yield y); +extern void check_bad_user_bucket_mapping(rgw::sal::RGWRadosStore *store, const rgw_user& user_id, bool fix, optional_yield y, const DoutPrefixProvider *dpp); struct RGWBucketAdminOpState { rgw_user uid; @@ -348,32 +349,35 @@ class RGWBucket public: RGWBucket() : store(NULL), handle(NULL), failure(false) {} int init(rgw::sal::RGWRadosStore *storage, RGWBucketAdminOpState& op_state, optional_yield y, - std::string *err_msg = NULL, map *pattrs = NULL); + const DoutPrefixProvider *dpp, std::string *err_msg = NULL, map *pattrs = NULL); int check_bad_index_multipart(RGWBucketAdminOpState& op_state, - RGWFormatterFlusher& flusher, std::string *err_msg = NULL); + RGWFormatterFlusher& flusher, + const DoutPrefixProvider *dpp, std::string *err_msg = NULL); - int check_object_index(RGWBucketAdminOpState& op_state, + int check_object_index(const DoutPrefixProvider *dpp, + RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y, std::string *err_msg = NULL); - int check_index(RGWBucketAdminOpState& op_state, + int check_index(const DoutPrefixProvider *dpp, + RGWBucketAdminOpState& op_state, map& existing_stats, map& calculated_stats, std::string *err_msg = NULL); - int link(RGWBucketAdminOpState& op_state, optional_yield y, + int link(RGWBucketAdminOpState& op_state, optional_yield y, const DoutPrefixProvider *dpp, map& attrs, std::string *err_msg = NULL); int chown(RGWBucketAdminOpState& op_state, const string& marker, - optional_yield y, std::string *err_msg = NULL); - int unlink(RGWBucketAdminOpState& op_state, optional_yield y, std::string *err_msg = NULL); - int set_quota(RGWBucketAdminOpState& op_state, std::string *err_msg = NULL); + optional_yield y, const 
DoutPrefixProvider *dpp, std::string *err_msg = NULL); + int unlink(RGWBucketAdminOpState& op_state, optional_yield y, const DoutPrefixProvider *dpp, std::string *err_msg = NULL); + int set_quota(RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, std::string *err_msg = NULL); - int remove_object(RGWBucketAdminOpState& op_state, std::string *err_msg = NULL); + int remove_object(const DoutPrefixProvider *dpp, RGWBucketAdminOpState& op_state, std::string *err_msg = NULL); int policy_bl_to_stream(bufferlist& bl, ostream& o); - int get_policy(RGWBucketAdminOpState& op_state, RGWAccessControlPolicy& policy, optional_yield y); - int sync(RGWBucketAdminOpState& op_state, map *attrs, std::string *err_msg = NULL); + int get_policy(RGWBucketAdminOpState& op_state, RGWAccessControlPolicy& policy, optional_yield y, const DoutPrefixProvider *dpp); + int sync(RGWBucketAdminOpState& op_state, map *attrs, const DoutPrefixProvider *dpp, std::string *err_msg = NULL); void clear_failure() { failure = false; } @@ -384,39 +388,41 @@ class RGWBucketAdminOp { public: static int get_policy(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, - RGWFormatterFlusher& flusher); + RGWFormatterFlusher& flusher, const DoutPrefixProvider *dpp); static int get_policy(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, - RGWAccessControlPolicy& policy); + RGWAccessControlPolicy& policy, const DoutPrefixProvider *dpp); static int dump_s3_policy(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, - ostream& os); + ostream& os, const DoutPrefixProvider *dpp); - static int unlink(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state); - static int link(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, string *err_msg = NULL); - static int chown(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, const string& marker, string *err_msg = NULL); + static int unlink(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp); + static int link(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, string *err_msg = NULL); + static int chown(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, const string& marker, const DoutPrefixProvider *dpp, string *err_msg = NULL); static int check_index(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, - RGWFormatterFlusher& flusher, optional_yield y); + RGWFormatterFlusher& flusher, optional_yield y, const DoutPrefixProvider *dpp); - static int remove_bucket(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, optional_yield y, bool bypass_gc = false, bool keep_index_consistent = true); - static int remove_object(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state); - static int info(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y); + static int remove_bucket(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, optional_yield y, + const DoutPrefixProvider *dpp, bool bypass_gc = false, bool keep_index_consistent = true); + static int remove_object(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp); + static int info(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y, const DoutPrefixProvider *dpp); static int limit_check(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, const std::list& 
user_ids, RGWFormatterFlusher& flusher, optional_yield y, + const DoutPrefixProvider *dpp, bool warnings_only = false); - static int set_quota(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state); + static int set_quota(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp); static int list_stale_instances(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, - RGWFormatterFlusher& flusher); + RGWFormatterFlusher& flusher, const DoutPrefixProvider *dpp); static int clear_stale_instances(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, - RGWFormatterFlusher& flusher); + RGWFormatterFlusher& flusher, const DoutPrefixProvider *dpp); static int fix_lc_shards(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, - RGWFormatterFlusher& flusher); + RGWFormatterFlusher& flusher, const DoutPrefixProvider *dpp); static int fix_obj_expiry(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, - RGWFormatterFlusher& flusher, bool dry_run = false); + RGWFormatterFlusher& flusher, const DoutPrefixProvider *dpp, bool dry_run = false); - static int sync_bucket(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, string *err_msg = NULL); + static int sync_bucket(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, string *err_msg = NULL); }; struct rgw_ep_info { @@ -459,7 +465,8 @@ public: void init(RGWUserCtl *user_ctl, RGWBucketMetadataHandler *_bm_handler, RGWBucketInstanceMetadataHandler *_bmi_handler, - RGWDataChangesLog *datalog); + RGWDataChangesLog *datalog, + const DoutPrefixProvider *dpp); struct Bucket { struct GetParams { @@ -638,27 +645,33 @@ public: int read_bucket_entrypoint_info(const rgw_bucket& bucket, RGWBucketEntryPoint *info, optional_yield y, + const DoutPrefixProvider *dpp, const Bucket::GetParams& params = {}); int store_bucket_entrypoint_info(const rgw_bucket& bucket, RGWBucketEntryPoint& info, optional_yield y, + const DoutPrefixProvider *dpp, const Bucket::PutParams& params = {}); int remove_bucket_entrypoint_info(const rgw_bucket& bucket, optional_yield y, + const DoutPrefixProvider *dpp, const Bucket::RemoveParams& params = {}); /* bucket instance */ int read_bucket_instance_info(const rgw_bucket& bucket, RGWBucketInfo *info, optional_yield y, + const DoutPrefixProvider *dpp, const BucketInstance::GetParams& params = {}); int store_bucket_instance_info(const rgw_bucket& bucket, RGWBucketInfo& info, optional_yield y, + const DoutPrefixProvider *dpp, const BucketInstance::PutParams& params = {}); int remove_bucket_instance_info(const rgw_bucket& bucket, RGWBucketInfo& info, optional_yield y, + const DoutPrefixProvider *dpp, const BucketInstance::RemoveParams& params = {}); /* @@ -670,6 +683,7 @@ public: int read_bucket_info(const rgw_bucket& bucket, RGWBucketInfo *info, optional_yield y, + const DoutPrefixProvider *dpp, const BucketInstance::GetParams& params = {}, RGWObjVersionTracker *ep_objv_tracker = nullptr); @@ -677,37 +691,44 @@ public: int set_bucket_instance_attrs(RGWBucketInfo& bucket_info, map& attrs, RGWObjVersionTracker *objv_tracker, - optional_yield y); + optional_yield y, + const DoutPrefixProvider *dpp); /* user/bucket */ int link_bucket(const rgw_user& user_id, const rgw_bucket& bucket, ceph::real_time creation_time, optional_yield y, + const DoutPrefixProvider *dpp, bool update_entrypoint = true, rgw_ep_info *pinfo = nullptr); int unlink_bucket(const rgw_user& user_id, const rgw_bucket& bucket, 
optional_yield y, + const DoutPrefixProvider *dpp, bool update_entrypoint = true); int chown(rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket_info, const rgw_user& user_id, const std::string& display_name, - const std::string& marker, optional_yield y); + const std::string& marker, optional_yield y, const DoutPrefixProvider *dpp); int set_acl(ACLOwner& owner, rgw_bucket& bucket, - RGWBucketInfo& bucket_info, bufferlist& bl, optional_yield y); + RGWBucketInfo& bucket_info, bufferlist& bl, optional_yield y, + const DoutPrefixProvider *dpp); int read_buckets_stats(map& m, - optional_yield y); + optional_yield y, + const DoutPrefixProvider *dpp); int read_bucket_stats(const rgw_bucket& bucket, RGWBucketEnt *result, - optional_yield y); + optional_yield y, + const DoutPrefixProvider *dpp); /* quota related */ - int sync_user_stats(const rgw_user& user_id, const RGWBucketInfo& bucket_info, + int sync_user_stats(const DoutPrefixProvider *dpp, + const rgw_user& user_id, const RGWBucketInfo& bucket_info, optional_yield y, RGWBucketEnt* pent = nullptr); @@ -715,21 +736,26 @@ public: int get_sync_policy_handler(std::optional zone, std::optional bucket, RGWBucketSyncPolicyHandlerRef *phandler, - optional_yield y); + optional_yield y, + const DoutPrefixProvider *dpp); int bucket_exports_data(const rgw_bucket& bucket, - optional_yield y); + optional_yield y, + const DoutPrefixProvider *dpp); int bucket_imports_data(const rgw_bucket& bucket, - optional_yield y); + optional_yield y, + const DoutPrefixProvider *dpp); private: int convert_old_bucket_info(RGWSI_Bucket_X_Ctx& ctx, const rgw_bucket& bucket, - optional_yield y); + optional_yield y, + const DoutPrefixProvider *dpp); int do_store_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx, const rgw_bucket& bucket, RGWBucketInfo& info, optional_yield y, + const DoutPrefixProvider *dpp, const BucketInstance::PutParams& params); int do_store_linked_bucket_info(RGWSI_Bucket_X_Ctx& ctx, @@ -739,7 +765,8 @@ private: obj_version *pep_objv, map *pattrs, bool create_entry_point, - optional_yield); + optional_yield, + const DoutPrefixProvider *dpp); int do_link_bucket(RGWSI_Bucket_EP_Ctx& ctx, const rgw_user& user, @@ -747,17 +774,19 @@ private: ceph::real_time creation_time, bool update_entrypoint, rgw_ep_info *pinfo, - optional_yield y); + optional_yield y, + const DoutPrefixProvider *dpp); int do_unlink_bucket(RGWSI_Bucket_EP_Ctx& ctx, const rgw_user& user_id, const rgw_bucket& bucket, bool update_entrypoint, - optional_yield y); + optional_yield y, + const DoutPrefixProvider *dpp); }; -bool rgw_find_bucket_by_id(CephContext *cct, RGWMetadataManager *mgr, const string& marker, +bool rgw_find_bucket_by_id(const DoutPrefixProvider *dpp, CephContext *cct, RGWMetadataManager *mgr, const string& marker, const string& bucket_id, rgw_bucket* bucket_out); #endif diff --git a/src/rgw/rgw_bucket_sync.cc b/src/rgw/rgw_bucket_sync.cc index 377bd8f056680..357b56d9a4067 100644 --- a/src/rgw/rgw_bucket_sync.cc +++ b/src/rgw/rgw_bucket_sync.cc @@ -734,14 +734,14 @@ RGWBucketSyncPolicyHandler *RGWBucketSyncPolicyHandler::alloc_child(const rgw_bu return new RGWBucketSyncPolicyHandler(this, bucket, sync_policy); } -int RGWBucketSyncPolicyHandler::init(optional_yield y) +int RGWBucketSyncPolicyHandler::init(const DoutPrefixProvider *dpp, optional_yield y) { - int r = bucket_sync_svc->get_bucket_sync_hints(bucket.value_or(rgw_bucket()), + int r = bucket_sync_svc->get_bucket_sync_hints(dpp, bucket.value_or(rgw_bucket()), &source_hints, &target_hints, y); if (r < 0) { - 
ldout(bucket_sync_svc->ctx(), 0) << "ERROR: failed to initialize bucket sync policy handler: get_bucket_sync_hints() on bucket=" + ldpp_dout(dpp, 0) << "ERROR: failed to initialize bucket sync policy handler: get_bucket_sync_hints() on bucket=" << bucket << " returned r=" << r << dendl; return r; } diff --git a/src/rgw/rgw_bucket_sync.h b/src/rgw/rgw_bucket_sync.h index 488060b7a60ac..d1d09bbfc0722 100644 --- a/src/rgw/rgw_bucket_sync.h +++ b/src/rgw/rgw_bucket_sync.h @@ -331,7 +331,7 @@ public: RGWBucketSyncPolicyHandler *alloc_child(const rgw_bucket& bucket, std::optional sync_policy) const; - int init(optional_yield y); + int init(const DoutPrefixProvider *dpp, optional_yield y); void reflect(RGWBucketSyncFlowManager::pipe_set *psource_pipes, RGWBucketSyncFlowManager::pipe_set *ptarget_pipes, diff --git a/src/rgw/rgw_cache.cc b/src/rgw/rgw_cache.cc index 6908e7f9d252b..e82c142eb0240 100644 --- a/src/rgw/rgw_cache.cc +++ b/src/rgw/rgw_cache.cc @@ -9,7 +9,7 @@ #define dout_subsys ceph_subsys_rgw -int ObjectCache::get(const string& name, ObjectCacheInfo& info, uint32_t mask, rgw_cache_entry_info *cache_info) +int ObjectCache::get(const DoutPrefixProvider *dpp, const string& name, ObjectCacheInfo& info, uint32_t mask, rgw_cache_entry_info *cache_info) { std::shared_lock rl{lock}; @@ -18,7 +18,7 @@ int ObjectCache::get(const string& name, ObjectCacheInfo& info, uint32_t mask, r } auto iter = cache_map.find(name); if (iter == cache_map.end()) { - ldout(cct, 10) << "cache get: name=" << name << " : miss" << dendl; + ldpp_dout(dpp, 10) << "cache get: name=" << name << " : miss" << dendl; if (perfcounter) { perfcounter->inc(l_rgw_cache_miss); } @@ -27,7 +27,7 @@ int ObjectCache::get(const string& name, ObjectCacheInfo& info, uint32_t mask, r if (expiry.count() && (ceph::coarse_mono_clock::now() - iter->second.info.time_added) > expiry) { - ldout(cct, 10) << "cache get: name=" << name << " : expiry miss" << dendl; + ldpp_dout(dpp, 10) << "cache get: name=" << name << " : expiry miss" << dendl; rl.unlock(); std::unique_lock wl{lock}; // write lock for insertion // check that wasn't already removed by other thread @@ -47,14 +47,14 @@ int ObjectCache::get(const string& name, ObjectCacheInfo& info, uint32_t mask, r ObjectCacheEntry *entry = &iter->second; if (lru_counter - entry->lru_promotion_ts > lru_window) { - ldout(cct, 20) << "cache get: touching lru, lru_counter=" << lru_counter + ldpp_dout(dpp, 20) << "cache get: touching lru, lru_counter=" << lru_counter << " promotion_ts=" << entry->lru_promotion_ts << dendl; rl.unlock(); std::unique_lock wl{lock}; // write lock for insertion /* need to redo this because entry might have dropped off the cache */ iter = cache_map.find(name); if (iter == cache_map.end()) { - ldout(cct, 10) << "lost race! cache get: name=" << name << " : miss" << dendl; + ldpp_dout(dpp, 10) << "lost race! 
cache get: name=" << name << " : miss" << dendl; if(perfcounter) perfcounter->inc(l_rgw_cache_miss); return -ENOENT; } @@ -62,24 +62,24 @@ int ObjectCache::get(const string& name, ObjectCacheInfo& info, uint32_t mask, r entry = &iter->second; /* check again, we might have lost a race here */ if (lru_counter - entry->lru_promotion_ts > lru_window) { - touch_lru(name, *entry, iter->second.lru_iter); + touch_lru(dpp, name, *entry, iter->second.lru_iter); } } ObjectCacheInfo& src = iter->second.info; if(src.status == -ENOENT) { - ldout(cct, 10) << "cache get: name=" << name << " : hit (negative entry)" << dendl; + ldpp_dout(dpp, 10) << "cache get: name=" << name << " : hit (negative entry)" << dendl; if (perfcounter) perfcounter->inc(l_rgw_cache_hit); return -ENODATA; } if ((src.flags & mask) != mask) { - ldout(cct, 10) << "cache get: name=" << name << " : type miss (requested=0x" + ldpp_dout(dpp, 10) << "cache get: name=" << name << " : type miss (requested=0x" << std::hex << mask << ", cached=0x" << src.flags << std::dec << ")" << dendl; if(perfcounter) perfcounter->inc(l_rgw_cache_miss); return -ENOENT; } - ldout(cct, 10) << "cache get: name=" << name << " : hit (requested=0x" + ldpp_dout(dpp, 10) << "cache get: name=" << name << " : hit (requested=0x" << std::hex << mask << ", cached=0x" << src.flags << std::dec << ")" << dendl; @@ -93,7 +93,8 @@ int ObjectCache::get(const string& name, ObjectCacheInfo& info, uint32_t mask, r return 0; } -bool ObjectCache::chain_cache_entry(std::initializer_list cache_info_entries, +bool ObjectCache::chain_cache_entry(const DoutPrefixProvider *dpp, + std::initializer_list cache_info_entries, RGWChainedCache::Entry *chained_entry) { std::unique_lock l{lock}; @@ -106,18 +107,18 @@ bool ObjectCache::chain_cache_entry(std::initializer_list entries.reserve(cache_info_entries.size()); /* first verify that all entries are still valid */ for (auto cache_info : cache_info_entries) { - ldout(cct, 10) << "chain_cache_entry: cache_locator=" + ldpp_dout(dpp, 10) << "chain_cache_entry: cache_locator=" << cache_info->cache_locator << dendl; auto iter = cache_map.find(cache_info->cache_locator); if (iter == cache_map.end()) { - ldout(cct, 20) << "chain_cache_entry: couldn't find cache locator" << dendl; + ldpp_dout(dpp, 20) << "chain_cache_entry: couldn't find cache locator" << dendl; return false; } auto entry = &iter->second; if (entry->gen != cache_info->gen) { - ldout(cct, 20) << "chain_cache_entry: entry.gen (" << entry->gen + ldpp_dout(dpp, 20) << "chain_cache_entry: entry.gen (" << entry->gen << ") != cache_info.gen (" << cache_info->gen << ")" << dendl; return false; @@ -136,7 +137,7 @@ bool ObjectCache::chain_cache_entry(std::initializer_list return true; } -void ObjectCache::put(const string& name, ObjectCacheInfo& info, rgw_cache_entry_info *cache_info) +void ObjectCache::put(const DoutPrefixProvider *dpp, const string& name, ObjectCacheInfo& info, rgw_cache_entry_info *cache_info) { std::unique_lock l{lock}; @@ -144,7 +145,7 @@ void ObjectCache::put(const string& name, ObjectCacheInfo& info, rgw_cache_entry return; } - ldout(cct, 10) << "cache put: name=" << name << " info.flags=0x" + ldpp_dout(dpp, 10) << "cache put: name=" << name << " info.flags=0x" << std::hex << info.flags << std::dec << dendl; auto [iter, inserted] = cache_map.emplace(name, ObjectCacheEntry{}); @@ -160,7 +161,7 @@ void ObjectCache::put(const string& name, ObjectCacheInfo& info, rgw_cache_entry entry.chained_entries.clear(); entry.gen++; - touch_lru(name, entry, entry.lru_iter); + 
touch_lru(dpp, name, entry, entry.lru_iter); target.status = info.status; @@ -190,16 +191,16 @@ void ObjectCache::put(const string& name, ObjectCacheInfo& info, rgw_cache_entry target.xattrs = info.xattrs; map::iterator iter; for (iter = target.xattrs.begin(); iter != target.xattrs.end(); ++iter) { - ldout(cct, 10) << "updating xattr: name=" << iter->first << " bl.length()=" << iter->second.length() << dendl; + ldpp_dout(dpp, 10) << "updating xattr: name=" << iter->first << " bl.length()=" << iter->second.length() << dendl; } } else if (info.flags & CACHE_FLAG_MODIFY_XATTRS) { map::iterator iter; for (iter = info.rm_xattrs.begin(); iter != info.rm_xattrs.end(); ++iter) { - ldout(cct, 10) << "removing xattr: name=" << iter->first << dendl; + ldpp_dout(dpp, 10) << "removing xattr: name=" << iter->first << dendl; target.xattrs.erase(iter->first); } for (iter = info.xattrs.begin(); iter != info.xattrs.end(); ++iter) { - ldout(cct, 10) << "appending xattr: name=" << iter->first << " bl.length()=" << iter->second.length() << dendl; + ldpp_dout(dpp, 10) << "appending xattr: name=" << iter->first << " bl.length()=" << iter->second.length() << dendl; target.xattrs[iter->first] = iter->second; } } @@ -211,7 +212,7 @@ void ObjectCache::put(const string& name, ObjectCacheInfo& info, rgw_cache_entry target.version = info.version; } -bool ObjectCache::remove(const string& name) +bool ObjectCache::remove(const DoutPrefixProvider *dpp, const string& name) { std::unique_lock l{lock}; @@ -223,7 +224,7 @@ bool ObjectCache::remove(const string& name) if (iter == cache_map.end()) return false; - ldout(cct, 10) << "removing " << name << " from cache" << dendl; + ldpp_dout(dpp, 10) << "removing " << name << " from cache" << dendl; ObjectCacheEntry& entry = iter->second; for (auto& kv : entry.chained_entries) { @@ -235,7 +236,7 @@ bool ObjectCache::remove(const string& name) return true; } -void ObjectCache::touch_lru(const string& name, ObjectCacheEntry& entry, +void ObjectCache::touch_lru(const DoutPrefixProvider *dpp, const string& name, ObjectCacheEntry& entry, std::list::iterator& lru_iter) { while (lru_size > (size_t)cct->_conf->rgw_cache_lru_size) { @@ -262,9 +263,9 @@ void ObjectCache::touch_lru(const string& name, ObjectCacheEntry& entry, lru.push_back(name); lru_size++; lru_iter--; - ldout(cct, 10) << "adding " << name << " to cache LRU end" << dendl; + ldpp_dout(dpp, 10) << "adding " << name << " to cache LRU end" << dendl; } else { - ldout(cct, 10) << "moving " << name << " to cache LRU end" << dendl; + ldpp_dout(dpp, 10) << "moving " << name << " to cache LRU end" << dendl; lru.erase(lru_iter); lru.push_back(name); lru_iter = lru.end(); diff --git a/src/rgw/rgw_cache.h b/src/rgw/rgw_cache.h index 659b5518128e8..852780cc66446 100644 --- a/src/rgw/rgw_cache.h +++ b/src/rgw/rgw_cache.h @@ -168,7 +168,7 @@ class ObjectCache { bool enabled; ceph::timespan expiry; - void touch_lru(const string& name, ObjectCacheEntry& entry, + void touch_lru(const DoutPrefixProvider *dpp, const string& name, ObjectCacheEntry& entry, std::list::iterator& lru_iter); void remove_lru(const string& name, std::list::iterator& lru_iter); void invalidate_lru(ObjectCacheEntry& entry); @@ -178,10 +178,10 @@ class ObjectCache { public: ObjectCache() : lru_size(0), lru_counter(0), lru_window(0), cct(NULL), enabled(false) { } ~ObjectCache(); - int get(const std::string& name, ObjectCacheInfo& bl, uint32_t mask, rgw_cache_entry_info *cache_info); - std::optional get(const std::string& name) { + int get(const DoutPrefixProvider *dpp, 
const std::string& name, ObjectCacheInfo& bl, uint32_t mask, rgw_cache_entry_info *cache_info); + std::optional get(const DoutPrefixProvider *dpp, const std::string& name) { std::optional info{std::in_place}; - auto r = get(name, *info, 0, nullptr); + auto r = get(dpp, name, *info, 0, nullptr); return r < 0 ? std::nullopt : info; } @@ -198,15 +198,16 @@ public: } } - void put(const std::string& name, ObjectCacheInfo& bl, rgw_cache_entry_info *cache_info); - bool remove(const std::string& name); + void put(const DoutPrefixProvider *dpp, const std::string& name, ObjectCacheInfo& bl, rgw_cache_entry_info *cache_info); + bool remove(const DoutPrefixProvider *dpp, const std::string& name); void set_ctx(CephContext *_cct) { cct = _cct; lru_window = cct->_conf->rgw_cache_lru_size / 2; expiry = std::chrono::seconds(cct->_conf.get_val( "rgw_cache_expiry_interval")); } - bool chain_cache_entry(std::initializer_list cache_info_entries, + bool chain_cache_entry(const DoutPrefixProvider *dpp, + std::initializer_list cache_info_entries, RGWChainedCache::Entry *chained_entry); void set_enabled(bool status); diff --git a/src/rgw/rgw_common.cc b/src/rgw/rgw_common.cc index 21a6ac5790f2b..57765bbfe922c 100644 --- a/src/rgw/rgw_common.cc +++ b/src/rgw/rgw_common.cc @@ -387,7 +387,7 @@ struct str_len meta_prefixes[] = { STR_LEN_ENTRY("HTTP_X_AMZ"), STR_LEN_ENTRY("HTTP_X_ACCOUNT"), {NULL, 0} }; -void req_info::init_meta_info(bool *found_bad_meta) +void req_info::init_meta_info(const DoutPrefixProvider *dpp, bool *found_bad_meta) { x_meta_map.clear(); @@ -399,7 +399,7 @@ void req_info::init_meta_info(bool *found_bad_meta) int len = meta_prefixes[prefix_num].len; const char *p = header_name.c_str(); if (strncmp(p, prefix, len) == 0) { - dout(10) << "meta>> " << p << dendl; + ldpp_dout(dpp, 10) << "meta>> " << p << dendl; const char *name = p+len; /* skip the prefix */ int name_len = header_name.size() - len; @@ -431,7 +431,7 @@ void req_info::init_meta_info(bool *found_bad_meta) } } for (const auto& kv: x_meta_map) { - dout(10) << "x>> " << kv.first << ":" << rgw::crypt_sanitize::x_meta_map{kv.first, kv.second} << dendl; + ldpp_dout(dpp, 10) << "x>> " << kv.first << ":" << rgw::crypt_sanitize::x_meta_map{kv.first, kv.second} << dendl; } } @@ -788,7 +788,7 @@ int NameVal::parse() return ret; } -int RGWHTTPArgs::parse() +int RGWHTTPArgs::parse(const DoutPrefixProvider *dpp) { int pos = 0; bool end = false; @@ -820,7 +820,7 @@ int RGWHTTPArgs::parse() }); } string& val = nv.get_val(); - dout(10) << "name: " << name << " val: " << val << dendl; + ldpp_dout(dpp, 10) << "name: " << name << " val: " << val << dendl; append(name, val); } diff --git a/src/rgw/rgw_common.h b/src/rgw/rgw_common.h index 75b805ddc56c8..7b20466775d2b 100644 --- a/src/rgw/rgw_common.h +++ b/src/rgw/rgw_common.h @@ -321,9 +321,9 @@ class RGWHTTPArgs { bool admin_subresource_added = false; public: RGWHTTPArgs() = default; - explicit RGWHTTPArgs(const std::string& s) { + explicit RGWHTTPArgs(const std::string& s, const DoutPrefixProvider *dpp) { set(s); - parse(); + parse(dpp); } /** Set the arguments; as received */ @@ -334,7 +334,7 @@ class RGWHTTPArgs { str = s; } /** parse the received arguments */ - int parse(); + int parse(const DoutPrefixProvider *dpp); void append(const std::string& name, const string& val); /** Get the value for a specific argument parameter */ const string& get(const std::string& name, bool *exists = NULL) const; @@ -1168,7 +1168,7 @@ struct req_info { req_info(CephContext *cct, const RGWEnv *env); void 
rebuild_from(req_info& src); - void init_meta_info(bool *found_bad_meta); + void init_meta_info(const DoutPrefixProvider *dpp, bool *found_bad_meta); }; typedef cls_rgw_obj_key rgw_obj_index_key; diff --git a/src/rgw/rgw_coroutine.cc b/src/rgw/rgw_coroutine.cc index 6278d695147b9..98397655ee435 100644 --- a/src/rgw/rgw_coroutine.cc +++ b/src/rgw/rgw_coroutine.cc @@ -228,15 +228,15 @@ RGWCoroutinesStack::~RGWCoroutinesStack() } } -int RGWCoroutinesStack::operate(RGWCoroutinesEnv *_env) +int RGWCoroutinesStack::operate(const DoutPrefixProvider *dpp, RGWCoroutinesEnv *_env) { env = _env; RGWCoroutine *op = *pos; op->stack = this; - ldout(cct, 20) << *op << ": operate()" << dendl; - int r = op->operate_wrapper(); + ldpp_dout(dpp, 20) << *op << ": operate()" << dendl; + int r = op->operate_wrapper(dpp); if (r < 0) { - ldout(cct, 20) << *op << ": operate() returned r=" << r << dendl; + ldpp_dout(dpp, 20) << *op << ": operate() returned r=" << r << dendl; } error_flag = op->is_error(); @@ -608,7 +608,7 @@ void RGWCoroutinesManager::io_complete(RGWCoroutine *cr, const rgw_io_id& io_id) cr->io_complete(io_id); } -int RGWCoroutinesManager::run(list& stacks) +int RGWCoroutinesManager::run(const DoutPrefixProvider *dpp, list& stacks) { int ret = 0; int blocked_count = 0; @@ -645,13 +645,13 @@ int RGWCoroutinesManager::run(list& stacks) lock.unlock(); - ret = stack->operate(&env); + ret = stack->operate(dpp, &env); lock.lock(); stack->set_is_scheduled(false); if (ret < 0) { - ldout(cct, 20) << "stack->operate() returned ret=" << ret << dendl; + ldpp_dout(dpp, 20) << "stack->operate() returned ret=" << ret << dendl; } if (stack->is_error()) { @@ -772,7 +772,7 @@ next: return ret; } -int RGWCoroutinesManager::run(RGWCoroutine *op) +int RGWCoroutinesManager::run(const DoutPrefixProvider *dpp, RGWCoroutine *op) { if (!op) { return 0; @@ -784,9 +784,9 @@ int RGWCoroutinesManager::run(RGWCoroutine *op) stacks.push_back(stack); - int r = run(stacks); + int r = run(dpp, stacks); if (r < 0) { - ldout(cct, 20) << "run(stacks) returned r=" << r << dendl; + ldpp_dout(dpp, 20) << "run(stacks) returned r=" << r << dendl; } else { r = op->get_ret_status(); } @@ -1074,12 +1074,12 @@ void RGWSimpleCoroutine::call_cleanup() request_cleanup(); } -int RGWSimpleCoroutine::operate() +int RGWSimpleCoroutine::operate(const DoutPrefixProvider *dpp) { int ret = 0; reenter(this) { yield return state_init(); - yield return state_send_request(); + yield return state_send_request(dpp); yield return state_request_complete(); yield return state_all_complete(); drain_all(); @@ -1099,9 +1099,9 @@ int RGWSimpleCoroutine::state_init() return 0; } -int RGWSimpleCoroutine::state_send_request() +int RGWSimpleCoroutine::state_send_request(const DoutPrefixProvider *dpp) { - int ret = send_request(); + int ret = send_request(dpp); if (ret < 0) { call_cleanup(); return set_state(RGWCoroutine_Error, ret); diff --git a/src/rgw/rgw_coroutine.h b/src/rgw/rgw_coroutine.h index 0d0b48bddc839..9f358116c26a9 100644 --- a/src/rgw/rgw_coroutine.h +++ b/src/rgw/rgw_coroutine.h @@ -272,14 +272,14 @@ protected: return status; } - virtual int operate_wrapper() { - return operate(); + virtual int operate_wrapper(const DoutPrefixProvider *dpp) { + return operate(dpp); } public: RGWCoroutine(CephContext *_cct) : status(_cct), _yield_ret(false), cct(_cct), stack(NULL), retcode(0), state(RGWCoroutine_Run) {} ~RGWCoroutine() override; - virtual int operate() = 0; + virtual int operate(const DoutPrefixProvider *dpp) = 0; bool is_done() { return (state == 
RGWCoroutine_Done || state == RGWCoroutine_Error); } bool is_error() { return (state == RGWCoroutine_Error); } @@ -474,7 +474,7 @@ public: return id; } - int operate(RGWCoroutinesEnv *env); + int operate(const DoutPrefixProvider *dpp, RGWCoroutinesEnv *env); bool is_done() { return done_flag; @@ -663,8 +663,8 @@ public: } } - int run(list& ops); - int run(RGWCoroutine *op); + int run(const DoutPrefixProvider *dpp, list& ops); + int run(const DoutPrefixProvider *dpp, RGWCoroutine *op); void stop() { bool expected = false; if (going_down.compare_exchange_strong(expected, true)) { @@ -715,10 +715,10 @@ RGWAioCompletionNotifier *RGWCoroutinesStack::create_completion_notifier(T value class RGWSimpleCoroutine : public RGWCoroutine { bool called_cleanup; - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; int state_init(); - int state_send_request(); + int state_send_request(const DoutPrefixProvider *dpp); int state_request_complete(); int state_all_complete(); @@ -729,7 +729,7 @@ public: ~RGWSimpleCoroutine() override; virtual int init() { return 0; } - virtual int send_request() = 0; + virtual int send_request(const DoutPrefixProvider *dpp) = 0; virtual int request_complete() = 0; virtual int finish() { return 0; } virtual void request_cleanup() {} diff --git a/src/rgw/rgw_cors_s3.cc b/src/rgw/rgw_cors_s3.cc index 9f81744cd5495..0275156d2c90d 100644 --- a/src/rgw/rgw_cors_s3.cc +++ b/src/rgw/rgw_cors_s3.cc @@ -80,7 +80,7 @@ bool RGWCORSRule_S3::xml_end(const char *el) { if (obj) { for( ; obj; obj = iter.get_next()) { const char *s = obj->get_data().c_str(); - dout(10) << "RGWCORSRule::xml_end, el : " << el << ", data : " << s << dendl; + ldpp_dout(dpp, 10) << "RGWCORSRule::xml_end, el : " << el << ", data : " << s << dendl; if (strcasecmp(s, "GET") == 0) { allowed_methods |= RGW_CORS_GET; } else if (strcasecmp(s, "POST") == 0) { @@ -103,20 +103,20 @@ bool RGWCORSRule_S3::xml_end(const char *el) { if (xml_id != NULL) { string data = xml_id->get_data(); if (data.length() > 255) { - dout(0) << "RGWCORSRule has id of length greater than 255" << dendl; + ldpp_dout(dpp, 0) << "RGWCORSRule has id of length greater than 255" << dendl; return false; } - dout(10) << "RGWCORRule id : " << data << dendl; + ldpp_dout(dpp, 10) << "RGWCORRule id : " << data << dendl; id = data; } /*Check if there is atleast one AllowedOrigin*/ iter = find("AllowedOrigin"); if (!(obj = iter.get_next())) { - dout(0) << "RGWCORSRule does not have even one AllowedOrigin" << dendl; + ldpp_dout(dpp, 0) << "RGWCORSRule does not have even one AllowedOrigin" << dendl; return false; } for( ; obj; obj = iter.get_next()) { - dout(10) << "RGWCORSRule - origin : " << obj->get_data() << dendl; + ldpp_dout(dpp, 10) << "RGWCORSRule - origin : " << obj->get_data() << dendl; /*Just take the hostname*/ string host = obj->get_data(); if (validate_name_string(host) != 0) @@ -130,7 +130,7 @@ bool RGWCORSRule_S3::xml_end(const char *el) { unsigned long long ull = strtoull(obj->get_data().c_str(), &end, 10); if (*end != '\0') { - dout(0) << "RGWCORSRule's MaxAgeSeconds " << obj->get_data() << " is an invalid integer" << dendl; + ldpp_dout(dpp, 0) << "RGWCORSRule's MaxAgeSeconds " << obj->get_data() << " is an invalid integer" << dendl; return false; } if (ull >= 0x100000000ull) { @@ -138,13 +138,13 @@ bool RGWCORSRule_S3::xml_end(const char *el) { } else { max_age = (uint32_t)ull; } - dout(10) << "RGWCORSRule : max_age : " << max_age << dendl; + ldpp_dout(dpp, 10) << "RGWCORSRule : max_age : " << max_age << dendl; } 
/*Check and update ExposeHeader*/ iter = find("ExposeHeader"); if ((obj = iter.get_next())) { for(; obj; obj = iter.get_next()) { - dout(10) << "RGWCORSRule - exp_hdr : " << obj->get_data() << dendl; + ldpp_dout(dpp, 10) << "RGWCORSRule - exp_hdr : " << obj->get_data() << dendl; exposable_hdrs.push_back(obj->get_data()); } } @@ -152,7 +152,7 @@ bool RGWCORSRule_S3::xml_end(const char *el) { iter = find("AllowedHeader"); if ((obj = iter.get_next())) { for(; obj; obj = iter.get_next()) { - dout(10) << "RGWCORSRule - allowed_hdr : " << obj->get_data() << dendl; + ldpp_dout(dpp, 10) << "RGWCORSRule - allowed_hdr : " << obj->get_data() << dendl; string s = obj->get_data(); if (validate_name_string(s) != 0) return false; @@ -177,7 +177,7 @@ bool RGWCORSConfiguration_S3::xml_end(const char *el) { XMLObjIter iter = find("CORSRule"); RGWCORSRule_S3 *obj; if (!(obj = static_cast(iter.get_next()))) { - dout(0) << "CORSConfiguration should have atleast one CORSRule" << dendl; + ldpp_dout(dpp, 0) << "CORSConfiguration should have atleast one CORSRule" << dendl; return false; } for(; obj; obj = static_cast(iter.get_next())) { @@ -224,9 +224,9 @@ class CORSRuleExposeHeader_S3 : public XMLObj { XMLObj *RGWCORSXMLParser_S3::alloc_obj(const char *el) { if (strcmp(el, "CORSConfiguration") == 0) { - return new RGWCORSConfiguration_S3; + return new RGWCORSConfiguration_S3(dpp); } else if (strcmp(el, "CORSRule") == 0) { - return new RGWCORSRule_S3; + return new RGWCORSRule_S3(dpp); } else if (strcmp(el, "ID") == 0) { return new CORSRuleID_S3; } else if (strcmp(el, "AllowedOrigin") == 0) { diff --git a/src/rgw/rgw_cors_s3.h b/src/rgw/rgw_cors_s3.h index 2dff567c9e92b..bc69c513b6883 100644 --- a/src/rgw/rgw_cors_s3.h +++ b/src/rgw/rgw_cors_s3.h @@ -22,13 +22,15 @@ #include #include +#include #include "rgw_xml.h" #include "rgw_cors.h" class RGWCORSRule_S3 : public RGWCORSRule, public XMLObj { + const DoutPrefixProvider *dpp; public: - RGWCORSRule_S3() {} + RGWCORSRule_S3(const DoutPrefixProvider *dpp) : dpp(dpp) {} ~RGWCORSRule_S3() override {} bool xml_end(const char *el) override; @@ -37,8 +39,9 @@ class RGWCORSRule_S3 : public RGWCORSRule, public XMLObj class RGWCORSConfiguration_S3 : public RGWCORSConfiguration, public XMLObj { + const DoutPrefixProvider *dpp; public: - RGWCORSConfiguration_S3() {} + RGWCORSConfiguration_S3(const DoutPrefixProvider *dpp) : dpp(dpp) {} ~RGWCORSConfiguration_S3() override {} bool xml_end(const char *el) override; @@ -47,10 +50,11 @@ class RGWCORSConfiguration_S3 : public RGWCORSConfiguration, public XMLObj class RGWCORSXMLParser_S3 : public RGWXMLParser { + const DoutPrefixProvider *dpp; CephContext *cct; XMLObj *alloc_obj(const char *el) override; public: - explicit RGWCORSXMLParser_S3(CephContext *_cct) : cct(_cct) {} + explicit RGWCORSXMLParser_S3(const DoutPrefixProvider *_dpp, CephContext *_cct) : dpp(_dpp), cct(_cct) {} }; #endif /*CEPH_RGW_CORS_S3_H*/ diff --git a/src/rgw/rgw_cr_rados.cc b/src/rgw/rgw_cr_rados.cc index 38217e1854c40..70975c22a4ee1 100644 --- a/src/rgw/rgw_cr_rados.cc +++ b/src/rgw/rgw_cr_rados.cc @@ -48,7 +48,7 @@ RGWAsyncRadosRequest *RGWAsyncRadosProcessor::RGWWQ::_dequeue() { } void RGWAsyncRadosProcessor::RGWWQ::_process(RGWAsyncRadosRequest *req, ThreadPool::TPHandle& handle) { - processor->handle_request(req); + processor->handle_request(this, req); processor->req_throttle.put(1); } @@ -89,8 +89,8 @@ void RGWAsyncRadosProcessor::stop() { } } -void RGWAsyncRadosProcessor::handle_request(RGWAsyncRadosRequest *req) { - req->send_request(); +void 
RGWAsyncRadosProcessor::handle_request(const DoutPrefixProvider *dpp, RGWAsyncRadosRequest *req) { + req->send_request(dpp); req->put(); } @@ -99,7 +99,7 @@ void RGWAsyncRadosProcessor::queue(RGWAsyncRadosRequest *req) { req_wq.queue(req); } -int RGWAsyncGetSystemObj::_send_request() +int RGWAsyncGetSystemObj::_send_request(const DoutPrefixProvider *dpp) { map *pattrs = want_attrs ? &attrs : nullptr; @@ -108,13 +108,13 @@ int RGWAsyncGetSystemObj::_send_request() .set_objv_tracker(&objv_tracker) .set_attrs(pattrs) .set_raw_attrs(raw_attrs) - .read(&bl, null_yield); + .read(dpp, &bl, null_yield); } -RGWAsyncGetSystemObj::RGWAsyncGetSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWSI_SysObj *_svc, +RGWAsyncGetSystemObj::RGWAsyncGetSystemObj(const DoutPrefixProvider *_dpp, RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWSI_SysObj *_svc, RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj, bool want_attrs, bool raw_attrs) - : RGWAsyncRadosRequest(caller, cn), obj_ctx(_svc), + : RGWAsyncRadosRequest(caller, cn), dpp(_dpp), obj_ctx(_svc), obj(_obj), want_attrs(want_attrs), raw_attrs(raw_attrs) { if (_objv_tracker) { @@ -122,9 +122,9 @@ RGWAsyncGetSystemObj::RGWAsyncGetSystemObj(RGWCoroutine *caller, RGWAioCompletio } } -int RGWSimpleRadosReadAttrsCR::send_request() +int RGWSimpleRadosReadAttrsCR::send_request(const DoutPrefixProvider *dpp) { - req = new RGWAsyncGetSystemObj(this, stack->create_completion_notifier(), + req = new RGWAsyncGetSystemObj(dpp, this, stack->create_completion_notifier(), svc, objv_tracker, obj, true, raw_attrs); async_rados->queue(req); return 0; @@ -141,21 +141,23 @@ int RGWSimpleRadosReadAttrsCR::request_complete() return req->get_ret_status(); } -int RGWAsyncPutSystemObj::_send_request() +int RGWAsyncPutSystemObj::_send_request(const DoutPrefixProvider *dpp) { auto obj_ctx = svc->init_obj_ctx(); auto sysobj = obj_ctx.get_obj(obj); return sysobj.wop() .set_objv_tracker(&objv_tracker) .set_exclusive(exclusive) - .write_data(bl, null_yield); + .write_data(dpp, bl, null_yield); } -RGWAsyncPutSystemObj::RGWAsyncPutSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, +RGWAsyncPutSystemObj::RGWAsyncPutSystemObj(const DoutPrefixProvider *_dpp, + RGWCoroutine *caller, + RGWAioCompletionNotifier *cn, RGWSI_SysObj *_svc, RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj, bool _exclusive, bufferlist _bl) - : RGWAsyncRadosRequest(caller, cn), svc(_svc), + : RGWAsyncRadosRequest(caller, cn), dpp(_dpp), svc(_svc), obj(_obj), exclusive(_exclusive), bl(std::move(_bl)) { if (_objv_tracker) { @@ -163,7 +165,7 @@ RGWAsyncPutSystemObj::RGWAsyncPutSystemObj(RGWCoroutine *caller, RGWAioCompletio } } -int RGWAsyncPutSystemObjAttrs::_send_request() +int RGWAsyncPutSystemObjAttrs::_send_request(const DoutPrefixProvider *dpp) { auto obj_ctx = svc->init_obj_ctx(); auto sysobj = obj_ctx.get_obj(obj); @@ -171,14 +173,14 @@ int RGWAsyncPutSystemObjAttrs::_send_request() .set_objv_tracker(&objv_tracker) .set_exclusive(false) .set_attrs(attrs) - .write_attrs(null_yield); + .write_attrs(dpp, null_yield); } -RGWAsyncPutSystemObjAttrs::RGWAsyncPutSystemObjAttrs(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, +RGWAsyncPutSystemObjAttrs::RGWAsyncPutSystemObjAttrs(const DoutPrefixProvider *_dpp, RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWSI_SysObj *_svc, RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj, map _attrs) - : RGWAsyncRadosRequest(caller, cn), svc(_svc), + : RGWAsyncRadosRequest(caller, cn), dpp(_dpp), svc(_svc), 
obj(_obj), attrs(std::move(_attrs)) { if (_objv_tracker) { @@ -194,12 +196,12 @@ RGWOmapAppend::RGWOmapAppend(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RGW { } -int RGWAsyncLockSystemObj::_send_request() +int RGWAsyncLockSystemObj::_send_request(const DoutPrefixProvider *dpp) { rgw_rados_ref ref; - int r = store->getRados()->get_raw_obj_ref(obj, &ref); + int r = store->getRados()->get_raw_obj_ref(dpp, obj, &ref); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl; return r; } @@ -222,12 +224,12 @@ RGWAsyncLockSystemObj::RGWAsyncLockSystemObj(RGWCoroutine *caller, RGWAioComplet { } -int RGWAsyncUnlockSystemObj::_send_request() +int RGWAsyncUnlockSystemObj::_send_request(const DoutPrefixProvider *dpp) { rgw_rados_ref ref; - int r = store->getRados()->get_raw_obj_ref(obj, &ref); + int r = store->getRados()->get_raw_obj_ref(dpp, obj, &ref); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl; return r; } @@ -264,11 +266,11 @@ RGWRadosSetOmapKeysCR::RGWRadosSetOmapKeysCR(rgw::sal::RGWRadosStore *_store, s << "]"; } -int RGWRadosSetOmapKeysCR::send_request() +int RGWRadosSetOmapKeysCR::send_request(const DoutPrefixProvider *dpp) { - int r = store->getRados()->get_raw_obj_ref(obj, &ref); + int r = store->getRados()->get_raw_obj_ref(dpp, obj, &ref); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl; return r; } @@ -303,10 +305,10 @@ RGWRadosGetOmapKeysCR::RGWRadosGetOmapKeysCR(rgw::sal::RGWRadosStore *_store, set_description() << "get omap keys dest=" << obj << " marker=" << marker; } -int RGWRadosGetOmapKeysCR::send_request() { - int r = store->getRados()->get_raw_obj_ref(obj, &result->ref); +int RGWRadosGetOmapKeysCR::send_request(const DoutPrefixProvider *dpp) { + int r = store->getRados()->get_raw_obj_ref(dpp, obj, &result->ref); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl; return r; } @@ -341,10 +343,10 @@ RGWRadosGetOmapValsCR::RGWRadosGetOmapValsCR(rgw::sal::RGWRadosStore *_store, set_description() << "get omap keys dest=" << obj << " marker=" << marker; } -int RGWRadosGetOmapValsCR::send_request() { - int r = store->getRados()->get_raw_obj_ref(obj, &result->ref); +int RGWRadosGetOmapValsCR::send_request(const DoutPrefixProvider *dpp) { + int r = store->getRados()->get_raw_obj_ref(dpp, obj, &result->ref); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl; return r; } @@ -376,10 +378,10 @@ RGWRadosRemoveOmapKeysCR::RGWRadosRemoveOmapKeysCR(rgw::sal::RGWRadosStore *_sto set_description() << "remove omap keys dest=" << obj << " keys=" << keys; } -int RGWRadosRemoveOmapKeysCR::send_request() { - int r = store->getRados()->get_raw_obj_ref(obj, &ref); +int RGWRadosRemoveOmapKeysCR::send_request(const DoutPrefixProvider *dpp) { + int r = store->getRados()->get_raw_obj_ref(dpp, obj, &ref); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed to get ref for (" << obj << ") ret=" 
<< r << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl; return r; } @@ -409,7 +411,7 @@ RGWRadosRemoveCR::RGWRadosRemoveCR(rgw::sal::RGWRadosStore *store, const rgw_raw set_description() << "remove dest=" << obj; } -int RGWRadosRemoveCR::send_request() +int RGWRadosRemoveCR::send_request(const DoutPrefixProvider *dpp) { auto rados = store->getRados()->get_rados_handle(); int r = rados->ioctx_create(obj.pool.name.c_str(), ioctx); @@ -464,7 +466,7 @@ void RGWSimpleRadosLockCR::request_cleanup() } } -int RGWSimpleRadosLockCR::send_request() +int RGWSimpleRadosLockCR::send_request(const DoutPrefixProvider *dpp) { set_status() << "sending request"; req = new RGWAsyncLockSystemObj(this, stack->create_completion_notifier(), @@ -501,7 +503,7 @@ void RGWSimpleRadosUnlockCR::request_cleanup() } } -int RGWSimpleRadosUnlockCR::send_request() +int RGWSimpleRadosUnlockCR::send_request(const DoutPrefixProvider *dpp) { set_status() << "sending request"; @@ -517,7 +519,7 @@ int RGWSimpleRadosUnlockCR::request_complete() return req->get_ret_status(); } -int RGWOmapAppend::operate() { +int RGWOmapAppend::operate(const DoutPrefixProvider *dpp) { reenter(this) { for (;;) { if (!has_product() && going_down) { @@ -576,18 +578,18 @@ bool RGWOmapAppend::finish() { return (!is_done()); } -int RGWAsyncGetBucketInstanceInfo::_send_request() +int RGWAsyncGetBucketInstanceInfo::_send_request(const DoutPrefixProvider *dpp) { int r; if (!bucket.bucket_id.empty()) { RGWSysObjectCtx obj_ctx = store->svc()->sysobj->init_obj_ctx(); - r = store->getRados()->get_bucket_instance_info(obj_ctx, bucket, bucket_info, nullptr, &attrs, null_yield); + r = store->getRados()->get_bucket_instance_info(obj_ctx, bucket, bucket_info, nullptr, &attrs, null_yield, dpp); } else { - r = store->ctl()->bucket->read_bucket_info(bucket, &bucket_info, null_yield, + r = store->ctl()->bucket->read_bucket_info(bucket, &bucket_info, null_yield, dpp, RGWBucketCtl::BucketInstance::GetParams().set_attrs(&attrs)); } if (r < 0) { - ldout(store->ctx(), 0) << "ERROR: failed to get bucket instance info for " + ldpp_dout(dpp, 0) << "ERROR: failed to get bucket instance info for " << bucket << dendl; return r; } @@ -595,7 +597,8 @@ int RGWAsyncGetBucketInstanceInfo::_send_request() return 0; } -RGWRadosBILogTrimCR::RGWRadosBILogTrimCR(rgw::sal::RGWRadosStore *store, +RGWRadosBILogTrimCR::RGWRadosBILogTrimCR(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, const RGWBucketInfo& bucket_info, int shard_id, const std::string& start_marker, @@ -604,10 +607,10 @@ RGWRadosBILogTrimCR::RGWRadosBILogTrimCR(rgw::sal::RGWRadosStore *store, start_marker(BucketIndexShardsManager::get_shard_marker(start_marker)), end_marker(BucketIndexShardsManager::get_shard_marker(end_marker)) { - bs.init(bucket_info, bucket_info.layout.current_index, shard_id); + bs.init(dpp, bucket_info, bucket_info.layout.current_index, shard_id); } -int RGWRadosBILogTrimCR::send_request() +int RGWRadosBILogTrimCR::send_request(const DoutPrefixProvider *dpp) { bufferlist in; cls_rgw_bi_log_trim_op call; @@ -629,7 +632,7 @@ int RGWRadosBILogTrimCR::request_complete() return r; } -int RGWAsyncFetchRemoteObj::_send_request() +int RGWAsyncFetchRemoteObj::_send_request(const DoutPrefixProvider *dpp) { RGWObjectCtx obj_ctx(store); @@ -675,7 +678,7 @@ int RGWAsyncFetchRemoteObj::_send_request() &bytes_transferred); if (r < 0) { - ldout(store->ctx(), 0) << "store->fetch_remote_obj() returned r=" << r << dendl; + ldpp_dout(dpp, 0) << 
"store->fetch_remote_obj() returned r=" << r << dendl; if (counters) { counters->inc(sync_counters::l_fetch_err, 1); } @@ -689,7 +692,7 @@ int RGWAsyncFetchRemoteObj::_send_request() return r; } -int RGWAsyncStatRemoteObj::_send_request() +int RGWAsyncStatRemoteObj::_send_request(const DoutPrefixProvider *dpp) { RGWObjectCtx obj_ctx(store); @@ -700,7 +703,8 @@ int RGWAsyncStatRemoteObj::_send_request() rgw::sal::RGWRadosBucket bucket(store, src_bucket); rgw::sal::RGWRadosObject src_obj(store, key, &bucket); - int r = store->getRados()->stat_remote_obj(obj_ctx, + int r = store->getRados()->stat_remote_obj(dpp, + obj_ctx, rgw_user(user_id), nullptr, /* req_info */ source_zone, @@ -720,33 +724,33 @@ int RGWAsyncStatRemoteObj::_send_request() petag); /* string *petag, */ if (r < 0) { - ldout(store->ctx(), 0) << "store->fetch_remote_obj() returned r=" << r << dendl; + ldpp_dout(dpp, 0) << "store->fetch_remote_obj() returned r=" << r << dendl; } return r; } -int RGWAsyncRemoveObj::_send_request() +int RGWAsyncRemoveObj::_send_request(const DoutPrefixProvider *dpp) { RGWObjectCtx obj_ctx(store); rgw_obj obj(bucket_info.bucket, key); - ldout(store->ctx(), 0) << __func__ << "(): deleting obj=" << obj << dendl; + ldpp_dout(dpp, 0) << __func__ << "(): deleting obj=" << obj << dendl; obj_ctx.set_atomic(obj); RGWObjState *state; - int ret = store->getRados()->get_obj_state(&obj_ctx, bucket_info, obj, &state, null_yield); + int ret = store->getRados()->get_obj_state(dpp, &obj_ctx, bucket_info, obj, &state, null_yield); if (ret < 0) { - ldout(store->ctx(), 20) << __func__ << "(): get_obj_state() obj=" << obj << " returned ret=" << ret << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): get_obj_state() obj=" << obj << " returned ret=" << ret << dendl; return ret; } /* has there been any racing object write? 
*/ if (del_if_older && (state->mtime > timestamp)) { - ldout(store->ctx(), 20) << __func__ << "(): skipping object removal obj=" << obj << " (obj mtime=" << state->mtime << ", request timestamp=" << timestamp << ")" << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): skipping object removal obj=" << obj << " (obj mtime=" << state->mtime << ", request timestamp=" << timestamp << ")" << dendl; return 0; } @@ -759,7 +763,7 @@ int RGWAsyncRemoveObj::_send_request() try { policy.decode(bliter); } catch (buffer::error& err) { - ldout(store->ctx(), 0) << "ERROR: could not decode policy, caught buffer::error" << dendl; + ldpp_dout(dpp, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl; return -EIO; } } @@ -783,14 +787,14 @@ int RGWAsyncRemoveObj::_send_request() del_op.params.high_precision_time = true; del_op.params.zones_trace = &zones_trace; - ret = del_op.delete_obj(null_yield); + ret = del_op.delete_obj(null_yield, dpp); if (ret < 0) { - ldout(store->ctx(), 20) << __func__ << "(): delete_obj() obj=" << obj << " returned ret=" << ret << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): delete_obj() obj=" << obj << " returned ret=" << ret << dendl; } return ret; } -int RGWContinuousLeaseCR::operate() +int RGWContinuousLeaseCR::operate(const DoutPrefixProvider *dpp) { if (aborted) { caller->set_sleeping(false); @@ -816,8 +820,9 @@ int RGWContinuousLeaseCR::operate() return 0; } -RGWRadosTimelogAddCR::RGWRadosTimelogAddCR(rgw::sal::RGWRadosStore *_store, const string& _oid, +RGWRadosTimelogAddCR::RGWRadosTimelogAddCR(const DoutPrefixProvider *_dpp, rgw::sal::RGWRadosStore *_store, const string& _oid, const cls_log_entry& entry) : RGWSimpleCoroutine(_store->ctx()), + dpp(_dpp), store(_store), oid(_oid), cn(NULL) { @@ -826,12 +831,12 @@ RGWRadosTimelogAddCR::RGWRadosTimelogAddCR(rgw::sal::RGWRadosStore *_store, cons entries.push_back(entry); } -int RGWRadosTimelogAddCR::send_request() +int RGWRadosTimelogAddCR::send_request(const DoutPrefixProvider *dpp) { set_status() << "sending request"; cn = stack->create_completion_notifier(); - return store->svc()->cls->timelog.add(oid, entries, cn->completion(), true, null_yield); + return store->svc()->cls->timelog.add(dpp, oid, entries, cn->completion(), true, null_yield); } int RGWRadosTimelogAddCR::request_complete() @@ -843,13 +848,14 @@ int RGWRadosTimelogAddCR::request_complete() return r; } -RGWRadosTimelogTrimCR::RGWRadosTimelogTrimCR(rgw::sal::RGWRadosStore *store, +RGWRadosTimelogTrimCR::RGWRadosTimelogTrimCR(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, const std::string& oid, const real_time& start_time, const real_time& end_time, const std::string& from_marker, const std::string& to_marker) - : RGWSimpleCoroutine(store->ctx()), store(store), oid(oid), + : RGWSimpleCoroutine(store->ctx()), dpp(dpp), store(store), oid(oid), start_time(start_time), end_time(end_time), from_marker(from_marker), to_marker(to_marker) { @@ -858,12 +864,12 @@ RGWRadosTimelogTrimCR::RGWRadosTimelogTrimCR(rgw::sal::RGWRadosStore *store, << " from_marker=" << from_marker << " to_marker=" << to_marker; } -int RGWRadosTimelogTrimCR::send_request() +int RGWRadosTimelogTrimCR::send_request(const DoutPrefixProvider *dpp) { set_status() << "sending request"; cn = stack->create_completion_notifier(); - return store->svc()->cls->timelog.trim(oid, start_time, end_time, from_marker, + return store->svc()->cls->timelog.trim(dpp, oid, start_time, end_time, from_marker, to_marker, cn->completion(), null_yield); } @@ -878,10 +884,11 @@ int 
RGWRadosTimelogTrimCR::request_complete() } -RGWSyncLogTrimCR::RGWSyncLogTrimCR(rgw::sal::RGWRadosStore *store, const std::string& oid, +RGWSyncLogTrimCR::RGWSyncLogTrimCR(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, const std::string& oid, const std::string& to_marker, std::string *last_trim_marker) - : RGWRadosTimelogTrimCR(store, oid, real_time{}, real_time{}, + : RGWRadosTimelogTrimCR(dpp, store, oid, real_time{}, real_time{}, std::string{}, to_marker), cct(store->ctx()), last_trim_marker(last_trim_marker) { @@ -901,19 +908,20 @@ int RGWSyncLogTrimCR::request_complete() } -int RGWAsyncStatObj::_send_request() +int RGWAsyncStatObj::_send_request(const DoutPrefixProvider *dpp) { rgw_raw_obj raw_obj; store->getRados()->obj_to_raw(bucket_info.placement_rule, obj, &raw_obj); - return store->getRados()->raw_obj_stat(raw_obj, psize, pmtime, pepoch, + return store->getRados()->raw_obj_stat(dpp, raw_obj, psize, pmtime, pepoch, nullptr, nullptr, objv_tracker, null_yield); } -RGWStatObjCR::RGWStatObjCR(RGWAsyncRadosProcessor *async_rados, rgw::sal::RGWRadosStore *store, +RGWStatObjCR::RGWStatObjCR(const DoutPrefixProvider *dpp, + RGWAsyncRadosProcessor *async_rados, rgw::sal::RGWRadosStore *store, const RGWBucketInfo& _bucket_info, const rgw_obj& obj, uint64_t *psize, real_time* pmtime, uint64_t *pepoch, RGWObjVersionTracker *objv_tracker) - : RGWSimpleCoroutine(store->ctx()), store(store), async_rados(async_rados), + : RGWSimpleCoroutine(store->ctx()), dpp(dpp), store(store), async_rados(async_rados), bucket_info(_bucket_info), obj(obj), psize(psize), pmtime(pmtime), pepoch(pepoch), objv_tracker(objv_tracker) { @@ -927,9 +935,9 @@ void RGWStatObjCR::request_cleanup() } } -int RGWStatObjCR::send_request() +int RGWStatObjCR::send_request(const DoutPrefixProvider *dpp) { - req = new RGWAsyncStatObj(this, stack->create_completion_notifier(), + req = new RGWAsyncStatObj(dpp, this, stack->create_completion_notifier(), store, bucket_info, obj, psize, pmtime, pepoch, objv_tracker); async_rados->queue(req); return 0; @@ -949,11 +957,11 @@ RGWRadosNotifyCR::RGWRadosNotifyCR(rgw::sal::RGWRadosStore *store, const rgw_raw set_description() << "notify dest=" << obj; } -int RGWRadosNotifyCR::send_request() +int RGWRadosNotifyCR::send_request(const DoutPrefixProvider *dpp) { - int r = store->getRados()->get_raw_obj_ref(obj, &ref); + int r = store->getRados()->get_raw_obj_ref(dpp, obj, &ref); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl; return r; } diff --git a/src/rgw/rgw_cr_rados.h b/src/rgw/rgw_cr_rados.h index ac267b618a7f3..87fa65b5514a1 100644 --- a/src/rgw/rgw_cr_rados.h +++ b/src/rgw/rgw_cr_rados.h @@ -17,6 +17,8 @@ #include "services/svc_sys_obj.h" #include "services/svc_bucket.h" +#define dout_subsys ceph_subsys_rgw + class RGWAsyncRadosRequest : public RefCountedObject { RGWCoroutine *caller; RGWAioCompletionNotifier *notifier; @@ -26,7 +28,7 @@ class RGWAsyncRadosRequest : public RefCountedObject { ceph::mutex lock = ceph::make_mutex("RGWAsyncRadosRequest::lock"); protected: - virtual int _send_request() = 0; + virtual int _send_request(const DoutPrefixProvider *dpp) = 0; public: RGWAsyncRadosRequest(RGWCoroutine *_caller, RGWAioCompletionNotifier *_cn) : caller(_caller), notifier(_cn), retcode(0) { @@ -37,9 +39,9 @@ public: } } - void send_request() { + void send_request(const DoutPrefixProvider *dpp) { get(); - retcode = 
_send_request(); + retcode = _send_request(dpp); { std::lock_guard l{lock}; if (notifier) { @@ -74,7 +76,7 @@ protected: ThreadPool m_tp; Throttle req_throttle; - struct RGWWQ : public ThreadPool::WorkQueue { + struct RGWWQ : public DoutPrefixProvider, public ThreadPool::WorkQueue { RGWAsyncRadosProcessor *processor; RGWWQ(RGWAsyncRadosProcessor *p, ceph::timespan timeout, ceph::timespan suicide_timeout, @@ -93,6 +95,11 @@ protected: void _clear() override { ceph_assert(processor->m_req_queue.empty()); } + + CephContext *get_cct() const { return processor->cct; } + unsigned get_subsys() const { return ceph_subsys_rgw; } + std::ostream& gen_prefix(std::ostream& out) const { return out << "rgw async rados processor: ";} + } req_wq; public: @@ -100,12 +107,13 @@ public: ~RGWAsyncRadosProcessor() {} void start(); void stop(); - void handle_request(RGWAsyncRadosRequest *req); + void handle_request(const DoutPrefixProvider *dpp, RGWAsyncRadosRequest *req); void queue(RGWAsyncRadosRequest *req); bool is_going_down() { return going_down; } + }; template @@ -121,7 +129,7 @@ class RGWSimpleWriteOnlyAsyncCR : public RGWSimpleCoroutine { P params; const DoutPrefixProvider *dpp; protected: - int _send_request() override; + int _send_request(const DoutPrefixProvider *dpp) override; public: Request(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, @@ -153,7 +161,7 @@ class RGWSimpleWriteOnlyAsyncCR : public RGWSimpleCoroutine { } } - int send_request() override { + int send_request(const DoutPrefixProvider *dpp) override { req = new Request(this, stack->create_completion_notifier(), store, @@ -176,33 +184,40 @@ class RGWSimpleAsyncCR : public RGWSimpleCoroutine { P params; std::shared_ptr result; + const DoutPrefixProvider *dpp; class Request : public RGWAsyncRadosRequest { rgw::sal::RGWRadosStore *store; P params; std::shared_ptr result; + const DoutPrefixProvider *dpp; protected: - int _send_request() override; + int _send_request(const DoutPrefixProvider *dpp) override; public: - Request(RGWCoroutine *caller, + Request(const DoutPrefixProvider *dpp, + RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *_store, const P& _params, - std::shared_ptr& _result) : RGWAsyncRadosRequest(caller, cn), + std::shared_ptr& _result, + const DoutPrefixProvider *_dpp) : RGWAsyncRadosRequest(caller, cn), store(_store), params(_params), - result(_result) {} + result(_result), + dpp(_dpp) {} } *req{nullptr}; public: RGWSimpleAsyncCR(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RGWRadosStore *_store, const P& _params, - std::shared_ptr& _result) : RGWSimpleCoroutine(_store->ctx()), + std::shared_ptr& _result, + const DoutPrefixProvider *_dpp) : RGWSimpleCoroutine(_store->ctx()), async_rados(_async_rados), store(_store), params(_params), - result(_result) {} + result(_result), + dpp(_dpp) {} ~RGWSimpleAsyncCR() override { request_cleanup(); @@ -214,12 +229,14 @@ class RGWSimpleAsyncCR : public RGWSimpleCoroutine { } } - int send_request() override { - req = new Request(this, + int send_request(const DoutPrefixProvider *dpp) override { + req = new Request(dpp, + this, stack->create_completion_notifier(), store, params, - result); + result, + dpp); async_rados->queue(req); return 0; @@ -247,14 +264,15 @@ private: class Request : public RGWAsyncRadosRequest { std::shared_ptr action; protected: - int _send_request() override { + int _send_request(const DoutPrefixProvider *dpp) override { if (!action) { return 0; } return action->operate(); } public: - Request(RGWCoroutine *caller, + Request(const 
DoutPrefixProvider *dpp, + RGWCoroutine *caller, RGWAioCompletionNotifier *cn, std::shared_ptr& _action) : RGWAsyncRadosRequest(caller, cn), action(_action) {} @@ -283,8 +301,8 @@ private: } } - int send_request() override { - req = new Request(this, + int send_request(const DoutPrefixProvider *dpp) override { + req = new Request(dpp, this, stack->create_completion_notifier(), action); @@ -298,14 +316,16 @@ private: class RGWAsyncGetSystemObj : public RGWAsyncRadosRequest { + const DoutPrefixProvider *dpp; RGWSysObjectCtx obj_ctx; rgw_raw_obj obj; const bool want_attrs; const bool raw_attrs; protected: - int _send_request() override; + int _send_request(const DoutPrefixProvider *dpp) override; public: - RGWAsyncGetSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWSI_SysObj *_svc, + RGWAsyncGetSystemObj(const DoutPrefixProvider *dpp, + RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWSI_SysObj *_svc, RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj, bool want_attrs, bool raw_attrs); @@ -315,15 +335,17 @@ public: }; class RGWAsyncPutSystemObj : public RGWAsyncRadosRequest { + const DoutPrefixProvider *dpp; RGWSI_SysObj *svc; rgw_raw_obj obj; bool exclusive; bufferlist bl; protected: - int _send_request() override; + int _send_request(const DoutPrefixProvider *dpp) override; public: - RGWAsyncPutSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWSI_SysObj *_svc, + RGWAsyncPutSystemObj(const DoutPrefixProvider *dpp, RGWCoroutine *caller, + RGWAioCompletionNotifier *cn, RGWSI_SysObj *_svc, RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj, bool _exclusive, bufferlist _bl); @@ -331,14 +353,15 @@ public: }; class RGWAsyncPutSystemObjAttrs : public RGWAsyncRadosRequest { + const DoutPrefixProvider *dpp; RGWSI_SysObj *svc; rgw_raw_obj obj; map attrs; protected: - int _send_request() override; + int _send_request(const DoutPrefixProvider *dpp) override; public: - RGWAsyncPutSystemObjAttrs(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWSI_SysObj *_svc, + RGWAsyncPutSystemObjAttrs(const DoutPrefixProvider *dpp, RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWSI_SysObj *_svc, RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj, map _attrs); @@ -353,7 +376,7 @@ class RGWAsyncLockSystemObj : public RGWAsyncRadosRequest { uint32_t duration_secs; protected: - int _send_request() override; + int _send_request(const DoutPrefixProvider *dpp) override; public: RGWAsyncLockSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *_store, RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj, @@ -367,7 +390,7 @@ class RGWAsyncUnlockSystemObj : public RGWAsyncRadosRequest { string cookie; protected: - int _send_request() override; + int _send_request(const DoutPrefixProvider *dpp) override; public: RGWAsyncUnlockSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *_store, RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj, @@ -376,6 +399,7 @@ public: template class RGWSimpleRadosReadCR : public RGWSimpleCoroutine { + const DoutPrefixProvider *dpp; RGWAsyncRadosProcessor *async_rados; RGWSI_SysObj *svc; @@ -387,11 +411,12 @@ class RGWSimpleRadosReadCR : public RGWSimpleCoroutine { RGWAsyncGetSystemObj *req{nullptr}; public: - RGWSimpleRadosReadCR(RGWAsyncRadosProcessor *_async_rados, RGWSI_SysObj *_svc, + RGWSimpleRadosReadCR(const DoutPrefixProvider *_dpp, + RGWAsyncRadosProcessor *_async_rados, RGWSI_SysObj *_svc, const rgw_raw_obj& _obj, T *_result, 
bool empty_on_enoent = true, RGWObjVersionTracker *objv_tracker = nullptr) - : RGWSimpleCoroutine(_svc->ctx()), async_rados(_async_rados), svc(_svc), + : RGWSimpleCoroutine(_svc->ctx()), dpp(_dpp), async_rados(_async_rados), svc(_svc), obj(_obj), result(_result), empty_on_enoent(empty_on_enoent), objv_tracker(objv_tracker) {} ~RGWSimpleRadosReadCR() override { @@ -405,7 +430,7 @@ public: } } - int send_request() override; + int send_request(const DoutPrefixProvider *dpp) override; int request_complete() override; virtual int handle_data(T& data) { @@ -414,9 +439,9 @@ public: }; template -int RGWSimpleRadosReadCR::send_request() +int RGWSimpleRadosReadCR::send_request(const DoutPrefixProvider *dpp) { - req = new RGWAsyncGetSystemObj(this, stack->create_completion_notifier(), svc, + req = new RGWAsyncGetSystemObj(dpp, this, stack->create_completion_notifier(), svc, objv_tracker, obj, false, false); async_rados->queue(req); return 0; @@ -453,6 +478,7 @@ int RGWSimpleRadosReadCR::request_complete() } class RGWSimpleRadosReadAttrsCR : public RGWSimpleCoroutine { + const DoutPrefixProvider *dpp; RGWAsyncRadosProcessor *async_rados; RGWSI_SysObj *svc; @@ -463,10 +489,11 @@ class RGWSimpleRadosReadAttrsCR : public RGWSimpleCoroutine { RGWAsyncGetSystemObj *req = nullptr; public: - RGWSimpleRadosReadAttrsCR(RGWAsyncRadosProcessor *_async_rados, RGWSI_SysObj *_svc, + RGWSimpleRadosReadAttrsCR(const DoutPrefixProvider *_dpp, RGWAsyncRadosProcessor *_async_rados, RGWSI_SysObj *_svc, const rgw_raw_obj& _obj, map *_pattrs, bool _raw_attrs, RGWObjVersionTracker* objv_tracker = nullptr) : RGWSimpleCoroutine(_svc->ctx()), + dpp(_dpp), async_rados(_async_rados), svc(_svc), obj(_obj), pattrs(_pattrs), @@ -484,12 +511,13 @@ public: } } - int send_request() override; + int send_request(const DoutPrefixProvider *dpp) override; int request_complete() override; }; template class RGWSimpleRadosWriteCR : public RGWSimpleCoroutine { + const DoutPrefixProvider *dpp; RGWAsyncRadosProcessor *async_rados; RGWSI_SysObj *svc; bufferlist bl; @@ -498,10 +526,11 @@ class RGWSimpleRadosWriteCR : public RGWSimpleCoroutine { RGWAsyncPutSystemObj *req{nullptr}; public: - RGWSimpleRadosWriteCR(RGWAsyncRadosProcessor *_async_rados, RGWSI_SysObj *_svc, + RGWSimpleRadosWriteCR(const DoutPrefixProvider *_dpp, + RGWAsyncRadosProcessor *_async_rados, RGWSI_SysObj *_svc, const rgw_raw_obj& _obj, const T& _data, RGWObjVersionTracker *objv_tracker = nullptr) - : RGWSimpleCoroutine(_svc->ctx()), async_rados(_async_rados), + : RGWSimpleCoroutine(_svc->ctx()), dpp(_dpp), async_rados(_async_rados), svc(_svc), obj(_obj), objv_tracker(objv_tracker) { encode(_data, bl); } @@ -517,8 +546,8 @@ public: } } - int send_request() override { - req = new RGWAsyncPutSystemObj(this, stack->create_completion_notifier(), + int send_request(const DoutPrefixProvider *dpp) override { + req = new RGWAsyncPutSystemObj(dpp, this, stack->create_completion_notifier(), svc, objv_tracker, obj, false, std::move(bl)); async_rados->queue(req); return 0; @@ -533,6 +562,7 @@ public: }; class RGWSimpleRadosWriteAttrsCR : public RGWSimpleCoroutine { + const DoutPrefixProvider *dpp; RGWAsyncRadosProcessor *async_rados; RGWSI_SysObj *svc; RGWObjVersionTracker *objv_tracker; @@ -542,11 +572,12 @@ class RGWSimpleRadosWriteAttrsCR : public RGWSimpleCoroutine { RGWAsyncPutSystemObjAttrs *req = nullptr; public: - RGWSimpleRadosWriteAttrsCR(RGWAsyncRadosProcessor *_async_rados, + RGWSimpleRadosWriteAttrsCR(const DoutPrefixProvider *_dpp, + RGWAsyncRadosProcessor *_async_rados, 
RGWSI_SysObj *_svc, const rgw_raw_obj& _obj, map _attrs, RGWObjVersionTracker *objv_tracker = nullptr) - : RGWSimpleCoroutine(_svc->ctx()), async_rados(_async_rados), + : RGWSimpleCoroutine(_svc->ctx()), dpp(_dpp), async_rados(_async_rados), svc(_svc), objv_tracker(objv_tracker), obj(_obj), attrs(std::move(_attrs)) { } @@ -561,8 +592,8 @@ public: } } - int send_request() override { - req = new RGWAsyncPutSystemObjAttrs(this, stack->create_completion_notifier(), + int send_request(const DoutPrefixProvider *dpp) override { + req = new RGWAsyncPutSystemObjAttrs(dpp, this, stack->create_completion_notifier(), svc, objv_tracker, obj, std::move(attrs)); async_rados->queue(req); return 0; @@ -591,7 +622,7 @@ public: const rgw_raw_obj& _obj, map& _entries); - int send_request() override; + int send_request(const DoutPrefixProvider *dpp) override; int request_complete() override; }; @@ -608,7 +639,7 @@ class RGWRadosGetOmapKeysCR : public RGWSimpleCoroutine { const string& _marker, int _max_entries, ResultPtr result); - int send_request() override; + int send_request(const DoutPrefixProvider *dpp) override; int request_complete() override; private: @@ -633,7 +664,7 @@ class RGWRadosGetOmapValsCR : public RGWSimpleCoroutine { const string& _marker, int _max_entries, ResultPtr result); - int send_request() override; + int send_request(const DoutPrefixProvider *dpp) override; int request_complete() override; private: @@ -661,7 +692,7 @@ public: const rgw_raw_obj& _obj, const set& _keys); - int send_request() override; + int send_request(const DoutPrefixProvider *dpp) override; int request_complete() override; }; @@ -677,7 +708,7 @@ public: RGWRadosRemoveCR(rgw::sal::RGWRadosStore *store, const rgw_raw_obj& obj, RGWObjVersionTracker* objv_tracker = nullptr); - int send_request() override; + int send_request(const DoutPrefixProvider *dpp) override; int request_complete() override; }; @@ -703,7 +734,7 @@ public: } void request_cleanup() override; - int send_request() override; + int send_request(const DoutPrefixProvider *dpp) override; int request_complete() override; static std::string gen_random_cookie(CephContext* cct) { @@ -734,7 +765,7 @@ public: } void request_cleanup() override; - int send_request() override; + int send_request(const DoutPrefixProvider *dpp) override; int request_complete() override; }; @@ -759,7 +790,7 @@ public: RGWOmapAppend(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RGWRadosStore *_store, const rgw_raw_obj& _obj, uint64_t _window_size = OMAP_APPEND_MAX_ENTRIES_DEFAULT); - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; void flush_pending(); bool append(const string& s); bool finish(); @@ -821,13 +852,15 @@ public: class RGWAsyncGetBucketInstanceInfo : public RGWAsyncRadosRequest { rgw::sal::RGWRadosStore *store; rgw_bucket bucket; + const DoutPrefixProvider *dpp; protected: - int _send_request() override; + int _send_request(const DoutPrefixProvider *dpp) override; public: RGWAsyncGetBucketInstanceInfo(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, - rgw::sal::RGWRadosStore *_store, const rgw_bucket& bucket) - : RGWAsyncRadosRequest(caller, cn), store(_store), bucket(bucket) {} + rgw::sal::RGWRadosStore *_store, const rgw_bucket& bucket, + const DoutPrefixProvider *dpp) + : RGWAsyncRadosRequest(caller, cn), store(_store), bucket(bucket), dpp(dpp) {} RGWBucketInfo bucket_info; map attrs; @@ -839,6 +872,7 @@ class RGWGetBucketInstanceInfoCR : public RGWSimpleCoroutine { rgw_bucket bucket; RGWBucketInfo *bucket_info; map *pattrs; + const 
DoutPrefixProvider *dpp; RGWAsyncGetBucketInstanceInfo *req{nullptr}; @@ -846,9 +880,9 @@ public: // rgw_bucket constructor RGWGetBucketInstanceInfoCR(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RGWRadosStore *_store, const rgw_bucket& _bucket, RGWBucketInfo *_bucket_info, - map *_pattrs) + map *_pattrs, const DoutPrefixProvider *dpp) : RGWSimpleCoroutine(_store->ctx()), async_rados(_async_rados), store(_store), - bucket(_bucket), bucket_info(_bucket_info), pattrs(_pattrs) {} + bucket(_bucket), bucket_info(_bucket_info), pattrs(_pattrs), dpp(dpp) {} ~RGWGetBucketInstanceInfoCR() override { request_cleanup(); } @@ -859,8 +893,8 @@ public: } } - int send_request() override { - req = new RGWAsyncGetBucketInstanceInfo(this, stack->create_completion_notifier(), store, bucket); + int send_request(const DoutPrefixProvider *dpp) override { + req = new RGWAsyncGetBucketInstanceInfo(this, stack->create_completion_notifier(), store, bucket, dpp); async_rados->queue(req); return 0; } @@ -881,11 +915,12 @@ class RGWRadosBILogTrimCR : public RGWSimpleCoroutine { std::string end_marker; boost::intrusive_ptr cn; public: - RGWRadosBILogTrimCR(rgw::sal::RGWRadosStore *store, const RGWBucketInfo& bucket_info, + RGWRadosBILogTrimCR(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, const RGWBucketInfo& bucket_info, int shard_id, const std::string& start_marker, const std::string& end_marker); - int send_request() override; + int send_request(const DoutPrefixProvider *dpp) override; int request_complete() override; }; @@ -912,7 +947,7 @@ class RGWAsyncFetchRemoteObj : public RGWAsyncRadosRequest { const DoutPrefixProvider *dpp; protected: - int _send_request() override; + int _send_request(const DoutPrefixProvider *dpp) override; public: RGWAsyncFetchRemoteObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *_store, const rgw_zone_id& _source_zone, @@ -1015,7 +1050,7 @@ public: } } - int send_request() override { + int send_request(const DoutPrefixProvider *dpp) override { req = new RGWAsyncFetchRemoteObj(this, stack->create_completion_notifier(), store, source_zone, user_id, src_bucket, dest_placement_rule, dest_bucket_info, key, dest_key, versioned_epoch, copy_if_newer, filter, @@ -1043,7 +1078,7 @@ class RGWAsyncStatRemoteObj : public RGWAsyncRadosRequest { map *pheaders; protected: - int _send_request() override; + int _send_request(const DoutPrefixProvider *dpp) override; public: RGWAsyncStatRemoteObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *_store, const rgw_zone_id& _source_zone, @@ -1114,7 +1149,7 @@ public: } } - int send_request() override { + int send_request(const DoutPrefixProvider *dpp) override { req = new RGWAsyncStatRemoteObj(this, stack->create_completion_notifier(), store, source_zone, src_bucket, key, pmtime, psize, petag, pattrs, pheaders); async_rados->queue(req); @@ -1127,6 +1162,7 @@ public: }; class RGWAsyncRemoveObj : public RGWAsyncRadosRequest { + const DoutPrefixProvider *dpp; rgw::sal::RGWRadosStore *store; rgw_zone_id source_zone; @@ -1144,9 +1180,10 @@ class RGWAsyncRemoveObj : public RGWAsyncRadosRequest { rgw_zone_set zones_trace; protected: - int _send_request() override; + int _send_request(const DoutPrefixProvider *dpp) override; public: - RGWAsyncRemoveObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *_store, + RGWAsyncRemoveObj(const DoutPrefixProvider *_dpp, RGWCoroutine *caller, RGWAioCompletionNotifier *cn, + rgw::sal::RGWRadosStore *_store, const 
rgw_zone_id& _source_zone, RGWBucketInfo& _bucket_info, const rgw_obj_key& _key, @@ -1157,7 +1194,7 @@ public: bool _delete_marker, bool _if_older, real_time& _timestamp, - rgw_zone_set* _zones_trace) : RGWAsyncRadosRequest(caller, cn), store(_store), + rgw_zone_set* _zones_trace) : RGWAsyncRadosRequest(caller, cn), dpp(_dpp), store(_store), source_zone(_source_zone), bucket_info(_bucket_info), key(_key), @@ -1178,6 +1215,7 @@ public: }; class RGWRemoveObjCR : public RGWSimpleCoroutine { + const DoutPrefixProvider *dpp; CephContext *cct; RGWAsyncRadosProcessor *async_rados; rgw::sal::RGWRadosStore *store; @@ -1200,7 +1238,7 @@ class RGWRemoveObjCR : public RGWSimpleCoroutine { rgw_zone_set *zones_trace; public: - RGWRemoveObjCR(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RGWRadosStore *_store, + RGWRemoveObjCR(const DoutPrefixProvider *_dpp, RGWAsyncRadosProcessor *_async_rados, rgw::sal::RGWRadosStore *_store, const rgw_zone_id& _source_zone, RGWBucketInfo& _bucket_info, const rgw_obj_key& _key, @@ -1210,7 +1248,7 @@ public: string *_owner_display_name, bool _delete_marker, real_time *_timestamp, - rgw_zone_set *_zones_trace) : RGWSimpleCoroutine(_store->ctx()), cct(_store->ctx()), + rgw_zone_set *_zones_trace) : RGWSimpleCoroutine(_store->ctx()), dpp(_dpp), cct(_store->ctx()), async_rados(_async_rados), store(_store), source_zone(_source_zone), bucket_info(_bucket_info), @@ -1242,8 +1280,8 @@ public: } } - int send_request() override { - req = new RGWAsyncRemoveObj(this, stack->create_completion_notifier(), store, source_zone, bucket_info, + int send_request(const DoutPrefixProvider *dpp) override { + req = new RGWAsyncRemoveObj(dpp, this, stack->create_completion_notifier(), store, source_zone, bucket_info, key, owner, owner_display_name, versioned, versioned_epoch, delete_marker, del_if_older, timestamp, zones_trace); async_rados->queue(req); @@ -1282,7 +1320,7 @@ public: interval(_interval), caller(_caller) {} - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; bool is_locked() const { return locked; @@ -1303,6 +1341,7 @@ public: }; class RGWRadosTimelogAddCR : public RGWSimpleCoroutine { + const DoutPrefixProvider *dpp; rgw::sal::RGWRadosStore *store; list entries; @@ -1311,14 +1350,15 @@ class RGWRadosTimelogAddCR : public RGWSimpleCoroutine { boost::intrusive_ptr cn; public: - RGWRadosTimelogAddCR(rgw::sal::RGWRadosStore *_store, const string& _oid, + RGWRadosTimelogAddCR(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *_store, const string& _oid, const cls_log_entry& entry); - int send_request() override; + int send_request(const DoutPrefixProvider *dpp) override; int request_complete() override; }; class RGWRadosTimelogTrimCR : public RGWSimpleCoroutine { + const DoutPrefixProvider *dpp; rgw::sal::RGWRadosStore *store; boost::intrusive_ptr cn; protected: @@ -1329,12 +1369,13 @@ class RGWRadosTimelogTrimCR : public RGWSimpleCoroutine { std::string to_marker; public: - RGWRadosTimelogTrimCR(rgw::sal::RGWRadosStore *store, const std::string& oid, + RGWRadosTimelogTrimCR(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, const std::string& oid, const real_time& start_time, const real_time& end_time, const std::string& from_marker, const std::string& to_marker); - int send_request() override; + int send_request(const DoutPrefixProvider *dpp) override; int request_complete() override; }; @@ -1345,12 +1386,14 @@ class RGWSyncLogTrimCR : public RGWRadosTimelogTrimCR { public: static constexpr const char* max_marker = "99999999"; - 
RGWSyncLogTrimCR(rgw::sal::RGWRadosStore *store, const std::string& oid, + RGWSyncLogTrimCR(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, const std::string& oid, const std::string& to_marker, std::string *last_trim_marker); int request_complete() override; }; class RGWAsyncStatObj : public RGWAsyncRadosRequest { + const DoutPrefixProvider *dpp; rgw::sal::RGWRadosStore *store; RGWBucketInfo bucket_info; rgw_obj obj; @@ -1359,17 +1402,18 @@ class RGWAsyncStatObj : public RGWAsyncRadosRequest { uint64_t *pepoch; RGWObjVersionTracker *objv_tracker; protected: - int _send_request() override; + int _send_request(const DoutPrefixProvider *dpp) override; public: - RGWAsyncStatObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *store, + RGWAsyncStatObj(const DoutPrefixProvider *dpp, RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *store, const RGWBucketInfo& _bucket_info, const rgw_obj& obj, uint64_t *psize = nullptr, real_time *pmtime = nullptr, uint64_t *pepoch = nullptr, RGWObjVersionTracker *objv_tracker = nullptr) - : RGWAsyncRadosRequest(caller, cn), store(store), obj(obj), psize(psize), + : RGWAsyncRadosRequest(caller, cn), dpp(dpp), store(store), obj(obj), psize(psize), pmtime(pmtime), pepoch(pepoch), objv_tracker(objv_tracker) {} }; class RGWStatObjCR : public RGWSimpleCoroutine { + const DoutPrefixProvider *dpp; rgw::sal::RGWRadosStore *store; RGWAsyncRadosProcessor *async_rados; RGWBucketInfo bucket_info; @@ -1380,7 +1424,7 @@ class RGWStatObjCR : public RGWSimpleCoroutine { RGWObjVersionTracker *objv_tracker; RGWAsyncStatObj *req = nullptr; public: - RGWStatObjCR(RGWAsyncRadosProcessor *async_rados, rgw::sal::RGWRadosStore *store, + RGWStatObjCR(const DoutPrefixProvider *dpp, RGWAsyncRadosProcessor *async_rados, rgw::sal::RGWRadosStore *store, const RGWBucketInfo& _bucket_info, const rgw_obj& obj, uint64_t *psize = nullptr, real_time* pmtime = nullptr, uint64_t *pepoch = nullptr, RGWObjVersionTracker *objv_tracker = nullptr); @@ -1389,7 +1433,7 @@ class RGWStatObjCR : public RGWSimpleCoroutine { } void request_cleanup() override; - int send_request() override; + int send_request(const DoutPrefixProvider *dpp) override; int request_complete() override; }; @@ -1408,7 +1452,7 @@ public: bufferlist& request, uint64_t timeout_ms, bufferlist *response); - int send_request() override; + int send_request(const DoutPrefixProvider *dpp) override; int request_complete() override; }; diff --git a/src/rgw/rgw_cr_rest.cc b/src/rgw/rgw_cr_rest.cc index 06c365222505f..f290d11929e31 100644 --- a/src/rgw/rgw_cr_rest.cc +++ b/src/rgw/rgw_cr_rest.cc @@ -87,7 +87,7 @@ RGWStreamReadHTTPResourceCRF::~RGWStreamReadHTTPResourceCRF() } } -int RGWStreamReadHTTPResourceCRF::init() +int RGWStreamReadHTTPResourceCRF::init(const DoutPrefixProvider *dpp) { env->stack->init_new_io(req); @@ -191,7 +191,7 @@ RGWStreamWriteHTTPResourceCRF::~RGWStreamWriteHTTPResourceCRF() } } -void RGWStreamWriteHTTPResourceCRF::send_ready(const rgw_rest_obj& rest_obj) +void RGWStreamWriteHTTPResourceCRF::send_ready(const DoutPrefixProvider *dpp, const rgw_rest_obj& rest_obj) { req->set_send_length(rest_obj.content_len); for (auto h : rest_obj.attrs) { @@ -264,10 +264,10 @@ RGWStreamSpliceCR::RGWStreamSpliceCR(CephContext *_cct, RGWHTTPManager *_mgr, in_crf(_in_crf), out_crf(_out_crf) {} RGWStreamSpliceCR::~RGWStreamSpliceCR() { } -int RGWStreamSpliceCR::operate() { +int RGWStreamSpliceCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { { - int ret = 
in_crf->init(); + int ret = in_crf->init(dpp); if (ret < 0) { return set_cr_error(ret); } @@ -303,7 +303,7 @@ int RGWStreamSpliceCR::operate() { if (ret < 0) { return set_cr_error(ret); } - out_crf->send_ready(in_crf->get_rest_obj()); + out_crf->send_ready(dpp, in_crf->get_rest_obj()); ret = out_crf->send(); if (ret < 0) { return set_cr_error(ret); diff --git a/src/rgw/rgw_cr_rest.h b/src/rgw/rgw_cr_rest.h index 0776c4284f67d..914eebee02de2 100644 --- a/src/rgw/rgw_cr_rest.h +++ b/src/rgw/rgw_cr_rest.h @@ -69,13 +69,13 @@ public: request_cleanup(); } - int send_request() override { + int send_request(const DoutPrefixProvider *dpp) override { auto op = boost::intrusive_ptr( new RGWRESTReadResource(conn, path, params, &extra_headers, http_manager)); init_new_io(op.get()); - int ret = op->aio_read(); + int ret = op->aio_read(dpp); if (ret < 0) { log_error() << "failed to send http operation: " << op->to_str() << " ret=" << ret << std::endl; @@ -186,15 +186,15 @@ class RGWSendRawRESTResourceCR: public RGWSimpleCoroutine { request_cleanup(); } - int send_request() override { + int send_request(const DoutPrefixProvider *dpp) override { auto op = boost::intrusive_ptr( new RGWRESTSendResource(conn, method, path, params, &headers, http_manager)); init_new_io(op.get()); - int ret = op->aio_send(input_bl); + int ret = op->aio_send(dpp, input_bl); if (ret < 0) { - lsubdout(cct, rgw, 0) << "ERROR: failed to send request" << dendl; + ldpp_subdout(dpp, rgw, 0) << "ERROR: failed to send request" << dendl; op->put(); return ret; } @@ -341,7 +341,7 @@ public: request_cleanup(); } - int send_request() override { + int send_request(const DoutPrefixProvider *dpp) override { auto op = boost::intrusive_ptr( new RGWRESTDeleteResource(conn, path, params, nullptr, http_manager)); @@ -349,9 +349,9 @@ public: bufferlist bl; - int ret = op->aio_send(bl); + int ret = op->aio_send(dpp, bl); if (ret < 0) { - lsubdout(cct, rgw, 0) << "ERROR: failed to send DELETE request" << dendl; + ldpp_subdout(dpp, rgw, 0) << "ERROR: failed to send DELETE request" << dendl; op->put(); return ret; } @@ -421,7 +421,7 @@ protected: boost::asio::coroutine read_state; public: - virtual int init() = 0; + virtual int init(const DoutPrefixProvider *dpp) = 0; virtual int read(bufferlist *data, uint64_t max, bool *need_retry) = 0; /* reentrant */ virtual int decode_rest_obj(map& headers, bufferlist& extra_data) = 0; virtual bool has_attrs() = 0; @@ -436,7 +436,7 @@ protected: public: virtual int init() = 0; - virtual void send_ready(const rgw_rest_obj& rest_obj) = 0; + virtual void send_ready(const DoutPrefixProvider *dpp, const rgw_rest_obj& rest_obj) = 0; virtual int send() = 0; virtual int write(bufferlist& data, bool *need_retry) = 0; /* reentrant */ virtual int drain_writes(bool *need_retry) = 0; /* reentrant */ @@ -486,7 +486,7 @@ public: } ~RGWStreamReadHTTPResourceCRF(); - int init() override; + int init(const DoutPrefixProvider *dpp) override; int read(bufferlist *data, uint64_t max, bool *need_retry) override; /* reentrant */ int decode_rest_obj(map& headers, bufferlist& extra_data) override; bool has_attrs() override; @@ -549,7 +549,7 @@ public: int init() override { return 0; } - void send_ready(const rgw_rest_obj& rest_obj) override; + void send_ready(const DoutPrefixProvider *dpp, const rgw_rest_obj& rest_obj) override; int send() override; int write(bufferlist& data, bool *need_retry) override; /* reentrant */ void write_drain_notify(uint64_t pending_size); @@ -586,5 +586,5 @@ public: std::shared_ptr& _out_crf); 
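// ---------------------------------------------------------------------------
// Illustrative sketch only, not part of the patch. The send_request() bodies in
// this header log through ldpp_subdout(dpp, rgw, 0), which, like the lsubdout()
// calls it replaces, names the subsystem (rgw) explicitly while still taking the
// CephContext and line prefix from the DoutPrefixProvider. The helper below is
// hypothetical; only the macro usage mirrors the hunks above.

#include "common/dout.h"

static void report_send_failure(const DoutPrefixProvider *dpp,
                                const char *what, int ret)
{
  // same call shape as the converted aio_send()/aio_read() error paths above
  ldpp_subdout(dpp, rgw, 0) << "ERROR: failed to send " << what
                            << " request, ret=" << ret << dendl;
}
// ---------------------------------------------------------------------------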
~RGWStreamSpliceCR(); - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; diff --git a/src/rgw/rgw_cr_tools.cc b/src/rgw/rgw_cr_tools.cc index 74497d5fc05e4..3d932764a91d4 100644 --- a/src/rgw/rgw_cr_tools.cc +++ b/src/rgw/rgw_cr_tools.cc @@ -16,7 +16,7 @@ #define dout_subsys ceph_subsys_rgw template<> -int RGWUserCreateCR::Request::_send_request() +int RGWUserCreateCR::Request::_send_request(const DoutPrefixProvider *dpp) { CephContext *cct = store->ctx(); @@ -87,24 +87,24 @@ int RGWUserCreateCR::Request::_send_request() } RGWNullFlusher flusher; - return RGWUserAdminOp_User::create(store, op_state, flusher, null_yield); + return RGWUserAdminOp_User::create(dpp, store, op_state, flusher, null_yield); } template<> -int RGWGetUserInfoCR::Request::_send_request() +int RGWGetUserInfoCR::Request::_send_request(const DoutPrefixProvider *dpp) { - return store->ctl()->user->get_info_by_uid(params.user, result.get(), null_yield); + return store->ctl()->user->get_info_by_uid(dpp, params.user, result.get(), null_yield); } template<> -int RGWGetBucketInfoCR::Request::_send_request() +int RGWGetBucketInfoCR::Request::_send_request(const DoutPrefixProvider *dpp) { return store->getRados()->get_bucket_info(store->svc(), params.tenant, params.bucket_name, - result->bucket_info, &result->mtime, null_yield, &result->attrs); + result->bucket_info, &result->mtime, null_yield, dpp, &result->attrs); } template<> -int RGWBucketCreateLocalCR::Request::_send_request() +int RGWBucketCreateLocalCR::Request::_send_request(const DoutPrefixProvider *dpp) { CephContext *cct = store->ctx(); auto& zone_svc = store->svc()->zone; @@ -116,7 +116,7 @@ int RGWBucketCreateLocalCR::Request::_send_request() if (!placement_rule.empty() && !zone_svc->get_zone_params().valid_placement(placement_rule)) { - ldout(cct, 0) << "placement target (" << placement_rule << ")" + ldpp_dout(dpp, 0) << "placement target (" << placement_rule << ")" << " doesn't exist in the placement targets of zonegroup" << " (" << zone_svc->get_zonegroup().api_name << ")" << dendl; return -ERR_INVALID_LOCATION_CONSTRAINT; @@ -128,7 +128,7 @@ int RGWBucketCreateLocalCR::Request::_send_request() map bucket_attrs; int ret = store->getRados()->get_bucket_info(store->svc(), user.tenant, bucket_name, - bucket_info, nullptr, null_yield, &bucket_attrs); + bucket_info, nullptr, null_yield, dpp, &bucket_attrs); if (ret < 0 && ret != -ENOENT) return ret; bool bucket_exists = (ret != -ENOENT); @@ -138,7 +138,7 @@ int RGWBucketCreateLocalCR::Request::_send_request() bucket_owner.set_id(user); bucket_owner.set_name(user_info->display_name); if (bucket_exists) { - ret = rgw_op_get_bucket_policy_from_attr(cct, store, bucket_info, + ret = rgw_op_get_bucket_policy_from_attr(dpp, cct, store, bucket_info, bucket_attrs, &old_policy, null_yield); if (ret >= 0) { if (old_policy.get_owner().get_id().compare(user) != 0) { @@ -159,11 +159,11 @@ int RGWBucketCreateLocalCR::Request::_send_request() rgw_bucket bucket; bucket.tenant = user.tenant; bucket.name = bucket_name; - ret = zone_svc->select_bucket_placement(*user_info, zonegroup_id, + ret = zone_svc->select_bucket_placement(dpp, *user_info, zonegroup_id, placement_rule, &selected_placement_rule, nullptr, null_yield); if (selected_placement_rule != bucket_info.placement_rule) { - ldout(cct, 0) << "bucket already exists on a different placement rule: " + ldpp_dout(dpp, 0) << "bucket already exists on a different placement rule: " << " selected_rule= " << selected_placement_rule << " existing_rule= " << 
bucket_info.placement_rule << dendl; return -EEXIST; @@ -194,7 +194,7 @@ int RGWBucketCreateLocalCR::Request::_send_request() placement_rule, bucket_info.swift_ver_location, pquota_info, attrs, info, nullptr, &ep_objv, creation_time, - pmaster_bucket, pmaster_num_shards, null_yield, true); + pmaster_bucket, pmaster_num_shards, null_yield, dpp, true); if (ret && ret != -EEXIST) @@ -204,32 +204,32 @@ int RGWBucketCreateLocalCR::Request::_send_request() if (existed) { if (info.owner != user) { - ldout(cct, 20) << "NOTICE: bucket already exists under a different user (bucket=" << bucket << " user=" << user << " bucket_owner=" << info.owner << dendl; + ldpp_dout(dpp, 20) << "NOTICE: bucket already exists under a different user (bucket=" << bucket << " user=" << user << " bucket_owner=" << info.owner << dendl; return -EEXIST; } bucket = info.bucket; } - ret = store->ctl()->bucket->link_bucket(user, bucket, info.creation_time, null_yield, false); + ret = store->ctl()->bucket->link_bucket(user, bucket, info.creation_time, null_yield, dpp, false); if (ret && !existed && ret != -EEXIST) { /* if it exists (or previously existed), don't remove it! */ - int r = store->ctl()->bucket->unlink_bucket(user, bucket, null_yield); + int r = store->ctl()->bucket->unlink_bucket(user, bucket, null_yield, dpp); if (r < 0) { - ldout(cct, 0) << "WARNING: failed to unlink bucket: ret=" << r << dendl; + ldpp_dout(dpp, 0) << "WARNING: failed to unlink bucket: ret=" << r << dendl; } } else if (ret == -EEXIST || (ret == 0 && existed)) { ret = -ERR_BUCKET_EXISTS; } if (ret < 0) { - ldout(cct, 0) << "ERROR: bucket creation (bucket=" << bucket << ") return ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: bucket creation (bucket=" << bucket << ") return ret=" << ret << dendl; } return ret; } template<> -int RGWObjectSimplePutCR::Request::_send_request() +int RGWObjectSimplePutCR::Request::_send_request(const DoutPrefixProvider *dpp) { RGWDataAccess::ObjectRef obj; @@ -247,14 +247,14 @@ int RGWObjectSimplePutCR::Request::_send_request() ret = obj->put(params.data, params.attrs, dpp, null_yield); if (ret < 0) { - lderr(cct) << "ERROR: put object returned error: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, -1) << "ERROR: put object returned error: " << cpp_strerror(-ret) << dendl; } return 0; } template<> -int RGWBucketLifecycleConfigCR::Request::_send_request() +int RGWBucketLifecycleConfigCR::Request::_send_request(const DoutPrefixProvider *dpp) { CephContext *cct = store->ctx(); @@ -276,16 +276,15 @@ int RGWBucketLifecycleConfigCR::Request::_send_request() } template<> -int RGWBucketGetSyncPolicyHandlerCR::Request::_send_request() +int RGWBucketGetSyncPolicyHandlerCR::Request::_send_request(const DoutPrefixProvider *dpp) { - CephContext *cct = store->ctx(); - int r = store->ctl()->bucket->get_sync_policy_handler(params.zone, params.bucket, &result->policy_handler, - null_yield); + null_yield, + dpp); if (r < 0) { - lderr(cct) << "ERROR: " << __func__ << "(): get_sync_policy_handler() returned " << r << dendl; + ldpp_dout(dpp, -1) << "ERROR: " << __func__ << "(): get_sync_policy_handler() returned " << r << dendl; return r; } diff --git a/src/rgw/rgw_crypt.cc b/src/rgw/rgw_crypt.cc index df0111afebbcd..16d226edfefb9 100644 --- a/src/rgw/rgw_crypt.cc +++ b/src/rgw/rgw_crypt.cc @@ -648,7 +648,7 @@ RGWGetObj_BlockDecrypt::RGWGetObj_BlockDecrypt(CephContext* cct, RGWGetObj_BlockDecrypt::~RGWGetObj_BlockDecrypt() { } -int RGWGetObj_BlockDecrypt::read_manifest(bufferlist& manifest_bl) { +int 
RGWGetObj_BlockDecrypt::read_manifest(const DoutPrefixProvider *dpp, bufferlist& manifest_bl) { parts_len.clear(); RGWObjManifest manifest; if (manifest_bl.length()) { @@ -656,11 +656,11 @@ int RGWGetObj_BlockDecrypt::read_manifest(bufferlist& manifest_bl) { try { decode(manifest, miter); } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: couldn't decode manifest" << dendl; + ldpp_dout(dpp, 0) << "ERROR: couldn't decode manifest" << dendl; return -EIO; } RGWObjManifest::obj_iterator mi; - for (mi = manifest.obj_begin(); mi != manifest.obj_end(); ++mi) { + for (mi = manifest.obj_begin(dpp); mi != manifest.obj_end(dpp); ++mi) { if (mi.get_cur_stripe() == 0) { parts_len.push_back(0); } @@ -668,7 +668,7 @@ int RGWGetObj_BlockDecrypt::read_manifest(bufferlist& manifest_bl) { } if (cct->_conf->subsys.should_gather()) { for (size_t i = 0; icct, 5) << "ERROR: Invalid value for header " + ldpp_dout(s, 5) << "ERROR: Invalid value for header " << "x-amz-server-side-encryption-customer-algorithm" << dendl; s->err.message = "The requested encryption algorithm is not valid, must be AES256."; @@ -919,7 +919,7 @@ int rgw_s3_prepare_encrypt(struct req_state* s, } if (s->cct->_conf->rgw_crypt_require_ssl && !rgw_transport_is_secure(s->cct, *s->info.env)) { - ldout(s->cct, 5) << "ERROR: Insecure request, rgw_crypt_require_ssl is set" << dendl; + ldpp_dout(s, 5) << "ERROR: Insecure request, rgw_crypt_require_ssl is set" << dendl; return -ERR_INVALID_REQUEST; } @@ -928,7 +928,7 @@ int rgw_s3_prepare_encrypt(struct req_state* s, key_bin = from_base64( get_crypt_attribute(s->info.env, parts, X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY) ); } catch (...) { - ldout(s->cct, 5) << "ERROR: rgw_s3_prepare_encrypt invalid encryption " + ldpp_dout(s, 5) << "ERROR: rgw_s3_prepare_encrypt invalid encryption " << "key which contains character that is not base64 encoded." << dendl; s->err.message = "Requests specifying Server Side Encryption with Customer " @@ -937,7 +937,7 @@ int rgw_s3_prepare_encrypt(struct req_state* s, } if (key_bin.size() != AES_256_CBC::AES_256_KEYSIZE) { - ldout(s->cct, 5) << "ERROR: invalid encryption key size" << dendl; + ldpp_dout(s, 5) << "ERROR: invalid encryption key size" << dendl; s->err.message = "Requests specifying Server Side Encryption with Customer " "provided keys must provide an appropriate secret key."; return -EINVAL; @@ -950,7 +950,7 @@ int rgw_s3_prepare_encrypt(struct req_state* s, try { keymd5_bin = from_base64(keymd5); } catch (...) { - ldout(s->cct, 5) << "ERROR: rgw_s3_prepare_encrypt invalid encryption key " + ldpp_dout(s, 5) << "ERROR: rgw_s3_prepare_encrypt invalid encryption key " << "md5 which contains character that is not base64 encoded." 
<< dendl; s->err.message = "Requests specifying Server Side Encryption with Customer " @@ -959,7 +959,7 @@ int rgw_s3_prepare_encrypt(struct req_state* s, } if (keymd5_bin.size() != CEPH_CRYPTO_MD5_DIGESTSIZE) { - ldout(s->cct, 5) << "ERROR: Invalid key md5 size" << dendl; + ldpp_dout(s, 5) << "ERROR: Invalid key md5 size" << dendl; s->err.message = "Requests specifying Server Side Encryption with Customer " "provided keys must provide an appropriate secret key md5."; return -EINVAL; @@ -971,7 +971,7 @@ int rgw_s3_prepare_encrypt(struct req_state* s, key_hash.Final(key_hash_res); if (memcmp(key_hash_res, keymd5_bin.c_str(), CEPH_CRYPTO_MD5_DIGESTSIZE) != 0) { - ldout(s->cct, 5) << "ERROR: Invalid key md5 hash" << dendl; + ldpp_dout(s, 5) << "ERROR: Invalid key md5 hash" << dendl; s->err.message = "The calculated MD5 hash of the key did not match the hash that was provided."; return -EINVAL; } @@ -992,7 +992,7 @@ int rgw_s3_prepare_encrypt(struct req_state* s, std::string_view customer_key = get_crypt_attribute(s->info.env, parts, X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY); if (!customer_key.empty()) { - ldout(s->cct, 5) << "ERROR: SSE-C encryption request is missing the header " + ldpp_dout(s, 5) << "ERROR: SSE-C encryption request is missing the header " << "x-amz-server-side-encryption-customer-algorithm" << dendl; s->err.message = "Requests specifying Server Side Encryption with Customer " @@ -1003,7 +1003,7 @@ int rgw_s3_prepare_encrypt(struct req_state* s, std::string_view customer_key_md5 = get_crypt_attribute(s->info.env, parts, X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5); if (!customer_key_md5.empty()) { - ldout(s->cct, 5) << "ERROR: SSE-C encryption request is missing the header " + ldpp_dout(s, 5) << "ERROR: SSE-C encryption request is missing the header " << "x-amz-server-side-encryption-customer-algorithm" << dendl; s->err.message = "Requests specifying Server Side Encryption with Customer " @@ -1019,7 +1019,7 @@ int rgw_s3_prepare_encrypt(struct req_state* s, if (s->cct->_conf->rgw_crypt_require_ssl && !rgw_transport_is_secure(s->cct, *s->info.env)) { - ldout(s->cct, 5) << "ERROR: insecure request, rgw_crypt_require_ssl is set" << dendl; + ldpp_dout(s, 5) << "ERROR: insecure request, rgw_crypt_require_ssl is set" << dendl; return -ERR_INVALID_REQUEST; } @@ -1032,7 +1032,7 @@ int rgw_s3_prepare_encrypt(struct req_state* s, std::string_view key_id = get_crypt_attribute(s->info.env, parts, X_AMZ_SERVER_SIDE_ENCRYPTION_AWS_KMS_KEY_ID); if (key_id.empty()) { - ldout(s->cct, 5) << "ERROR: not provide a valid key id" << dendl; + ldpp_dout(s, 5) << "ERROR: not provide a valid key id" << dendl; s->err.message = "Server Side Encryption with KMS managed key requires " "HTTP header x-amz-server-side-encryption-aws-kms-key-id"; return -ERR_INVALID_ACCESS_KEY; @@ -1046,12 +1046,12 @@ int rgw_s3_prepare_encrypt(struct req_state* s, std::string actual_key; res = make_actual_key_from_kms(s->cct, attrs, actual_key); if (res != 0) { - ldout(s->cct, 5) << "ERROR: failed to retrieve actual key from key_id: " << key_id << dendl; + ldpp_dout(s, 5) << "ERROR: failed to retrieve actual key from key_id: " << key_id << dendl; s->err.message = "Failed to retrieve the actual key, kms-keyid: " + std::string(key_id); return res; } if (actual_key.size() != AES_256_KEYSIZE) { - ldout(s->cct, 5) << "ERROR: key obtained from key_id:" << + ldpp_dout(s, 5) << "ERROR: key obtained from key_id:" << key_id << " is not 256 bit size" << dendl; s->err.message = "KMS provided an invalid key for the given kms-keyid."; 
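// ---------------------------------------------------------------------------
// Illustrative sketch only, not part of the patch. In these rgw_crypt.cc hunks
// the req_state pointer `s` is itself the DoutPrefixProvider, so the former
// ldout(s->cct, N) calls become ldpp_dout(s, N) with the message unchanged. A
// hypothetical helper showing the same call shape (the function name and its
// check are invented for illustration; it assumes the usual rgw includes):

#include <string_view>
#include "common/dout.h"
#include "rgw_common.h"   // req_state, ERR_INVALID_ENCRYPTION_ALGORITHM

static int check_customer_algorithm(struct req_state* s, std::string_view alg)
{
  if (alg != "AES256") {
    // request-prefixed output instead of a bare ldout(s->cct, 5)
    ldpp_dout(s, 5) << "ERROR: The requested encryption algorithm is not valid,"
                    << " must be AES256." << dendl;
    return -ERR_INVALID_ENCRYPTION_ALGORITHM;
  }
  return 0;
}
// ---------------------------------------------------------------------------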
return -ERR_INVALID_ACCESS_KEY; @@ -1071,7 +1071,7 @@ int rgw_s3_prepare_encrypt(struct req_state* s, } else if (req_sse == "AES256") { /* if a default encryption key was provided, we will use it for SSE-S3 */ } else { - ldout(s->cct, 5) << "ERROR: Invalid value for header x-amz-server-side-encryption" + ldpp_dout(s, 5) << "ERROR: Invalid value for header x-amz-server-side-encryption" << dendl; s->err.message = "Server Side Encryption with KMS managed key requires " "HTTP header x-amz-server-side-encryption : aws:kms or AES256"; @@ -1083,7 +1083,7 @@ int rgw_s3_prepare_encrypt(struct req_state* s, get_crypt_attribute(s->info.env, parts, X_AMZ_SERVER_SIDE_ENCRYPTION_AWS_KMS_KEY_ID); if (!key_id.empty()) { - ldout(s->cct, 5) << "ERROR: SSE-KMS encryption request is missing the header " + ldpp_dout(s, 5) << "ERROR: SSE-KMS encryption request is missing the header " << "x-amz-server-side-encryption" << dendl; s->err.message = "Server Side Encryption with KMS managed key requires " @@ -1098,7 +1098,7 @@ int rgw_s3_prepare_encrypt(struct req_state* s, try { master_encryption_key = from_base64(s->cct->_conf->rgw_crypt_default_encryption_key); } catch (...) { - ldout(s->cct, 5) << "ERROR: rgw_s3_prepare_encrypt invalid default encryption key " + ldpp_dout(s, 5) << "ERROR: rgw_s3_prepare_encrypt invalid default encryption key " << "which contains character that is not base64 encoded." << dendl; s->err.message = "Requests specifying Server Side Encryption with Customer " @@ -1107,7 +1107,7 @@ int rgw_s3_prepare_encrypt(struct req_state* s, } if (master_encryption_key.size() != 256 / 8) { - ldout(s->cct, 0) << "ERROR: failed to decode 'rgw crypt default encryption key' to 256 bit string" << dendl; + ldpp_dout(s, 0) << "ERROR: failed to decode 'rgw crypt default encryption key' to 256 bit string" << dendl; /* not an error to return; missing encryption does not inhibit processing */ return 0; } @@ -1145,7 +1145,7 @@ int rgw_s3_prepare_decrypt(struct req_state* s, { int res = 0; std::string stored_mode = get_str_attribute(attrs, RGW_ATTR_CRYPT_MODE); - ldout(s->cct, 15) << "Encryption mode: " << stored_mode << dendl; + ldpp_dout(s, 15) << "Encryption mode: " << stored_mode << dendl; const char *req_sse = s->info.env->get("HTTP_X_AMZ_SERVER_SIDE_ENCRYPTION", NULL); if (nullptr != req_sse && (s->op == OP_GET || s->op == OP_HEAD)) { @@ -1155,21 +1155,21 @@ int rgw_s3_prepare_decrypt(struct req_state* s, if (stored_mode == "SSE-C-AES256") { if (s->cct->_conf->rgw_crypt_require_ssl && !rgw_transport_is_secure(s->cct, *s->info.env)) { - ldout(s->cct, 5) << "ERROR: Insecure request, rgw_crypt_require_ssl is set" << dendl; + ldpp_dout(s, 5) << "ERROR: Insecure request, rgw_crypt_require_ssl is set" << dendl; return -ERR_INVALID_REQUEST; } const char *req_cust_alg = s->info.env->get("HTTP_X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM", NULL); if (nullptr == req_cust_alg) { - ldout(s->cct, 5) << "ERROR: Request for SSE-C encrypted object missing " + ldpp_dout(s, 5) << "ERROR: Request for SSE-C encrypted object missing " << "x-amz-server-side-encryption-customer-algorithm" << dendl; s->err.message = "Requests specifying Server Side Encryption with Customer " "provided keys must provide a valid encryption algorithm."; return -EINVAL; } else if (strcmp(req_cust_alg, "AES256") != 0) { - ldout(s->cct, 5) << "ERROR: The requested encryption algorithm is not valid, must be AES256." << dendl; + ldpp_dout(s, 5) << "ERROR: The requested encryption algorithm is not valid, must be AES256." 
<< dendl; s->err.message = "The requested encryption algorithm is not valid, must be AES256."; return -ERR_INVALID_ENCRYPTION_ALGORITHM; } @@ -1178,7 +1178,7 @@ int rgw_s3_prepare_decrypt(struct req_state* s, try { key_bin = from_base64(s->info.env->get("HTTP_X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY", "")); } catch (...) { - ldout(s->cct, 5) << "ERROR: rgw_s3_prepare_decrypt invalid encryption key " + ldpp_dout(s, 5) << "ERROR: rgw_s3_prepare_decrypt invalid encryption key " << "which contains character that is not base64 encoded." << dendl; s->err.message = "Requests specifying Server Side Encryption with Customer " @@ -1187,7 +1187,7 @@ int rgw_s3_prepare_decrypt(struct req_state* s, } if (key_bin.size() != AES_256_CBC::AES_256_KEYSIZE) { - ldout(s->cct, 5) << "ERROR: Invalid encryption key size" << dendl; + ldpp_dout(s, 5) << "ERROR: Invalid encryption key size" << dendl; s->err.message = "Requests specifying Server Side Encryption with Customer " "provided keys must provide an appropriate secret key."; return -EINVAL; @@ -1199,7 +1199,7 @@ int rgw_s3_prepare_decrypt(struct req_state* s, try { keymd5_bin = from_base64(keymd5); } catch (...) { - ldout(s->cct, 5) << "ERROR: rgw_s3_prepare_decrypt invalid encryption key md5 " + ldpp_dout(s, 5) << "ERROR: rgw_s3_prepare_decrypt invalid encryption key md5 " << "which contains character that is not base64 encoded." << dendl; s->err.message = "Requests specifying Server Side Encryption with Customer " @@ -1209,7 +1209,7 @@ int rgw_s3_prepare_decrypt(struct req_state* s, if (keymd5_bin.size() != CEPH_CRYPTO_MD5_DIGESTSIZE) { - ldout(s->cct, 5) << "ERROR: Invalid key md5 size " << dendl; + ldpp_dout(s, 5) << "ERROR: Invalid key md5 size " << dendl; s->err.message = "Requests specifying Server Side Encryption with Customer " "provided keys must provide an appropriate secret key md5."; return -EINVAL; @@ -1237,7 +1237,7 @@ int rgw_s3_prepare_decrypt(struct req_state* s, if (stored_mode == "SSE-KMS") { if (s->cct->_conf->rgw_crypt_require_ssl && !rgw_transport_is_secure(s->cct, *s->info.env)) { - ldout(s->cct, 5) << "ERROR: Insecure request, rgw_crypt_require_ssl is set" << dendl; + ldpp_dout(s, 5) << "ERROR: Insecure request, rgw_crypt_require_ssl is set" << dendl; return -ERR_INVALID_REQUEST; } /* try to retrieve actual key */ @@ -1245,12 +1245,12 @@ int rgw_s3_prepare_decrypt(struct req_state* s, std::string actual_key; res = reconstitute_actual_key_from_kms(s->cct, attrs, actual_key); if (res != 0) { - ldout(s->cct, 10) << "ERROR: failed to retrieve actual key from key_id: " << key_id << dendl; + ldpp_dout(s, 10) << "ERROR: failed to retrieve actual key from key_id: " << key_id << dendl; s->err.message = "Failed to retrieve the actual key, kms-keyid: " + key_id; return res; } if (actual_key.size() != AES_256_KEYSIZE) { - ldout(s->cct, 0) << "ERROR: key obtained from key_id:" << + ldpp_dout(s, 0) << "ERROR: key obtained from key_id:" << key_id << " is not 256 bit size" << dendl; s->err.message = "KMS provided an invalid key for the given kms-keyid."; return -ERR_INVALID_ACCESS_KEY; @@ -1271,7 +1271,7 @@ int rgw_s3_prepare_decrypt(struct req_state* s, try { master_encryption_key = from_base64(std::string(s->cct->_conf->rgw_crypt_default_encryption_key)); } catch (...) { - ldout(s->cct, 5) << "ERROR: rgw_s3_prepare_decrypt invalid default encryption key " + ldpp_dout(s, 5) << "ERROR: rgw_s3_prepare_decrypt invalid default encryption key " << "which contains character that is not base64 encoded." 
<< dendl; s->err.message = "The default encryption key is not valid base64."; @@ -1279,12 +1279,12 @@ int rgw_s3_prepare_decrypt(struct req_state* s, } if (master_encryption_key.size() != 256 / 8) { - ldout(s->cct, 0) << "ERROR: failed to decode 'rgw crypt default encryption key' to 256 bit string" << dendl; + ldpp_dout(s, 0) << "ERROR: failed to decode 'rgw crypt default encryption key' to 256 bit string" << dendl; return -EIO; } std::string attr_key_selector = get_str_attribute(attrs, RGW_ATTR_CRYPT_KEYSEL); if (attr_key_selector.size() != AES_256_CBC::AES_256_KEYSIZE) { - ldout(s->cct, 0) << "ERROR: missing or invalid " RGW_ATTR_CRYPT_KEYSEL << dendl; + ldpp_dout(s, 0) << "ERROR: missing or invalid " RGW_ATTR_CRYPT_KEYSEL << dendl; return -EIO; } uint8_t actual_key[AES_256_KEYSIZE]; diff --git a/src/rgw/rgw_crypt.h b/src/rgw/rgw_crypt.h index f397941632e0b..ff221549d6fde 100644 --- a/src/rgw/rgw_crypt.h +++ b/src/rgw/rgw_crypt.h @@ -115,7 +115,7 @@ public: off_t bl_len) override; virtual int flush() override; - int read_manifest(bufferlist& manifest_bl); + int read_manifest(const DoutPrefixProvider *dpp, bufferlist& manifest_bl); }; /* RGWGetObj_BlockDecrypt */ diff --git a/src/rgw/rgw_data_sync.cc b/src/rgw/rgw_data_sync.cc index 9b179aa6e17b8..4f9dad49869be 100644 --- a/src/rgw/rgw_data_sync.cc +++ b/src/rgw/rgw_data_sync.cc @@ -95,7 +95,7 @@ bool RGWReadDataSyncStatusMarkersCR::spawn_next() return false; } using CR = RGWSimpleRadosReadCR; - spawn(new CR(env->async_rados, env->svc->sysobj, + spawn(new CR(env->dpp, env->async_rados, env->svc->sysobj, rgw_raw_obj(env->svc->zone->get_zone_params().log_pool, RGWDataSyncStatusManager::shard_obj_name(sc->source_zone, shard_id)), &markers[shard_id]), false); @@ -150,22 +150,22 @@ public: rgw_data_sync_status *_status) : RGWCoroutine(_sc->cct), sc(_sc), sync_env(sc->env), sync_status(_status) {} - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int RGWReadDataSyncStatusCoroutine::operate() +int RGWReadDataSyncStatusCoroutine::operate(const DoutPrefixProvider *dpp) { reenter(this) { // read sync info using ReadInfoCR = RGWSimpleRadosReadCR; yield { bool empty_on_enoent = false; // fail on ENOENT - call(new ReadInfoCR(sync_env->async_rados, sync_env->svc->sysobj, + call(new ReadInfoCR(dpp, sync_env->async_rados, sync_env->svc->sysobj, rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, RGWDataSyncStatusManager::sync_status_oid(sc->source_zone)), &sync_status->sync_info, empty_on_enoent)); } if (retcode < 0) { - ldout(sync_env->cct, 4) << "failed to read sync status info with " + ldpp_dout(dpp, 4) << "failed to read sync status info with " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); } @@ -174,7 +174,7 @@ int RGWReadDataSyncStatusCoroutine::operate() yield call(new ReadMarkersCR(sc, sync_status->sync_info.num_shards, sync_status->sync_markers)); if (retcode < 0) { - ldout(sync_env->cct, 4) << "failed to read sync status markers with " + ldpp_dout(dpp, 4) << "failed to read sync status markers with " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); } @@ -208,7 +208,7 @@ public: } } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { yield { char buf[16]; @@ -224,9 +224,9 @@ public: init_new_io(http_op); - int ret = http_op->aio_read(); + int ret = http_op->aio_read(dpp); if (ret < 0) { - ldout(sync_env->cct, 0) << "ERROR: failed to read from " << p << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to read from " << p << 
dendl; log_error() << "failed to send http operation: " << http_op->to_str() << " ret=" << ret << std::endl; return set_cr_error(ret); } @@ -289,7 +289,7 @@ public: } } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { yield { char buf[16]; @@ -309,9 +309,9 @@ public: if (sync_env->counters) { timer.emplace(sync_env->counters, sync_counters::l_poll); } - int ret = http_op->aio_read(); + int ret = http_op->aio_read(dpp); if (ret < 0) { - ldout(sync_env->cct, 0) << "ERROR: failed to read from " << p << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to read from " << p << dendl; log_error() << "failed to send http operation: " << http_op->to_str() << " ret=" << ret << std::endl; if (sync_env->counters) { sync_env->counters->inc(sync_counters::l_poll_err); @@ -386,7 +386,7 @@ public: : RGWSimpleCoroutine(sc->cct), sc(sc), sync_env(sc->env), http_op(NULL), shard_id(_shard_id), marker(_marker), max_entries(_max_entries), result(_result) {} - int send_request() override { + int send_request(const DoutPrefixProvider *dpp) override { RGWRESTConn *conn = sc->conn; char buf[32]; @@ -408,9 +408,9 @@ public: http_op = new RGWRESTReadResource(conn, p, pairs, NULL, sync_env->http_manager); init_new_io(http_op); - int ret = http_op->aio_read(); + int ret = http_op->aio_read(dpp); if (ret < 0) { - ldout(sync_env->cct, 0) << "ERROR: failed to read from " << p << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to read from " << p << dendl; log_error() << "failed to send http operation: " << http_op->to_str() << " ret=" << ret << std::endl; http_op->put(); return ret; @@ -423,7 +423,7 @@ public: int ret = http_op->wait(result, null_yield); http_op->put(); if (ret < 0 && ret != -ENOENT) { - ldout(sync_env->cct, 0) << "ERROR: failed to list remote datalog shard, ret=" << ret << dendl; + ldpp_dout(sync_env->dpp, 0) << "ERROR: failed to list remote datalog shard, ret=" << ret << dendl; return ret; } return 0; @@ -503,7 +503,7 @@ public: } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { int ret; reenter(this) { using LockCR = RGWSimpleRadosLockCR; @@ -515,7 +515,7 @@ public: return set_cr_error(retcode); } using WriteInfoCR = RGWSimpleRadosWriteCR; - yield call(new WriteInfoCR(sync_env->async_rados, sync_env->svc->sysobj, + yield call(new WriteInfoCR(dpp, sync_env->async_rados, sync_env->svc->sysobj, rgw_raw_obj{pool, sync_status_oid}, status->sync_info)); if (retcode < 0) { @@ -560,7 +560,7 @@ public: marker.timestamp = info.last_update; const auto& oid = RGWDataSyncStatusManager::shard_obj_name(sc->source_zone, i); using WriteMarkerCR = RGWSimpleRadosWriteCR; - spawn(new WriteMarkerCR(sync_env->async_rados, sync_env->svc->sysobj, + spawn(new WriteMarkerCR(dpp, sync_env->async_rados, sync_env->svc->sysobj, rgw_raw_obj{pool, oid}, marker), true); } } @@ -573,7 +573,7 @@ public: } status->sync_info.state = rgw_data_sync_info::StateBuildingFullSyncMaps; - yield call(new WriteInfoCR(sync_env->async_rados, sync_env->svc->sysobj, + yield call(new WriteInfoCR(dpp, sync_env->async_rados, sync_env->svc->sysobj, rgw_raw_obj{pool, sync_status_oid}, status->sync_info)); if (retcode < 0) { @@ -602,12 +602,12 @@ RGWRemoteDataLog::RGWRemoteDataLog(const DoutPrefixProvider *dpp, { } -int RGWRemoteDataLog::read_log_info(rgw_datalog_info *log_info) +int RGWRemoteDataLog::read_log_info(const DoutPrefixProvider *dpp, rgw_datalog_info *log_info) { rgw_http_param_pair pairs[] = { { "type", "data" }, { NULL, NULL } }; - int ret = 
sc.conn->get_json_resource("/admin/log", pairs, null_yield, *log_info); + int ret = sc.conn->get_json_resource(dpp, "/admin/log", pairs, null_yield, *log_info); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: failed to fetch datalog info" << dendl; return ret; @@ -618,20 +618,20 @@ int RGWRemoteDataLog::read_log_info(rgw_datalog_info *log_info) return 0; } -int RGWRemoteDataLog::read_source_log_shards_info(map *shards_info) +int RGWRemoteDataLog::read_source_log_shards_info(const DoutPrefixProvider *dpp, map *shards_info) { rgw_datalog_info log_info; - int ret = read_log_info(&log_info); + int ret = read_log_info(dpp, &log_info); if (ret < 0) { return ret; } - return run(new RGWReadRemoteDataLogInfoCR(&sc, log_info.num_shards, shards_info)); + return run(dpp, new RGWReadRemoteDataLogInfoCR(&sc, log_info.num_shards, shards_info)); } -int RGWRemoteDataLog::read_source_log_shards_next(map shard_markers, map *result) +int RGWRemoteDataLog::read_source_log_shards_next(const DoutPrefixProvider *dpp, map shard_markers, map *result) { - return run(new RGWListRemoteDataLogCR(&sc, shard_markers, 1, result)); + return run(dpp, new RGWListRemoteDataLogCR(&sc, shard_markers, 1, result)); } int RGWRemoteDataLog::init(const rgw_zone_id& _source_zone, RGWRESTConn *_conn, RGWSyncErrorLogger *_error_logger, @@ -664,7 +664,7 @@ void RGWRemoteDataLog::finish() stop(); } -int RGWRemoteDataLog::read_sync_status(rgw_data_sync_status *sync_status) +int RGWRemoteDataLog::read_sync_status(const DoutPrefixProvider *dpp, rgw_data_sync_status *sync_status) { // cannot run concurrently with run_sync(), so run in a separate manager RGWCoroutinesManager crs(cct, cr_registry); @@ -680,12 +680,12 @@ int RGWRemoteDataLog::read_sync_status(rgw_data_sync_status *sync_status) RGWDataSyncCtx sc_local = sc; sc_local.env = &sync_env_local; - ret = crs.run(new RGWReadDataSyncStatusCoroutine(&sc_local, sync_status)); + ret = crs.run(dpp, new RGWReadDataSyncStatusCoroutine(&sc_local, sync_status)); http_manager.stop(); return ret; } -int RGWRemoteDataLog::read_recovering_shards(const int num_shards, set& recovering_shards) +int RGWRemoteDataLog::read_recovering_shards(const DoutPrefixProvider *dpp, const int num_shards, set& recovering_shards) { // cannot run concurrently with run_sync(), so run in a separate manager RGWCoroutinesManager crs(cct, cr_registry); @@ -705,7 +705,7 @@ int RGWRemoteDataLog::read_recovering_shards(const int num_shards, set& rec omapkeys.resize(num_shards); uint64_t max_entries{1}; - ret = crs.run(new RGWReadDataSyncRecoveringShardsCR(&sc_local, max_entries, num_shards, omapkeys)); + ret = crs.run(dpp, new RGWReadDataSyncRecoveringShardsCR(&sc_local, max_entries, num_shards, omapkeys)); http_manager.stop(); if (ret == 0) { @@ -719,7 +719,7 @@ int RGWRemoteDataLog::read_recovering_shards(const int num_shards, set& rec return ret; } -int RGWRemoteDataLog::init_sync_status(int num_shards) +int RGWRemoteDataLog::init_sync_status(const DoutPrefixProvider *dpp, int num_shards) { rgw_data_sync_status sync_status; sync_status.sync_info.num_shards = num_shards; @@ -736,7 +736,7 @@ int RGWRemoteDataLog::init_sync_status(int num_shards) auto instance_id = ceph::util::generate_random_number(); RGWDataSyncCtx sc_local = sc; sc_local.env = &sync_env_local; - ret = crs.run(new RGWInitDataSyncStatusCoroutine(&sc_local, num_shards, instance_id, tn, &sync_status)); + ret = crs.run(dpp, new RGWInitDataSyncStatusCoroutine(&sc_local, num_shards, instance_id, tn, &sync_status)); http_manager.stop(); return ret; } @@ -821,7 +821,7 @@ 
public: delete entries_index; } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { entries_index = new RGWShardedOmapCRManager(sync_env->async_rados, store, this, num_shards, sync_env->svc->zone->get_zone_params().log_pool, @@ -840,12 +840,12 @@ public: entrypoint, pairs, &result)); } if (retcode < 0) { - ldout(sync_env->cct, 0) << "ERROR: failed to fetch metadata for section bucket.instance" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to fetch metadata for section bucket.instance" << dendl; return set_cr_error(retcode); } for (iter = result.keys.begin(); iter != result.keys.end(); ++iter) { - ldout(sync_env->cct, 20) << "list metadata: section=bucket.instance key=" << *iter << dendl; + ldpp_dout(dpp, 20) << "list metadata: section=bucket.instance key=" << *iter << dendl; key = *iter; yield { @@ -880,18 +880,18 @@ public: int shard_id = (int)iter->first; rgw_data_sync_marker& marker = iter->second; marker.total_entries = entries_index->get_total_entries(shard_id); - spawn(new RGWSimpleRadosWriteCR(sync_env->async_rados, sync_env->svc->sysobj, + spawn(new RGWSimpleRadosWriteCR(dpp, sync_env->async_rados, sync_env->svc->sysobj, rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, RGWDataSyncStatusManager::shard_obj_name(sc->source_zone, shard_id)), marker), true); } } else { - yield call(sync_env->error_logger->log_error_cr(sc->conn->get_remote_id(), "data.init", "", + yield call(sync_env->error_logger->log_error_cr(dpp, sc->conn->get_remote_id(), "data.init", "", EIO, string("failed to build bucket instances map"))); } while (collect(&ret, NULL)) { if (ret < 0) { - yield call(sync_env->error_logger->log_error_cr(sc->conn->get_remote_id(), "data.init", "", + yield call(sync_env->error_logger->log_error_cr(dpp, sc->conn->get_remote_id(), "data.init", "", -ret, string("failed to store sync status: ") + cpp_strerror(-ret))); req_ret = ret; } @@ -934,7 +934,7 @@ public: tn->log(20, SSTR("updating marker marker_oid=" << marker_oid << " marker=" << new_marker)); - return new RGWSimpleRadosWriteCR(sync_env->async_rados, sync_env->svc->sysobj, + return new RGWSimpleRadosWriteCR(sync_env->dpp, sync_env->async_rados, sync_env->svc->sysobj, rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, marker_oid), sync_marker); } @@ -1015,7 +1015,7 @@ public: SSTR(bucket_shard_str{_sync_pair.dest_bs} << "<-" << bucket_shard_str{_sync_pair.source_bs} ))) { } - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; struct all_bucket_info { @@ -1197,7 +1197,7 @@ public: ~RGWRunBucketsSyncBySourceCR() override { } - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; class RGWRunBucketSourcesSyncCR : public RGWCoroutine { @@ -1241,7 +1241,7 @@ public: const RGWSyncTraceNodeRef& _tn_parent, ceph::real_time* progress); - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; void handle_complete_stack(uint64_t stack_id) { auto iter = shard_progress.find(stack_id); @@ -1292,7 +1292,7 @@ public: tn = sync_env->sync_tracer->add_node(_tn_parent, "entry", obligation.key); } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { if (state->obligation) { // this is already syncing in another DataSyncSingleEntryCR @@ -1351,7 +1351,7 @@ public: if (sync_status < 0) { // write actual sync failures for 'radosgw-admin sync error list' if (sync_status != -EBUSY && sync_status != -EAGAIN) { - yield 
call(sync_env->error_logger->log_error_cr(sc->conn->get_remote_id(), "data", complete->key, + yield call(sync_env->error_logger->log_error_cr(dpp, sc->conn->get_remote_id(), "data", complete->key, -sync_status, string("failed to sync bucket instance: ") + cpp_strerror(-sync_status))); if (retcode < 0) { tn->log(0, SSTR("ERROR: failed to log sync failure: retcode=" << retcode)); @@ -1494,7 +1494,7 @@ public: modified_shards.insert(keys.begin(), keys.end()); } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { int r; while (true) { switch (sync_marker.state) { @@ -1615,7 +1615,7 @@ public: sync_marker.state = rgw_data_sync_marker::IncrementalSync; sync_marker.marker = sync_marker.next_step_marker; sync_marker.next_step_marker.clear(); - call(new RGWSimpleRadosWriteCR(sync_env->async_rados, sync_env->svc->sysobj, + call(new RGWSimpleRadosWriteCR(sync_env->dpp, sync_env->async_rados, sync_env->svc->sysobj, rgw_raw_obj(pool, status_oid), sync_marker)); } @@ -1812,7 +1812,7 @@ public: } RGWCoroutine *alloc_finisher_cr() override { - return new RGWSimpleRadosReadCR(sync_env->async_rados, sync_env->svc->sysobj, + return new RGWSimpleRadosReadCR(sync_env->dpp, sync_env->async_rados, sync_env->svc->sysobj, rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, RGWDataSyncStatusManager::shard_obj_name(sc->source_zone, shard_id)), &sync_marker); } @@ -1859,7 +1859,7 @@ public: } } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { /* read sync status */ @@ -1945,7 +1945,7 @@ public: } RGWCoroutine *set_sync_info_cr() { - return new RGWSimpleRadosWriteCR(sync_env->async_rados, sync_env->svc->sysobj, + return new RGWSimpleRadosWriteCR(sync_env->dpp, sync_env->async_rados, sync_env->svc->sysobj, rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, RGWDataSyncStatusManager::sync_status_oid(sc->source_zone)), sync_status.sync_info); } @@ -2023,7 +2023,7 @@ class RGWUserPermHandler { int operate() override { auto user_ctl = sync_env->store->getRados()->ctl.user; - ret = user_ctl->get_info_by_uid(uid, &info->user_info, null_yield); + ret = user_ctl->get_info_by_uid(sync_env->dpp, uid, &info->user_info, null_yield); if (ret < 0) { return ret; } @@ -2036,7 +2036,7 @@ class RGWUserPermHandler { map uattrs; - ret = user_ctl->get_attrs_by_uid(uid, &uattrs, null_yield); + ret = user_ctl->get_attrs_by_uid(sync_env->dpp, uid, &uattrs, null_yield); if (ret == 0) { ret = RGWUserPermHandler::policy_from_attrs(sync_env->cct, uattrs, &info->user_acl); } @@ -2311,7 +2311,7 @@ public: } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { #define MAX_RACE_RETRIES_OBJ_FETCH 10 @@ -2432,7 +2432,7 @@ public: key, dest_key, versioned_epoch, true, std::static_pointer_cast(filter), - zones_trace, sync_env->counters, sync_env->dpp)); + zones_trace, sync_env->counters, dpp)); } if (retcode < 0) { if (*need_retry) { @@ -2461,7 +2461,7 @@ RGWCoroutine *RGWDefaultDataSyncModule::remove_object(RGWDataSyncCtx *sc, rgw_bu real_time& mtime, bool versioned, uint64_t versioned_epoch, rgw_zone_set *zones_trace) { auto sync_env = sc->env; - return new RGWRemoveObjCR(sync_env->async_rados, sync_env->store, sc->source_zone, + return new RGWRemoveObjCR(sync_env->dpp, sync_env->async_rados, sync_env->store, sc->source_zone, sync_pipe.dest_bucket_info, key, versioned, versioned_epoch, NULL, NULL, false, &mtime, zones_trace); } @@ -2470,7 +2470,7 @@ RGWCoroutine 
*RGWDefaultDataSyncModule::create_delete_marker(RGWDataSyncCtx *sc, rgw_bucket_entry_owner& owner, bool versioned, uint64_t versioned_epoch, rgw_zone_set *zones_trace) { auto sync_env = sc->env; - return new RGWRemoveObjCR(sync_env->async_rados, sync_env->store, sc->source_zone, + return new RGWRemoveObjCR(sync_env->dpp, sync_env->async_rados, sync_env->store, sc->source_zone, sync_pipe.dest_bucket_info, key, versioned, versioned_epoch, &owner.id, &owner.display_name, true, &mtime, zones_trace); } @@ -2514,9 +2514,9 @@ RGWCoroutine *RGWArchiveDataSyncModule::sync_object(RGWDataSyncCtx *sc, rgw_buck (sync_pipe.dest_bucket_info.flags & BUCKET_VERSIONS_SUSPENDED)) { ldout(sc->cct, 0) << "SYNC_ARCHIVE: sync_object: enabling object versioning for archive bucket" << dendl; sync_pipe.dest_bucket_info.flags = (sync_pipe.dest_bucket_info.flags & ~BUCKET_VERSIONS_SUSPENDED) | BUCKET_VERSIONED; - int op_ret = sync_env->store->getRados()->put_bucket_instance_info(sync_pipe.dest_bucket_info, false, real_time(), NULL); + int op_ret = sync_env->store->getRados()->put_bucket_instance_info(sync_pipe.dest_bucket_info, false, real_time(), NULL, sync_env->dpp); if (op_ret < 0) { - ldout(sc->cct, 0) << "SYNC_ARCHIVE: sync_object: error versioning archive bucket" << dendl; + ldpp_dout(sync_env->dpp, 0) << "SYNC_ARCHIVE: sync_object: error versioning archive bucket" << dendl; return NULL; } } @@ -2547,7 +2547,7 @@ RGWCoroutine *RGWArchiveDataSyncModule::create_delete_marker(RGWDataSyncCtx *sc, ldout(sc->cct, 0) << "SYNC_ARCHIVE: create_delete_marker: b=" << sync_pipe.info.source_bs.bucket << " k=" << key << " mtime=" << mtime << " versioned=" << versioned << " versioned_epoch=" << versioned_epoch << dendl; auto sync_env = sc->env; - return new RGWRemoveObjCR(sync_env->async_rados, sync_env->store, sc->source_zone, + return new RGWRemoveObjCR(sync_env->dpp, sync_env->async_rados, sync_env->store, sc->source_zone, sync_pipe.dest_bucket_info, key, versioned, versioned_epoch, &owner.id, &owner.display_name, true, &mtime, zones_trace); } @@ -2602,14 +2602,14 @@ void RGWRemoteDataLog::wakeup(int shard_id, set& keys) { data_sync_cr->wakeup(shard_id, keys); } -int RGWRemoteDataLog::run_sync(int num_shards) +int RGWRemoteDataLog::run_sync(const DoutPrefixProvider *dpp, int num_shards) { lock.lock(); data_sync_cr = new RGWDataSyncControlCR(&sc, num_shards, tn); data_sync_cr->get(); // run() will drop a ref, so take another lock.unlock(); - int r = run(data_sync_cr); + int r = run(dpp, data_sync_cr); lock.lock(); data_sync_cr->put(); @@ -2628,7 +2628,7 @@ CephContext *RGWDataSyncStatusManager::get_cct() const return store->ctx(); } -int RGWDataSyncStatusManager::init() +int RGWDataSyncStatusManager::init(const DoutPrefixProvider *dpp) { RGWZone *zone_def; @@ -2664,7 +2664,7 @@ int RGWDataSyncStatusManager::init() } rgw_datalog_info datalog_info; - r = source_log.read_log_info(&datalog_info); + r = source_log.read_log_info(dpp, &datalog_info); if (r < 0) { ldpp_dout(this, 5) << "ERROR: master.read_log_info() returned r=" << r << dendl; finalize(); @@ -2727,7 +2727,7 @@ public: : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env), instance_key(bs.get_key()), info(_info) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { yield { rgw_http_param_pair pairs[] = { { "type" , "bucket-index" }, @@ -2768,7 +2768,7 @@ public: status(_status), objv_tracker(objv_tracker) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { /* fetch 
current position in logs */ yield call(new RGWReadRemoteBucketIndexLogInfoCR(sc, sync_pair.source_bs, &info)); @@ -2806,7 +2806,7 @@ public: if (write_status) { map attrs; status.encode_all_attrs(attrs); - call(new RGWSimpleRadosWriteAttrsCR(sync_env->async_rados, sync_env->svc->sysobj, obj, attrs, &objv_tracker)); + call(new RGWSimpleRadosWriteAttrsCR(dpp, sync_env->async_rados, sync_env->svc->sysobj, obj, attrs, &objv_tracker)); } else { call(new RGWRadosRemoveCR(store, obj, &objv_tracker)); } @@ -2941,13 +2941,13 @@ public: oid(RGWBucketPipeSyncStatusManager::status_oid(sc->source_zone, sync_pair)), status(_status), objv_tracker(objv_tracker) {} - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int RGWReadBucketPipeSyncStatusCoroutine::operate() +int RGWReadBucketPipeSyncStatusCoroutine::operate(const DoutPrefixProvider *dpp) { reenter(this) { - yield call(new RGWSimpleRadosReadAttrsCR(sync_env->async_rados, sync_env->svc->sysobj, + yield call(new RGWSimpleRadosReadAttrsCR(dpp, sync_env->async_rados, sync_env->svc->sysobj, rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, oid), &attrs, true, objv_tracker)); if (retcode == -ENOENT) { @@ -2955,7 +2955,7 @@ int RGWReadBucketPipeSyncStatusCoroutine::operate() return set_cr_done(); } if (retcode < 0) { - ldout(sync_env->cct, 0) << "ERROR: failed to call fetch bucket shard info oid=" << oid << " ret=" << retcode << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to call fetch bucket shard info oid=" << oid << " ret=" << retcode << dendl; return set_cr_error(retcode); } status->decode_from_attrs(sync_env->cct, attrs); @@ -2992,10 +2992,10 @@ public: error_oid = RGWDataSyncStatusManager::shard_obj_name(sc->source_zone, shard_id) + ".retry"; } - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int RGWReadRecoveringBucketShardsCoroutine::operate() +int RGWReadRecoveringBucketShardsCoroutine::operate(const DoutPrefixProvider *dpp) { reenter(this){ //read recovering bucket shards @@ -3010,7 +3010,7 @@ int RGWReadRecoveringBucketShardsCoroutine::operate() } if (retcode < 0) { - ldout(sync_env->cct, 0) << "failed to read recovering bucket shards with " + ldpp_dout(dpp, 0) << "failed to read recovering bucket shards with " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); } @@ -3062,19 +3062,19 @@ public: status_oid = RGWDataSyncStatusManager::shard_obj_name(sc->source_zone, shard_id); } - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int RGWReadPendingBucketShardsCoroutine::operate() +int RGWReadPendingBucketShardsCoroutine::operate(const DoutPrefixProvider *dpp) { reenter(this){ //read sync status marker using CR = RGWSimpleRadosReadCR; - yield call(new CR(sync_env->async_rados, sync_env->svc->sysobj, + yield call(new CR(dpp, sync_env->async_rados, sync_env->svc->sysobj, rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, status_oid), sync_marker)); if (retcode < 0) { - ldout(sync_env->cct,0) << "failed to read sync status marker with " + ldpp_dout(dpp, 0) << "failed to read sync status marker with " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); } @@ -3091,7 +3091,7 @@ int RGWReadPendingBucketShardsCoroutine::operate() } if (retcode < 0) { - ldout(sync_env->cct,0) << "failed to read remote data log info with " + ldpp_dout(dpp, 0) << "failed to read remote data log info with " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); } @@ -3112,7 +3112,7 @@ int 
RGWReadPendingBucketShardsCoroutine::operate() return 0; } -int RGWRemoteDataLog::read_shard_status(int shard_id, set& pending_buckets, set& recovering_buckets, rgw_data_sync_marker *sync_marker, const int max_entries) +int RGWRemoteDataLog::read_shard_status(const DoutPrefixProvider *dpp, int shard_id, set& pending_buckets, set& recovering_buckets, rgw_data_sync_marker *sync_marker, const int max_entries) { // cannot run concurrently with run_sync(), so run in a separate manager RGWCoroutinesManager crs(store->ctx(), store->getRados()->get_cr_registry()); @@ -3133,7 +3133,7 @@ int RGWRemoteDataLog::read_shard_status(int shard_id, set& pending_bucke RGWCoroutinesStack* pending_stack = new RGWCoroutinesStack(store->ctx(), &crs); pending_stack->call(new RGWReadPendingBucketShardsCoroutine(&sc_local, shard_id, pending_buckets, sync_marker, max_entries)); stacks.push_back(pending_stack); - ret = crs.run(stacks); + ret = crs.run(dpp, stacks); http_manager.stop(); return ret; } @@ -3269,7 +3269,7 @@ public: instance_key(bs.get_key()), marker_position(_marker_position), result(_result) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { yield { rgw_http_param_pair pairs[] = { { "rgwx-bucket-instance", instance_key.c_str() }, @@ -3307,7 +3307,7 @@ public: : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env), instance_key(bs.get_key()), marker(_marker), result(_result) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { if (sync_env->counters) { timer.emplace(sync_env->counters, sync_counters::l_poll); @@ -3364,7 +3364,7 @@ public: sync_marker.encode_attr(attrs); tn->log(20, SSTR("updating marker marker_oid=" << marker_oid << " marker=" << new_marker)); - return new RGWSimpleRadosWriteAttrsCR(sync_env->async_rados, sync_env->svc->sysobj, + return new RGWSimpleRadosWriteAttrsCR(sync_env->dpp, sync_env->async_rados, sync_env->svc->sysobj, rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, marker_oid), attrs, &objv_tracker); } @@ -3392,11 +3392,11 @@ class RGWWriteBucketShardIncSyncStatus : public RGWCoroutine { sync_marker(sync_marker), stable_timestamp(stable_timestamp), objv_tracker(objv_tracker) {} - int operate() { + int operate(const DoutPrefixProvider *dpp) { reenter(this) { sync_marker.encode_attr(attrs); - yield call(new RGWSimpleRadosWriteAttrsCR(sync_env->async_rados, sync_env->svc->sysobj, + yield call(new RGWSimpleRadosWriteAttrsCR(sync_env->dpp, sync_env->async_rados, sync_env->svc->sysobj, obj, attrs, &objv_tracker)); if (retcode < 0) { return set_cr_error(retcode); @@ -3580,7 +3580,7 @@ public: zones_trace.insert(sync_env->svc->zone->get_zone().id, _sync_pipe.info.dest_bs.get_key()); } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { /* skip entries that are not complete */ if (op_state != CLS_RGW_STATE_COMPLETE) { @@ -3645,7 +3645,7 @@ public: } } if (!error_ss.str().empty()) { - yield call(sync_env->error_logger->log_error_cr(sc->conn->get_remote_id(), "data", error_ss.str(), -retcode, string("failed to sync object") + cpp_strerror(-sync_status))); + yield call(sync_env->error_logger->log_error_cr(dpp, sc->conn->get_remote_id(), "data", error_ss.str(), -retcode, string("failed to sync object") + cpp_strerror(-sync_status))); } done: if (sync_status == 0) { @@ -3752,10 +3752,10 @@ public: prefix_handler.set_rules(sync_pipe.get_rules()); } - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int 
RGWBucketShardFullSyncCR::operate() +int RGWBucketShardFullSyncCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { list_marker = sync_info.full_marker.position; @@ -3842,7 +3842,7 @@ int RGWBucketShardFullSyncCR::operate() sync_info.state = rgw_bucket_shard_sync_info::StateIncrementalSync; map attrs; sync_info.encode_state_attr(attrs); - call(new RGWSimpleRadosWriteAttrsCR(sync_env->async_rados, sync_env->svc->sysobj, + call(new RGWSimpleRadosWriteAttrsCR(dpp, sync_env->async_rados, sync_env->svc->sysobj, rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, status_oid), attrs)); } @@ -3927,10 +3927,10 @@ public: return boost::starts_with(key.name, iter->first); } - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int RGWBucketShardIncrementalSyncCR::operate() +int RGWBucketShardIncrementalSyncCR::operate(const DoutPrefixProvider *dpp) { int ret; reenter(this) { @@ -3955,7 +3955,7 @@ int RGWBucketShardIncrementalSyncCR::operate() for (; entries_iter != entries_end; ++entries_iter) { auto e = *entries_iter; if (e.op == RGWModifyOp::CLS_RGW_OP_SYNCSTOP) { - ldout(sync_env->cct, 20) << "syncstop on " << e.timestamp << dendl; + ldpp_dout(dpp, 20) << "syncstop on " << e.timestamp << dendl; syncstopped = true; entries_end = std::next(entries_iter); // stop after this entry break; @@ -4000,7 +4000,7 @@ int RGWBucketShardIncrementalSyncCR::operate() sync_info.inc_marker.position = cur_id; if (entry->op == RGWModifyOp::CLS_RGW_OP_SYNCSTOP || entry->op == RGWModifyOp::CLS_RGW_OP_RESYNC) { - ldout(sync_env->cct, 20) << "detected syncstop or resync on " << entries_iter->timestamp << ", skipping entry" << dendl; + ldpp_dout(dpp, 20) << "detected syncstop or resync on " << entries_iter->timestamp << ", skipping entry" << dendl; marker_tracker.try_update_high_marker(cur_id, 0, entry->timestamp); continue; } @@ -4244,12 +4244,13 @@ class RGWGetBucketPeersCR : public RGWCoroutine { const rgw_bucket& _source_bucket) : sync_env(_sync_env), source_bucket(_source_bucket) {} int operate() override { - int r = sync_env->svc->bucket_sync->get_bucket_sync_hints(source_bucket, + int r = sync_env->svc->bucket_sync->get_bucket_sync_hints(sync_env->dpp, + source_bucket, nullptr, &targets, null_yield); if (r < 0) { - ldout(sync_env->cct, 0) << "ERROR: " << __func__ << "(): failed to fetch bucket sync hints for bucket=" << source_bucket << dendl; + ldpp_dout(sync_env->dpp, 0) << "ERROR: " << __func__ << "(): failed to fetch bucket sync hints for bucket=" << source_bucket << dendl; return r; } @@ -4279,7 +4280,7 @@ public: << ":source_zone=" << source_zone.value_or(rgw_zone_id("*")).id))) { } - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; std::ostream& operator<<(std::ostream& out, std::optional& bs) { @@ -4311,7 +4312,7 @@ RGWRunBucketSourcesSyncCR::RGWRunBucketSourcesSyncCR(RGWDataSyncCtx *_sc, } } -int RGWRunBucketSourcesSyncCR::operate() +int RGWRunBucketSourcesSyncCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { yield call(new RGWGetBucketPeersCR(sync_env, target_bucket, sc->source_zone, source_bucket, &pipes, tn)); @@ -4320,16 +4321,16 @@ int RGWRunBucketSourcesSyncCR::operate() return set_cr_error(retcode); } - ldpp_dout(sync_env->dpp, 20) << __func__ << "(): requested source_bs=" << source_bs << " target_bs=" << target_bs << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): requested source_bs=" << source_bs << " target_bs=" << target_bs << dendl; if (pipes.empty()) { - ldpp_dout(sync_env->dpp, 20) << __func__ << "(): 
no relevant sync pipes found" << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): no relevant sync pipes found" << dendl; return set_cr_done(); } for (siter = pipes.begin(); siter != pipes.end(); ++siter) { { - ldpp_dout(sync_env->dpp, 20) << __func__ << "(): sync pipe=" << *siter << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): sync pipe=" << *siter << dendl; source_num_shards = siter->source.get_bucket_info().layout.current_index.layout.normal.num_shards; target_num_shards = siter->target.get_bucket_info().layout.current_index.layout.normal.num_shards; @@ -4351,7 +4352,7 @@ int RGWRunBucketSourcesSyncCR::operate() } } - ldpp_dout(sync_env->dpp, 20) << __func__ << "(): num shards=" << num_shards << " cur_shard=" << cur_shard << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): num shards=" << num_shards << " cur_shard=" << cur_shard << dendl; for (; num_shards > 0; --num_shards, ++cur_shard) { /* @@ -4365,7 +4366,7 @@ int RGWRunBucketSourcesSyncCR::operate() sync_pair.dest_bs.shard_id = -1; } - ldpp_dout(sync_env->dpp, 20) << __func__ << "(): sync_pair=" << sync_pair << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): sync_pair=" << sync_pair << dendl; cur_progress = (progress ? &shard_progress[prealloc_stack_id()] : nullptr); @@ -4421,20 +4422,20 @@ public: SSTR(bucket))) { } - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int RGWSyncGetBucketInfoCR::operate() +int RGWSyncGetBucketInfoCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { - yield call(new RGWGetBucketInstanceInfoCR(sync_env->async_rados, sync_env->store, bucket, pbucket_info, pattrs)); + yield call(new RGWGetBucketInstanceInfoCR(sync_env->async_rados, sync_env->store, bucket, pbucket_info, pattrs, dpp)); if (retcode == -ENOENT) { /* bucket instance info has not been synced in yet, fetch it now */ yield { tn->log(10, SSTR("no local info for bucket:" << ": fetching metadata")); string raw_key = string("bucket.instance:") + bucket.get_key(); - meta_sync_env.init(sync_env->dpp, cct, sync_env->store, sync_env->svc->zone->get_master_conn(), sync_env->async_rados, + meta_sync_env.init(dpp, cct, sync_env->store, sync_env->svc->zone->get_master_conn(), sync_env->async_rados, sync_env->http_manager, sync_env->error_logger, sync_env->sync_tracer); call(new RGWMetaSyncSingleEntryCR(&meta_sync_env, raw_key, @@ -4448,7 +4449,7 @@ int RGWSyncGetBucketInfoCR::operate() return set_cr_error(retcode); } - yield call(new RGWGetBucketInstanceInfoCR(sync_env->async_rados, sync_env->store, bucket, pbucket_info, pattrs)); + yield call(new RGWGetBucketInstanceInfoCR(sync_env->async_rados, sync_env->store, bucket, pbucket_info, pattrs, dpp)); } if (retcode < 0) { tn->log(0, SSTR("ERROR: failed to retrieve bucket info for bucket=" << bucket_str{bucket})); @@ -4539,13 +4540,14 @@ public: get_policy_params.bucket = bucket; } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { for (i = 0; i < 2; ++i) { yield call(new RGWBucketGetSyncPolicyHandlerCR(sync_env->async_rados, sync_env->store, get_policy_params, - policy)); + policy, + dpp)); if (retcode < 0 && retcode != -ENOENT) { return set_cr_error(retcode); @@ -4575,7 +4577,7 @@ public: }; -int RGWGetBucketPeersCR::operate() +int RGWGetBucketPeersCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { if (pipes) { @@ -4634,7 +4636,7 @@ int RGWGetBucketPeersCR::operate() for (hiter = get_hint_targets_action->targets.begin(); hiter != get_hint_targets_action->targets.end(); ++hiter) { - ldpp_dout(sync_env->dpp, 
20) << "Got sync hint for bucket=" << *source_bucket << ": " << hiter->get_key() << dendl; + ldpp_dout(dpp, 20) << "Got sync hint for bucket=" << *source_bucket << ": " << hiter->get_key() << dendl; target_policy = make_shared(); yield call(new RGWSyncGetBucketSyncPolicyHandlerCR(sync_env, @@ -4672,7 +4674,7 @@ int RGWGetBucketPeersCR::operate() return 0; } -int RGWRunBucketsSyncBySourceCR::operate() +int RGWRunBucketsSyncBySourceCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { return set_cr_done(); @@ -4681,7 +4683,7 @@ int RGWRunBucketsSyncBySourceCR::operate() return 0; } -int RGWRunBucketSyncCoroutine::operate() +int RGWRunBucketSyncCoroutine::operate(const DoutPrefixProvider *dpp) { reenter(this) { yield call(new RGWReadBucketPipeSyncStatusCoroutine(sc, sync_pair, &sync_status, &objv_tracker)); @@ -4771,7 +4773,7 @@ RGWCoroutine *RGWRemoteBucketManager::run_sync_cr(int num) return new RGWRunBucketSyncCoroutine(&sc, nullptr, sync_pairs[num], sync_env->sync_tracer->root_node, nullptr); } -int RGWBucketPipeSyncStatusManager::init() +int RGWBucketPipeSyncStatusManager::init(const DoutPrefixProvider *dpp) { int ret = http_manager.start(); if (ret < 0) { @@ -4791,7 +4793,7 @@ int RGWBucketPipeSyncStatusManager::init() rgw_sync_pipe_info_set pipes; - ret = cr_mgr.run(new RGWGetBucketPeersCR(&sync_env, + ret = cr_mgr.run(dpp, new RGWGetBucketPeersCR(&sync_env, dest_bucket, source_zone, source_bucket, @@ -4825,7 +4827,7 @@ int RGWBucketPipeSyncStatusManager::init() return 0; } -int RGWBucketPipeSyncStatusManager::init_sync_status() +int RGWBucketPipeSyncStatusManager::init_sync_status(const DoutPrefixProvider *dpp) { list stacks; // pass an empty objv tracker to each so that the version gets incremented @@ -4842,10 +4844,10 @@ int RGWBucketPipeSyncStatusManager::init_sync_status() stacks.push_back(stack); } - return cr_mgr.run(stacks); + return cr_mgr.run(dpp, stacks); } -int RGWBucketPipeSyncStatusManager::read_sync_status() +int RGWBucketPipeSyncStatusManager::read_sync_status(const DoutPrefixProvider *dpp) { list stacks; @@ -4858,7 +4860,7 @@ int RGWBucketPipeSyncStatusManager::read_sync_status() stacks.push_back(stack); } - int ret = cr_mgr.run(stacks); + int ret = cr_mgr.run(dpp, stacks); if (ret < 0) { ldpp_dout(this, 0) << "ERROR: failed to read sync status for " << bucket_str{dest_bucket} << dendl; @@ -4868,7 +4870,7 @@ int RGWBucketPipeSyncStatusManager::read_sync_status() return 0; } -int RGWBucketPipeSyncStatusManager::run() +int RGWBucketPipeSyncStatusManager::run(const DoutPrefixProvider *dpp) { list stacks; @@ -4881,7 +4883,7 @@ int RGWBucketPipeSyncStatusManager::run() stacks.push_back(stack); } - int ret = cr_mgr.run(stacks); + int ret = cr_mgr.run(dpp, stacks); if (ret < 0) { ldpp_dout(this, 0) << "ERROR: failed to read sync status for " << bucket_str{dest_bucket} << dendl; @@ -4925,7 +4927,8 @@ string RGWBucketPipeSyncStatusManager::obj_status_oid(const rgw_bucket_sync_pipe return prefix + ":" + obj->get_name() + ":" + obj->get_instance(); } -int rgw_read_remote_bilog_info(RGWRESTConn* conn, +int rgw_read_remote_bilog_info(const DoutPrefixProvider *dpp, + RGWRESTConn* conn, const rgw_bucket& bucket, BucketIndexShardsManager& markers, optional_yield y) @@ -4938,9 +4941,9 @@ int rgw_read_remote_bilog_info(RGWRESTConn* conn, { nullptr, nullptr } }; rgw_bucket_index_marker_info result; - int r = conn->get_json_resource("/admin/log/", params, y, result); + int r = conn->get_json_resource(dpp, "/admin/log/", params, y, result); if (r < 0) { - lderr(conn->get_ctx()) << 
"failed to fetch remote log markers: " << cpp_strerror(r) << dendl; + ldpp_dout(dpp, -1) << "failed to fetch remote log markers: " << cpp_strerror(r) << dendl; return r; } r = markers.from_string(result.max_marker, -1); @@ -5033,7 +5036,7 @@ int rgw_bucket_sync_status(const DoutPrefixProvider *dpp, if (!psource_bucket_info) { auto& bucket_ctl = store->getRados()->ctl.bucket; - int ret = bucket_ctl->read_bucket_info(source_bucket, &source_bucket_info, null_yield); + int ret = bucket_ctl->read_bucket_info(source_bucket, &source_bucket_info, null_yield, dpp); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: failed to get bucket instance info: bucket=" << source_bucket << ": " << cpp_strerror(-ret) << dendl; return ret; @@ -5052,7 +5055,7 @@ int rgw_bucket_sync_status(const DoutPrefixProvider *dpp, sc.init(&env, nullptr, *pipe.source.zone); RGWCoroutinesManager crs(store->ctx(), store->getRados()->get_cr_registry()); - return crs.run(new RGWCollectBucketSyncStatusCR(store, &sc, + return crs.run(dpp, new RGWCollectBucketSyncStatusCR(store, &sc, *psource_bucket_info, dest_bucket_info, status)); diff --git a/src/rgw/rgw_data_sync.h b/src/rgw/rgw_data_sync.h index 9bf54edc5fbda..92e2909f9ef42 100644 --- a/src/rgw/rgw_data_sync.h +++ b/src/rgw/rgw_data_sync.h @@ -377,14 +377,14 @@ public: PerfCounters* _counters); void finish(); - int read_log_info(rgw_datalog_info *log_info); - int read_source_log_shards_info(map *shards_info); - int read_source_log_shards_next(map shard_markers, map *result); - int read_sync_status(rgw_data_sync_status *sync_status); - int read_recovering_shards(const int num_shards, set& recovering_shards); - int read_shard_status(int shard_id, set& lagging_buckets,set& recovering_buckets, rgw_data_sync_marker* sync_marker, const int max_entries); - int init_sync_status(int num_shards); - int run_sync(int num_shards); + int read_log_info(const DoutPrefixProvider *dpp, rgw_datalog_info *log_info); + int read_source_log_shards_info(const DoutPrefixProvider *dpp, map *shards_info); + int read_source_log_shards_next(const DoutPrefixProvider *dpp, map shard_markers, map *result); + int read_sync_status(const DoutPrefixProvider *dpp, rgw_data_sync_status *sync_status); + int read_recovering_shards(const DoutPrefixProvider *dpp, const int num_shards, set& recovering_shards); + int read_shard_status(const DoutPrefixProvider *dpp, int shard_id, set& lagging_buckets,set& recovering_buckets, rgw_data_sync_marker* sync_marker, const int max_entries); + int init_sync_status(const DoutPrefixProvider *dpp, int num_shards); + int run_sync(const DoutPrefixProvider *dpp, int num_shards); void wakeup(int shard_id, set& keys); }; @@ -422,36 +422,36 @@ public: ~RGWDataSyncStatusManager() { finalize(); } - int init(); + int init(const DoutPrefixProvider *dpp); void finalize(); static string shard_obj_name(const rgw_zone_id& source_zone, int shard_id); static string sync_status_oid(const rgw_zone_id& source_zone); - int read_sync_status(rgw_data_sync_status *sync_status) { - return source_log.read_sync_status(sync_status); + int read_sync_status(const DoutPrefixProvider *dpp, rgw_data_sync_status *sync_status) { + return source_log.read_sync_status(dpp, sync_status); } - int read_recovering_shards(const int num_shards, set& recovering_shards) { - return source_log.read_recovering_shards(num_shards, recovering_shards); + int read_recovering_shards(const DoutPrefixProvider *dpp, const int num_shards, set& recovering_shards) { + return source_log.read_recovering_shards(dpp, num_shards, recovering_shards); } - 
int read_shard_status(int shard_id, set& lagging_buckets, set& recovering_buckets, rgw_data_sync_marker *sync_marker, const int max_entries) { - return source_log.read_shard_status(shard_id, lagging_buckets, recovering_buckets,sync_marker, max_entries); + int read_shard_status(const DoutPrefixProvider *dpp, int shard_id, set& lagging_buckets, set& recovering_buckets, rgw_data_sync_marker *sync_marker, const int max_entries) { + return source_log.read_shard_status(dpp, shard_id, lagging_buckets, recovering_buckets,sync_marker, max_entries); } - int init_sync_status() { return source_log.init_sync_status(num_shards); } + int init_sync_status(const DoutPrefixProvider *dpp) { return source_log.init_sync_status(dpp, num_shards); } - int read_log_info(rgw_datalog_info *log_info) { - return source_log.read_log_info(log_info); + int read_log_info(const DoutPrefixProvider *dpp, rgw_datalog_info *log_info) { + return source_log.read_log_info(dpp, log_info); } - int read_source_log_shards_info(map *shards_info) { - return source_log.read_source_log_shards_info(shards_info); + int read_source_log_shards_info(const DoutPrefixProvider *dpp, map *shards_info) { + return source_log.read_source_log_shards_info(dpp, shards_info); } - int read_source_log_shards_next(map shard_markers, map *result) { - return source_log.read_source_log_shards_next(shard_markers, result); + int read_source_log_shards_next(const DoutPrefixProvider *dpp, map shard_markers, map *result) { + return source_log.read_source_log_shards_next(dpp, shard_markers, result); } - int run() { return source_log.run_sync(num_shards); } + int run(const DoutPrefixProvider *dpp) { return source_log.run_sync(dpp, num_shards); } void wakeup(int shard_id, set& keys) { return source_log.wakeup(shard_id, keys); } void stop() { @@ -615,7 +615,8 @@ public: class BucketIndexShardsManager; -int rgw_read_remote_bilog_info(RGWRESTConn* conn, +int rgw_read_remote_bilog_info(const DoutPrefixProvider *dpp, + RGWRESTConn* conn, const rgw_bucket& bucket, BucketIndexShardsManager& markers, optional_yield y); @@ -655,10 +656,10 @@ public: const rgw_bucket& dest_bucket); ~RGWBucketPipeSyncStatusManager(); - int init(); + int init(const DoutPrefixProvider *dpp); map& get_sync_status() { return sync_status; } - int init_sync_status(); + int init_sync_status(const DoutPrefixProvider *dpp); static string status_oid(const rgw_zone_id& source_zone, const rgw_bucket_sync_pair_info& bs); static string obj_status_oid(const rgw_bucket_sync_pipe& sync_pipe, @@ -670,8 +671,8 @@ public: unsigned get_subsys() const override; std::ostream& gen_prefix(std::ostream& out) const override; - int read_sync_status(); - int run(); + int read_sync_status(const DoutPrefixProvider *dpp); + int run(const DoutPrefixProvider *dpp); }; /// read the sync status of all bucket shards from the given source zone diff --git a/src/rgw/rgw_datalog.cc b/src/rgw/rgw_datalog.cc index cb5cba7269fb1..64d13270ac049 100644 --- a/src/rgw/rgw_datalog.cc +++ b/src/rgw/rgw_datalog.cc @@ -89,6 +89,7 @@ public: } } ~RGWDataChangesOmap() override = default; + void prepare(ceph::real_time ut, const std::string& key, ceph::buffer::list&& entry, entries& out) override { if (!std::holds_alternative(out)) { @@ -100,31 +101,31 @@ public: cls_log_add_prepare_entry(e, utime_t(ut), {}, key, entry); std::get(out).push_back(std::move(e)); } - int push(int index, entries&& items) override { + int push(const DoutPrefixProvider *dpp, int index, entries&& items) override { lr::ObjectWriteOperation op; cls_log_add(op, 
std::get(items), true); - auto r = rgw_rados_operate(ioctx, oids[index], &op, null_yield); + auto r = rgw_rados_operate(dpp, ioctx, oids[index], &op, null_yield); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": failed to push to " << oids[index] << cpp_strerror(-r) << dendl; } return r; } - int push(int index, ceph::real_time now, + int push(const DoutPrefixProvider *dpp, int index, ceph::real_time now, const std::string& key, ceph::buffer::list&& bl) override { lr::ObjectWriteOperation op; cls_log_add(op, utime_t(now), {}, key, bl); - auto r = rgw_rados_operate(ioctx, oids[index], &op, null_yield); + auto r = rgw_rados_operate(dpp, ioctx, oids[index], &op, null_yield); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": failed to push to " << oids[index] << cpp_strerror(-r) << dendl; } return r; } - int list(int index, int max_entries, + int list(const DoutPrefixProvider *dpp, int index, int max_entries, std::vector& entries, std::optional marker, std::string* out_marker, bool* truncated) override { @@ -132,13 +133,13 @@ public: lr::ObjectReadOperation op; cls_log_list(op, {}, {}, std::string(marker.value_or("")), max_entries, log_entries, out_marker, truncated); - auto r = rgw_rados_operate(ioctx, oids[index], &op, nullptr, null_yield); + auto r = rgw_rados_operate(dpp, ioctx, oids[index], &op, nullptr, null_yield); if (r == -ENOENT) { *truncated = false; return 0; } if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": failed to list " << oids[index] << cpp_strerror(-r) << dendl; return r; @@ -152,7 +153,7 @@ public: try { decode(log_entry.entry, liter); } catch (ceph::buffer::error& err) { - lderr(cct) << __PRETTY_FUNCTION__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": failed to decode data changes log entry: " << err.what() << dendl; return -EIO; @@ -161,14 +162,14 @@ public: } return 0; } - int get_info(int index, RGWDataChangesLogInfo *info) override { + int get_info(const DoutPrefixProvider *dpp, int index, RGWDataChangesLogInfo *info) override { cls_log_header header; lr::ObjectReadOperation op; cls_log_info(op, &header); - auto r = rgw_rados_operate(ioctx, oids[index], &op, nullptr, null_yield); + auto r = rgw_rados_operate(dpp, ioctx, oids[index], &op, nullptr, null_yield); if (r == -ENOENT) r = 0; if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": failed to get info from " << oids[index] << cpp_strerror(-r) << dendl; } else { @@ -177,19 +178,19 @@ public: } return r; } - int trim(int index, std::string_view marker) override { + int trim(const DoutPrefixProvider *dpp, int index, std::string_view marker) override { lr::ObjectWriteOperation op; cls_log_trim(op, {}, {}, {}, std::string(marker)); - auto r = rgw_rados_operate(ioctx, oids[index], &op, null_yield); + auto r = rgw_rados_operate(dpp, ioctx, oids[index], &op, null_yield); if (r == -ENOENT) r = -ENODATA; if (r < 0 && r != -ENODATA) { - lderr(cct) << __PRETTY_FUNCTION__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": failed to get info from " << oids[index] << cpp_strerror(-r) << dendl; } return r; } - int trim(int index, std::string_view marker, + int trim(const DoutPrefixProvider *dpp, int index, std::string_view marker, lr::AioCompletion* c) override { lr::ObjectWriteOperation op; cls_log_trim(op, {}, {}, {}, std::string(marker)); @@ -205,19 +206,19 @@ public: std::string_view max_marker() const override { return "99999999"sv; 
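The RGWDataChangesOmap and RGWDataChangesFIFO backends combine two sub-patterns: every rados round trip now takes the caller's prefix provider as the first argument of rgw_rados_operate(), and error reporting moves from lderr(cct) to ldpp_dout(dpp, -1), which keeps the same error level (-1) but gains the caller's prefix. A sketch under those assumptions; example_trim_shard is a hypothetical free-standing helper, while cls_log_trim, rgw_rados_operate, null_yield and ldpp_dout are used as in the hunks above.

    // Sketch only: how dpp is threaded through both the rados call and the
    // error path of a trim-style operation.
    static int example_trim_shard(const DoutPrefixProvider *dpp,
                                  librados::IoCtx& ioctx,
                                  const std::string& oid,
                                  std::string_view marker)
    {
      librados::ObjectWriteOperation op;
      cls_log_trim(op, {}, {}, {}, std::string(marker));
      int r = rgw_rados_operate(dpp, ioctx, oid, &op, null_yield);  // dpp added up front
      if (r < 0 && r != -ENOENT) {
        // level -1 is the severity formerly reached through lderr(cct)
        ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": failed to trim " << oid
                           << ": " << cpp_strerror(-r) << dendl;
      }
      return r == -ENOENT ? -ENODATA : r;
    }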
} - int is_empty() override { + int is_empty(const DoutPrefixProvider *dpp) override { for (auto shard = 0u; shard < oids.size(); ++shard) { std::list log_entries; lr::ObjectReadOperation op; std::string out_marker; bool truncated; cls_log_list(op, {}, {}, {}, 1, log_entries, &out_marker, &truncated); - auto r = rgw_rados_operate(ioctx, oids[shard], &op, nullptr, null_yield); + auto r = rgw_rados_operate(dpp, ioctx, oids[shard], &op, nullptr, null_yield); if (r == -ENOENT) { continue; } if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": failed to list " << oids[shard] << cpp_strerror(-r) << dendl; return r; @@ -251,36 +252,36 @@ public: } std::get(out).push_back(std::move(entry)); } - int push(int index, entries&& items) override { - auto r = fifos[index].push(std::get(items), null_yield); + int push(const DoutPrefixProvider *dpp, int index, entries&& items) override { + auto r = fifos[index].push(dpp, std::get(items), null_yield); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": unable to push to FIFO: " << get_oid(index) << ": " << cpp_strerror(-r) << dendl; } return r; } - int push(int index, ceph::real_time, + int push(const DoutPrefixProvider *dpp, int index, ceph::real_time, const std::string&, ceph::buffer::list&& bl) override { - auto r = fifos[index].push(std::move(bl), null_yield); + auto r = fifos[index].push(dpp, std::move(bl), null_yield); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": unable to push to FIFO: " << get_oid(index) << ": " << cpp_strerror(-r) << dendl; } return r; } - int list(int index, int max_entries, + int list(const DoutPrefixProvider *dpp, int index, int max_entries, std::vector& entries, std::optional marker, std::string* out_marker, bool* truncated) override { std::vector log_entries; bool more = false; - auto r = fifos[index].list(max_entries, marker, &log_entries, &more, + auto r = fifos[index].list(dpp, max_entries, marker, &log_entries, &more, null_yield); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": unable to list FIFO: " << get_oid(index) << ": " << cpp_strerror(-r) << dendl; return r; @@ -293,7 +294,7 @@ public: try { decode(log_entry.entry, liter); } catch (const buffer::error& err) { - lderr(cct) << __PRETTY_FUNCTION__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": failed to decode data changes log entry: " << err.what() << dendl; return -EIO; @@ -307,17 +308,17 @@ public: } return 0; } - int get_info(int index, RGWDataChangesLogInfo *info) override { + int get_info(const DoutPrefixProvider *dpp, int index, RGWDataChangesLogInfo *info) override { auto& fifo = fifos[index]; - auto r = fifo.read_meta(null_yield); + auto r = fifo.read_meta(dpp, null_yield); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": unable to get FIFO metadata: " << get_oid(index) << ": " << cpp_strerror(-r) << dendl; return r; } rados::cls::fifo::info m; - fifo.meta(m, null_yield); + fifo.meta(dpp, m, null_yield); auto p = m.head_part_num; if (p < 0) { info->marker = ""s; @@ -325,9 +326,9 @@ public: return 0; } rgw::cls::fifo::part_info h; - r = fifo.get_part_info(p, &h, null_yield); + r = fifo.get_part_info(dpp, p, &h, null_yield); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": unable to get part info: " << get_oid(index) << "/" << p << ": " << cpp_strerror(-r) << dendl; 
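Callers of these widened interfaces do not always have a req_state or a sync_env in hand. For such paths a plain DoutPrefix (the small DoutPrefixProvider in common/dout.h that wraps a CephContext, a subsystem id and a fixed prefix string) is sufficient. The sketch below is illustrative only: example_datalog_probe, the "datalog probe: " prefix and the assumption that dout_subsys is defined in the enclosing file are not part of the patch; RGWDataChangesLog::get_info() is used with the signature introduced further down in rgw_datalog.cc.

    // Sketch only: satisfying the new dpp parameter from a context that has
    // no request state, using a standalone DoutPrefix.
    void example_datalog_probe(CephContext* cct, RGWDataChangesLog& datalog)
    {
      const DoutPrefix dp(cct, dout_subsys, "datalog probe: ");
      RGWDataChangesLogInfo info;
      int r = datalog.get_info(&dp, 0 /* shard */, &info);
      if (r < 0) {
        ldpp_dout(&dp, 0) << "get_info failed: " << cpp_strerror(-r) << dendl;
      }
    }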
return r; @@ -336,22 +337,22 @@ public: info->last_update = h.max_time; return 0; } - int trim(int index, std::string_view marker) override { - auto r = fifos[index].trim(marker, false, null_yield); + int trim(const DoutPrefixProvider *dpp, int index, std::string_view marker) override { + auto r = fifos[index].trim(dpp, marker, false, null_yield); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": unable to trim FIFO: " << get_oid(index) << ": " << cpp_strerror(-r) << dendl; } return r; } - int trim(int index, std::string_view marker, + int trim(const DoutPrefixProvider *dpp, int index, std::string_view marker, librados::AioCompletion* c) override { int r = 0; if (marker == rgw::cls::fifo::marker(0, 0).to_string()) { rgw_complete_aio_completion(c, -ENODATA); } else { - fifos[index].trim(marker, false, c, null_yield); + fifos[index].trim(dpp, marker, false, c, null_yield); } return r; } @@ -360,14 +361,14 @@ public: rgw::cls::fifo::marker::max().to_string(); return std::string_view(mm); } - int is_empty() override { + int is_empty(const DoutPrefixProvider *dpp) override { std::vector log_entries; bool more = false; for (auto shard = 0u; shard < fifos.size(); ++shard) { - auto r = fifos[shard].list(1, {}, &log_entries, &more, + auto r = fifos[shard].list(dpp, 1, {}, &log_entries, &more, null_yield); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": unable to list FIFO: " << get_oid(shard) << ": " << cpp_strerror(-r) << dendl; return r; @@ -445,7 +446,8 @@ bs::error_code DataLogBackends::handle_empty_to(uint64_t new_tail) noexcept { } -int RGWDataChangesLog::start(const RGWZone* _zone, +int RGWDataChangesLog::start(const DoutPrefixProvider *dpp, + const RGWZone* _zone, const RGWZoneParams& zoneparams, librados::Rados* lr) { @@ -456,23 +458,23 @@ int RGWDataChangesLog::start(const RGWZone* _zone, // Should be guaranteed by `set_enum_allowed` ceph_assert(defbacking); auto log_pool = zoneparams.log_pool; - auto r = rgw_init_ioctx(lr, log_pool, ioctx, true, false); + auto r = rgw_init_ioctx(dpp, lr, log_pool, ioctx, true, false); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": Failed to initialized ioctx, r=" << r << ", pool=" << log_pool << dendl; return -r; } auto besr = logback_generations::init( - ioctx, metadata_log_oid(), [this](uint64_t gen_id, int shard) { + dpp, ioctx, metadata_log_oid(), [this](uint64_t gen_id, int shard) { return get_oid(gen_id, shard); }, num_shards, *defbacking, null_yield, *this); if (!besr) { - lderr(cct) << __PRETTY_FUNCTION__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": Error initializing backends: " << besr.error().message() << dendl; return ceph::from_error_code(besr.error()); @@ -492,7 +494,7 @@ int RGWDataChangesLog::choose_oid(const rgw_bucket_shard& bs) { return static_cast(r); } -int RGWDataChangesLog::renew_entries() +int RGWDataChangesLog::renew_entries(const DoutPrefixProvider *dpp) { if (!zone->log_data) return 0; @@ -528,11 +530,11 @@ int RGWDataChangesLog::renew_entries() auto now = real_clock::now(); - auto ret = be->push(index, std::move(entries)); + auto ret = be->push(dpp, index, std::move(entries)); if (ret < 0) { /* we don't really need to have a special handling for failed cases here, * as this is just an optimization. 
*/ - lderr(cct) << "ERROR: svc.cls->timelog.add() returned " << ret << dendl; + ldpp_dout(dpp, -1) << "ERROR: svc.cls->timelog.add() returned " << ret << dendl; return ret; } @@ -580,14 +582,15 @@ int RGWDataChangesLog::get_log_shard_id(rgw_bucket& bucket, int shard_id) { return choose_oid(bs); } -bool RGWDataChangesLog::filter_bucket(const rgw_bucket& bucket, +bool RGWDataChangesLog::filter_bucket(const DoutPrefixProvider *dpp, + const rgw_bucket& bucket, optional_yield y) const { if (!bucket_filter) { return true; } - return bucket_filter(bucket, y); + return bucket_filter(bucket, y, dpp); } std::string RGWDataChangesLog::get_oid(uint64_t gen_id, int i) const { @@ -596,10 +599,10 @@ std::string RGWDataChangesLog::get_oid(uint64_t gen_id, int i) const { fmt::format("{}.{}", prefix, i)); } -int RGWDataChangesLog::add_entry(const RGWBucketInfo& bucket_info, int shard_id) { +int RGWDataChangesLog::add_entry(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, int shard_id) { auto& bucket = bucket_info.bucket; - if (!filter_bucket(bucket, null_yield)) { + if (!filter_bucket(dpp, bucket, null_yield)) { return 0; } @@ -622,7 +625,7 @@ int RGWDataChangesLog::add_entry(const RGWBucketInfo& bucket_info, int shard_id) std::unique_lock sl(status->lock); - ldout(cct, 20) << "RGWDataChangesLog::add_entry() bucket.name=" << bucket.name + ldpp_dout(dpp, 20) << "RGWDataChangesLog::add_entry() bucket.name=" << bucket.name << " shard_id=" << shard_id << " now=" << now << " cur_expiration=" << status->cur_expiration << dendl; @@ -673,10 +676,10 @@ int RGWDataChangesLog::add_entry(const RGWBucketInfo& bucket_info, int shard_id) change.timestamp = now; encode(change, bl); - ldout(cct, 20) << "RGWDataChangesLog::add_entry() sending update with now=" << now << " cur_expiration=" << expiration << dendl; + ldpp_dout(dpp, 20) << "RGWDataChangesLog::add_entry() sending update with now=" << now << " cur_expiration=" << expiration << dendl; auto be = bes->head(); - ret = be->push(index, now, change.key, std::move(bl)); + ret = be->push(dpp, index, now, change.key, std::move(bl)); now = real_clock::now(); @@ -699,7 +702,7 @@ int RGWDataChangesLog::add_entry(const RGWBucketInfo& bucket_info, int shard_id) return ret; } -int DataLogBackends::list(int shard, int max_entries, +int DataLogBackends::list(const DoutPrefixProvider *dpp, int shard, int max_entries, std::vector& entries, std::optional marker, std::string* out_marker, bool* truncated) @@ -715,7 +718,7 @@ int DataLogBackends::list(int shard, int max_entries, auto be = i->second; l.unlock(); gen_id = be->gen_id; - auto r = be->list(shard, max_entries, gentries, + auto r = be->list(dpp, shard, max_entries, gentries, gen_id == start_id ? 
start_cursor : std::string{}, &out_cursor, truncated); if (r < 0) @@ -739,16 +742,16 @@ int DataLogBackends::list(int shard, int max_entries, return 0; } -int RGWDataChangesLog::list_entries(int shard, int max_entries, +int RGWDataChangesLog::list_entries(const DoutPrefixProvider *dpp, int shard, int max_entries, std::vector& entries, std::optional marker, std::string* out_marker, bool* truncated) { assert(shard < num_shards); - return bes->list(shard, max_entries, entries, marker, out_marker, truncated); + return bes->list(dpp, shard, max_entries, entries, marker, out_marker, truncated); } -int RGWDataChangesLog::list_entries(int max_entries, +int RGWDataChangesLog::list_entries(const DoutPrefixProvider *dpp, int max_entries, std::vector& entries, LogMarker& marker, bool *ptruncated) { @@ -756,7 +759,7 @@ int RGWDataChangesLog::list_entries(int max_entries, entries.clear(); for (; marker.shard < num_shards && int(entries.size()) < max_entries; marker.shard++, marker.marker.reset()) { - int ret = list_entries(marker.shard, max_entries - entries.size(), + int ret = list_entries(dpp, marker.shard, max_entries - entries.size(), entries, marker.marker, NULL, &truncated); if (ret == -ENOENT) { continue; @@ -773,18 +776,18 @@ int RGWDataChangesLog::list_entries(int max_entries, return 0; } -int RGWDataChangesLog::get_info(int shard_id, RGWDataChangesLogInfo *info) +int RGWDataChangesLog::get_info(const DoutPrefixProvider *dpp, int shard_id, RGWDataChangesLogInfo *info) { assert(shard_id < num_shards); auto be = bes->head(); - auto r = be->get_info(shard_id, info); + auto r = be->get_info(dpp, shard_id, info); if (!info->marker.empty()) { info->marker = gencursor(be->gen_id, info->marker); } return r; } -int DataLogBackends::trim_entries(int shard_id, std::string_view marker) +int DataLogBackends::trim_entries(const DoutPrefixProvider *dpp, int shard_id, std::string_view marker) { auto [target_gen, cursor] = cursorgen(marker); std::unique_lock l(m); @@ -797,7 +800,7 @@ int DataLogBackends::trim_entries(int shard_id, std::string_view marker) be = upper_bound(be->gen_id)->second) { l.unlock(); auto c = be->gen_id == target_gen ? 
cursor : be->max_marker(); - r = be->trim(shard_id, c); + r = be->trim(dpp, shard_id, c); if (r == -ENOENT) r = -ENODATA; if (r == -ENODATA && be->gen_id < target_gen) @@ -807,10 +810,10 @@ int DataLogBackends::trim_entries(int shard_id, std::string_view marker) return r; } -int RGWDataChangesLog::trim_entries(int shard_id, std::string_view marker) +int RGWDataChangesLog::trim_entries(const DoutPrefixProvider *dpp, int shard_id, std::string_view marker) { assert(shard_id < num_shards); - return bes->trim_entries(shard_id, marker); + return bes->trim_entries(dpp, shard_id, marker); } class GenTrim : public rgw::cls::fifo::Completion { @@ -823,15 +826,15 @@ public: const uint64_t tail_gen; boost::intrusive_ptr be; - GenTrim(DataLogBackends* bes, int shard_id, uint64_t target_gen, + GenTrim(const DoutPrefixProvider *dpp, DataLogBackends* bes, int shard_id, uint64_t target_gen, std::string cursor, uint64_t head_gen, uint64_t tail_gen, boost::intrusive_ptr be, lr::AioCompletion* super) - : Completion(super), bes(bes), shard_id(shard_id), target_gen(target_gen), + : Completion(dpp, super), bes(bes), shard_id(shard_id), target_gen(target_gen), cursor(std::move(cursor)), head_gen(head_gen), tail_gen(tail_gen), be(std::move(be)) {} - void handle(Ptr&& p, int r) { + void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) { auto gen_id = be->gen_id; be.reset(); if (r == -ENOENT) @@ -854,11 +857,11 @@ public: be = i->second; } auto c = be->gen_id == target_gen ? cursor : be->max_marker(); - be->trim(shard_id, c, call(std::move(p))); + be->trim(dpp, shard_id, c, call(std::move(p))); } }; -void DataLogBackends::trim_entries(int shard_id, std::string_view marker, +void DataLogBackends::trim_entries(const DoutPrefixProvider *dpp, int shard_id, std::string_view marker, librados::AioCompletion* c) { auto [target_gen, cursor] = cursorgen(marker); @@ -872,15 +875,15 @@ void DataLogBackends::trim_entries(int shard_id, std::string_view marker, } auto be = begin()->second; l.unlock(); - auto gt = std::make_unique(this, shard_id, target_gen, + auto gt = std::make_unique(dpp, this, shard_id, target_gen, std::string(cursor), head_gen, tail_gen, be, c); auto cc = be->gen_id == target_gen ? 
cursor : be->max_marker(); - be->trim(shard_id, cc, GenTrim::call(std::move(gt))); + be->trim(dpp, shard_id, cc, GenTrim::call(std::move(gt))); } -int DataLogBackends::trim_generations(std::optional& through) { +int DataLogBackends::trim_generations(const DoutPrefixProvider *dpp, std::optional& through) { if (size() != 1) { std::vector candidates; { @@ -893,7 +896,7 @@ int DataLogBackends::trim_generations(std::optional& through) { std::optional highest; for (auto& be : candidates) { - auto r = be->is_empty(); + auto r = be->is_empty(dpp); if (r < 0) { return r; } else if (r == 1) { @@ -907,21 +910,21 @@ int DataLogBackends::trim_generations(std::optional& through) { if (!highest) { return 0; } - auto ec = empty_to(*highest, null_yield); + auto ec = empty_to(dpp, *highest, null_yield); if (ec) { return ceph::from_error_code(ec); } } - return ceph::from_error_code(remove_empty(null_yield)); + return ceph::from_error_code(remove_empty(dpp, null_yield)); } -int RGWDataChangesLog::trim_entries(int shard_id, std::string_view marker, +int RGWDataChangesLog::trim_entries(const DoutPrefixProvider *dpp, int shard_id, std::string_view marker, librados::AioCompletion* c) { assert(shard_id < num_shards); - bes->trim_entries(shard_id, marker, c); + bes->trim_entries(dpp, shard_id, marker, c); return 0; } @@ -942,10 +945,11 @@ void RGWDataChangesLog::renew_run() { static constexpr auto runs_per_prune = 150; auto run = 0; for (;;) { - dout(2) << "RGWDataChangesLog::ChangesRenewThread: start" << dendl; - int r = renew_entries(); + const DoutPrefix dp(cct, dout_subsys, "rgw data changes log: "); + ldpp_dout(&dp, 2) << "RGWDataChangesLog::ChangesRenewThread: start" << dendl; + int r = renew_entries(&dp); if (r < 0) { - dout(0) << "ERROR: RGWDataChangesLog::renew_entries returned error r=" << r << dendl; + ldpp_dout(&dp, 0) << "ERROR: RGWDataChangesLog::renew_entries returned error r=" << r << dendl; } if (going_down()) @@ -953,16 +957,16 @@ void RGWDataChangesLog::renew_run() { if (run == runs_per_prune) { std::optional through; - dout(2) << "RGWDataChangesLog::ChangesRenewThread: pruning old generations" << dendl; - trim_generations(through); + ldpp_dout(&dp, 2) << "RGWDataChangesLog::ChangesRenewThread: pruning old generations" << dendl; + trim_generations(&dp, through); if (r < 0) { derr << "RGWDataChangesLog::ChangesRenewThread: failed pruning r=" << r << dendl; } else if (through) { - dout(2) << "RGWDataChangesLog::ChangesRenewThread: pruned generations " + ldpp_dout(&dp, 2) << "RGWDataChangesLog::ChangesRenewThread: pruned generations " << "through " << *through << "." << dendl; } else { - dout(2) << "RGWDataChangesLog::ChangesRenewThread: nothing to prune." + ldpp_dout(&dp, 2) << "RGWDataChangesLog::ChangesRenewThread: nothing to prune." 
<< dendl; } run = 0; @@ -1002,10 +1006,10 @@ std::string RGWDataChangesLog::max_marker() const { "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"); } -int RGWDataChangesLog::change_format(log_type type, optional_yield y) { - return ceph::from_error_code(bes->new_backing(type, y)); +int RGWDataChangesLog::change_format(const DoutPrefixProvider *dpp, log_type type, optional_yield y) { + return ceph::from_error_code(bes->new_backing(dpp, type, y)); } -int RGWDataChangesLog::trim_generations(std::optional& through) { - return bes->trim_generations(through); +int RGWDataChangesLog::trim_generations(const DoutPrefixProvider *dpp, std::optional& through) { + return bes->trim_generations(dpp, through); } diff --git a/src/rgw/rgw_datalog.h b/src/rgw/rgw_datalog.h index 5886d51dac174..1ff714873367e 100644 --- a/src/rgw/rgw_datalog.h +++ b/src/rgw/rgw_datalog.h @@ -146,12 +146,12 @@ public: --i; return i->second; } - int list(int shard, int max_entries, + int list(const DoutPrefixProvider *dpp, int shard, int max_entries, std::vector& entries, std::optional marker, std::string* out_marker, bool* truncated); - int trim_entries(int shard_id, std::string_view marker); - void trim_entries(int shard_id, std::string_view marker, + int trim_entries(const DoutPrefixProvider *dpp, int shard_id, std::string_view marker); + void trim_entries(const DoutPrefixProvider *dpp, int shard_id, std::string_view marker, librados::AioCompletion* c); void set_zero(RGWDataChangesBE* be) { emplace(0, be); @@ -161,7 +161,7 @@ public: bs::error_code handle_new_gens(entries_t e) noexcept override; bs::error_code handle_empty_to(uint64_t new_tail) noexcept override; - int trim_generations(std::optional& through); + int trim_generations(const DoutPrefixProvider *dpp, std::optional& through); }; class RGWDataChangesLog { @@ -214,34 +214,34 @@ class RGWDataChangesLog { void renew_stop(); std::thread renew_thread; - std::function bucket_filter; + std::function bucket_filter; int choose_oid(const rgw_bucket_shard& bs); bool going_down() const; - bool filter_bucket(const rgw_bucket& bucket, optional_yield y) const; - int renew_entries(); + bool filter_bucket(const DoutPrefixProvider *dpp, const rgw_bucket& bucket, optional_yield y) const; + int renew_entries(const DoutPrefixProvider *dpp); public: RGWDataChangesLog(CephContext* cct); ~RGWDataChangesLog(); - int start(const RGWZone* _zone, const RGWZoneParams& zoneparams, + int start(const DoutPrefixProvider *dpp, const RGWZone* _zone, const RGWZoneParams& zoneparams, librados::Rados* lr); - int add_entry(const RGWBucketInfo& bucket_info, int shard_id); + int add_entry(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, int shard_id); int get_log_shard_id(rgw_bucket& bucket, int shard_id); - int list_entries(int shard, int max_entries, + int list_entries(const DoutPrefixProvider *dpp, int shard, int max_entries, std::vector& entries, std::optional marker, std::string* out_marker, bool* truncated); - int trim_entries(int shard_id, std::string_view marker); - int trim_entries(int shard_id, std::string_view marker, + int trim_entries(const DoutPrefixProvider *dpp, int shard_id, std::string_view marker); + int trim_entries(const DoutPrefixProvider *dpp, int shard_id, std::string_view marker, librados::AioCompletion* c); // :( - int get_info(int shard_id, RGWDataChangesLogInfo *info); + int get_info(const DoutPrefixProvider *dpp, int shard_id, RGWDataChangesLogInfo *info); using LogMarker = RGWDataChangesLogMarker; - int list_entries(int max_entries, + int 
list_entries(const DoutPrefixProvider *dpp, int max_entries, std::vector& entries, LogMarker& marker, bool* ptruncated); @@ -266,8 +266,8 @@ public: std::string get_oid(uint64_t gen_id, int shard_id) const; - int change_format(log_type type, optional_yield y); - int trim_generations(std::optional& through); + int change_format(const DoutPrefixProvider *dpp, log_type type, optional_yield y); + int trim_generations(const DoutPrefixProvider *dpp, std::optional& through); }; class RGWDataChangesBE : public boost::intrusive_ref_counter { @@ -296,21 +296,21 @@ public: const std::string& key, ceph::buffer::list&& entry, entries& out) = 0; - virtual int push(int index, entries&& items) = 0; - virtual int push(int index, ceph::real_time now, + virtual int push(const DoutPrefixProvider *dpp, int index, entries&& items) = 0; + virtual int push(const DoutPrefixProvider *dpp, int index, ceph::real_time now, const std::string& key, ceph::buffer::list&& bl) = 0; - virtual int list(int shard, int max_entries, + virtual int list(const DoutPrefixProvider *dpp, int shard, int max_entries, std::vector& entries, std::optional marker, std::string* out_marker, bool* truncated) = 0; - virtual int get_info(int index, RGWDataChangesLogInfo *info) = 0; - virtual int trim(int index, std::string_view marker) = 0; - virtual int trim(int index, std::string_view marker, + virtual int get_info(const DoutPrefixProvider *dpp, int index, RGWDataChangesLogInfo *info) = 0; + virtual int trim(const DoutPrefixProvider *dpp, int index, std::string_view marker) = 0; + virtual int trim(const DoutPrefixProvider *dpp, int index, std::string_view marker, librados::AioCompletion* c) = 0; virtual std::string_view max_marker() const = 0; // 1 on empty, 0 on non-empty, negative on error. - virtual int is_empty() = 0; + virtual int is_empty(const DoutPrefixProvider *dpp) = 0; }; diff --git a/src/rgw/rgw_dencoder.cc b/src/rgw/rgw_dencoder.cc index 612d6d1e8b701..227d20e82c121 100644 --- a/src/rgw/rgw_dencoder.cc +++ b/src/rgw/rgw_dencoder.cc @@ -40,113 +40,6 @@ void RGWObjManifestPart::generate_test_instances(std::list& o.push_back(p); } -void RGWObjManifest::obj_iterator::seek(uint64_t o) -{ - ofs = o; - if (manifest->explicit_objs) { - explicit_iter = manifest->objs.upper_bound(ofs); - if (explicit_iter != manifest->objs.begin()) { - --explicit_iter; - } - if (ofs < manifest->obj_size) { - update_explicit_pos(); - } else { - ofs = manifest->obj_size; - } - update_location(); - return; - } - if (o < manifest->get_head_size()) { - rule_iter = manifest->rules.begin(); - stripe_ofs = 0; - stripe_size = manifest->get_head_size(); - if (rule_iter != manifest->rules.end()) { - cur_part_id = rule_iter->second.start_part_num; - cur_override_prefix = rule_iter->second.override_prefix; - } - update_location(); - return; - } - - rule_iter = manifest->rules.upper_bound(ofs); - next_rule_iter = rule_iter; - if (rule_iter != manifest->rules.begin()) { - --rule_iter; - } - - if (rule_iter == manifest->rules.end()) { - update_location(); - return; - } - - RGWObjManifestRule& rule = rule_iter->second; - - if (rule.part_size > 0) { - cur_part_id = rule.start_part_num + (ofs - rule.start_ofs) / rule.part_size; - } else { - cur_part_id = rule.start_part_num; - } - part_ofs = rule.start_ofs + (cur_part_id - rule.start_part_num) * rule.part_size; - - if (rule.stripe_max_size > 0) { - cur_stripe = (ofs - part_ofs) / rule.stripe_max_size; - - stripe_ofs = part_ofs + cur_stripe * rule.stripe_max_size; - if (!cur_part_id && manifest->get_head_size() > 0) { - 
cur_stripe++; - } - } else { - cur_stripe = 0; - stripe_ofs = part_ofs; - } - - if (!rule.part_size) { - stripe_size = rule.stripe_max_size; - stripe_size = std::min(manifest->get_obj_size() - stripe_ofs, stripe_size); - } else { - uint64_t next = std::min(stripe_ofs + rule.stripe_max_size, part_ofs + rule.part_size); - stripe_size = next - stripe_ofs; - } - - cur_override_prefix = rule.override_prefix; - - update_location(); -} - -void RGWObjManifest::obj_iterator::update_location() -{ - if (manifest->explicit_objs) { - if (manifest->empty()) { - location = rgw_obj_select{}; - } else { - location = explicit_iter->second.loc; - } - return; - } - - if (ofs < manifest->get_head_size()) { - location = manifest->get_obj(); - location.set_placement_rule(manifest->get_head_placement_rule()); - return; - } - - manifest->get_implicit_location(cur_part_id, cur_stripe, ofs, &cur_override_prefix, &location); -} - -void RGWObjManifest::obj_iterator::update_explicit_pos() -{ - ofs = explicit_iter->first; - stripe_ofs = ofs; - - map::iterator next_iter = explicit_iter; - ++next_iter; - if (next_iter != manifest->objs.end()) { - stripe_size = next_iter->first - ofs; - } else { - stripe_size = manifest->obj_size - ofs; - } -} - void RGWObjManifest::generate_test_instances(std::list& o) { RGWObjManifest *m = new RGWObjManifest; @@ -167,58 +60,6 @@ void RGWObjManifest::generate_test_instances(std::list& o) o.push_back(new RGWObjManifest); } -void RGWObjManifest::get_implicit_location(uint64_t cur_part_id, uint64_t cur_stripe, uint64_t ofs, string *override_prefix, rgw_obj_select *location) -{ - rgw_obj loc; - - string& oid = loc.key.name; - string& ns = loc.key.ns; - - if (!override_prefix || override_prefix->empty()) { - oid = prefix; - } else { - oid = *override_prefix; - } - - if (!cur_part_id) { - if (ofs < max_head_size) { - location->set_placement_rule(head_placement_rule); - *location = obj; - return; - } else { - char buf[16]; - snprintf(buf, sizeof(buf), "%d", (int)cur_stripe); - oid += buf; - ns = shadow_ns; - } - } else { - char buf[32]; - if (cur_stripe == 0) { - snprintf(buf, sizeof(buf), ".%d", (int)cur_part_id); - oid += buf; - ns= RGW_OBJ_NS_MULTIPART; - } else { - snprintf(buf, sizeof(buf), ".%d_%d", (int)cur_part_id, (int)cur_stripe); - oid += buf; - ns = shadow_ns; - } - } - - if (!tail_placement.bucket.name.empty()) { - loc.bucket = tail_placement.bucket; - } else { - loc.bucket = obj.bucket; - } - - // Always overwrite instance with tail_instance - // to get the right shadow object location - loc.key.set_instance(tail_instance); - - location->set_placement_rule(tail_placement.placement_rule); - *location = loc; -} - - void rgw_log_entry::generate_test_instances(list& o) { diff --git a/src/rgw/rgw_etag_verifier.cc b/src/rgw/rgw_etag_verifier.cc index 285d64cd7a98b..6a455e18b2311 100644 --- a/src/rgw/rgw_etag_verifier.cc +++ b/src/rgw/rgw_etag_verifier.cc @@ -7,7 +7,8 @@ namespace rgw::putobj { -int create_etag_verifier(CephContext* cct, DataProcessor* filter, +int create_etag_verifier(const DoutPrefixProvider *dpp, + CephContext* cct, DataProcessor* filter, const bufferlist& manifest_bl, const std::optional& compression, etag_verifier_ptr& verifier) @@ -18,14 +19,14 @@ int create_etag_verifier(CephContext* cct, DataProcessor* filter, auto miter = manifest_bl.cbegin(); decode(manifest, miter); } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: couldn't decode manifest" << dendl; + ldpp_dout(dpp, 0) << "ERROR: couldn't decode manifest" << dendl; return -EIO; } RGWObjManifestRule 
rule; bool found = manifest.get_rule(0, &rule); if (!found) { - lderr(cct) << "ERROR: manifest->get_rule() could not find rule" << dendl; + ldpp_dout(dpp, -1) << "ERROR: manifest->get_rule() could not find rule" << dendl; return -EIO; } @@ -43,11 +44,11 @@ int create_etag_verifier(CephContext* cct, DataProcessor* filter, * MPU part. These part ETags then become the input for the MPU object * Etag. */ - for (auto mi = manifest.obj_begin(); mi != manifest.obj_end(); ++mi) { + for (auto mi = manifest.obj_begin(dpp); mi != manifest.obj_end(dpp); ++mi) { if (cur_part_ofs == mi.get_part_ofs()) continue; cur_part_ofs = mi.get_part_ofs(); - ldout(cct, 20) << "MPU Part offset:" << cur_part_ofs << dendl; + ldpp_dout(dpp, 20) << "MPU Part offset:" << cur_part_ofs << dendl; part_ofs.push_back(cur_part_ofs); } @@ -64,12 +65,12 @@ int create_etag_verifier(CephContext* cct, DataProcessor* filter, }; block = std::lower_bound(block, blocks.end(), ofs, less); if (block == blocks.end() || block->new_ofs != ofs) { - ldout(cct, 4) << "no match for compressed offset " << ofs + ldpp_dout(dpp, 4) << "no match for compressed offset " << ofs << ", disabling etag verification" << dendl; return -EIO; } ofs = block->old_ofs; - ldout(cct, 20) << "MPU Part uncompressed offset:" << ofs << dendl; + ldpp_dout(dpp, 20) << "MPU Part uncompressed offset:" << ofs << dendl; } } diff --git a/src/rgw/rgw_etag_verifier.h b/src/rgw/rgw_etag_verifier.h index dac6ddab5f800..48007cf169984 100644 --- a/src/rgw/rgw_etag_verifier.h +++ b/src/rgw/rgw_etag_verifier.h @@ -75,7 +75,8 @@ constexpr auto max_etag_verifier_size = std::max( ); using etag_verifier_ptr = ceph::static_ptr; -int create_etag_verifier(CephContext* cct, DataProcessor* next, +int create_etag_verifier(const DoutPrefixProvider *dpp, + CephContext* cct, DataProcessor* next, const bufferlist& manifest_bl, const std::optional& compression, etag_verifier_ptr& verifier); diff --git a/src/rgw/rgw_file.cc b/src/rgw/rgw_file.cc index 1a30f11cc55ae..77e4db05e2b73 100644 --- a/src/rgw/rgw_file.cc +++ b/src/rgw/rgw_file.cc @@ -1942,7 +1942,7 @@ namespace rgw { cs_info.blocks = std::move(compressor->get_compression_blocks()); encode(cs_info, tmp); attrs[RGW_ATTR_COMPRESSION] = tmp; - ldout(state->cct, 20) << "storing " << RGW_ATTR_COMPRESSION + ldpp_dout(this, 20) << "storing " << RGW_ATTR_COMPRESSION << " with type=" << cs_info.compression_type << ", orig_size=" << cs_info.orig_size << ", blocks=" << cs_info.blocks.size() << dendl; @@ -1973,7 +1973,7 @@ namespace rgw { attrbl.append(val.c_str(), val.size() + 1); } - op_ret = rgw_get_request_metadata(state->cct, state->info, attrs); + op_ret = rgw_get_request_metadata(this, state->cct, state->info, attrs); if (op_ret < 0) { goto done; } @@ -2034,7 +2034,8 @@ void rgwfile_version(int *major, int *minor, int *extra) sec_key, "/"); ceph_assert(new_fs); - rc = new_fs->authorize(rgwlib.get_store()); + const DoutPrefix dp(rgwlib.get_store()->ctx(), dout_subsys, "rgw mount: "); + rc = new_fs->authorize(&dp, rgwlib.get_store()); if (rc != 0) { delete new_fs; return -EINVAL; @@ -2065,7 +2066,8 @@ int rgw_mount2(librgw_t rgw, const char *uid, const char *acc_key, sec_key, root); ceph_assert(new_fs); - rc = new_fs->authorize(rgwlib.get_store()); + const DoutPrefix dp(rgwlib.get_store()->ctx(), dout_subsys, "rgw mount2: "); + rc = new_fs->authorize(&dp, rgwlib.get_store()); if (rc != 0) { delete new_fs; return -EINVAL; diff --git a/src/rgw/rgw_file.h b/src/rgw/rgw_file.h index a97f54f2873a5..60845d9d896b5 100644 --- a/src/rgw/rgw_file.h +++ 
b/src/rgw/rgw_file.h @@ -984,8 +984,8 @@ namespace rgw { (void) fh_lru.unref(fh, cohort::lru::FLAG_NONE); } - int authorize(rgw::sal::RGWRadosStore* store) { - int ret = store->ctl()->user->get_info_by_access_key(key.id, &user, null_yield); + int authorize(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore* store) { + int ret = store->ctl()->user->get_info_by_access_key(dpp, key.id, &user, null_yield); if (ret == 0) { RGWAccessKey* k = user.get_key(key.id); if (!k || (k->key != key.key)) @@ -1004,8 +1004,8 @@ namespace rgw { } if (token.valid() && (ldh->auth(token.id, token.key) == 0)) { /* try to store user if it doesn't already exist */ - if (store->ctl()->user->get_info_by_uid(rgw_user(token.id), &user, null_yield) < 0) { - int ret = store->ctl()->user->store_info(user, null_yield, + if (store->ctl()->user->get_info_by_uid(dpp, rgw_user(token.id), &user, null_yield) < 0) { + int ret = store->ctl()->user->store_info(dpp, user, null_yield, RGWUserCtl::PutParams() .set_exclusive(true)); if (ret < 0) { @@ -1297,10 +1297,10 @@ namespace rgw { RGWUserInfo* get_user() { return &user; } - void update_user() { + void update_user(const DoutPrefixProvider *dpp) { RGWUserInfo _user = user; auto user_ctl = rgwlib.get_store()->ctl()->user; - int ret = user_ctl->get_info_by_access_key(key.id, &user, null_yield); + int ret = user_ctl->get_info_by_access_key(dpp, key.id, &user, null_yield); if (ret != 0) user = _user; } diff --git a/src/rgw/rgw_frontend.h b/src/rgw/rgw_frontend.h index e728a68292f63..08f6042a62ce6 100644 --- a/src/rgw/rgw_frontend.h +++ b/src/rgw/rgw_frontend.h @@ -18,7 +18,6 @@ #include "rgw_sal_rados.h" #define dout_context g_ceph_context -#define dout_subsys ceph_subsys_rgw namespace rgw::dmclock { class SyncScheduler; @@ -210,11 +209,25 @@ public: } }; /* RGWFCGXFrontend */ -class RGWLoadGenFrontend : public RGWProcessFrontend { +class RGWLoadGenFrontend : public RGWProcessFrontend, public DoutPrefixProvider { public: RGWLoadGenFrontend(RGWProcessEnv& pe, RGWFrontendConfig *_conf) : RGWProcessFrontend(pe, _conf) {} + CephContext *get_cct() const { + return env.store->ctx(); + } + + unsigned get_subsys() const + { + return ceph_subsys_rgw; + } + + std::ostream& gen_prefix(std::ostream& out) const + { + return out << "rgw loadgen frontend: "; + } + int init() override { int num_threads; conf->get_val("num_threads", g_conf()->rgw_thread_pool_size, &num_threads); @@ -234,7 +247,7 @@ public: rgw_user uid(uid_str); RGWUserInfo user_info; - int ret = env.store->ctl()->user->get_info_by_uid(uid, &user_info, null_yield); + int ret = env.store->ctl()->user->get_info_by_uid(this, uid, &user_info, null_yield); if (ret < 0) { derr << "ERROR: failed reading user info: uid=" << uid << " ret=" << ret << dendl; diff --git a/src/rgw/rgw_gc.cc b/src/rgw/rgw_gc.cc index 5e3070d3fcc1c..6e907c5dbf974 100644 --- a/src/rgw/rgw_gc.cc +++ b/src/rgw/rgw_gc.cc @@ -50,7 +50,7 @@ void RGWGC::initialize(CephContext *_cct, RGWRados *_store) { op.create(false); const uint64_t queue_size = cct->_conf->rgw_gc_max_queue_size, num_deferred_entries = cct->_conf->rgw_gc_max_deferred; gc_log_init2(op, queue_size, num_deferred_entries); - store->gc_operate(obj_names[i], &op); + store->gc_operate(this, obj_names[i], &op); } } @@ -76,13 +76,13 @@ int RGWGC::send_chain(cls_rgw_obj_chain& chain, const string& tag) ldpp_dout(this, 20) << "RGWGC::send_chain - on object name: " << obj_names[i] << "tag is: " << tag << dendl; - auto ret = store->gc_operate(obj_names[i], &op); + auto ret = store->gc_operate(this, obj_names[i], 
&op); if (ret != -ECANCELED && ret != -EPERM) { return ret; } ObjectWriteOperation set_entry_op; cls_rgw_gc_set_entry(set_entry_op, cct->_conf->rgw_gc_obj_min_wait, info); - return store->gc_operate(obj_names[i], &set_entry_op); + return store->gc_operate(this, obj_names[i], &set_entry_op); } struct defer_chain_state { @@ -188,7 +188,7 @@ int RGWGC::remove(int index, int num_entries) ObjectWriteOperation op; cls_rgw_gc_queue_remove_entries(op, num_entries); - return store->gc_operate(obj_names[index], &op); + return store->gc_operate(this, obj_names[index], &op); } int RGWGC::list(int *index, string& marker, uint32_t max, bool expired_only, std::list& result, bool *truncated, bool& processing_queue) @@ -606,7 +606,7 @@ int RGWGC::process(int index, int max_secs, bool expired_only, if (obj.pool != last_pool) { delete ctx; ctx = new IoCtx; - ret = rgw_init_ioctx(store->get_rados_handle(), obj.pool, *ctx); + ret = rgw_init_ioctx(this, store->get_rados_handle(), obj.pool, *ctx); if (ret < 0) { if (transitioned_objects_cache[index]) { goto done; diff --git a/src/rgw/rgw_json_enc.cc b/src/rgw/rgw_json_enc.cc index 7fc619186af48..b3ceff68e7570 100644 --- a/src/rgw/rgw_json_enc.cc +++ b/src/rgw/rgw_json_enc.cc @@ -143,8 +143,11 @@ void RGWObjManifest::dump(Formatter *f) const ::encode_json("tail_instance", tail_instance, f); ::encode_json("tail_placement", tail_placement, f); - f->dump_object("begin_iter", begin_iter); - f->dump_object("end_iter", end_iter); + // nullptr being passed into iterators since there + // is no cct and we aren't doing anything with these + // iterators that would write do the log + f->dump_object("begin_iter", obj_begin(nullptr)); + f->dump_object("end_iter", obj_end(nullptr)); } void rgw_log_entry::dump(Formatter *f) const diff --git a/src/rgw/rgw_lc.cc b/src/rgw/rgw_lc.cc index dd1b9b1b4ccaf..b2f5a96d76fa9 100644 --- a/src/rgw/rgw_lc.cc +++ b/src/rgw/rgw_lc.cc @@ -368,13 +368,13 @@ static bool obj_has_expired(CephContext *cct, ceph::real_time mtime, int days, return (timediff >= cmp); } -static bool pass_object_lock_check(rgw::sal::RGWStore* store, rgw::sal::RGWObject* obj, RGWObjectCtx& ctx) +static bool pass_object_lock_check(rgw::sal::RGWStore* store, rgw::sal::RGWObject* obj, RGWObjectCtx& ctx, const DoutPrefixProvider *dpp) { if (!obj->get_bucket()->get_info().obj_lock_enabled()) { return true; } std::unique_ptr read_op = obj->get_read_op(&ctx); - int ret = read_op->prepare(null_yield); + int ret = read_op->prepare(null_yield, dpp); if (ret < 0) { if (ret == -ENOENT) { return true; @@ -388,7 +388,7 @@ static bool pass_object_lock_check(rgw::sal::RGWStore* store, rgw::sal::RGWObjec try { decode(retention, iter->second); } catch (buffer::error& err) { - ldout(store->ctx(), 0) << "ERROR: failed to decode RGWObjectRetention" + ldpp_dout(dpp, 0) << "ERROR: failed to decode RGWObjectRetention" << dendl; return false; } @@ -403,7 +403,7 @@ static bool pass_object_lock_check(rgw::sal::RGWStore* store, rgw::sal::RGWObjec try { decode(obj_legal_hold, iter->second); } catch (buffer::error& err) { - ldout(store->ctx(), 0) << "ERROR: failed to decode RGWObjectLegalHold" + ldpp_dout(dpp, 0) << "ERROR: failed to decode RGWObjectLegalHold" << dendl; return false; } @@ -438,12 +438,12 @@ public: list_params.prefix = prefix; } - int init() { - return fetch(); + int init(const DoutPrefixProvider *dpp) { + return fetch(dpp); } - int fetch() { - int ret = bucket->list(list_params, 1000, list_results, null_yield); + int fetch(const DoutPrefixProvider *dpp) { + int ret = 
bucket->list(dpp, list_params, 1000, list_results, null_yield); if (ret < 0) { return ret; } @@ -457,7 +457,7 @@ public: std::this_thread::sleep_for(std::chrono::milliseconds(delay_ms)); } - bool get_obj(rgw_bucket_dir_entry **obj, + bool get_obj(const DoutPrefixProvider *dpp, rgw_bucket_dir_entry **obj, std::function fetch_barrier = []() { /* nada */}) { if (obj_iter == list_results.objs.end()) { @@ -467,7 +467,7 @@ public: } else { fetch_barrier(); list_params.marker = pre_obj.key; - int ret = fetch(); + int ret = fetch(dpp); if (ret < 0) { ldout(store->ctx(), 0) << "ERROR: list_op returned ret=" << ret << dendl; @@ -558,7 +558,7 @@ struct lc_op_ctx { }; /* lc_op_ctx */ -static int remove_expired_obj(lc_op_ctx& oc, bool remove_indeed) +static int remove_expired_obj(const DoutPrefixProvider *dpp, lc_op_ctx& oc, bool remove_indeed) { auto& store = oc.store; auto& bucket_info = oc.bucket->get_info(); @@ -590,7 +590,7 @@ static int remove_expired_obj(lc_op_ctx& oc, bool remove_indeed) ACLOwner bucket_owner; bucket_owner.set_id(bucket_info.owner); - return obj->delete_object(&oc.rctx, obj_owner, bucket_owner, meta.mtime, false, 0, + return obj->delete_object(dpp, &oc.rctx, obj_owner, bucket_owner, meta.mtime, false, 0, version_id, null_yield); } /* remove_expired_obj */ @@ -598,7 +598,7 @@ class LCOpAction { public: virtual ~LCOpAction() {} - virtual bool check(lc_op_ctx& oc, ceph::real_time *exp_time) { + virtual bool check(lc_op_ctx& oc, ceph::real_time *exp_time, const DoutPrefixProvider *dpp) { return false; } @@ -627,7 +627,7 @@ public: class LCOpFilter { public: virtual ~LCOpFilter() {} - virtual bool check(lc_op_ctx& oc) { + virtual bool check(const DoutPrefixProvider *dpp, lc_op_ctx& oc) { return false; } }; /* LCOpFilter */ @@ -846,7 +846,7 @@ int RGWLC::handle_multipart_expiration(rgw::sal::RGWBucket* target, return; } RGWObjectCtx rctx(store); - int ret = abort_multipart_upload(store, cct, &rctx, target->get_info(), mp_obj); + int ret = abort_multipart_upload(this, store, cct, &rctx, target->get_info(), mp_obj); if (ret == 0) { if (perfcounter) { perfcounter->inc(l_rgw_lc_abort_mpu, 1); @@ -887,7 +887,7 @@ int RGWLC::handle_multipart_expiration(rgw::sal::RGWBucket* target, params.prefix = prefix_iter->first; do { results.objs.clear(); - ret = target->list(params, 1000, results, null_yield); + ret = target->list(this, params, 1000, results, null_yield); if (ret < 0) { if (ret == (-ENOENT)) return 0; @@ -912,11 +912,11 @@ int RGWLC::handle_multipart_expiration(rgw::sal::RGWBucket* target, return 0; } -static int read_obj_tags(rgw::sal::RGWObject* obj, RGWObjectCtx& ctx, bufferlist& tags_bl) +static int read_obj_tags(const DoutPrefixProvider *dpp, rgw::sal::RGWObject* obj, RGWObjectCtx& ctx, bufferlist& tags_bl) { std::unique_ptr rop = obj->get_read_op(&ctx); - return rop->get_attr(RGW_ATTR_TAGS, tags_bl, null_yield); + return rop->get_attr(dpp, RGW_ATTR_TAGS, tags_bl, null_yield); } static bool is_valid_op(const lc_op& op) @@ -952,7 +952,7 @@ static inline bool has_all_tags(const lc_op& rule_action, return tag_count == rule_action.obj_tags->count(); } -static int check_tags(lc_op_ctx& oc, bool *skip) +static int check_tags(const DoutPrefixProvider *dpp, lc_op_ctx& oc, bool *skip) { auto& op = oc.op; @@ -960,7 +960,7 @@ static int check_tags(lc_op_ctx& oc, bool *skip) *skip = true; bufferlist tags_bl; - int ret = read_obj_tags(oc.obj.get(), oc.rctx, tags_bl); + int ret = read_obj_tags(dpp, oc.obj.get(), oc.rctx, tags_bl); if (ret < 0) { if (ret != -ENODATA) { ldout(oc.cct, 5) << 
"ERROR: read_obj_tags returned r=" @@ -992,7 +992,7 @@ static int check_tags(lc_op_ctx& oc, bool *skip) class LCOpFilter_Tags : public LCOpFilter { public: - bool check(lc_op_ctx& oc) override { + bool check(const DoutPrefixProvider *dpp, lc_op_ctx& oc) override { auto& o = oc.o; if (o.is_delete_marker()) { @@ -1001,7 +1001,7 @@ public: bool skip; - int ret = check_tags(oc, &skip); + int ret = check_tags(dpp, oc, &skip); if (ret < 0) { if (ret == -ENOENT) { return false; @@ -1020,10 +1020,10 @@ class LCOpAction_CurrentExpiration : public LCOpAction { public: LCOpAction_CurrentExpiration(op_env& env) {} - bool check(lc_op_ctx& oc, ceph::real_time *exp_time) override { + bool check(lc_op_ctx& oc, ceph::real_time *exp_time, const DoutPrefixProvider *dpp) override { auto& o = oc.o; if (!o.is_current()) { - ldout(oc.cct, 20) << __func__ << "(): key=" << o.key + ldpp_dout(dpp, 20) << __func__ << "(): key=" << o.key << ": not current, skipping " << oc.wq->thr_name() << dendl; return false; @@ -1032,12 +1032,12 @@ public: std::string nkn; if (oc.next_key_name) nkn = *oc.next_key_name; if (oc.next_has_same_name(o.key.name)) { - ldout(oc.cct, 7) << __func__ << "(): dm-check SAME: key=" << o.key + ldpp_dout(dpp, 7) << __func__ << "(): dm-check SAME: key=" << o.key << " next_key_name: %%" << nkn << "%% " << oc.wq->thr_name() << dendl; return false; } else { - ldout(oc.cct, 7) << __func__ << "(): dm-check DELE: key=" << o.key + ldpp_dout(dpp, 7) << __func__ << "(): dm-check DELE: key=" << o.key << " next_key_name: %%" << nkn << "%% " << oc.wq->thr_name() << dendl; *exp_time = real_clock::now(); @@ -1050,7 +1050,7 @@ public: auto& op = oc.op; if (op.expiration <= 0) { if (op.expiration_date == boost::none) { - ldout(oc.cct, 20) << __func__ << "(): key=" << o.key + ldpp_dout(dpp, 20) << __func__ << "(): key=" << o.key << ": no expiration set in rule, skipping " << oc.wq->thr_name() << dendl; return false; @@ -1062,7 +1062,7 @@ public: is_expired = obj_has_expired(oc.cct, mtime, op.expiration, exp_time); } - ldout(oc.cct, 20) << __func__ << "(): key=" << o.key << ": is_expired=" + ldpp_dout(dpp, 20) << __func__ << "(): key=" << o.key << ": is_expired=" << (int)is_expired << " " << oc.wq->thr_name() << dendl; return is_expired; @@ -1072,7 +1072,7 @@ public: auto& o = oc.o; int r; if (o.is_delete_marker()) { - r = remove_expired_obj(oc, true); + r = remove_expired_obj(oc.dpp, oc, true); if (r < 0) { ldout(oc.cct, 0) << "ERROR: current is-dm remove_expired_obj " << oc.bucket << ":" << o.key @@ -1085,7 +1085,7 @@ public: << " " << oc.wq->thr_name() << dendl; } else { /* ! 
o.is_delete_marker() */ - r = remove_expired_obj(oc, !oc.bucket->versioned()); + r = remove_expired_obj(oc.dpp, oc, !oc.bucket->versioned()); if (r < 0) { ldout(oc.cct, 0) << "ERROR: remove_expired_obj " << oc.bucket << ":" << o.key @@ -1109,10 +1109,10 @@ public: LCOpAction_NonCurrentExpiration(op_env& env) {} - bool check(lc_op_ctx& oc, ceph::real_time *exp_time) override { + bool check(lc_op_ctx& oc, ceph::real_time *exp_time, const DoutPrefixProvider *dpp) override { auto& o = oc.o; if (o.is_current()) { - ldout(oc.cct, 20) << __func__ << "(): key=" << o.key + ldpp_dout(dpp, 20) << __func__ << "(): key=" << o.key << ": current version, skipping " << oc.wq->thr_name() << dendl; return false; @@ -1122,17 +1122,17 @@ public: bool is_expired = obj_has_expired(oc.cct, oc.effective_mtime, expiration, exp_time); - ldout(oc.cct, 20) << __func__ << "(): key=" << o.key << ": is_expired=" + ldpp_dout(dpp, 20) << __func__ << "(): key=" << o.key << ": is_expired=" << is_expired << " " << oc.wq->thr_name() << dendl; return is_expired && - pass_object_lock_check(oc.store, oc.obj.get(), oc.rctx); + pass_object_lock_check(oc.store, oc.obj.get(), oc.rctx, dpp); } int process(lc_op_ctx& oc) { auto& o = oc.o; - int r = remove_expired_obj(oc, true); + int r = remove_expired_obj(oc.dpp, oc, true); if (r < 0) { ldout(oc.cct, 0) << "ERROR: remove_expired_obj (non-current expiration) " << oc.bucket << ":" << o.key @@ -1154,16 +1154,16 @@ class LCOpAction_DMExpiration : public LCOpAction { public: LCOpAction_DMExpiration(op_env& env) {} - bool check(lc_op_ctx& oc, ceph::real_time *exp_time) override { + bool check(lc_op_ctx& oc, ceph::real_time *exp_time, const DoutPrefixProvider *dpp) override { auto& o = oc.o; if (!o.is_delete_marker()) { - ldout(oc.cct, 20) << __func__ << "(): key=" << o.key + ldpp_dout(dpp, 20) << __func__ << "(): key=" << o.key << ": not a delete marker, skipping " << oc.wq->thr_name() << dendl; return false; } if (oc.next_has_same_name(o.key.name)) { - ldout(oc.cct, 20) << __func__ << "(): key=" << o.key + ldpp_dout(dpp, 20) << __func__ << "(): key=" << o.key << ": next is same object, skipping " << oc.wq->thr_name() << dendl; return false; @@ -1176,7 +1176,7 @@ public: int process(lc_op_ctx& oc) { auto& o = oc.o; - int r = remove_expired_obj(oc, true); + int r = remove_expired_obj(oc.dpp, oc, true); if (r < 0) { ldout(oc.cct, 0) << "ERROR: remove_expired_obj (delete marker expiration) " << oc.bucket << ":" << o.key @@ -1206,7 +1206,7 @@ public: LCOpAction_Transition(const transition_action& _transition) : transition(_transition) {} - bool check(lc_op_ctx& oc, ceph::real_time *exp_time) override { + bool check(lc_op_ctx& oc, ceph::real_time *exp_time, const DoutPrefixProvider *dpp) override { auto& o = oc.o; if (o.is_delete_marker()) { @@ -1221,7 +1221,7 @@ public: bool is_expired; if (transition.days < 0) { if (transition.date == boost::none) { - ldout(oc.cct, 20) << __func__ << "(): key=" << o.key + ldpp_dout(dpp, 20) << __func__ << "(): key=" << o.key << ": no transition day/date set in rule, skipping " << oc.wq->thr_name() << dendl; return false; @@ -1376,7 +1376,7 @@ int LCOpRule::process(rgw_bucket_dir_entry& o, for (auto& a : actions) { real_time action_exp; - if (a->check(ctx, &action_exp)) { + if (a->check(ctx, &action_exp, dpp)) { if (action_exp > exp) { exp = action_exp; selected = &a; @@ -1398,7 +1398,7 @@ int LCOpRule::process(rgw_bucket_dir_entry& o, bool cont = false; for (auto& f : filters) { - if (f->check(ctx)) { + if (f->check(dpp, ctx)) { cont = true; break; } @@ 
-1439,14 +1439,14 @@ int RGWLC::bucket_lc_process(string& shard_id, LCWorker* worker, string bucket_tenant = result[0]; string bucket_name = result[1]; string bucket_marker = result[2]; - int ret = store->get_bucket(nullptr, bucket_tenant, bucket_name, &bucket, null_yield); + int ret = store->get_bucket(this, nullptr, bucket_tenant, bucket_name, &bucket, null_yield); if (ret < 0) { ldpp_dout(this, 0) << "LC:get_bucket for " << bucket_name << " failed" << dendl; return ret; } - ret = bucket->get_bucket_info(null_yield); + ret = bucket->get_bucket_info(this, null_yield); if (ret < 0) { ldpp_dout(this, 0) << "LC:get_bucket_info for " << bucket_name << " failed" << dendl; @@ -1533,7 +1533,7 @@ int RGWLC::bucket_lc_process(string& shard_id, LCWorker* worker, LCObjsLister ol(store, bucket.get()); ol.set_prefix(prefix_iter->first); - ret = ol.init(); + ret = ol.init(this); if (ret < 0) { if (ret == (-ENOENT)) return 0; @@ -1545,7 +1545,7 @@ int RGWLC::bucket_lc_process(string& shard_id, LCWorker* worker, LCOpRule orule(oenv); orule.build(); // why can't ctor do it? rgw_bucket_dir_entry* o{nullptr}; - for (; ol.get_obj(&o /* , fetch_barrier */); ol.next()) { + for (; ol.get_obj(this, &o /* , fetch_barrier */); ol.next()) { orule.update(); std::tuple t1 = {orule, *o}; worker->workpool->enqueue(WorkItem{t1}); @@ -1572,7 +1572,7 @@ int RGWLC::bucket_lc_post(int index, int max_lock_sec, << dendl; do { - int ret = lock->try_lock(lock_duration, null_yield); + int ret = lock->try_lock(this, lock_duration, null_yield); if (ret == -EBUSY || ret == -EEXIST) { /* already locked by another lc processor */ ldpp_dout(this, 0) << "RGWLC::bucket_lc_post() failed to acquire lock on " @@ -1715,7 +1715,7 @@ int RGWLC::process(int index, int max_lock_secs, LCWorker* worker, utime_t time(max_lock_secs, 0); - int ret = lock->try_lock(time, null_yield); + int ret = lock->try_lock(this, time, null_yield); if (ret == -EBUSY || ret == -EEXIST) { /* already locked by another lc processor */ ldpp_dout(this, 0) << "RGWLC::process() failed to acquire lock on " @@ -1948,7 +1948,8 @@ static std::string get_lc_shard_name(const rgw_bucket& bucket){ } template -static int guard_lc_modify(rgw::sal::RGWRadosStore* store, +static int guard_lc_modify(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore* store, rgw::sal::Lifecycle* sal_lc, const rgw_bucket& bucket, const string& cookie, const F& f) { @@ -1973,21 +1974,21 @@ static int guard_lc_modify(rgw::sal::RGWRadosStore* store, int ret; do { - ret = lock->try_lock(time, null_yield); + ret = lock->try_lock(dpp, time, null_yield); if (ret == -EBUSY || ret == -EEXIST) { - ldout(cct, 0) << "RGWLC::RGWPutLC() failed to acquire lock on " + ldpp_dout(dpp, 0) << "RGWLC::RGWPutLC() failed to acquire lock on " << oid << ", sleep 5, try again" << dendl; sleep(5); // XXX: return retryable error continue; } if (ret < 0) { - ldout(cct, 0) << "RGWLC::RGWPutLC() failed to acquire lock on " + ldpp_dout(dpp, 0) << "RGWLC::RGWPutLC() failed to acquire lock on " << oid << ", ret=" << ret << dendl; break; } ret = f(sal_lc, oid, entry); if (ret < 0) { - ldout(cct, 0) << "RGWLC::RGWPutLC() failed to set entry on " + ldpp_dout(dpp, 0) << "RGWLC::RGWPutLC() failed to set entry on " << oid << ", ret=" << ret << dendl; } break; @@ -2009,14 +2010,14 @@ int RGWLC::set_bucket_config(RGWBucketInfo& bucket_info, int ret = store->ctl()->bucket->set_bucket_instance_attrs( - bucket_info, attrs, &bucket_info.objv_tracker, null_yield); + bucket_info, attrs, &bucket_info.objv_tracker, null_yield, this); if (ret < 0) 
return ret; rgw_bucket& bucket = bucket_info.bucket; - ret = guard_lc_modify(store, sal_lc.get(), bucket, cookie, + ret = guard_lc_modify(this, store, sal_lc.get(), bucket, cookie, [&](rgw::sal::Lifecycle* sal_lc, const string& oid, const rgw::sal::Lifecycle::LCEntry& entry) { return sal_lc->set_entry(oid, entry); @@ -2032,18 +2033,18 @@ int RGWLC::remove_bucket_config(RGWBucketInfo& bucket_info, attrs.erase(RGW_ATTR_LC); int ret = store->ctl()->bucket->set_bucket_instance_attrs( - bucket_info, attrs, &bucket_info.objv_tracker, null_yield); + bucket_info, attrs, &bucket_info.objv_tracker, null_yield, this); rgw_bucket& bucket = bucket_info.bucket; if (ret < 0) { - ldout(cct, 0) << "RGWLC::RGWDeleteLC() failed to set attrs on bucket=" + ldpp_dout(this, 0) << "RGWLC::RGWDeleteLC() failed to set attrs on bucket=" << bucket.name << " returned err=" << ret << dendl; return ret; } - ret = guard_lc_modify(store, sal_lc.get(), bucket, cookie, + ret = guard_lc_modify(this, store, sal_lc.get(), bucket, cookie, [&](rgw::sal::Lifecycle* sal_lc, const string& oid, const rgw::sal::Lifecycle::LCEntry& entry) { return sal_lc->rm_entry(oid, entry); @@ -2060,7 +2061,8 @@ RGWLC::~RGWLC() namespace rgw::lc { -int fix_lc_shard_entry(rgw::sal::RGWRadosStore* store, +int fix_lc_shard_entry(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore* store, rgw::sal::Lifecycle* sal_lc, const RGWBucketInfo& bucket_info, const map& battrs) @@ -2082,19 +2084,19 @@ int fix_lc_shard_entry(rgw::sal::RGWRadosStore* store, // We are not dropping the old marker here as that would be caught by the next LC process update int ret = sal_lc->get_entry(lc_oid, shard_name, entry); if (ret == 0) { - ldout(store->ctx(), 5) << "Entry already exists, nothing to do" << dendl; + ldpp_dout(dpp, 5) << "Entry already exists, nothing to do" << dendl; return ret; // entry is already existing correctly set to marker } - ldout(store->ctx(), 5) << "lc_get_entry errored ret code=" << ret << dendl; + ldpp_dout(dpp, 5) << "lc_get_entry errored ret code=" << ret << dendl; if (ret == -ENOENT) { - ldout(store->ctx(), 1) << "No entry for bucket=" << bucket_info.bucket.name + ldpp_dout(dpp, 1) << "No entry for bucket=" << bucket_info.bucket.name << " creating " << dendl; // TODO: we have too many ppl making cookies like this! 
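The lifecycle changes above apply the same threading to free and template helpers: guard_lc_modify() and fix_lc_shard_entry() gain a leading dpp parameter, use it for their own ldpp_dout() messages, and pass it on to whatever they call. A hedged sketch of that forwarding shape follows; guarded_modify() and its callable are hypothetical, not the patch's helpers.

// ---- illustrative sketch (not part of the patch) ----
#include <string>
#include <utility>
#include "common/dout.h"

// hypothetical helper mirroring the guard_lc_modify() shape: accept the dpp,
// log through it, and wrap the real operation with locking/error reporting
template <typename F>
int guarded_modify(const DoutPrefixProvider* dpp, const std::string& oid, F&& f) {
  ldpp_dout(dpp, 20) << "guarded_modify: oid=" << oid << dendl;
  int ret = std::forward<F>(f)(oid);   // the wrapped operation
  if (ret < 0) {
    ldpp_dout(dpp, 0) << "guarded_modify: failed on " << oid
                      << ", ret=" << ret << dendl;
  }
  return ret;
}
// ---- end sketch ----

In the patch the wrapped operation is the sal_lc->set_entry()/rm_entry() lambda, which itself stays dpp-free; only the locking and error reporting around it use the provider.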
char cookie_buf[COOKIE_LEN + 1]; gen_rand_alphanumeric(store->ctx(), cookie_buf, sizeof(cookie_buf) - 1); std::string cookie = cookie_buf; - ret = guard_lc_modify( + ret = guard_lc_modify(dpp, store, sal_lc, bucket_info.bucket, cookie, [&lc_oid](rgw::sal::Lifecycle* slc, const string& oid, diff --git a/src/rgw/rgw_lc.h b/src/rgw/rgw_lc.h index 8f231af6b6148..70e195a78ee17 100644 --- a/src/rgw/rgw_lc.h +++ b/src/rgw/rgw_lc.h @@ -546,7 +546,8 @@ public: namespace rgw::lc { -int fix_lc_shard_entry(rgw::sal::RGWRadosStore *store, +int fix_lc_shard_entry(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, rgw::sal::Lifecycle* sal_lc, const RGWBucketInfo& bucket_info, const map& battrs); diff --git a/src/rgw/rgw_lib.h b/src/rgw/rgw_lib.h index 63f34cf3d7942..9f43593fc720c 100644 --- a/src/rgw/rgw_lib.h +++ b/src/rgw/rgw_lib.h @@ -18,13 +18,15 @@ #include "services/svc_zone_utils.h" #include "include/ceph_assert.h" +#define dout_subsys ceph_subsys_rgw + class OpsLogSocket; namespace rgw { class RGWLibFrontend; - class RGWLib { + class RGWLib : public DoutPrefixProvider { RGWFrontendConfig* fec; RGWLibFrontend* fe; OpsLogSocket* olog; @@ -44,6 +46,10 @@ namespace rgw { rgw::LDAPHelper* get_ldh() { return ldh; } + CephContext *get_cct() const override { return cct.get(); } + unsigned get_subsys() const { return dout_subsys; } + std::ostream& gen_prefix(std::ostream& out) const { return out << "lib rgw: "; } + int init(); int init(vector& args); int stop(); diff --git a/src/rgw/rgw_lib_frontend.h b/src/rgw/rgw_lib_frontend.h index 0f2276f489669..461befd6bad71 100644 --- a/src/rgw/rgw_lib_frontend.h +++ b/src/rgw/rgw_lib_frontend.h @@ -66,7 +66,7 @@ namespace rgw { } /* enqueue_req */ /* "regular" requests */ - void handle_request(RGWRequest* req) override; // async handler, deletes req + void handle_request(const DoutPrefixProvider *dpp, RGWRequest* req) override; // async handler, deletes req int process_request(RGWLibRequest* req); int process_request(RGWLibRequest* req, RGWLibIO* io); void set_access_key(RGWAccessKey& key) { access_key = key; } diff --git a/src/rgw/rgw_loadgen.cc b/src/rgw/rgw_loadgen.cc index e8de0f4498eb3..22c133864b721 100644 --- a/src/rgw/rgw_loadgen.cc +++ b/src/rgw/rgw_loadgen.cc @@ -16,7 +16,7 @@ void RGWLoadGenRequestEnv::set_date(utime_t& tm) date_str = rgw_to_asctime(tm); } -int RGWLoadGenRequestEnv::sign(RGWAccessKey& access_key) +int RGWLoadGenRequestEnv::sign(const DoutPrefixProvider *dpp, RGWAccessKey& access_key) { meta_map_t meta_map; map sub_resources; @@ -24,7 +24,8 @@ int RGWLoadGenRequestEnv::sign(RGWAccessKey& access_key) string canonical_header; string digest; - rgw_create_s3_canonical_header(request_method.c_str(), + rgw_create_s3_canonical_header(dpp, + request_method.c_str(), nullptr, /* const char *content_md5 */ content_type.c_str(), date_str.c_str(), diff --git a/src/rgw/rgw_loadgen.h b/src/rgw/rgw_loadgen.h index 44f434f487829..5a0abca57f79c 100644 --- a/src/rgw/rgw_loadgen.h +++ b/src/rgw/rgw_loadgen.h @@ -27,7 +27,7 @@ struct RGWLoadGenRequestEnv { } void set_date(utime_t& tm); - int sign(RGWAccessKey& access_key); + int sign(const DoutPrefixProvider *dpp, RGWAccessKey& access_key); }; /* XXX does RGWLoadGenIO actually want to perform stream/HTTP I/O, diff --git a/src/rgw/rgw_loadgen_process.cc b/src/rgw/rgw_loadgen_process.cc index f2d3d217131e3..9235f13d1160f 100644 --- a/src/rgw/rgw_loadgen_process.cc +++ b/src/rgw/rgw_loadgen_process.cc @@ -113,7 +113,7 @@ void RGWLoadGenProcess::gen_request(const string& method, req_wq.queue(req); 
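Several of the classes touched here satisfy the new parameter without any plumbing at all: RGWLib (rgw_lib.h above), the load-gen frontend, and the usage logger in rgw_log.cc below derive from DoutPrefixProvider, override its three accessors, and then pass this; the ldpp_dout(s, ...) calls work the same way because req_state already provides that interface. A hedged sketch of what such a class needs, using a hypothetical MyWorker:

// ---- illustrative sketch (not part of the patch) ----
#include <ostream>
#include "common/dout.h"

#define dout_subsys ceph_subsys_rgw

class MyWorker : public DoutPrefixProvider {
  CephContext* cct;
public:
  explicit MyWorker(CephContext* c) : cct(c) {}

  // the three accessors every DoutPrefixProvider must supply
  CephContext* get_cct() const override { return cct; }
  unsigned get_subsys() const override { return dout_subsys; }
  std::ostream& gen_prefix(std::ostream& out) const override {
    return out << "my worker: ";
  }

  void run() {
    // any API that now takes a dpp can simply be handed `this`
    ldpp_dout(this, 20) << "starting work" << dendl;
  }
};
// ---- end sketch ----

gen_prefix() is what ends up at the front of every ldpp_dout line, so the strings chosen in this patch ("rgw loadgen frontend: ", "lib rgw: ", "rgw UsageLogger: ") identify which component produced the message.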
} /* RGWLoadGenProcess::gen_request */ -void RGWLoadGenProcess::handle_request(RGWRequest* r) +void RGWLoadGenProcess::handle_request(const DoutPrefixProvider *dpp, RGWRequest* r) { RGWLoadGenRequest* req = static_cast(r); @@ -127,7 +127,7 @@ void RGWLoadGenProcess::handle_request(RGWRequest* r) env.request_method = req->method; env.uri = req->resource; env.set_date(tm); - env.sign(access_key); + env.sign(dpp, access_key); RGWLoadGenIO real_client_io(&env); RGWRestfulIO client_io(cct, &real_client_io); diff --git a/src/rgw/rgw_log.cc b/src/rgw/rgw_log.cc index 85adfd7ea9d79..edfd0087b6df2 100644 --- a/src/rgw/rgw_log.cc +++ b/src/rgw/rgw_log.cc @@ -86,7 +86,7 @@ string render_log_object_name(const string& format, } /* usage logger */ -class UsageLogger { +class UsageLogger : public DoutPrefixProvider { CephContext *cct; RGWRados *store; map usage_map; @@ -165,8 +165,12 @@ public: num_entries = 0; lock.unlock(); - store->log_usage(old_map); + store->log_usage(this, old_map); } + + CephContext *get_cct() const override { return cct; } + unsigned get_subsys() const override { return dout_subsys; } + std::ostream& gen_prefix(std::ostream& out) const override { return out << "rgw UsageLogger: "; } }; static UsageLogger *usage_logger = NULL; @@ -225,7 +229,7 @@ static void log_usage(struct req_state *s, const string& op_name) if (!s->is_err()) data.successful_ops = 1; - ldout(s->cct, 30) << "log_usage: bucket_name=" << bucket_name + ldpp_dout(s, 30) << "log_usage: bucket_name=" << bucket_name << " tenant=" << s->bucket_tenant << ", bytes_sent=" << bytes_sent << ", bytes_received=" << bytes_received << ", success=" << data.successful_ops << dendl; @@ -341,12 +345,12 @@ int rgw_log_op(RGWRados *store, RGWREST* const rest, struct req_state *s, return 0; if (s->bucket_name.empty()) { - ldout(s->cct, 5) << "nothing to log for operation" << dendl; + ldpp_dout(s, 5) << "nothing to log for operation" << dendl; return -EINVAL; } if (s->err.ret == -ERR_NO_SUCH_BUCKET || rgw::sal::RGWBucket::empty(s->bucket.get())) { if (!s->cct->_conf->rgw_log_nonexistent_bucket) { - ldout(s->cct, 5) << "bucket " << s->bucket_name << " doesn't exist, not logging" << dendl; + ldpp_dout(s, 5) << "bucket " << s->bucket_name << " doesn't exist, not logging" << dendl; return 0; } bucket_id = ""; @@ -356,7 +360,7 @@ int rgw_log_op(RGWRados *store, RGWREST* const rest, struct req_state *s, entry.bucket = rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name); if (check_utf8(entry.bucket.c_str(), entry.bucket.size()) != 0) { - ldout(s->cct, 5) << "not logging op on bucket with non-utf8 name" << dendl; + ldpp_dout(s, 5) << "not logging op on bucket with non-utf8 name" << dendl; return 0; } @@ -465,13 +469,13 @@ int rgw_log_op(RGWRados *store, RGWREST* const rest, struct req_state *s, rgw_raw_obj obj(store->svc.zone->get_zone_params().log_pool, oid); - ret = store->append_async(obj, bl.length(), bl); + ret = store->append_async(s, obj, bl.length(), bl); if (ret == -ENOENT) { - ret = store->create_pool(store->svc.zone->get_zone_params().log_pool); + ret = store->create_pool(s, store->svc.zone->get_zone_params().log_pool); if (ret < 0) goto done; // retry - ret = store->append_async(obj, bl.length(), bl); + ret = store->append_async(s, obj, bl.length(), bl); } } @@ -480,7 +484,7 @@ int rgw_log_op(RGWRados *store, RGWREST* const rest, struct req_state *s, } done: if (ret < 0) - ldout(s->cct, 0) << "ERROR: failed to log entry" << dendl; + ldpp_dout(s, 0) << "ERROR: failed to log entry" << dendl; return ret; } diff --git 
a/src/rgw/rgw_log_backing.cc b/src/rgw/rgw_log_backing.cc index c3037e13048bb..baa1ea73836ef 100644 --- a/src/rgw/rgw_log_backing.cc +++ b/src/rgw/rgw_log_backing.cc @@ -31,22 +31,21 @@ inline std::ostream& operator <<(std::ostream& m, const shard_check& t) { namespace { /// Return the shard type, and a bool to see whether it has entries. std::pair -probe_shard(librados::IoCtx& ioctx, const std::string& oid, +probe_shard(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx, const std::string& oid, bool& fifo_unsupported, optional_yield y) { - auto cct = static_cast(ioctx.cct()); bool omap = false; { librados::ObjectReadOperation op; cls_log_header header; cls_log_info(op, &header); - auto r = rgw_rados_operate(ioctx, oid, &op, nullptr, y); + auto r = rgw_rados_operate(dpp, ioctx, oid, &op, nullptr, y); if (r == -ENOENT) { return { shard_check::dne, {} }; } if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " error probing for omap: r=" << r << ", oid=" << oid << dendl; return { shard_check::corrupt, {} }; @@ -56,26 +55,26 @@ probe_shard(librados::IoCtx& ioctx, const std::string& oid, } if (!fifo_unsupported) { std::unique_ptr fifo; - auto r = rgw::cls::fifo::FIFO::open(ioctx, oid, + auto r = rgw::cls::fifo::FIFO::open(dpp, ioctx, oid, &fifo, y, std::nullopt, true); if (r < 0 && !(r == -ENOENT || r == -ENODATA || r == -EPERM)) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " error probing for fifo: r=" << r << ", oid=" << oid << dendl; return { shard_check::corrupt, {} }; } if (fifo && omap) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " fifo and omap found: oid=" << oid << dendl; return { shard_check::corrupt, {} }; } if (fifo) { bool more = false; std::vector entries; - r = fifo->list(1, nullopt, &entries, &more, y); + r = fifo->list(dpp, 1, nullopt, &entries, &more, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": unable to list entries: r=" << r << ", oid=" << oid << dendl; return { shard_check::corrupt, {} }; @@ -94,9 +93,9 @@ probe_shard(librados::IoCtx& ioctx, const std::string& oid, librados::ObjectReadOperation op; cls_log_list(op, {}, {}, {}, 1, entries, &out_marker, &truncated); - auto r = rgw_rados_operate(ioctx, oid, &op, nullptr, y); + auto r = rgw_rados_operate(dpp, ioctx, oid, &op, nullptr, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": failed to list: r=" << r << ", oid=" << oid << dendl; return { shard_check::corrupt, {} }; } @@ -109,26 +108,25 @@ probe_shard(librados::IoCtx& ioctx, const std::string& oid, } tl::expected -handle_dne(librados::IoCtx& ioctx, +handle_dne(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx, log_type def, std::string oid, bool fifo_unsupported, optional_yield y) { - auto cct = static_cast(ioctx.cct()); if (def == log_type::fifo) { if (fifo_unsupported) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " WARNING: FIFO set as default but not supported by OSD. " << "Falling back to OMAP." 
<< dendl; return log_type::omap; } std::unique_ptr fifo; - auto r = rgw::cls::fifo::FIFO::create(ioctx, oid, + auto r = rgw::cls::fifo::FIFO::create(dpp, ioctx, oid, &fifo, y, std::nullopt); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " error creating FIFO: r=" << r << ", oid=" << oid << dendl; return tl::unexpected(bs::error_code(-r, bs::system_category())); @@ -139,17 +137,17 @@ handle_dne(librados::IoCtx& ioctx, } tl::expected -log_backing_type(librados::IoCtx& ioctx, +log_backing_type(const DoutPrefixProvider *dpp, + librados::IoCtx& ioctx, log_type def, int shards, const fu2::unique_function& get_oid, optional_yield y) { - auto cct = static_cast(ioctx.cct()); auto check = shard_check::dne; bool fifo_unsupported = false; for (int i = 0; i < shards; ++i) { - auto [c, e] = probe_shard(ioctx, get_oid(i), fifo_unsupported, y); + auto [c, e] = probe_shard(dpp, ioctx, get_oid(i), fifo_unsupported, y); if (c == shard_check::corrupt) return tl::unexpected(bs::error_code(EIO, bs::system_category())); if (c == shard_check::dne) continue; @@ -159,20 +157,20 @@ log_backing_type(librados::IoCtx& ioctx, } if (check != c) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " clashing types: check=" << check << ", c=" << c << dendl; return tl::unexpected(bs::error_code(EIO, bs::system_category())); } } if (check == shard_check::corrupt) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " should be unreachable!" << dendl; return tl::unexpected(bs::error_code(EIO, bs::system_category())); } if (check == shard_check::dne) - return handle_dne(ioctx, + return handle_dne(dpp, ioctx, def, get_oid(0), fifo_unsupported, @@ -181,20 +179,20 @@ log_backing_type(librados::IoCtx& ioctx, return (check == shard_check::fifo ? 
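For free functions such as probe_shard() and log_backing_type(), the conversion is mechanical: the CephContext previously recovered from the IoCtx is dropped, a const DoutPrefixProvider pointer becomes the first parameter, and lderr(cct) becomes ldpp_dout(dpp, -1). A hedged before/after sketch of that shape; the function name and message below are illustrative, not taken from the patch.

#include <cerrno>
#include <string>
#include "include/rados/librados.hpp"
#include "common/dout.h"

// Before: derived its logging context from the IoCtx, e.g.
//   auto cct = static_cast<CephContext*>(ioctx.cct());
//   lderr(cct) << "oid=" << oid << " is missing" << dendl;

// After: callers supply the prefix provider explicitly.
int check_object(const DoutPrefixProvider* dpp,
                 librados::IoCtx& ioctx,
                 const std::string& oid)
{
  // -1 is the error level, matching what lderr() used to emit.
  ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": oid=" << oid
                     << " is missing" << dendl;
  return -ENOENT;
}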
log_type::fifo : log_type::omap); } -bs::error_code log_remove(librados::IoCtx& ioctx, +bs::error_code log_remove(const DoutPrefixProvider *dpp, + librados::IoCtx& ioctx, int shards, const fu2::unique_function& get_oid, bool leave_zero, optional_yield y) { bs::error_code ec; - auto cct = static_cast(ioctx.cct()); for (int i = 0; i < shards; ++i) { auto oid = get_oid(i); rados::cls::fifo::info info; uint32_t part_header_size = 0, part_entry_overhead = 0; - auto r = rgw::cls::fifo::get_meta(ioctx, oid, nullopt, &info, + auto r = rgw::cls::fifo::get_meta(dpp, ioctx, oid, nullopt, &info, &part_header_size, &part_entry_overhead, 0, y, true); if (r == -ENOENT) continue; @@ -203,11 +201,11 @@ bs::error_code log_remove(librados::IoCtx& ioctx, librados::ObjectWriteOperation op; op.remove(); auto part_oid = info.part_oid(j); - auto subr = rgw_rados_operate(ioctx, part_oid, &op, null_yield); + auto subr = rgw_rados_operate(dpp, ioctx, part_oid, &op, null_yield); if (subr < 0 && subr != -ENOENT) { if (!ec) ec = bs::error_code(-subr, bs::system_category()); - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": failed removing FIFO part: part_oid=" << part_oid << ", subr=" << subr << dendl; } @@ -216,7 +214,7 @@ bs::error_code log_remove(librados::IoCtx& ioctx, if (r < 0 && r != -ENODATA) { if (!ec) ec = bs::error_code(-r, bs::system_category()); - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": failed checking FIFO part: oid=" << oid << ", r=" << r << dendl; } @@ -231,11 +229,11 @@ bs::error_code log_remove(librados::IoCtx& ioctx, } else { op.remove(); } - r = rgw_rados_operate(ioctx, oid, &op, null_yield); + r = rgw_rados_operate(dpp, ioctx, oid, &op, null_yield); if (r < 0 && r != -ENOENT) { if (!ec) ec = bs::error_code(-r, bs::system_category()); - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": failed removing shard: oid=" << oid << ", r=" << r << dendl; } @@ -255,13 +253,14 @@ logback_generations::~logback_generations() { } } -bs::error_code logback_generations::setup(log_type def, +bs::error_code logback_generations::setup(const DoutPrefixProvider *dpp, + log_type def, optional_yield y) noexcept { try { - auto cct = static_cast(ioctx.cct()); // First, read. - auto res = read(y); + auto cct = static_cast(ioctx.cct()); + auto res = read(dpp, y); if (!res && res.error() != bs::errc::no_such_file_or_directory) { return res.error(); } @@ -272,7 +271,7 @@ bs::error_code logback_generations::setup(log_type def, // Are we the first? Then create generation 0 and the generations // metadata. librados::ObjectWriteOperation op; - auto type = log_backing_type(ioctx, def, shards, + auto type = log_backing_type(dpp, ioctx, def, shards, [this](int shard) { return this->get_oid(0, shard); }, y); @@ -295,16 +294,16 @@ bs::error_code logback_generations::setup(log_type def, lock.unlock(); op.write_full(bl); - auto r = rgw_rados_operate(ioctx, oid, &op, y); + auto r = rgw_rados_operate(dpp, ioctx, oid, &op, y); if (r < 0 && r != -EEXIST) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": failed writing oid=" << oid << ", r=" << r << dendl; bs::system_error(-r, bs::system_category()); } // Did someone race us? Then re-read. 
if (r != 0) { - res = read(y); + res = read(dpp, y); if (!res) return res.error(); if (res->first.empty()) @@ -314,7 +313,7 @@ bs::error_code logback_generations::setup(log_type def, // generation zero, incremented, then erased generation zero, // don't leave generation zero lying around. if (l.gen_id != 0) { - auto ec = log_remove(ioctx, shards, + auto ec = log_remove(dpp, ioctx, shards, [this](int shard) { return this->get_oid(0, shard); }, true, y); @@ -333,7 +332,7 @@ bs::error_code logback_generations::setup(log_type def, m.unlock(); auto ec = watch(); if (ec) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": failed to re-establish watch, unsafe to continue: oid=" << oid << ", ec=" << ec.message() << dendl; } @@ -343,11 +342,10 @@ bs::error_code logback_generations::setup(log_type def, } } -bs::error_code logback_generations::update(optional_yield y) noexcept +bs::error_code logback_generations::update(const DoutPrefixProvider *dpp, optional_yield y) noexcept { try { - auto cct = static_cast(ioctx.cct()); - auto res = read(y); + auto res = read(dpp, y); if (!res) { return res.error(); } @@ -361,7 +359,7 @@ bs::error_code logback_generations::update(optional_yield y) noexcept // Check consistency and prepare update if (es.empty()) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": INCONSISTENCY! Read empty update." << dendl; return bs::error_code(EFAULT, bs::system_category()); } @@ -370,12 +368,12 @@ bs::error_code logback_generations::update(optional_yield y) noexcept assert(cur_lowest != entries_.cend()); auto new_lowest = lowest_nomempty(es); if (new_lowest == es.cend()) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": INCONSISTENCY! Read update with no active head." << dendl; return bs::error_code(EFAULT, bs::system_category()); } if (new_lowest->first < cur_lowest->first) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": INCONSISTENCY! Tail moved wrong way." << dendl; return bs::error_code(EFAULT, bs::system_category()); } @@ -389,7 +387,7 @@ bs::error_code logback_generations::update(optional_yield y) noexcept entries_t new_entries; if ((es.end() - 1)->first < (entries_.end() - 1)->first) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": INCONSISTENCY! Head moved wrong way." 
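Member functions such as logback_generations::setup(), update() and write() show the other half of the pattern: dpp is not cached on the object, it is threaded through every call so log lines always carry the prefix of whoever initiated the operation. A small illustrative sketch of that forwarding, with hypothetical names and the optional_yield argument omitted for brevity.

#include "common/dout.h"

class ExampleJournal {
public:
  // Public entry point: callers pass their own prefix provider.
  int refresh(const DoutPrefixProvider* dpp) {
    int r = read(dpp);
    if (r < 0) {
      ldpp_dout(dpp, -1) << "refresh: read failed, r=" << r << dendl;
      return r;
    }
    return write(dpp);
  }

private:
  // dpp is forwarded rather than stored.
  int read(const DoutPrefixProvider* dpp) {
    ldpp_dout(dpp, 20) << "read: ok" << dendl;
    return 0;
  }
  int write(const DoutPrefixProvider* dpp) {
    ldpp_dout(dpp, 20) << "write: ok" << dendl;
    return 0;
  }
};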
<< dendl; return bs::error_code(EFAULT, bs::system_category()); } @@ -420,11 +418,10 @@ bs::error_code logback_generations::update(optional_yield y) noexcept return {}; } -auto logback_generations::read(optional_yield y) noexcept -> +auto logback_generations::read(const DoutPrefixProvider *dpp, optional_yield y) noexcept -> tl::expected, bs::error_code> { try { - auto cct = static_cast(ioctx.cct()); librados::ObjectReadOperation op; std::unique_lock l(m); cls_version_check(op, version, VER_COND_GE); @@ -433,14 +430,14 @@ auto logback_generations::read(optional_yield y) noexcept -> cls_version_read(op, &v2); cb::list bl; op.read(0, 0, &bl, nullptr); - auto r = rgw_rados_operate(ioctx, oid, &op, nullptr, y); + auto r = rgw_rados_operate(dpp, ioctx, oid, &op, nullptr, y); if (r < 0) { if (r == -ENOENT) { - ldout(cct, 5) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 5) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": oid=" << oid << " not found" << dendl; } else { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": failed reading oid=" << oid << ", r=" << r << dendl; } @@ -459,7 +456,7 @@ auto logback_generations::read(optional_yield y) noexcept -> } } -bs::error_code logback_generations::write(entries_t&& e, +bs::error_code logback_generations::write(const DoutPrefixProvider *dpp, entries_t&& e, std::unique_lock&& l_, optional_yield y) noexcept { @@ -467,14 +464,13 @@ bs::error_code logback_generations::write(entries_t&& e, ceph_assert(l.mutex() == &m && l.owns_lock()); try { - auto cct = static_cast(ioctx.cct()); librados::ObjectWriteOperation op; cls_version_check(op, version, VER_COND_GE); cb::list bl; encode(e, bl); op.write_full(bl); cls_version_inc(op); - auto r = rgw_rados_operate(ioctx, oid, &op, y); + auto r = rgw_rados_operate(dpp, ioctx, oid, &op, y); if (r == 0) { entries_ = std::move(e); version.inc(); @@ -482,13 +478,13 @@ bs::error_code logback_generations::write(entries_t&& e, } l.unlock(); if (r < 0 && r != -ECANCELED) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": failed reading oid=" << oid << ", r=" << r << dendl; return { -r, bs::system_category() }; } if (r == -ECANCELED) { - auto ec = update(y); + auto ec = update(dpp, y); if (ec) { return ec; } else { @@ -518,12 +514,12 @@ bs::error_code logback_generations::watch() noexcept { return {}; } -bs::error_code logback_generations::new_backing(log_type type, +bs::error_code logback_generations::new_backing(const DoutPrefixProvider *dpp, + log_type type, optional_yield y) noexcept { - auto cct = static_cast(ioctx.cct()); static constexpr auto max_tries = 10; try { - auto ec = update(y); + auto ec = update(dpp, y); if (ec) return ec; auto tries = 0; entries_t new_entries; @@ -541,27 +537,27 @@ bs::error_code logback_generations::new_backing(log_type type, new_entries.emplace(newgenid, newgen); auto es = entries_; es.emplace(newgenid, std::move(newgen)); - ec = write(std::move(es), std::move(l), y); + ec = write(dpp, std::move(es), std::move(l), y); ++tries; } while (ec == bs::errc::operation_canceled && tries < max_tries); if (tries >= max_tries) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": exhausted retry attempts." 
<< dendl; return ec; } if (ec) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": write failed with ec=" << ec.message() << dendl; return ec; } cb::list bl, rbl; - auto r = rgw_rados_notify(ioctx, oid, bl, 10'000, &rbl, y); + auto r = rgw_rados_notify(dpp, ioctx, oid, bl, 10'000, &rbl, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": notify failed with r=" << r << dendl; return { -r, bs::system_category() }; } @@ -572,12 +568,12 @@ bs::error_code logback_generations::new_backing(log_type type, return {}; } -bs::error_code logback_generations::empty_to(uint64_t gen_id, +bs::error_code logback_generations::empty_to(const DoutPrefixProvider *dpp, + uint64_t gen_id, optional_yield y) noexcept { - auto cct = static_cast(ioctx.cct()); static constexpr auto max_tries = 10; try { - auto ec = update(y); + auto ec = update(dpp, y); if (ec) return ec; auto tries = 0; uint64_t newtail = 0; @@ -586,7 +582,7 @@ bs::error_code logback_generations::empty_to(uint64_t gen_id, { auto last = entries_.end() - 1; if (gen_id >= last->first) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": Attempt to trim beyond the possible." << dendl; return bs::error_code(EINVAL, bs::system_category()); } @@ -601,27 +597,27 @@ bs::error_code logback_generations::empty_to(uint64_t gen_id, newtail = i->first; i->second.pruned = ceph::real_clock::now(); } - ec = write(std::move(es), std::move(l), y); + ec = write(dpp, std::move(es), std::move(l), y); ++tries; } while (ec == bs::errc::operation_canceled && tries < max_tries); if (tries >= max_tries) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": exhausted retry attempts." 
<< dendl; return ec; } if (ec) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": write failed with ec=" << ec.message() << dendl; return ec; } cb::list bl, rbl; - auto r = rgw_rados_notify(ioctx, oid, bl, 10'000, &rbl, y); + auto r = rgw_rados_notify(dpp, ioctx, oid, bl, 10'000, &rbl, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": notify failed with r=" << r << dendl; return { -r, bs::system_category() }; } @@ -632,11 +628,10 @@ bs::error_code logback_generations::empty_to(uint64_t gen_id, return {}; } -bs::error_code logback_generations::remove_empty(optional_yield y) noexcept { - auto cct = static_cast(ioctx.cct()); +bs::error_code logback_generations::remove_empty(const DoutPrefixProvider *dpp, optional_yield y) noexcept { static constexpr auto max_tries = 10; try { - auto ec = update(y); + auto ec = update(dpp, y); if (ec) return ec; auto tries = 0; entries_t new_entries; @@ -664,12 +659,12 @@ bs::error_code logback_generations::remove_empty(optional_yield y) noexcept { auto es2 = entries_; for (const auto& [gen_id, e] : es) { ceph_assert(e.pruned); - auto ec = log_remove(ioctx, shards, + auto ec = log_remove(dpp, ioctx, shards, [this, gen_id](int shard) { return this->get_oid(gen_id, shard); }, (gen_id == 0), y); if (ec) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": Error pruning: gen_id=" << gen_id << " ec=" << ec.message() << dendl; } @@ -679,18 +674,18 @@ bs::error_code logback_generations::remove_empty(optional_yield y) noexcept { } l.lock(); es.clear(); - ec = write(std::move(es2), std::move(l), y); + ec = write(dpp, std::move(es2), std::move(l), y); ++tries; } while (ec == bs::errc::operation_canceled && tries < max_tries); if (tries >= max_tries) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": exhausted retry attempts." << dendl; return ec; } if (ec) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": write failed with ec=" << ec.message() << dendl; return ec; } @@ -706,8 +701,9 @@ void logback_generations::handle_notify(uint64_t notify_id, bufferlist& bl) { auto cct = static_cast(ioctx.cct()); + const DoutPrefix dp(cct, dout_subsys, "logback generations handle_notify: "); if (notifier_id != my_id) { - auto ec = update(null_yield); + auto ec = update(&dp, null_yield); if (ec) { lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ diff --git a/src/rgw/rgw_log_backing.h b/src/rgw/rgw_log_backing.h index 6f755efb46389..5b9f1bfd21cd0 100644 --- a/src/rgw/rgw_log_backing.h +++ b/src/rgw/rgw_log_backing.h @@ -74,7 +74,8 @@ inline std::ostream& operator <<(std::ostream& m, const log_type& t) { /// Look over the shards in a log and determine the type. 
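handle_notify() above has no caller-supplied provider (it is invoked by librados), so the patch builds a stack DoutPrefix from the CephContext and passes its address along. The same technique works anywhere a dpp is needed but none is in scope; the function and prefix string below are illustrative.

#include "common/dout.h"

#define dout_subsys ceph_subsys_rgw

static void on_notify(CephContext* cct)
{
  // DoutPrefix is a ready-made DoutPrefixProvider wrapping a cct,
  // a subsystem and a fixed prefix string.
  const DoutPrefix dp(cct, dout_subsys, "rgw example notify: ");
  ldpp_dout(&dp, 10) << "notification received" << dendl;
}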
tl::expected -log_backing_type(librados::IoCtx& ioctx, +log_backing_type(const DoutPrefixProvider *dpp, + librados::IoCtx& ioctx, log_type def, int shards, //< Total number of shards /// A function taking a shard number and @@ -147,10 +148,10 @@ private: entries_t entries_; tl::expected, bs::error_code> - read(optional_yield y) noexcept; - bs::error_code write(entries_t&& e, std::unique_lock&& l_, + read(const DoutPrefixProvider *dpp, optional_yield y) noexcept; + bs::error_code write(const DoutPrefixProvider *dpp, entries_t&& e, std::unique_lock&& l_, optional_yield y) noexcept; - bs::error_code setup(log_type def, optional_yield y) noexcept; + bs::error_code setup(const DoutPrefixProvider *dpp, log_type def, optional_yield y) noexcept; bs::error_code watch() noexcept; @@ -178,7 +179,7 @@ public: template static tl::expected, bs::error_code> - init(librados::IoCtx& ioctx_, std::string oid_, + init(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx_, std::string oid_, fu2::unique_function&& get_oid_, int shards_, log_type def, optional_yield y, Args&& ...args) noexcept { @@ -188,7 +189,7 @@ public: shards_, std::forward(args)...); std::unique_ptr lg(lgp); lgp = nullptr; - auto ec = lg->setup(def, y); + auto ec = lg->setup(dpp, def, y); if (ec) return tl::unexpected(ec); // Obnoxiousness for C++ Compiler in Bionic Beaver @@ -198,17 +199,17 @@ public: } } - bs::error_code update(optional_yield y) noexcept; + bs::error_code update(const DoutPrefixProvider *dpp, optional_yield y) noexcept; entries_t entries() const { return entries_; } - bs::error_code new_backing(log_type type, optional_yield y) noexcept; + bs::error_code new_backing(const DoutPrefixProvider *dpp, log_type type, optional_yield y) noexcept; - bs::error_code empty_to(uint64_t gen_id, optional_yield y) noexcept; + bs::error_code empty_to(const DoutPrefixProvider *dpp, uint64_t gen_id, optional_yield y) noexcept; - bs::error_code remove_empty(optional_yield y) noexcept; + bs::error_code remove_empty(const DoutPrefixProvider *dpp, optional_yield y) noexcept; // Callbacks, to be defined by descendant. 
@@ -264,10 +265,10 @@ class LazyFIFO { std::mutex m; std::unique_ptr fifo; - int lazy_init(optional_yield y) { + int lazy_init(const DoutPrefixProvider *dpp, optional_yield y) { std::unique_lock l(m); if (fifo) return 0; - auto r = rgw::cls::fifo::FIFO::create(ioctx, oid, &fifo, y); + auto r = rgw::cls::fifo::FIFO::create(dpp, ioctx, oid, &fifo, y); if (r) { fifo.reset(); } @@ -279,114 +280,120 @@ public: LazyFIFO(librados::IoCtx& ioctx, std::string oid) : ioctx(ioctx), oid(std::move(oid)) {} - int read_meta(optional_yield y) { - auto r = lazy_init(y); + int read_meta(const DoutPrefixProvider *dpp, optional_yield y) { + auto r = lazy_init(dpp, y); if (r < 0) return r; - return fifo->read_meta(y); + return fifo->read_meta(dpp, y); } - int meta(rados::cls::fifo::info& info, optional_yield y) { - auto r = lazy_init(y); + int meta(const DoutPrefixProvider *dpp, rados::cls::fifo::info& info, optional_yield y) { + auto r = lazy_init(dpp, y); if (r < 0) return r; info = fifo->meta(); return 0; } - int get_part_layout_info(std::uint32_t& part_header_size, + int get_part_layout_info(const DoutPrefixProvider *dpp, + std::uint32_t& part_header_size, std::uint32_t& part_entry_overhead, optional_yield y) { - auto r = lazy_init(y); + auto r = lazy_init(dpp, y); if (r < 0) return r; std::tie(part_header_size, part_entry_overhead) = fifo->get_part_layout_info(); return 0; } - int push(const ceph::buffer::list& bl, + int push(const DoutPrefixProvider *dpp, + const ceph::buffer::list& bl, optional_yield y) { - auto r = lazy_init(y); + auto r = lazy_init(dpp, y); if (r < 0) return r; - return fifo->push(bl, y); + return fifo->push(dpp, bl, y); } - int push(ceph::buffer::list& bl, + int push(const DoutPrefixProvider *dpp, + ceph::buffer::list& bl, librados::AioCompletion* c, optional_yield y) { - auto r = lazy_init(y); + auto r = lazy_init(dpp, y); if (r < 0) return r; - fifo->push(bl, c); + fifo->push(dpp, bl, c); return 0; } - int push(const std::vector& data_bufs, + int push(const DoutPrefixProvider *dpp, + const std::vector& data_bufs, optional_yield y) { - auto r = lazy_init(y); + auto r = lazy_init(dpp, y); if (r < 0) return r; - return fifo->push(data_bufs, y); + return fifo->push(dpp, data_bufs, y); } - int push(const std::vector& data_bufs, + int push(const DoutPrefixProvider *dpp, + const std::vector& data_bufs, librados::AioCompletion* c, optional_yield y) { - auto r = lazy_init(y); + auto r = lazy_init(dpp, y); if (r < 0) return r; - fifo->push(data_bufs, c); + fifo->push(dpp, data_bufs, c); return 0; } - int list(int max_entries, std::optional markstr, + int list(const DoutPrefixProvider *dpp, + int max_entries, std::optional markstr, std::vector* out, bool* more, optional_yield y) { - auto r = lazy_init(y); + auto r = lazy_init(dpp, y); if (r < 0) return r; - return fifo->list(max_entries, markstr, out, more, y); + return fifo->list(dpp, max_entries, markstr, out, more, y); } - int list(int max_entries, std::optional markstr, + int list(const DoutPrefixProvider *dpp, int max_entries, std::optional markstr, std::vector* out, bool* more, librados::AioCompletion* c, optional_yield y) { - auto r = lazy_init(y); + auto r = lazy_init(dpp, y); if (r < 0) return r; - fifo->list(max_entries, markstr, out, more, c); + fifo->list(dpp, max_entries, markstr, out, more, c); return 0; } - int trim(std::string_view markstr, bool exclusive, optional_yield y) { - auto r = lazy_init(y); + int trim(const DoutPrefixProvider *dpp, std::string_view markstr, bool exclusive, optional_yield y) { + auto r = lazy_init(dpp, 
y); if (r < 0) return r; - return fifo->trim(markstr, exclusive, y); + return fifo->trim(dpp, markstr, exclusive, y); } - int trim(std::string_view markstr, bool exclusive, librados::AioCompletion* c, + int trim(const DoutPrefixProvider *dpp, std::string_view markstr, bool exclusive, librados::AioCompletion* c, optional_yield y) { - auto r = lazy_init(y); + auto r = lazy_init(dpp, y); if (r < 0) return r; - fifo->trim(markstr, exclusive, c); + fifo->trim(dpp, markstr, exclusive, c); return 0; } - int get_part_info(int64_t part_num, rados::cls::fifo::part_header* header, + int get_part_info(const DoutPrefixProvider *dpp, int64_t part_num, rados::cls::fifo::part_header* header, optional_yield y) { - auto r = lazy_init(y); + auto r = lazy_init(dpp, y); if (r < 0) return r; - return fifo->get_part_info(part_num, header, y); + return fifo->get_part_info(dpp, part_num, header, y); } - int get_part_info(int64_t part_num, rados::cls::fifo::part_header* header, + int get_part_info(const DoutPrefixProvider *dpp, int64_t part_num, rados::cls::fifo::part_header* header, librados::AioCompletion* c, optional_yield y) { - auto r = lazy_init(y); + auto r = lazy_init(dpp, y); if (r < 0) return r; fifo->get_part_info(part_num, header, c); return 0; } - int get_head_info(fu2::unique_function< + int get_head_info(const DoutPrefixProvider *dpp, fu2::unique_function< void(int r, rados::cls::fifo::part_header&&)>&& f, librados::AioCompletion* c, optional_yield y) { - auto r = lazy_init(y); + auto r = lazy_init(dpp, y); if (r < 0) return r; - fifo->get_head_info(std::move(f), c); + fifo->get_head_info(dpp, std::move(f), c); return 0; } }; diff --git a/src/rgw/rgw_lua.cc b/src/rgw/rgw_lua.cc index 5be70c8dda891..987182c664645 100644 --- a/src/rgw/rgw_lua.cc +++ b/src/rgw/rgw_lua.cc @@ -97,7 +97,7 @@ int read_script(rgw::sal::RGWRadosStore* store, const std::string& tenant, optio return 0; } -int write_script(rgw::sal::RGWRadosStore* store, const std::string& tenant, optional_yield y, context ctx, const std::string& script) +int write_script(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore* store, const std::string& tenant, optional_yield y, context ctx, const std::string& script) { RGWSysObjectCtx obj_ctx(store->svc()->sysobj->init_obj_ctx()); RGWObjVersionTracker objv_tracker; @@ -108,6 +108,7 @@ int write_script(rgw::sal::RGWRadosStore* store, const std::string& tenant, opti ceph::encode(script, bl); const auto rc = rgw_put_system_obj( + dpp, obj_ctx, obj.pool, obj.oid, @@ -124,13 +125,14 @@ int write_script(rgw::sal::RGWRadosStore* store, const std::string& tenant, opti return 0; } -int delete_script(rgw::sal::RGWRadosStore* store, const std::string& tenant, optional_yield y, context ctx) +int delete_script(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore* store, const std::string& tenant, optional_yield y, context ctx) { RGWObjVersionTracker objv_tracker; rgw_raw_obj obj(store->svc()->zone->get_zone_params().log_pool, script_oid(ctx, tenant)); const auto rc = rgw_delete_system_obj( + dpp, store->svc()->sysobj, obj.pool, obj.oid, @@ -150,7 +152,7 @@ const std::string PACKAGE_LIST_OBJECT_NAME = "lua_package_allowlist"; namespace bp = boost::process; -int add_package(rgw::sal::RGWRadosStore* store, optional_yield y, const std::string& package_name, bool allow_compilation) { +int add_package(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore* store, optional_yield y, const std::string& package_name, bool allow_compilation) { // verify that luarocks can load this oackage const auto p = 
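LazyFIFO illustrates how a thin wrapper absorbs the new parameter without storing it: each method accepts dpp, uses it for the deferred initialization, and forwards it to the underlying call. A generic sketch of that shape, with a hypothetical Backend type standing in for the real FIFO class and the optional_yield argument omitted.

#include <memory>
#include <mutex>
#include "common/dout.h"

// Hypothetical backend; stands in for rgw::cls::fifo::FIFO here.
struct Backend {
  static int create(const DoutPrefixProvider* dpp, std::unique_ptr<Backend>* out) {
    ldpp_dout(dpp, 20) << "Backend::create" << dendl;
    *out = std::make_unique<Backend>();
    return 0;
  }
  int push(const DoutPrefixProvider* dpp) {
    ldpp_dout(dpp, 20) << "Backend::push" << dendl;
    return 0;
  }
};

class LazyBackend {
  std::mutex m;
  std::unique_ptr<Backend> b;

  // Deferred construction: the first caller pays the cost, later calls reuse it.
  int lazy_init(const DoutPrefixProvider* dpp) {
    std::unique_lock l(m);
    if (b) return 0;
    return Backend::create(dpp, &b);
  }

public:
  // Every wrapper takes dpp, initializes on demand, then forwards it.
  int push(const DoutPrefixProvider* dpp) {
    if (int r = lazy_init(dpp); r < 0) return r;
    return b->push(dpp);
  }
};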
bp::search_path("luarocks"); if (p.empty()) { @@ -183,7 +185,7 @@ int add_package(rgw::sal::RGWRadosStore* store, optional_yield y, const std::str std::map new_package{{package_name, empty_bl}}; librados::ObjectWriteOperation op; op.omap_set(new_package); - ret = rgw_rados_operate(*(store->getRados()->get_lc_pool_ctx()), + ret = rgw_rados_operate(dpp, *(store->getRados()->get_lc_pool_ctx()), PACKAGE_LIST_OBJECT_NAME, &op, y); if (ret < 0) { @@ -192,10 +194,10 @@ int add_package(rgw::sal::RGWRadosStore* store, optional_yield y, const std::str return 0; } -int remove_package(rgw::sal::RGWRadosStore* store, optional_yield y, const std::string& package_name) { +int remove_package(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore* store, optional_yield y, const std::string& package_name) { librados::ObjectWriteOperation op; op.omap_rm_keys(std::set({package_name})); - const auto ret = rgw_rados_operate(*(store->getRados()->get_lc_pool_ctx()), + const auto ret = rgw_rados_operate(dpp, *(store->getRados()->get_lc_pool_ctx()), PACKAGE_LIST_OBJECT_NAME, &op, y); if (ret < 0) { @@ -205,7 +207,7 @@ int remove_package(rgw::sal::RGWRadosStore* store, optional_yield y, const std:: return 0; } -int list_packages(rgw::sal::RGWRadosStore* store, optional_yield y, packages_t& packages) { +int list_packages(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore* store, optional_yield y, packages_t& packages) { constexpr auto max_chunk = 1024U; std::string start_after; bool more = true; @@ -214,7 +216,7 @@ int list_packages(rgw::sal::RGWRadosStore* store, optional_yield y, packages_t& librados::ObjectReadOperation op; packages_t packages_chunk; op.omap_get_keys2(start_after, max_chunk, &packages_chunk, &more, &rval); - const auto ret = rgw_rados_operate(*(store->getRados()->get_lc_pool_ctx()), + const auto ret = rgw_rados_operate(dpp, *(store->getRados()->get_lc_pool_ctx()), PACKAGE_LIST_OBJECT_NAME, &op, nullptr, y); if (ret < 0) { @@ -227,7 +229,7 @@ int list_packages(rgw::sal::RGWRadosStore* store, optional_yield y, packages_t& return 0; } -int install_packages(rgw::sal::RGWRadosStore* store, optional_yield y, packages_t& failed_packages, std::string& output) { +int install_packages(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore* store, optional_yield y, packages_t& failed_packages, std::string& output) { // luarocks directory cleanup boost::system::error_code ec; const auto& luarocks_path = store->get_luarocks_path(); @@ -240,7 +242,7 @@ int install_packages(rgw::sal::RGWRadosStore* store, optional_yield y, packages_ } packages_t packages; - auto ret = list_packages(store, y, packages); + auto ret = list_packages(dpp, store, y, packages); if (ret == -ENOENT) { // allowlist is empty return 0; diff --git a/src/rgw/rgw_lua.h b/src/rgw/rgw_lua.h index 8241622b246ca..4e135a76349b1 100644 --- a/src/rgw/rgw_lua.h +++ b/src/rgw/rgw_lua.h @@ -26,13 +26,13 @@ context to_context(const std::string& s); bool verify(const std::string& script, std::string& err_msg); // store a lua script in a context -int write_script(rgw::sal::RGWRadosStore* store, const std::string& tenant, optional_yield y, context ctx, const std::string& script); +int write_script(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore* store, const std::string& tenant, optional_yield y, context ctx, const std::string& script); // read the stored lua script from a context int read_script(rgw::sal::RGWRadosStore* store, const std::string& tenant, optional_yield y, context ctx, std::string& script); // delete the stored lua script from a 
context -int delete_script(rgw::sal::RGWRadosStore* store, const std::string& tenant, optional_yield y, context ctx); +int delete_script(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore* store, const std::string& tenant, optional_yield y, context ctx); #ifdef WITH_RADOSGW_LUA_PACKAGES #include @@ -40,17 +40,17 @@ int delete_script(rgw::sal::RGWRadosStore* store, const std::string& tenant, opt using packages_t = std::set; // add a lua package to the allowlist -int add_package(rgw::sal::RGWRadosStore* store, optional_yield y, const std::string& package_name, bool allow_compilation); +int add_package(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore* store, optional_yield y, const std::string& package_name, bool allow_compilation); // remove a lua package from the allowlist -int remove_package(rgw::sal::RGWRadosStore* store, optional_yield y, const std::string& package_name); +int remove_package(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore* store, optional_yield y, const std::string& package_name); // list lua packages in the allowlist -int list_packages(rgw::sal::RGWRadosStore* store, optional_yield y, packages_t& packages); +int list_packages(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore* store, optional_yield y, packages_t& packages); // install all packages from the allowlist // return the list of packages that failed to install and the output of the install command -int install_packages(rgw::sal::RGWRadosStore* store, optional_yield y, packages_t& failed_packages, std::string& output); +int install_packages(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore* store, optional_yield y, packages_t& failed_packages, std::string& output); #endif } diff --git a/src/rgw/rgw_lua_request.cc b/src/rgw/rgw_lua_request.cc index b5654d6df1ea4..ebf1edf7d58b2 100644 --- a/src/rgw/rgw_lua_request.cc +++ b/src/rgw/rgw_lua_request.cc @@ -33,7 +33,7 @@ int RequestLog(lua_State* L) const auto rc = rgw_log_op(store->getRados(), rest, s, op_name, olog); lua_pushinteger(L, rc); } else { - ldout(s->cct, 1) << "Lua ERROR: missing rados store, cannot use ops log" << dendl; + ldpp_dout(s, 1) << "Lua ERROR: missing rados store, cannot use ops log" << dendl; lua_pushinteger(L, -EINVAL); } @@ -805,11 +805,11 @@ int execute( // execute the lua script if (luaL_dostring(L, script.c_str()) != LUA_OK) { const std::string err(lua_tostring(L, -1)); - ldout(s->cct, 1) << "Lua ERROR: " << err << dendl; + ldpp_dout(s, 1) << "Lua ERROR: " << err << dendl; return -1; } } catch (const std::runtime_error& e) { - ldout(s->cct, 1) << "Lua ERROR: " << e.what() << dendl; + ldpp_dout(s, 1) << "Lua ERROR: " << e.what() << dendl; return -1; } diff --git a/src/rgw/rgw_main.cc b/src/rgw/rgw_main.cc index f9f351fa9521a..8b0bbb663e90a 100644 --- a/src/rgw/rgw_main.cc +++ b/src/rgw/rgw_main.cc @@ -331,8 +331,9 @@ int radosgw_Main(int argc, const char **argv) FCGX_Init(); #endif + const DoutPrefix dp(cct.get(), dout_subsys, "rgw main: "); rgw::sal::RGWRadosStore *store = - RGWStoreManager::get_storage(g_ceph_context, + RGWStoreManager::get_storage(&dp, g_ceph_context, g_conf()->rgw_enable_gc_threads, g_conf()->rgw_enable_lc_threads, g_conf()->rgw_enable_quota_threads, @@ -420,7 +421,7 @@ int radosgw_Main(int argc, const char **argv) #ifdef WITH_RADOSGW_LUA_PACKAGES rgw::lua::packages_t failed_packages; std::string output; - r = rgw::lua::install_packages(store, null_yield, failed_packages, output); + r = rgw::lua::install_packages(&dp, store, null_yield, failed_packages, output); if (r < 0) { dout(1) << "ERROR: 
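Every caller of these converted entry points now has to hand in some provider. Where a caller has nothing request-specific to prefix (small tools, tests), the helpers in common/dout.h are assumed to cover it: DoutPrefix for a fixed string, NoDoutPrefix for no prefix at all. A small usage sketch; do_maintenance() and run_tool() are hypothetical.

#include "common/dout.h"

#define dout_subsys ceph_subsys_rgw

// Hypothetical dpp-taking API of the kind introduced throughout this patch.
static int do_maintenance(const DoutPrefixProvider* dpp)
{
  ldpp_dout(dpp, 20) << "maintenance pass" << dendl;
  return 0;
}

int run_tool(CephContext* cct)
{
  // Fixed prefix for normal tooling output...
  const DoutPrefix dp(cct, dout_subsys, "rgw example tool: ");
  int r = do_maintenance(&dp);

  // ...or no prefix at all, e.g. in unit tests.
  const NoDoutPrefix ndp(cct, dout_subsys);
  if (r == 0) {
    r = do_maintenance(&ndp);
  }
  return r;
}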
failed to install lua packages from allowlist" << dendl; } @@ -628,12 +629,12 @@ int radosgw_Main(int argc, const char **argv) // add a watcher to respond to realm configuration changes - RGWPeriodPusher pusher(store, null_yield); + RGWPeriodPusher pusher(&dp, store, null_yield); RGWFrontendPauser pauser(fes, implicit_tenant_context, &pusher); auto reloader = std::make_unique(store, service_map_meta, &pauser); - RGWRealmWatcher realm_watcher(g_ceph_context, store->svc()->zone->get_realm()); + RGWRealmWatcher realm_watcher(&dp, g_ceph_context, store->svc()->zone->get_realm()); realm_watcher.add_watcher(RGWRealmNotify::Reload, *reloader); realm_watcher.add_watcher(RGWRealmNotify::ZonesNeedPeriod, pusher); diff --git a/src/rgw/rgw_mdlog.h b/src/rgw/rgw_mdlog.h index 9064f8c041787..93abc1693c54e 100644 --- a/src/rgw/rgw_mdlog.h +++ b/src/rgw/rgw_mdlog.h @@ -100,9 +100,9 @@ public: oid = prefix + buf; } - int add_entry(const string& hash_key, const string& section, const string& key, bufferlist& bl); + int add_entry(const DoutPrefixProvider *dpp, const string& hash_key, const string& section, const string& key, bufferlist& bl); int get_shard_id(const string& hash_key, int *shard_id); - int store_entries_in_shard(list& entries, int shard_id, librados::AioCompletion *completion); + int store_entries_in_shard(const DoutPrefixProvider *dpp, list& entries, int shard_id, librados::AioCompletion *completion); struct LogListCtx { int cur_shard; @@ -121,17 +121,18 @@ public: const real_time& end_time, const string& marker, void **handle); void complete_list_entries(void *handle); - int list_entries(void *handle, + int list_entries(const DoutPrefixProvider *dpp, + void *handle, int max_entries, list& entries, string *out_marker, bool *truncated); - int trim(int shard_id, const real_time& from_time, const real_time& end_time, const string& start_marker, const string& end_marker); - int get_info(int shard_id, RGWMetadataLogInfo *info); - int get_info_async(int shard_id, RGWMetadataLogInfoCompletion *completion); - int lock_exclusive(int shard_id, timespan duration, string&zone_id, string& owner_id); - int unlock(int shard_id, string& zone_id, string& owner_id); + int trim(const DoutPrefixProvider *dpp, int shard_id, const real_time& from_time, const real_time& end_time, const string& start_marker, const string& end_marker); + int get_info(const DoutPrefixProvider *dpp, int shard_id, RGWMetadataLogInfo *info); + int get_info_async(const DoutPrefixProvider *dpp, int shard_id, RGWMetadataLogInfoCompletion *completion); + int lock_exclusive(const DoutPrefixProvider *dpp, int shard_id, timespan duration, string&zone_id, string& owner_id); + int unlock(const DoutPrefixProvider *dpp, int shard_id, string& zone_id, string& owner_id); int update_shards(list& shards); diff --git a/src/rgw/rgw_metadata.cc b/src/rgw/rgw_metadata.cc index be572ebc866a8..726802e4a4261 100644 --- a/src/rgw/rgw_metadata.cc +++ b/src/rgw/rgw_metadata.cc @@ -104,7 +104,7 @@ void RGWMetadataLogData::decode_json(JSONObj *obj) { } -int RGWMetadataLog::add_entry(const string& hash_key, const string& section, const string& key, bufferlist& bl) { +int RGWMetadataLog::add_entry(const DoutPrefixProvider *dpp, const string& hash_key, const string& section, const string& key, bufferlist& bl) { if (!svc.zone->need_to_log_metadata()) return 0; @@ -114,7 +114,7 @@ int RGWMetadataLog::add_entry(const string& hash_key, const string& section, con rgw_shard_name(prefix, cct->_conf->rgw_md_log_max_shards, hash_key, oid, &shard_id); mark_modified(shard_id); 
real_time now = real_clock::now(); - return svc.cls->timelog.add(oid, now, section, key, bl, null_yield); + return svc.cls->timelog.add(dpp, oid, now, section, key, bl, null_yield); } int RGWMetadataLog::get_shard_id(const string& hash_key, int *shard_id) @@ -125,13 +125,13 @@ int RGWMetadataLog::get_shard_id(const string& hash_key, int *shard_id) return 0; } -int RGWMetadataLog::store_entries_in_shard(list& entries, int shard_id, librados::AioCompletion *completion) +int RGWMetadataLog::store_entries_in_shard(const DoutPrefixProvider *dpp, list& entries, int shard_id, librados::AioCompletion *completion) { string oid; mark_modified(shard_id); rgw_shard_name(prefix, shard_id, oid); - return svc.cls->timelog.add(oid, entries, completion, false, null_yield); + return svc.cls->timelog.add(dpp, oid, entries, completion, false, null_yield); } void RGWMetadataLog::init_list_entries(int shard_id, const real_time& from_time, const real_time& end_time, @@ -154,7 +154,7 @@ void RGWMetadataLog::complete_list_entries(void *handle) { delete ctx; } -int RGWMetadataLog::list_entries(void *handle, +int RGWMetadataLog::list_entries(const DoutPrefixProvider *dpp, void *handle, int max_entries, list& entries, string *last_marker, @@ -167,7 +167,7 @@ int RGWMetadataLog::list_entries(void *handle, } std::string next_marker; - int ret = svc.cls->timelog.list(ctx->cur_oid, ctx->from_time, ctx->end_time, + int ret = svc.cls->timelog.list(dpp, ctx->cur_oid, ctx->from_time, ctx->end_time, max_entries, entries, ctx->marker, &next_marker, truncated, null_yield); if ((ret < 0) && (ret != -ENOENT)) @@ -184,14 +184,14 @@ int RGWMetadataLog::list_entries(void *handle, return 0; } -int RGWMetadataLog::get_info(int shard_id, RGWMetadataLogInfo *info) +int RGWMetadataLog::get_info(const DoutPrefixProvider *dpp, int shard_id, RGWMetadataLogInfo *info) { string oid; get_shard_oid(shard_id, oid); cls_log_header header; - int ret = svc.cls->timelog.info(oid, &header, null_yield); + int ret = svc.cls->timelog.info(dpp, oid, &header, null_yield); if ((ret < 0) && (ret != -ENOENT)) return ret; @@ -220,40 +220,40 @@ RGWMetadataLogInfoCompletion::~RGWMetadataLogInfoCompletion() completion->release(); } -int RGWMetadataLog::get_info_async(int shard_id, RGWMetadataLogInfoCompletion *completion) +int RGWMetadataLog::get_info_async(const DoutPrefixProvider *dpp, int shard_id, RGWMetadataLogInfoCompletion *completion) { string oid; get_shard_oid(shard_id, oid); completion->get(); // hold a ref until the completion fires - return svc.cls->timelog.info_async(completion->get_io_obj(), oid, + return svc.cls->timelog.info_async(dpp, completion->get_io_obj(), oid, &completion->get_header(), completion->get_completion()); } -int RGWMetadataLog::trim(int shard_id, const real_time& from_time, const real_time& end_time, +int RGWMetadataLog::trim(const DoutPrefixProvider *dpp, int shard_id, const real_time& from_time, const real_time& end_time, const string& start_marker, const string& end_marker) { string oid; get_shard_oid(shard_id, oid); - return svc.cls->timelog.trim(oid, from_time, end_time, start_marker, + return svc.cls->timelog.trim(dpp, oid, from_time, end_time, start_marker, end_marker, nullptr, null_yield); } -int RGWMetadataLog::lock_exclusive(int shard_id, timespan duration, string& zone_id, string& owner_id) { +int RGWMetadataLog::lock_exclusive(const DoutPrefixProvider *dpp, int shard_id, timespan duration, string& zone_id, string& owner_id) { string oid; get_shard_oid(shard_id, oid); - return 
svc.cls->lock.lock_exclusive(svc.zone->get_zone_params().log_pool, oid, duration, zone_id, owner_id); + return svc.cls->lock.lock_exclusive(dpp, svc.zone->get_zone_params().log_pool, oid, duration, zone_id, owner_id); } -int RGWMetadataLog::unlock(int shard_id, string& zone_id, string& owner_id) { +int RGWMetadataLog::unlock(const DoutPrefixProvider *dpp, int shard_id, string& zone_id, string& owner_id) { string oid; get_shard_oid(shard_id, oid); - return svc.cls->lock.unlock(svc.zone->get_zone_params().log_pool, oid, zone_id, owner_id); + return svc.cls->lock.unlock(dpp, svc.zone->get_zone_params().log_pool, oid, zone_id, owner_id); } void RGWMetadataLog::mark_modified(int shard_id) @@ -306,16 +306,16 @@ public: return new RGWMetadataObject; } - int get(string& entry, RGWMetadataObject **obj, optional_yield y) override { + int get(string& entry, RGWMetadataObject **obj, optional_yield y, const DoutPrefixProvider *dpp) override { return -ENOTSUP; } int put(string& entry, RGWMetadataObject *obj, RGWObjVersionTracker& objv_tracker, - optional_yield y, RGWMDLogSyncType type, bool from_remote_zone) override { + optional_yield y, const DoutPrefixProvider *dpp, RGWMDLogSyncType type, bool from_remote_zone) override { return -ENOTSUP; } - int remove(string& entry, RGWObjVersionTracker& objv_tracker, optional_yield y) override { + int remove(string& entry, RGWObjVersionTracker& objv_tracker, optional_yield y, const DoutPrefixProvider *dpp) override { return -ENOTSUP; } @@ -323,12 +323,13 @@ public: const ceph::real_time& mtime, RGWObjVersionTracker *objv_tracker, optional_yield y, + const DoutPrefixProvider *dpp, RGWMDLogStatus op_type, std::function f) { return -ENOTSUP; } - int list_keys_init(const string& marker, void **phandle) override { + int list_keys_init(const DoutPrefixProvider *dpp, const string& marker, void **phandle) override { iter_data *data = new iter_data; list sections; mgr->get_sections(sections); @@ -407,9 +408,9 @@ RGWMetadataHandlerPut_SObj::RGWMetadataHandlerPut_SObj(RGWMetadataHandler_Generi RGWMetadataHandlerPut_SObj::~RGWMetadataHandlerPut_SObj() { } -int RGWMetadataHandlerPut_SObj::put_pre() +int RGWMetadataHandlerPut_SObj::put_pre(const DoutPrefixProvider *dpp) { - int ret = get(&old_obj); + int ret = get(&old_obj, dpp); if (ret < 0 && ret != -ENOENT) { return ret; } @@ -432,23 +433,23 @@ int RGWMetadataHandlerPut_SObj::put_pre() return 0; } -int RGWMetadataHandlerPut_SObj::put() +int RGWMetadataHandlerPut_SObj::put(const DoutPrefixProvider *dpp) { - int ret = put_check(); + int ret = put_check(dpp); if (ret != 0) { return ret; } - return put_checked(); + return put_checked(dpp); } -int RGWMetadataHandlerPut_SObj::put_checked() +int RGWMetadataHandlerPut_SObj::put_checked(const DoutPrefixProvider *dpp) { RGWSI_MBSObj_PutParams params(obj->get_pattrs(), obj->get_mtime()); encode_obj(¶ms.bl); - int ret = op->put(entry, params, &objv_tracker, y); + int ret = op->put(entry, params, &objv_tracker, y, dpp); if (ret < 0) { return ret; } @@ -456,19 +457,19 @@ int RGWMetadataHandlerPut_SObj::put_checked() return 0; } -int RGWMetadataHandler_GenericMetaBE::do_put_operate(Put *put_op) +int RGWMetadataHandler_GenericMetaBE::do_put_operate(Put *put_op, const DoutPrefixProvider *dpp) { - int r = put_op->put_pre(); + int r = put_op->put_pre(dpp); if (r != 0) { /* r can also be STATUS_NO_APPLY */ return r; } - r = put_op->put(); + r = put_op->put(dpp); if (r != 0) { return r; } - r = put_op->put_post(); + r = put_op->put_post(dpp); if (r != 0) { /* e.g., -error or STATUS_APPLIED */ 
return r; } @@ -476,25 +477,25 @@ int RGWMetadataHandler_GenericMetaBE::do_put_operate(Put *put_op) return 0; } -int RGWMetadataHandler_GenericMetaBE::get(string& entry, RGWMetadataObject **obj, optional_yield y) +int RGWMetadataHandler_GenericMetaBE::get(string& entry, RGWMetadataObject **obj, optional_yield y, const DoutPrefixProvider *dpp) { return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) { - return do_get(op, entry, obj, y); + return do_get(op, entry, obj, y, dpp); }); } int RGWMetadataHandler_GenericMetaBE::put(string& entry, RGWMetadataObject *obj, RGWObjVersionTracker& objv_tracker, - optional_yield y, RGWMDLogSyncType type, bool from_remote_zone) + optional_yield y, const DoutPrefixProvider *dpp, RGWMDLogSyncType type, bool from_remote_zone) { return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) { - return do_put(op, entry, obj, objv_tracker, y, type, from_remote_zone); + return do_put(op, entry, obj, objv_tracker, y, dpp, type, from_remote_zone); }); } -int RGWMetadataHandler_GenericMetaBE::remove(string& entry, RGWObjVersionTracker& objv_tracker, optional_yield y) +int RGWMetadataHandler_GenericMetaBE::remove(string& entry, RGWObjVersionTracker& objv_tracker, optional_yield y, const DoutPrefixProvider *dpp) { return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) { - return do_remove(op, entry, objv_tracker, y); + return do_remove(op, entry, objv_tracker, y, dpp); }); } @@ -502,6 +503,7 @@ int RGWMetadataHandler_GenericMetaBE::mutate(const string& entry, const ceph::real_time& mtime, RGWObjVersionTracker *objv_tracker, optional_yield y, + const DoutPrefixProvider *dpp, RGWMDLogStatus op_type, std::function f) { @@ -511,7 +513,8 @@ int RGWMetadataHandler_GenericMetaBE::mutate(const string& entry, params, objv_tracker, y, - f); + f, + dpp); }); } @@ -522,11 +525,11 @@ int RGWMetadataHandler_GenericMetaBE::get_shard_id(const string& entry, int *sha }); } -int RGWMetadataHandler_GenericMetaBE::list_keys_init(const string& marker, void **phandle) +int RGWMetadataHandler_GenericMetaBE::list_keys_init(const DoutPrefixProvider *dpp, const string& marker, void **phandle) { auto op = std::make_unique(be_handler); - int ret = op->list_init(marker); + int ret = op->list_init(dpp, marker); if (ret < 0) { return ret; } @@ -626,7 +629,7 @@ int RGWMetadataManager::find_handler(const string& metadata_key, RGWMetadataHand } -int RGWMetadataManager::get(string& metadata_key, Formatter *f, optional_yield y) +int RGWMetadataManager::get(string& metadata_key, Formatter *f, optional_yield y, const DoutPrefixProvider *dpp) { RGWMetadataHandler *handler; string entry; @@ -637,7 +640,7 @@ int RGWMetadataManager::get(string& metadata_key, Formatter *f, optional_yield y RGWMetadataObject *obj; - ret = handler->get(entry, &obj, y); + ret = handler->get(entry, &obj, y, dpp); if (ret < 0) { return ret; } @@ -660,6 +663,7 @@ int RGWMetadataManager::get(string& metadata_key, Formatter *f, optional_yield y int RGWMetadataManager::put(string& metadata_key, bufferlist& bl, optional_yield y, + const DoutPrefixProvider *dpp, RGWMDLogSyncType sync_type, bool from_remote_zone, obj_version *existing_version) @@ -701,7 +705,7 @@ int RGWMetadataManager::put(string& metadata_key, bufferlist& bl, return -EINVAL; } - ret = handler->put(entry, obj, objv_tracker, y, sync_type, from_remote_zone); + ret = handler->put(entry, obj, objv_tracker, y, dpp, sync_type, from_remote_zone); if (existing_version) { *existing_version = objv_tracker.read_version; } @@ -711,7 +715,7 @@ int 
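The metadata handler changes are the interface-level version of the same conversion: every pure virtual gains a const DoutPrefixProvider parameter, and every override is updated in lockstep so `override` keeps catching mismatches. A trimmed, illustrative sketch of that shape; the types and method names are not from the patch.

#include <string>
#include "common/dout.h"

// Abstract handler: dpp is part of the virtual interface, so every
// backend receives the caller's logging context.
class MetaHandlerBase {
public:
  virtual ~MetaHandlerBase() = default;
  virtual int get(const std::string& entry, const DoutPrefixProvider* dpp) = 0;
  virtual int remove(const std::string& entry, const DoutPrefixProvider* dpp) = 0;
};

class ExampleHandler final : public MetaHandlerBase {
public:
  // `override` fails to compile if a signature misses the new parameter,
  // which is what keeps a tree-wide change like this consistent.
  int get(const std::string& entry, const DoutPrefixProvider* dpp) override {
    ldpp_dout(dpp, 20) << "get: entry=" << entry << dendl;
    return 0;
  }
  int remove(const std::string& entry, const DoutPrefixProvider* dpp) override {
    ldpp_dout(dpp, 5) << "remove: entry=" << entry << dendl;
    return 0;
  }
};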
RGWMetadataManager::put(string& metadata_key, bufferlist& bl, return ret; } -int RGWMetadataManager::remove(string& metadata_key, optional_yield y) +int RGWMetadataManager::remove(string& metadata_key, optional_yield y, const DoutPrefixProvider *dpp) { RGWMetadataHandler *handler; string entry; @@ -722,7 +726,7 @@ int RGWMetadataManager::remove(string& metadata_key, optional_yield y) } RGWMetadataObject *obj; - ret = handler->get(entry, &obj, y); + ret = handler->get(entry, &obj, y, dpp); if (ret < 0) { return ret; } @@ -730,13 +734,14 @@ int RGWMetadataManager::remove(string& metadata_key, optional_yield y) objv_tracker.read_version = obj->get_version(); delete obj; - return handler->remove(entry, objv_tracker, y); + return handler->remove(entry, objv_tracker, y, dpp); } int RGWMetadataManager::mutate(const string& metadata_key, const ceph::real_time& mtime, RGWObjVersionTracker *objv_tracker, optional_yield y, + const DoutPrefixProvider *dpp, RGWMDLogStatus op_type, std::function f) { @@ -748,7 +753,7 @@ int RGWMetadataManager::mutate(const string& metadata_key, return ret; } - return handler->mutate(entry, mtime, objv_tracker, y, op_type, f); + return handler->mutate(entry, mtime, objv_tracker, y, dpp, op_type, f); } int RGWMetadataManager::get_shard_id(const string& section, const string& entry, int *shard_id) @@ -766,12 +771,12 @@ struct list_keys_handle { RGWMetadataHandler *handler; }; -int RGWMetadataManager::list_keys_init(const string& section, void **handle) +int RGWMetadataManager::list_keys_init(const DoutPrefixProvider *dpp, const string& section, void **handle) { - return list_keys_init(section, string(), handle); + return list_keys_init(dpp, section, string(), handle); } -int RGWMetadataManager::list_keys_init(const string& section, +int RGWMetadataManager::list_keys_init(const DoutPrefixProvider *dpp, const string& section, const string& marker, void **handle) { string entry; @@ -786,7 +791,7 @@ int RGWMetadataManager::list_keys_init(const string& section, list_keys_handle *h = new list_keys_handle; h->handler = handler; - ret = handler->list_keys_init(marker, &h->handle); + ret = handler->list_keys_init(dpp, marker, &h->handle); if (ret < 0) { delete h; return ret; diff --git a/src/rgw/rgw_metadata.h b/src/rgw/rgw_metadata.h index 97d4f0487606d..e717122bb703f 100644 --- a/src/rgw/rgw_metadata.h +++ b/src/rgw/rgw_metadata.h @@ -69,22 +69,25 @@ public: virtual RGWMetadataObject *get_meta_obj(JSONObj *jo, const obj_version& objv, const ceph::real_time& mtime) = 0; - virtual int get(string& entry, RGWMetadataObject **obj, optional_yield) = 0; + virtual int get(string& entry, RGWMetadataObject **obj, optional_yield, const DoutPrefixProvider *dpp) = 0; virtual int put(string& entry, RGWMetadataObject *obj, RGWObjVersionTracker& objv_tracker, - optional_yield, RGWMDLogSyncType type, + optional_yield, + const DoutPrefixProvider *dpp, + RGWMDLogSyncType type, bool from_remote_zone) = 0; - virtual int remove(string& entry, RGWObjVersionTracker& objv_tracker, optional_yield) = 0; + virtual int remove(string& entry, RGWObjVersionTracker& objv_tracker, optional_yield, const DoutPrefixProvider *dpp) = 0; virtual int mutate(const string& entry, const ceph::real_time& mtime, RGWObjVersionTracker *objv_tracker, optional_yield y, + const DoutPrefixProvider *dpp, RGWMDLogStatus op_type, std::function f) = 0; - virtual int list_keys_init(const string& marker, void **phandle) = 0; + virtual int list_keys_init(const DoutPrefixProvider *dpp, const string& marker, void **phandle) = 0; virtual 
int list_keys_next(void *handle, int max, list& keys, bool *truncated) = 0; virtual void list_keys_complete(void *handle) = 0; @@ -108,12 +111,13 @@ public: protected: RGWSI_MetaBackend_Handler *be_handler; - virtual int do_get(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWMetadataObject **obj, optional_yield y) = 0; + virtual int do_get(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWMetadataObject **obj, optional_yield y, const DoutPrefixProvider *dpp) = 0; virtual int do_put(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWMetadataObject *obj, RGWObjVersionTracker& objv_tracker, optional_yield y, - RGWMDLogSyncType type, bool from_remote_zone) = 0; - virtual int do_put_operate(Put *put_op); - virtual int do_remove(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWObjVersionTracker& objv_tracker, optional_yield y) = 0; + const DoutPrefixProvider *dpp, RGWMDLogSyncType type, + bool from_remote_zone) = 0; + virtual int do_put_operate(Put *put_op, const DoutPrefixProvider *dpp); + virtual int do_remove(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWObjVersionTracker& objv_tracker, optional_yield y, const DoutPrefixProvider *dpp) = 0; public: RGWMetadataHandler_GenericMetaBE() {} @@ -139,8 +143,8 @@ public: optional_yield y; bool from_remote_zone{false}; - int get(RGWMetadataObject **obj) { - return handler->do_get(op, entry, obj, y); + int get(RGWMetadataObject **obj, const DoutPrefixProvider *dpp) { + return handler->do_get(op, entry, obj, y, dpp); } public: Put(RGWMetadataHandler_GenericMetaBE *_handler, RGWSI_MetaBackend_Handler::Op *_op, @@ -150,13 +154,13 @@ public: virtual ~Put() {} - virtual int put_pre() { + virtual int put_pre(const DoutPrefixProvider *dpp) { return 0; } - virtual int put() { + virtual int put(const DoutPrefixProvider *dpp) { return 0; } - virtual int put_post() { + virtual int put_post(const DoutPrefixProvider *dpp) { return 0; } virtual int finalize() { @@ -164,20 +168,21 @@ public: } }; - int get(string& entry, RGWMetadataObject **obj, optional_yield) override; - int put(string& entry, RGWMetadataObject *obj, RGWObjVersionTracker& objv_tracker, optional_yield, RGWMDLogSyncType type, bool from_remote_zone) override; - int remove(string& entry, RGWObjVersionTracker& objv_tracker, optional_yield) override; + int get(string& entry, RGWMetadataObject **obj, optional_yield, const DoutPrefixProvider *dpp) override; + int put(string& entry, RGWMetadataObject *obj, RGWObjVersionTracker& objv_tracker, optional_yield, const DoutPrefixProvider *dpp, RGWMDLogSyncType type, bool from_remote_zone) override; + int remove(string& entry, RGWObjVersionTracker& objv_tracker, optional_yield, const DoutPrefixProvider *dpp) override; int mutate(const string& entry, const ceph::real_time& mtime, RGWObjVersionTracker *objv_tracker, optional_yield y, + const DoutPrefixProvider *dpp, RGWMDLogStatus op_type, std::function f) override; int get_shard_id(const string& entry, int *shard_id) override; - int list_keys_init(const std::string& marker, void **phandle) override; + int list_keys_init(const DoutPrefixProvider *dpp, const std::string& marker, void **phandle) override; int list_keys_next(void *handle, int max, std::list& keys, bool *truncated) override; void list_keys_complete(void *handle) override; @@ -233,22 +238,24 @@ public: RGWMetadataHandler *get_handler(const string& type); - int get(string& metadata_key, Formatter *f, optional_yield y); + int get(string& metadata_key, Formatter *f, optional_yield y, const DoutPrefixProvider *dpp); int put(string& 
metadata_key, bufferlist& bl, optional_yield y, + const DoutPrefixProvider *dpp, RGWMDLogSyncType sync_mode, bool from_remote_zone, obj_version *existing_version = NULL); - int remove(string& metadata_key, optional_yield y); + int remove(string& metadata_key, optional_yield y, const DoutPrefixProvider *dpp); int mutate(const string& metadata_key, const ceph::real_time& mtime, RGWObjVersionTracker *objv_tracker, optional_yield y, + const DoutPrefixProvider *dpp, RGWMDLogStatus op_type, std::function f); - int list_keys_init(const string& section, void **phandle); - int list_keys_init(const string& section, const string& marker, void **phandle); + int list_keys_init(const DoutPrefixProvider *dpp, const string& section, void **phandle); + int list_keys_init(const DoutPrefixProvider *dpp, const string& section, const string& marker, void **phandle); int list_keys_next(void *handle, int max, list& keys, bool *truncated); void list_keys_complete(void *handle); @@ -277,12 +284,12 @@ public: RGWMDLogSyncType type, bool from_remote_zone); ~RGWMetadataHandlerPut_SObj(); - int put_pre() override; - int put() override; - virtual int put_check() { + int put_pre(const DoutPrefixProvider *dpp) override; + int put(const DoutPrefixProvider *dpp) override; + virtual int put_check(const DoutPrefixProvider *dpp) { return 0; } - virtual int put_checked(); + virtual int put_checked(const DoutPrefixProvider *dpp); virtual void encode_obj(bufferlist *bl) {} }; diff --git a/src/rgw/rgw_multi.cc b/src/rgw/rgw_multi.cc index 79284591b40ee..10bc6559f75d1 100644 --- a/src/rgw/rgw_multi.cc +++ b/src/rgw/rgw_multi.cc @@ -78,7 +78,7 @@ bool is_v2_upload_id(const string& upload_id) (strncmp(uid, MULTIPART_UPLOAD_ID_PREFIX_LEGACY, sizeof(MULTIPART_UPLOAD_ID_PREFIX_LEGACY) - 1) == 0); } -int list_multipart_parts(rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket_info, +int list_multipart_parts(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket_info, CephContext *cct, const string& upload_id, const string& meta_oid, int num_parts, @@ -111,10 +111,10 @@ int list_multipart_parts(rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket_i snprintf(buf, sizeof(buf), "%08d", marker); p.append(buf); - ret = sysobj.omap().get_vals(p, num_parts + 1, &parts_map, + ret = sysobj.omap().get_vals(dpp, p, num_parts + 1, &parts_map, nullptr, null_yield); } else { - ret = sysobj.omap().get_all(&parts_map, null_yield); + ret = sysobj.omap().get_all(dpp, &parts_map, null_yield); } if (ret < 0) { return ret; @@ -134,7 +134,7 @@ int list_multipart_parts(rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket_i try { decode(info, bli); } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: could not part info, caught buffer::error" << + ldpp_dout(dpp, 0) << "ERROR: could not part info, caught buffer::error" << dendl; return -EIO; } @@ -146,7 +146,7 @@ int list_multipart_parts(rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket_i * where one gateway doesn't support correctly sorted omap * keys for multipart upload just assume data is unsorted. 
*/ - return list_multipart_parts(store, bucket_info, cct, upload_id, + return list_multipart_parts(dpp, store, bucket_info, cct, upload_id, meta_oid, num_parts, marker, parts, next_marker, truncated, true); } @@ -188,19 +188,21 @@ int list_multipart_parts(rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket_i return 0; } -int list_multipart_parts(rgw::sal::RGWRadosStore *store, struct req_state *s, +int list_multipart_parts(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, struct req_state *s, const string& upload_id, const string& meta_oid, int num_parts, int marker, map& parts, int *next_marker, bool *truncated, bool assume_unsorted) { - return list_multipart_parts(store, s->bucket->get_info(), s->cct, upload_id, + return list_multipart_parts(dpp, store, s->bucket->get_info(), s->cct, upload_id, meta_oid, num_parts, marker, parts, next_marker, truncated, assume_unsorted); } -int abort_multipart_upload(rgw::sal::RGWRadosStore *store, CephContext *cct, +int abort_multipart_upload(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, CephContext *cct, RGWObjectCtx *obj_ctx, RGWBucketInfo& bucket_info, RGWMPObj& mp_obj) { @@ -217,11 +219,11 @@ int abort_multipart_upload(rgw::sal::RGWRadosStore *store, CephContext *cct, uint64_t parts_accounted_size = 0; do { - ret = list_multipart_parts(store, bucket_info, cct, + ret = list_multipart_parts(dpp, store, bucket_info, cct, mp_obj.get_upload_id(), mp_obj.get_meta(), 1000, marker, obj_parts, &marker, &truncated); if (ret < 0) { - ldout(cct, 20) << __func__ << ": list_multipart_parts returned " << + ldpp_dout(dpp, 20) << __func__ << ": list_multipart_parts returned " << ret << dendl; return (ret == -ENOENT) ? -ERR_NO_SUCH_UPLOAD : ret; } @@ -235,13 +237,13 @@ int abort_multipart_upload(rgw::sal::RGWRadosStore *store, CephContext *cct, string oid = mp_obj.get_part(obj_iter->second.num); obj.init_ns(bucket_info.bucket, oid, RGW_OBJ_NS_MULTIPART); obj.index_hash_source = mp_obj.get_key(); - ret = store->getRados()->delete_obj(*obj_ctx, bucket_info, obj, 0); + ret = store->getRados()->delete_obj(dpp, *obj_ctx, bucket_info, obj, 0); if (ret < 0 && ret != -ENOENT) return ret; } else { - store->getRados()->update_gc_chain(meta_obj, obj_part.manifest, &chain); - RGWObjManifest::obj_iterator oiter = obj_part.manifest.obj_begin(); - if (oiter != obj_part.manifest.obj_end()) { + store->getRados()->update_gc_chain(dpp, meta_obj, obj_part.manifest, &chain); + RGWObjManifest::obj_iterator oiter = obj_part.manifest.obj_begin(dpp); + if (oiter != obj_part.manifest.obj_end(dpp)) { rgw_obj head; rgw_raw_obj raw_head = oiter.get_location().get_raw_obj(store); RGWSI_Tier_RADOS::raw_obj_to_obj(bucket_info.bucket, raw_head, &head); @@ -258,12 +260,12 @@ int abort_multipart_upload(rgw::sal::RGWRadosStore *store, CephContext *cct, /* use upload id as tag and do it synchronously */ ret = store->getRados()->send_chain_to_gc(chain, mp_obj.get_upload_id()); if (ret < 0) { - ldout(cct, 5) << __func__ << ": gc->send_chain() returned " << ret << dendl; + ldpp_dout(dpp, 5) << __func__ << ": gc->send_chain() returned " << ret << dendl; if (ret == -ENOENT) { return -ERR_NO_SUCH_UPLOAD; } //Delete objects inline if send chain to gc fails - store->getRados()->delete_objs_inline(chain, mp_obj.get_upload_id()); + store->getRados()->delete_objs_inline(dpp, chain, mp_obj.get_upload_id()); } RGWRados::Object del_target(store->getRados(), bucket_info, *obj_ctx, meta_obj); @@ -278,15 +280,16 @@ int abort_multipart_upload(rgw::sal::RGWRadosStore *store, CephContext 
*cct, del_op.params.parts_accounted_size = parts_accounted_size; // and also remove the metadata obj - ret = del_op.delete_obj(null_yield); + ret = del_op.delete_obj(null_yield, dpp); if (ret < 0) { - ldout(cct, 20) << __func__ << ": del_op.delete_obj returned " << + ldpp_dout(dpp, 20) << __func__ << ": del_op.delete_obj returned " << ret << dendl; } return (ret == -ENOENT) ? -ERR_NO_SUCH_UPLOAD : ret; } -int list_bucket_multiparts(rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket_info, +int list_bucket_multiparts(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket_info, const string& prefix, const string& marker, const string& delim, const int& max_uploads, @@ -303,11 +306,12 @@ int list_bucket_multiparts(rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket list_op.params.ns = RGW_OBJ_NS_MULTIPART; list_op.params.filter = &mp_filter; - return(list_op.list_objects(max_uploads, objs, common_prefixes, is_truncated, null_yield)); + return(list_op.list_objects(dpp, max_uploads, objs, common_prefixes, is_truncated, null_yield)); } -int abort_bucket_multiparts(rgw::sal::RGWRadosStore *store, CephContext *cct, RGWBucketInfo& bucket_info, - string& prefix, string& delim) +int abort_bucket_multiparts(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, CephContext *cct, + RGWBucketInfo& bucket_info, string& prefix, string& delim) { constexpr int max = 1000; int ret, num_deleted = 0; @@ -317,16 +321,16 @@ int abort_bucket_multiparts(rgw::sal::RGWRadosStore *store, CephContext *cct, RG bool is_truncated; do { - ret = list_bucket_multiparts(store, bucket_info, prefix, marker, delim, + ret = list_bucket_multiparts(dpp, store, bucket_info, prefix, marker, delim, max, &objs, nullptr, &is_truncated); if (ret < 0) { - ldout(store->ctx(), 0) << __func__ << + ldpp_dout(dpp, 0) << __func__ << " ERROR : calling list_bucket_multiparts; ret=" << ret << "; bucket=\"" << bucket_info.bucket << "\"; prefix=\"" << prefix << "\"; delim=\"" << delim << "\"" << dendl; return ret; } - ldout(store->ctx(), 20) << __func__ << + ldpp_dout(dpp, 20) << __func__ << " INFO: aborting and cleaning up multipart upload(s); bucket=\"" << bucket_info.bucket << "\"; objs.size()=" << objs.size() << "; is_truncated=" << is_truncated << dendl; @@ -337,17 +341,17 @@ int abort_bucket_multiparts(rgw::sal::RGWRadosStore *store, CephContext *cct, RG rgw_obj_key key(obj.key); if (!mp.from_meta(key.name)) continue; - ret = abort_multipart_upload(store, cct, &obj_ctx, bucket_info, mp); + ret = abort_multipart_upload(dpp, store, cct, &obj_ctx, bucket_info, mp); if (ret < 0) { // we're doing a best-effort; if something cannot be found, // log it and keep moving forward if (ret != -ENOENT && ret != -ERR_NO_SUCH_UPLOAD) { - ldout(store->ctx(), 0) << __func__ << + ldpp_dout(dpp, 0) << __func__ << " ERROR : failed to abort and clean-up multipart upload \"" << key.get_oid() << "\"" << dendl; return ret; } else { - ldout(store->ctx(), 10) << __func__ << + ldpp_dout(dpp, 10) << __func__ << " NOTE : unable to find part(s) of " "aborted multipart upload of \"" << key.get_oid() << "\" for cleaning up" << dendl; @@ -356,7 +360,7 @@ int abort_bucket_multiparts(rgw::sal::RGWRadosStore *store, CephContext *cct, RG num_deleted++; } if (num_deleted) { - ldout(store->ctx(), 0) << __func__ << + ldpp_dout(dpp, 0) << __func__ << " WARNING : aborted " << num_deleted << " incomplete multipart uploads" << dendl; } diff --git a/src/rgw/rgw_multi.h b/src/rgw/rgw_multi.h index 5f8fa11b336a5..c58901a9fb036 100644 --- 
a/src/rgw/rgw_multi.h +++ b/src/rgw/rgw_multi.h @@ -8,6 +8,7 @@ #include "rgw_xml.h" #include "rgw_obj_manifest.h" #include "rgw_compression_types.h" +#include "common/dout.h" namespace rgw { namespace sal { class RGWRadosStore; @@ -108,7 +109,8 @@ public: extern bool is_v2_upload_id(const string& upload_id); -extern int list_multipart_parts(rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket_info, +extern int list_multipart_parts(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket_info, CephContext *cct, const string& upload_id, const string& meta_oid, int num_parts, @@ -116,17 +118,19 @@ extern int list_multipart_parts(rgw::sal::RGWRadosStore *store, RGWBucketInfo& b int *next_marker, bool *truncated, bool assume_unsorted = false); -extern int list_multipart_parts(rgw::sal::RGWRadosStore *store, struct req_state *s, +extern int list_multipart_parts(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, struct req_state *s, const string& upload_id, const string& meta_oid, int num_parts, int marker, map& parts, int *next_marker, bool *truncated, bool assume_unsorted = false); -extern int abort_multipart_upload(rgw::sal::RGWRadosStore *store, CephContext *cct, RGWObjectCtx *obj_ctx, +extern int abort_multipart_upload(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, CephContext *cct, RGWObjectCtx *obj_ctx, RGWBucketInfo& bucket_info, RGWMPObj& mp_obj); -extern int list_bucket_multiparts(rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket_info, +extern int list_bucket_multiparts(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket_info, const string& prefix, const string& marker, const string& delim, @@ -134,6 +138,6 @@ extern int list_bucket_multiparts(rgw::sal::RGWRadosStore *store, RGWBucketInfo& vector *objs, map *common_prefixes, bool *is_truncated); -extern int abort_bucket_multiparts(rgw::sal::RGWRadosStore *store, CephContext *cct, RGWBucketInfo& bucket_info, +extern int abort_bucket_multiparts(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, CephContext *cct, RGWBucketInfo& bucket_info, string& prefix, string& delim); #endif diff --git a/src/rgw/rgw_notify.cc b/src/rgw/rgw_notify.cc index 5fa753b88830f..a4ad062005e70 100644 --- a/src/rgw/rgw_notify.cc +++ b/src/rgw/rgw_notify.cc @@ -52,7 +52,7 @@ auto make_stack_allocator() { return boost::context::protected_fixedsize_stack{128*1024}; } -class Manager { +class Manager : public DoutPrefixProvider { const size_t max_queue_size; const uint32_t queues_update_period_ms; const uint32_t queues_update_retry_ms; @@ -71,6 +71,10 @@ class Manager { const std::string Q_LIST_OBJECT_NAME = "queues_list_object"; + CephContext *get_cct() const override { return cct; } + unsigned get_subsys() const override { return dout_subsys; } + std::ostream& gen_prefix(std::ostream& out) const override { return out << "rgw notify: "; } + // read the list of queues from the queue list object int read_queue_list(queues_t& queues, optional_yield y) { constexpr auto max_chunk = 1024U; @@ -81,14 +85,14 @@ class Manager { librados::ObjectReadOperation op; queues_t queues_chunk; op.omap_get_keys2(start_after, max_chunk, &queues_chunk, &more, &rval); - const auto ret = rgw_rados_operate(rados_ioctx, Q_LIST_OBJECT_NAME, &op, nullptr, y); + const auto ret = rgw_rados_operate(this, rados_ioctx, Q_LIST_OBJECT_NAME, &op, nullptr, y); if (ret == -ENOENT) { // queue list object was not created - nothing to do return 0; } if (ret < 0) { // TODO: do we need to check on 
rval as well as ret? - ldout(cct, 1) << "ERROR: failed to read queue list. error: " << ret << dendl; + ldpp_dout(this, 1) << "ERROR: failed to read queue list. error: " << ret << dendl; return ret; } queues.merge(queues_chunk); @@ -160,29 +164,29 @@ class Manager { try { decode(event_entry, iter); } catch (buffer::error& err) { - ldout(cct, 5) << "WARNING: failed to decode entry. error: " << err.what() << dendl; + ldpp_dout(this, 5) << "WARNING: failed to decode entry. error: " << err.what() << dendl; return false; } try { // TODO move endpoint creation to queue level const auto push_endpoint = RGWPubSubEndpoint::create(event_entry.push_endpoint, event_entry.arn_topic, - RGWHTTPArgs(event_entry.push_endpoint_args), + RGWHTTPArgs(event_entry.push_endpoint_args, this), cct); - ldout(cct, 20) << "INFO: push endpoint created: " << event_entry.push_endpoint << + ldpp_dout(this, 20) << "INFO: push endpoint created: " << event_entry.push_endpoint << " for entry: " << entry.marker << dendl; const auto ret = push_endpoint->send_to_completion_async(cct, event_entry.event, optional_yield(io_context, yield)); if (ret < 0) { - ldout(cct, 5) << "WARNING: push entry: " << entry.marker << " to endpoint: " << event_entry.push_endpoint + ldpp_dout(this, 5) << "WARNING: push entry: " << entry.marker << " to endpoint: " << event_entry.push_endpoint << " failed. error: " << ret << " (will retry)" << dendl; return false; } else { - ldout(cct, 20) << "INFO: push entry: " << entry.marker << " to endpoint: " << event_entry.push_endpoint + ldpp_dout(this, 20) << "INFO: push entry: " << entry.marker << " to endpoint: " << event_entry.push_endpoint << " ok" << dendl; if (perfcounter) perfcounter->inc(l_rgw_pubsub_push_ok); return true; } } catch (const RGWPubSubEndpoint::configuration_error& e) { - ldout(cct, 5) << "WARNING: failed to create push endpoint: " + ldpp_dout(this, 5) << "WARNING: failed to create push endpoint: " << event_entry.push_endpoint << " for entry: " << entry.marker << ". error: " << e.what() << " (will retry) " << dendl; return false; } @@ -191,7 +195,7 @@ class Manager { // clean stale reservation from queue void cleanup_queue(const std::string& queue_name, spawn::yield_context yield) { while (true) { - ldout(cct, 20) << "INFO: trying to perform stale reservation cleanup for queue: " << queue_name << dendl; + ldpp_dout(this, 20) << "INFO: trying to perform stale reservation cleanup for queue: " << queue_name << dendl; const auto now = ceph::coarse_real_time::clock::now(); const auto stale_time = now - std::chrono::seconds(stale_reservations_period_s); librados::ObjectWriteOperation op; @@ -202,19 +206,19 @@ class Manager { "" /*no tag*/); cls_2pc_queue_expire_reservations(op, stale_time); // check ownership and do reservation cleanup in one batch - auto ret = rgw_rados_operate(rados_ioctx, queue_name, &op, optional_yield(io_context, yield)); + auto ret = rgw_rados_operate(this, rados_ioctx, queue_name, &op, optional_yield(io_context, yield)); if (ret == -ENOENT) { // queue was deleted - ldout(cct, 5) << "INFO: queue: " + ldpp_dout(this, 5) << "INFO: queue: " << queue_name << ". was removed. cleanup will stop" << dendl; return; } if (ret == -EBUSY) { - ldout(cct, 5) << "WARNING: queue: " << queue_name << " ownership moved to another daemon. processing will stop" << dendl; + ldpp_dout(this, 5) << "WARNING: queue: " << queue_name << " ownership moved to another daemon. 
processing will stop" << dendl; return; } if (ret < 0) { - ldout(cct, 5) << "WARNING: failed to cleanup stale reservation from queue and/or lock queue: " << queue_name + ldpp_dout(this, 5) << "WARNING: failed to cleanup stale reservation from queue and/or lock queue: " << queue_name << ". error: " << ret << dendl; } Timer timer(io_context); @@ -261,25 +265,25 @@ class Manager { "" /*no tag*/); cls_2pc_queue_list_entries(op, start_marker, max_elements, &obl, &rval); // check ownership and list entries in one batch - auto ret = rgw_rados_operate(rados_ioctx, queue_name, &op, nullptr, optional_yield(io_context, yield)); + auto ret = rgw_rados_operate(this, rados_ioctx, queue_name, &op, nullptr, optional_yield(io_context, yield)); if (ret == -ENOENT) { // queue was deleted - ldout(cct, 5) << "INFO: queue: " + ldpp_dout(this, 5) << "INFO: queue: " << queue_name << ". was removed. processing will stop" << dendl; return; } if (ret == -EBUSY) { - ldout(cct, 5) << "WARNING: queue: " << queue_name << " ownership moved to another daemon. processing will stop" << dendl; + ldpp_dout(this, 5) << "WARNING: queue: " << queue_name << " ownership moved to another daemon. processing will stop" << dendl; return; } if (ret < 0) { - ldout(cct, 5) << "WARNING: failed to get list of entries in queue and/or lock queue: " + ldpp_dout(this, 5) << "WARNING: failed to get list of entries in queue and/or lock queue: " << queue_name << ". error: " << ret << " (will retry)" << dendl; continue; } ret = cls_2pc_queue_list_entries_result(obl, entries, &truncated, end_marker); if (ret < 0) { - ldout(cct, 5) << "WARNING: failed to parse list of entries in queue: " + ldpp_dout(this, 5) << "WARNING: failed to parse list of entries in queue: " << queue_name << ". error: " << ret << " (will retry)" << dendl; continue; } @@ -290,7 +294,7 @@ class Manager { continue; } // log when queue is not idle - ldout(cct, 20) << "INFO: found: " << total_entries << " entries in: " << queue_name << + ldpp_dout(this, 20) << "INFO: found: " << total_entries << " entries in: " << queue_name << ". 
end marker is: " << end_marker << dendl; is_idle = false; @@ -307,17 +311,17 @@ class Manager { spawn::spawn(yield, [this, &queue_name, entry_idx, total_entries, &end_marker, &remove_entries, &has_error, &waiter, entry](spawn::yield_context yield) { const auto token = waiter.make_token(); if (process_entry(entry, yield)) { - ldout(cct, 20) << "INFO: processing of entry: " << + ldpp_dout(this, 20) << "INFO: processing of entry: " << entry.marker << " (" << entry_idx << "/" << total_entries << ") from: " << queue_name << " ok" << dendl; remove_entries = true; } else { if (set_min_marker(end_marker, entry.marker) < 0) { - ldout(cct, 1) << "ERROR: cannot determin minimum between malformed markers: " << end_marker << ", " << entry.marker << dendl; + ldpp_dout(this, 1) << "ERROR: cannot determin minimum between malformed markers: " << end_marker << ", " << entry.marker << dendl; } else { - ldout(cct, 20) << "INFO: new end marker for removal: " << end_marker << " from: " << queue_name << dendl; + ldpp_dout(this, 20) << "INFO: new end marker for removal: " << end_marker << " from: " << queue_name << dendl; } has_error = true; - ldout(cct, 20) << "INFO: processing of entry: " << + ldpp_dout(this, 20) << "INFO: processing of entry: " << entry.marker << " (" << entry_idx << "/" << total_entries << ") from: " << queue_name << " failed" << dendl; } }, make_stack_allocator()); @@ -337,22 +341,22 @@ class Manager { "" /*no tag*/); cls_2pc_queue_remove_entries(op, end_marker); // check ownership and deleted entries in one batch - const auto ret = rgw_rados_operate(rados_ioctx, queue_name, &op, optional_yield(io_context, yield)); + const auto ret = rgw_rados_operate(this, rados_ioctx, queue_name, &op, optional_yield(io_context, yield)); if (ret == -ENOENT) { // queue was deleted - ldout(cct, 5) << "INFO: queue: " + ldpp_dout(this, 5) << "INFO: queue: " << queue_name << ". was removed. processing will stop" << dendl; return; } if (ret == -EBUSY) { - ldout(cct, 5) << "WARNING: queue: " << queue_name << " ownership moved to another daemon. processing will stop" << dendl; + ldpp_dout(this, 5) << "WARNING: queue: " << queue_name << " ownership moved to another daemon. processing will stop" << dendl; return; } if (ret < 0) { - ldout(cct, 1) << "ERROR: failed to remove entries and/or lock queue up to: " << end_marker << " from queue: " + ldpp_dout(this, 1) << "ERROR: failed to remove entries and/or lock queue up to: " << end_marker << " from queue: " << queue_name << ". 
error: " << ret << dendl; } else { - ldout(cct, 20) << "INFO: removed entries up to: " << end_marker << " from queue: " + ldpp_dout(this, 20) << "INFO: removed entries up to: " << end_marker << " from queue: " << queue_name << dendl; } } @@ -384,7 +388,7 @@ class Manager { std::chrono::milliseconds(duration_jitter(rnd_gen)); timer.expires_from_now(duration); const auto tp = ceph::coarse_real_time::clock::to_time_t(ceph::coarse_real_time::clock::now() + duration); - ldout(cct, 20) << "INFO: next queues processing will happen at: " << std::ctime(&tp) << dendl; + ldpp_dout(this, 20) << "INFO: next queues processing will happen at: " << std::ctime(&tp) << dendl; boost::system::error_code ec; timer.async_wait(yield[ec]); @@ -410,27 +414,27 @@ class Manager { failover_time, LOCK_FLAG_MAY_RENEW); - ret = rgw_rados_operate(rados_ioctx, queue_name, &op, optional_yield(io_context, yield)); + ret = rgw_rados_operate(this, rados_ioctx, queue_name, &op, optional_yield(io_context, yield)); if (ret == -EBUSY) { // lock is already taken by another RGW - ldout(cct, 20) << "INFO: queue: " << queue_name << " owned (locked) by another daemon" << dendl; + ldpp_dout(this, 20) << "INFO: queue: " << queue_name << " owned (locked) by another daemon" << dendl; // if queue was owned by this RGW, processing should be stopped, queue would be deleted from list afterwards continue; } if (ret == -ENOENT) { // queue is deleted - processing will stop the next time we try to read from the queue - ldout(cct, 10) << "INFO: queue: " << queue_name << " should not be locked - already deleted" << dendl; + ldpp_dout(this, 10) << "INFO: queue: " << queue_name << " should not be locked - already deleted" << dendl; continue; } if (ret < 0) { // failed to lock for another reason, continue to process other queues - ldout(cct, 1) << "ERROR: failed to lock queue: " << queue_name << ". error: " << ret << dendl; + ldpp_dout(this, 1) << "ERROR: failed to lock queue: " << queue_name << ". 
error: " << ret << dendl; has_error = true; continue; } // add queue to list of owned queues if (owned_queues.insert(queue_name).second) { - ldout(cct, 10) << "INFO: queue: " << queue_name << " now owned (locked) by this daemon" << dendl; + ldpp_dout(this, 10) << "INFO: queue: " << queue_name << " now owned (locked) by this daemon" << dendl; // start processing this queue spawn::spawn(io_context, [this, &queue_gc, &queue_gc_lock, queue_name](spawn::yield_context yield) { process_queue(queue_name, yield); @@ -438,10 +442,10 @@ class Manager { // mark it for deletion std::lock_guard lock_guard(queue_gc_lock); queue_gc.push_back(queue_name); - ldout(cct, 10) << "INFO: queue: " << queue_name << " marked for removal" << dendl; + ldpp_dout(this, 10) << "INFO: queue: " << queue_name << " marked for removal" << dendl; }, make_stack_allocator()); } else { - ldout(cct, 20) << "INFO: queue: " << queue_name << " ownership (lock) renewed" << dendl; + ldpp_dout(this, 20) << "INFO: queue: " << queue_name << " ownership (lock) renewed" << dendl; } } // erase all queue that were deleted @@ -449,7 +453,7 @@ class Manager { std::lock_guard lock_guard(queue_gc_lock); std::for_each(queue_gc.begin(), queue_gc.end(), [this, &owned_queues](const std::string& queue_name) { owned_queues.erase(queue_name); - ldout(cct, 20) << "INFO: queue: " << queue_name << " removed" << dendl; + ldpp_dout(this, 20) << "INFO: queue: " << queue_name << " removed" << dendl; }); queue_gc.clear(); } @@ -494,64 +498,64 @@ public: (WORKER_THREAD_NAME+std::to_string(worker_id)).c_str()); ceph_assert(rc == 0); } - ldout(cct, 10) << "Started notification manager with: " << worker_count << " workers" << dendl; + ldpp_dout(this, 10) << "Started notification manager with: " << worker_count << " workers" << dendl; } int add_persistent_topic(const std::string& topic_name, optional_yield y) { if (topic_name == Q_LIST_OBJECT_NAME) { - ldout(cct, 1) << "ERROR: topic name cannot be: " << Q_LIST_OBJECT_NAME << " (conflict with queue list object name)" << dendl; + ldpp_dout(this, 1) << "ERROR: topic name cannot be: " << Q_LIST_OBJECT_NAME << " (conflict with queue list object name)" << dendl; return -EINVAL; } librados::ObjectWriteOperation op; op.create(true); cls_2pc_queue_init(op, topic_name, max_queue_size); - auto ret = rgw_rados_operate(rados_ioctx, topic_name, &op, y); + auto ret = rgw_rados_operate(this, rados_ioctx, topic_name, &op, y); if (ret == -EEXIST) { // queue already exists - nothing to do - ldout(cct, 20) << "INFO: queue for topic: " << topic_name << " already exists. nothing to do" << dendl; + ldpp_dout(this, 20) << "INFO: queue for topic: " << topic_name << " already exists. nothing to do" << dendl; return 0; } if (ret < 0) { // failed to create queue - ldout(cct, 1) << "ERROR: failed to create queue for topic: " << topic_name << ". error: " << ret << dendl; + ldpp_dout(this, 1) << "ERROR: failed to create queue for topic: " << topic_name << ". error: " << ret << dendl; return ret; } bufferlist empty_bl; std::map new_topic{{topic_name, empty_bl}}; op.omap_set(new_topic); - ret = rgw_rados_operate(rados_ioctx, Q_LIST_OBJECT_NAME, &op, y); + ret = rgw_rados_operate(this, rados_ioctx, Q_LIST_OBJECT_NAME, &op, y); if (ret < 0) { - ldout(cct, 1) << "ERROR: failed to add queue: " << topic_name << " to queue list. error: " << ret << dendl; + ldpp_dout(this, 1) << "ERROR: failed to add queue: " << topic_name << " to queue list. 
error: " << ret << dendl; return ret; } - ldout(cct, 20) << "INFO: queue: " << topic_name << " added to queue list" << dendl; + ldpp_dout(this, 20) << "INFO: queue: " << topic_name << " added to queue list" << dendl; return 0; } int remove_persistent_topic(const std::string& topic_name, optional_yield y) { librados::ObjectWriteOperation op; op.remove(); - auto ret = rgw_rados_operate(rados_ioctx, topic_name, &op, y); + auto ret = rgw_rados_operate(this, rados_ioctx, topic_name, &op, y); if (ret == -ENOENT) { // queue already removed - nothing to do - ldout(cct, 20) << "INFO: queue for topic: " << topic_name << " already removed. nothing to do" << dendl; + ldpp_dout(this, 20) << "INFO: queue for topic: " << topic_name << " already removed. nothing to do" << dendl; return 0; } if (ret < 0) { // failed to remove queue - ldout(cct, 1) << "ERROR: failed to remove queue for topic: " << topic_name << ". error: " << ret << dendl; + ldpp_dout(this, 1) << "ERROR: failed to remove queue for topic: " << topic_name << ". error: " << ret << dendl; return ret; } std::set topic_to_remove{{topic_name}}; op.omap_rm_keys(topic_to_remove); - ret = rgw_rados_operate(rados_ioctx, Q_LIST_OBJECT_NAME, &op, y); + ret = rgw_rados_operate(this, rados_ioctx, Q_LIST_OBJECT_NAME, &op, y); if (ret < 0) { - ldout(cct, 1) << "ERROR: failed to remove queue: " << topic_name << " from queue list. error: " << ret << dendl; + ldpp_dout(this, 1) << "ERROR: failed to remove queue: " << topic_name << " from queue list. error: " << ret << dendl; return ret; } - ldout(cct, 20) << "INFO: queue: " << topic_name << " removed from queue list" << dendl; + ldpp_dout(this, 20) << "INFO: queue: " << topic_name << " removed from queue list" << dendl; return 0; } }; @@ -570,7 +574,7 @@ constexpr uint32_t WORKER_COUNT = 1; // 1 worker thread constexpr uint32_t STALE_RESERVATIONS_PERIOD_S = 120; // cleanup reservations that are more than 2 minutes old constexpr uint32_t RESERVATIONS_CLEANUP_PERIOD_S = 30; // reservation cleanup every 30 seconds -bool init(CephContext* cct, rgw::sal::RGWRadosStore* store) { +bool init(CephContext* cct, rgw::sal::RGWRadosStore* store, const DoutPrefixProvider *dpp) { if (s_manager) { return false; } @@ -610,7 +614,7 @@ rgw::sal::RGWObject* get_object_with_atttributes(const req_state* s, rgw::sal::R if (!src_obj->get_bucket()) { src_obj->set_bucket(s->bucket.get()); } - if (src_obj->get_obj_attrs(s->obj_ctx, s->yield) < 0) { + if (src_obj->get_obj_attrs(s->obj_ctx, s->yield, s) < 0) { return nullptr; } } @@ -748,7 +752,7 @@ bool notification_match(const rgw_pubsub_topic_filter& filter, const req_state* return true; } -int publish_reserve(EventType event_type, +int publish_reserve(const DoutPrefixProvider *dpp, EventType event_type, reservation_t& res, const RGWObjTags* req_tags) { @@ -767,7 +771,7 @@ int publish_reserve(EventType event_type, // notification does not apply to req_state continue; } - ldout(res.s->cct, 20) << "INFO: notification: '" << topic_filter.s3_id << + ldpp_dout(dpp, 20) << "INFO: notification: '" << topic_filter.s3_id << "' on topic: '" << topic_cfg.dest.arn_topic << "' and bucket: '" << res.s->bucket->get_name() << "' (unique topic: '" << topic_cfg.name << @@ -783,17 +787,17 @@ int publish_reserve(EventType event_type, int rval; const auto& queue_name = topic_cfg.dest.arn_topic; cls_2pc_queue_reserve(op, res.size, 1, &obl, &rval); - auto ret = rgw_rados_operate(res.store->getRados()->get_notif_pool_ctx(), + auto ret = rgw_rados_operate(dpp, res.store->getRados()->get_notif_pool_ctx(), 
queue_name, &op, res.s->yield, librados::OPERATION_RETURNVEC); if (ret < 0) { - ldout(res.s->cct, 1) << "ERROR: failed to reserve notification on queue: " << queue_name + ldpp_dout(dpp, 1) << "ERROR: failed to reserve notification on queue: " << queue_name << ". error: " << ret << dendl; // if no space is left in queue we ask client to slow down return (ret == -ENOSPC) ? -ERR_RATE_LIMITED : ret; } ret = cls_2pc_queue_reserve_result(obl, res_id); if (ret < 0) { - ldout(res.s->cct, 1) << "ERROR: failed to parse reservation id. error: " << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to parse reservation id. error: " << ret << dendl; return ret; } } @@ -807,7 +811,8 @@ int publish_commit(rgw::sal::RGWObject* obj, const ceph::real_time& mtime, const std::string& etag, EventType event_type, - reservation_t& res) + reservation_t& res, + const DoutPrefixProvider *dpp) { for (auto& topic : res.topics) { if (topic.cfg.dest.persistent && topic.res_id == cls_2pc_reservation::NO_ID) { @@ -827,16 +832,16 @@ int publish_commit(rgw::sal::RGWObject* obj, const auto& queue_name = topic.cfg.dest.arn_topic; if (bl.length() > res.size) { // try to make a larger reservation, fail only if this is not possible - ldout(res.s->cct, 5) << "WARNING: committed size: " << bl.length() << " exceeded reserved size: " << res.size << + ldpp_dout(dpp, 5) << "WARNING: committed size: " << bl.length() << " exceeded reserved size: " << res.size << " . trying to make a larger reservation on queue:" << queue_name << dendl; // first cancel the existing reservation librados::ObjectWriteOperation op; cls_2pc_queue_abort(op, topic.res_id); - auto ret = rgw_rados_operate(res.store->getRados()->get_notif_pool_ctx(), + auto ret = rgw_rados_operate(dpp, res.store->getRados()->get_notif_pool_ctx(), topic.cfg.dest.arn_topic, &op, res.s->yield); if (ret < 0) { - ldout(res.s->cct, 1) << "ERROR: failed to abort reservation: " << topic.res_id << + ldpp_dout(dpp, 1) << "ERROR: failed to abort reservation: " << topic.res_id << " when trying to make a larger reservation on queue: " << queue_name << ". error: " << ret << dendl; return ret; @@ -845,28 +850,28 @@ int publish_commit(rgw::sal::RGWObject* obj, bufferlist obl; int rval; cls_2pc_queue_reserve(op, bl.length(), 1, &obl, &rval); - ret = rgw_rados_operate(res.store->getRados()->get_notif_pool_ctx(), + ret = rgw_rados_operate(dpp, res.store->getRados()->get_notif_pool_ctx(), queue_name, &op, res.s->yield, librados::OPERATION_RETURNVEC); if (ret < 0) { - ldout(res.s->cct, 1) << "ERROR: failed to reserve extra space on queue: " << queue_name + ldpp_dout(dpp, 1) << "ERROR: failed to reserve extra space on queue: " << queue_name << ". error: " << ret << dendl; return (ret == -ENOSPC) ? -ERR_RATE_LIMITED : ret; } ret = cls_2pc_queue_reserve_result(obl, topic.res_id); if (ret < 0) { - ldout(res.s->cct, 1) << "ERROR: failed to parse reservation id for extra space. error: " << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to parse reservation id for extra space. 
error: " << ret << dendl; return ret; } } std::vector bl_data_vec{std::move(bl)}; librados::ObjectWriteOperation op; cls_2pc_queue_commit(op, bl_data_vec, topic.res_id); - const auto ret = rgw_rados_operate(res.store->getRados()->get_notif_pool_ctx(), + const auto ret = rgw_rados_operate(dpp, res.store->getRados()->get_notif_pool_ctx(), queue_name, &op, res.s->yield); topic.res_id = cls_2pc_reservation::NO_ID; if (ret < 0) { - ldout(res.s->cct, 1) << "ERROR: failed to commit reservation to queue: " << queue_name + ldpp_dout(dpp, 1) << "ERROR: failed to commit reservation to queue: " << queue_name << ". error: " << ret << dendl; return ret; } @@ -875,18 +880,18 @@ int publish_commit(rgw::sal::RGWObject* obj, // TODO add endpoint LRU cache const auto push_endpoint = RGWPubSubEndpoint::create(topic.cfg.dest.push_endpoint, topic.cfg.dest.arn_topic, - RGWHTTPArgs(topic.cfg.dest.push_endpoint_args), + RGWHTTPArgs(topic.cfg.dest.push_endpoint_args, dpp), res.s->cct); - ldout(res.s->cct, 20) << "INFO: push endpoint created: " << topic.cfg.dest.push_endpoint << dendl; + ldpp_dout(dpp, 20) << "INFO: push endpoint created: " << topic.cfg.dest.push_endpoint << dendl; const auto ret = push_endpoint->send_to_completion_async(res.s->cct, event_entry.event, res.s->yield); if (ret < 0) { - ldout(res.s->cct, 1) << "ERROR: push to endpoint " << topic.cfg.dest.push_endpoint << " failed. error: " << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: push to endpoint " << topic.cfg.dest.push_endpoint << " failed. error: " << ret << dendl; if (perfcounter) perfcounter->inc(l_rgw_pubsub_push_failed); return ret; } if (perfcounter) perfcounter->inc(l_rgw_pubsub_push_ok); } catch (const RGWPubSubEndpoint::configuration_error& e) { - ldout(res.s->cct, 1) << "ERROR: failed to create push endpoint: " + ldpp_dout(dpp, 1) << "ERROR: failed to create push endpoint: " << topic.cfg.dest.push_endpoint << ". error: " << e.what() << dendl; if (perfcounter) perfcounter->inc(l_rgw_pubsub_push_failed); return -EINVAL; @@ -896,7 +901,7 @@ int publish_commit(rgw::sal::RGWObject* obj, return 0; } -int publish_abort(reservation_t& res) { +int publish_abort(const DoutPrefixProvider *dpp, reservation_t& res) { for (auto& topic : res.topics) { if (!topic.cfg.dest.persistent || topic.res_id == cls_2pc_reservation::NO_ID) { // nothing to abort or already committed/aborted @@ -905,11 +910,11 @@ int publish_abort(reservation_t& res) { const auto& queue_name = topic.cfg.dest.arn_topic; librados::ObjectWriteOperation op; cls_2pc_queue_abort(op, topic.res_id); - const auto ret = rgw_rados_operate(res.store->getRados()->get_notif_pool_ctx(), + const auto ret = rgw_rados_operate(dpp, res.store->getRados()->get_notif_pool_ctx(), queue_name, &op, res.s->yield); if (ret < 0) { - ldout(res.s->cct, 1) << "ERROR: failed to abort reservation: " << topic.res_id << + ldpp_dout(dpp, 1) << "ERROR: failed to abort reservation: " << topic.res_id << " from queue: " << queue_name << ". 
error: " << ret << dendl; return ret; } @@ -919,7 +924,7 @@ int publish_abort(reservation_t& res) { } reservation_t::~reservation_t() { - publish_abort(*this); + publish_abort(dpp, *this); } } diff --git a/src/rgw/rgw_notify.h b/src/rgw/rgw_notify.h index bcbc31f3628ad..b7bfc28342ce6 100644 --- a/src/rgw/rgw_notify.h +++ b/src/rgw/rgw_notify.h @@ -25,7 +25,7 @@ namespace rgw::notify { // initialize the notification manager // notification manager is dequeing the 2-phase-commit queues // and send the notifications to the endpoints -bool init(CephContext* cct, rgw::sal::RGWRadosStore* store); +bool init(CephContext* cct, rgw::sal::RGWRadosStore* store, const DoutPrefixProvider *dpp); // shutdown the notification manager void shutdown(); @@ -52,14 +52,15 @@ struct reservation_t { cls_2pc_reservation::id_t res_id; }; + const DoutPrefixProvider *dpp; std::vector topics; rgw::sal::RGWRadosStore* const store; const req_state* const s; size_t size; rgw::sal::RGWObject* const object; - reservation_t(rgw::sal::RGWRadosStore* _store, const req_state* _s, rgw::sal::RGWObject* _object) : - store(_store), s(_s), object(_object) {} + reservation_t(const DoutPrefixProvider *_dpp, rgw::sal::RGWRadosStore* _store, const req_state* _s, rgw::sal::RGWObject* _object) : + dpp(_dpp), store(_store), s(_s), object(_object) {} // dtor doing resource leak guarding // aborting the reservation if not already committed or aborted @@ -67,7 +68,8 @@ struct reservation_t { }; // create a reservation on the 2-phase-commit queue -int publish_reserve(EventType event_type, +int publish_reserve(const DoutPrefixProvider *dpp, + EventType event_type, reservation_t& reservation, const RGWObjTags* req_tags); @@ -77,10 +79,11 @@ int publish_commit(rgw::sal::RGWObject* obj, const ceph::real_time& mtime, const std::string& etag, EventType event_type, - reservation_t& reservation); + reservation_t& reservation, + const DoutPrefixProvider *dpp); // cancel the reservation -int publish_abort(reservation_t& reservation); +int publish_abort(const DoutPrefixProvider *dpp, reservation_t& reservation); } diff --git a/src/rgw/rgw_obj_manifest.cc b/src/rgw/rgw_obj_manifest.cc index a91c51c4658ea..87dbb9a942fbd 100644 --- a/src/rgw/rgw_obj_manifest.cc +++ b/src/rgw/rgw_obj_manifest.cc @@ -5,6 +5,7 @@ #include "services/svc_zone.h" #include "services/svc_tier_rados.h" +#include "rgw_rados.h" // RGW_OBJ_NS_SHADOW and RGW_OBJ_NS_MULTIPART #define dout_context g_ceph_context #define dout_subsys ceph_subsys_rgw @@ -35,36 +36,14 @@ int RGWObjManifest::generator::create_next(uint64_t ofs) manifest->get_implicit_location(cur_part_id, cur_stripe, ofs, NULL, &cur_obj); - manifest->update_iterators(); - return 0; } -const RGWObjManifest::obj_iterator& RGWObjManifest::obj_begin() -{ - return begin_iter; -} - -const RGWObjManifest::obj_iterator& RGWObjManifest::obj_end() -{ - return end_iter; -} - -RGWObjManifest::obj_iterator RGWObjManifest::obj_find(uint64_t ofs) -{ - if (ofs > obj_size) { - ofs = obj_size; - } - RGWObjManifest::obj_iterator iter(this); - iter.seek(ofs); - return iter; -} - -int RGWObjManifest::append(RGWObjManifest& m, const RGWZoneGroup& zonegroup, +int RGWObjManifest::append(const DoutPrefixProvider *dpp, RGWObjManifest& m, const RGWZoneGroup& zonegroup, const RGWZoneParams& zone_params) { if (explicit_objs || m.explicit_objs) { - return append_explicit(m, zonegroup, zone_params); + return append_explicit(dpp, m, zonegroup, zone_params); } if (rules.empty()) { @@ -84,7 +63,7 @@ int RGWObjManifest::append(RGWObjManifest& m, const 
RGWZoneGroup& zonegroup, map::iterator miter = m.rules.begin(); if (miter == m.rules.end()) { - return append_explicit(m, zonegroup, zone_params); + return append_explicit(dpp, m, zonegroup, zone_params); } for (; miter != m.rules.end(); ++miter) { @@ -138,9 +117,9 @@ int RGWObjManifest::append(RGWObjManifest& m, const RGWZoneGroup& zonegroup, return 0; } -int RGWObjManifest::append(RGWObjManifest& m, RGWSI_Zone *zone_svc) +int RGWObjManifest::append(const DoutPrefixProvider *dpp, RGWObjManifest& m, RGWSI_Zone *zone_svc) { - return append(m, zone_svc->get_zonegroup(), zone_svc->get_zone_params()); + return append(dpp, m, zone_svc->get_zonegroup(), zone_svc->get_zone_params()); } void RGWObjManifest::append_rules(RGWObjManifest& m, map::iterator& miter, @@ -155,14 +134,14 @@ void RGWObjManifest::append_rules(RGWObjManifest& m, map::iterator iter; uint64_t base = obj_size; @@ -246,7 +225,7 @@ void RGWObjManifest::obj_iterator::operator++() /* are we still pointing at the head? */ if (ofs < head_size) { rule_iter = manifest->rules.begin(); - RGWObjManifestRule *rule = &rule_iter->second; + const RGWObjManifestRule *rule = &rule_iter->second; ofs = std::min(head_size, obj_size); stripe_ofs = ofs; cur_stripe = 1; @@ -258,16 +237,16 @@ void RGWObjManifest::obj_iterator::operator++() return; } - RGWObjManifestRule *rule = &rule_iter->second; + const RGWObjManifestRule *rule = &rule_iter->second; stripe_ofs += rule->stripe_max_size; cur_stripe++; - dout(20) << "RGWObjManifest::operator++(): rule->part_size=" << rule->part_size << " rules.size()=" << manifest->rules.size() << dendl; + ldpp_dout(dpp, 20) << "RGWObjManifest::operator++(): rule->part_size=" << rule->part_size << " rules.size()=" << manifest->rules.size() << dendl; if (rule->part_size > 0) { /* multi part, multi stripes object */ - dout(20) << "RGWObjManifest::operator++(): stripe_ofs=" << stripe_ofs << " part_ofs=" << part_ofs << " rule->part_size=" << rule->part_size << dendl; + ldpp_dout(dpp, 20) << "RGWObjManifest::operator++(): stripe_ofs=" << stripe_ofs << " part_ofs=" << part_ofs << " rule->part_size=" << rule->part_size << dendl; if (stripe_ofs >= part_ofs + rule->part_size) { /* moved to the next part */ @@ -303,7 +282,7 @@ void RGWObjManifest::obj_iterator::operator++() stripe_size = 0; } - dout(20) << "RGWObjManifest::operator++(): result: ofs=" << ofs << " stripe_ofs=" << stripe_ofs << " part_ofs=" << part_ofs << " rule->part_size=" << rule->part_size << dendl; + ldpp_dout(dpp, 20) << "RGWObjManifest::operator++(): result: ofs=" << ofs << " stripe_ofs=" << stripe_ofs << " part_ofs=" << part_ofs << " rule->part_size=" << rule->part_size << dendl; update_location(); } @@ -357,8 +336,164 @@ int RGWObjManifest::generator::create_begin(CephContext *cct, RGWObjManifest *_m // Normal object which not generated through copy operation manifest->set_tail_instance(_obj.key.instance); - manifest->update_iterators(); - return 0; } +void RGWObjManifest::obj_iterator::seek(uint64_t o) +{ + ofs = o; + if (manifest->explicit_objs) { + explicit_iter = manifest->objs.upper_bound(ofs); + if (explicit_iter != manifest->objs.begin()) { + --explicit_iter; + } + if (ofs < manifest->obj_size) { + update_explicit_pos(); + } else { + ofs = manifest->obj_size; + } + update_location(); + return; + } + if (o < manifest->get_head_size()) { + rule_iter = manifest->rules.begin(); + stripe_ofs = 0; + stripe_size = manifest->get_head_size(); + if (rule_iter != manifest->rules.end()) { + cur_part_id = rule_iter->second.start_part_num; + cur_override_prefix = 
rule_iter->second.override_prefix; + } + update_location(); + return; + } + + rule_iter = manifest->rules.upper_bound(ofs); + next_rule_iter = rule_iter; + if (rule_iter != manifest->rules.begin()) { + --rule_iter; + } + + if (rule_iter == manifest->rules.end()) { + update_location(); + return; + } + + const RGWObjManifestRule& rule = rule_iter->second; + + if (rule.part_size > 0) { + cur_part_id = rule.start_part_num + (ofs - rule.start_ofs) / rule.part_size; + } else { + cur_part_id = rule.start_part_num; + } + part_ofs = rule.start_ofs + (cur_part_id - rule.start_part_num) * rule.part_size; + + if (rule.stripe_max_size > 0) { + cur_stripe = (ofs - part_ofs) / rule.stripe_max_size; + + stripe_ofs = part_ofs + cur_stripe * rule.stripe_max_size; + if (!cur_part_id && manifest->get_head_size() > 0) { + cur_stripe++; + } + } else { + cur_stripe = 0; + stripe_ofs = part_ofs; + } + + if (!rule.part_size) { + stripe_size = rule.stripe_max_size; + stripe_size = std::min(manifest->get_obj_size() - stripe_ofs, stripe_size); + } else { + uint64_t next = std::min(stripe_ofs + rule.stripe_max_size, part_ofs + rule.part_size); + stripe_size = next - stripe_ofs; + } + + cur_override_prefix = rule.override_prefix; + + update_location(); +} + +void RGWObjManifest::obj_iterator::update_location() +{ + if (manifest->explicit_objs) { + if (manifest->empty()) { + location = rgw_obj_select{}; + } else { + location = explicit_iter->second.loc; + } + return; + } + + if (ofs < manifest->get_head_size()) { + location = manifest->get_obj(); + location.set_placement_rule(manifest->get_head_placement_rule()); + return; + } + + manifest->get_implicit_location(cur_part_id, cur_stripe, ofs, &cur_override_prefix, &location); +} + +void RGWObjManifest::obj_iterator::update_explicit_pos() +{ + ofs = explicit_iter->first; + stripe_ofs = ofs; + + auto next_iter = explicit_iter; + ++next_iter; + if (next_iter != manifest->objs.end()) { + stripe_size = next_iter->first - ofs; + } else { + stripe_size = manifest->obj_size - ofs; + } +} + +void RGWObjManifest::get_implicit_location(uint64_t cur_part_id, uint64_t cur_stripe, + uint64_t ofs, string *override_prefix, rgw_obj_select *location) const +{ + rgw_obj loc; + + string& oid = loc.key.name; + string& ns = loc.key.ns; + + if (!override_prefix || override_prefix->empty()) { + oid = prefix; + } else { + oid = *override_prefix; + } + + if (!cur_part_id) { + if (ofs < max_head_size) { + location->set_placement_rule(head_placement_rule); + *location = obj; + return; + } else { + char buf[16]; + snprintf(buf, sizeof(buf), "%d", (int)cur_stripe); + oid += buf; + ns = RGW_OBJ_NS_SHADOW; + } + } else { + char buf[32]; + if (cur_stripe == 0) { + snprintf(buf, sizeof(buf), ".%d", (int)cur_part_id); + oid += buf; + ns= RGW_OBJ_NS_MULTIPART; + } else { + snprintf(buf, sizeof(buf), ".%d_%d", (int)cur_part_id, (int)cur_stripe); + oid += buf; + ns = RGW_OBJ_NS_SHADOW; + } + } + + if (!tail_placement.bucket.name.empty()) { + loc.bucket = tail_placement.bucket; + } else { + loc.bucket = obj.bucket; + } + + // Always overwrite instance with tail_instance + // to get the right shadow object location + loc.key.set_instance(tail_instance); + + location->set_placement_rule(tail_placement.placement_rule); + *location = loc; +} diff --git a/src/rgw/rgw_obj_manifest.h b/src/rgw/rgw_obj_manifest.h index 5423dea356483..95e3f8b239927 100644 --- a/src/rgw/rgw_obj_manifest.h +++ b/src/rgw/rgw_obj_manifest.h @@ -163,17 +163,13 @@ protected: string tail_instance; /* tail object's instance */ - void 
convert_to_explicit(const RGWZoneGroup& zonegroup, const RGWZoneParams& zone_params); - int append_explicit(RGWObjManifest& m, const RGWZoneGroup& zonegroup, const RGWZoneParams& zone_params); + void convert_to_explicit(const DoutPrefixProvider *dpp, const RGWZoneGroup& zonegroup, const RGWZoneParams& zone_params); + int append_explicit(const DoutPrefixProvider *dpp, RGWObjManifest& m, const RGWZoneGroup& zonegroup, const RGWZoneParams& zone_params); void append_rules(RGWObjManifest& m, map::iterator& iter, string *override_prefix); - void update_iterators() { - begin_iter.seek(0); - end_iter.seek(obj_size); - } public: - RGWObjManifest() : begin_iter(this), end_iter(this) {} + RGWObjManifest() = default; RGWObjManifest(const RGWObjManifest& rhs) { *this = rhs; } @@ -188,13 +184,6 @@ public: tail_placement = rhs.tail_placement; rules = rhs.rules; tail_instance = rhs.tail_instance; - - begin_iter.set_manifest(this); - end_iter.set_manifest(this); - - begin_iter.seek(rhs.begin_iter.get_ofs()); - end_iter.seek(rhs.end_iter.get_ofs()); - return *this; } @@ -209,7 +198,8 @@ public: set_obj_size(_size); } - void get_implicit_location(uint64_t cur_part_id, uint64_t cur_stripe, uint64_t ofs, string *override_prefix, rgw_obj_select *location); + void get_implicit_location(uint64_t cur_part_id, uint64_t cur_stripe, uint64_t ofs, + string *override_prefix, rgw_obj_select *location) const; void set_trivial_rule(uint64_t tail_ofs, uint64_t stripe_max_size) { RGWObjManifestRule rule(0, tail_ofs, 0, stripe_max_size); @@ -318,34 +308,33 @@ public: decode(tail_placement.placement_rule, bl); } - update_iterators(); DECODE_FINISH(bl); } void dump(Formatter *f) const; static void generate_test_instances(list& o); - int append(RGWObjManifest& m, const RGWZoneGroup& zonegroup, + int append(const DoutPrefixProvider *dpp, RGWObjManifest& m, const RGWZoneGroup& zonegroup, const RGWZoneParams& zone_params); - int append(RGWObjManifest& m, RGWSI_Zone *zone_svc); + int append(const DoutPrefixProvider *dpp, RGWObjManifest& m, RGWSI_Zone *zone_svc); bool get_rule(uint64_t ofs, RGWObjManifestRule *rule); - bool empty() { + bool empty() const { if (explicit_objs) return objs.empty(); return rules.empty(); } - bool has_explicit_objs() { + bool has_explicit_objs() const { return explicit_objs; } - bool has_tail() { + bool has_tail() const { if (explicit_objs) { if (objs.size() == 1) { - map::iterator iter = objs.begin(); - rgw_obj& o = iter->second.loc; + auto iter = objs.begin(); + const rgw_obj& o = iter->second.loc; return !(obj == o); } return (objs.size() >= 2); @@ -364,7 +353,7 @@ public: } } - const rgw_obj& get_obj() { + const rgw_obj& get_obj() const { return obj; } @@ -373,11 +362,11 @@ public: tail_placement.bucket = _b; } - const rgw_bucket_placement& get_tail_placement() { + const rgw_bucket_placement& get_tail_placement() const { return tail_placement; } - const rgw_placement_rule& get_head_placement_rule() { + const rgw_placement_rule& get_head_placement_rule() const { return head_placement_rule; } @@ -385,7 +374,7 @@ public: prefix = _p; } - const string& get_prefix() { + const string& get_prefix() const { return prefix; } @@ -393,7 +382,7 @@ public: tail_instance = _ti; } - const string& get_tail_instance() { + const string& get_tail_instance() const { return tail_instance; } @@ -403,24 +392,23 @@ public: void set_obj_size(uint64_t s) { obj_size = s; - - update_iterators(); } - uint64_t get_obj_size() { + uint64_t get_obj_size() const { return obj_size; } - uint64_t get_head_size() { + uint64_t 
get_head_size() const { return head_size; } - uint64_t get_max_head_size() { + uint64_t get_max_head_size() const { return max_head_size; } class obj_iterator { - RGWObjManifest *manifest = nullptr; + const DoutPrefixProvider *dpp; + const RGWObjManifest *manifest = nullptr; uint64_t part_ofs = 0; /* where current part starts */ uint64_t stripe_ofs = 0; /* where current stripe starts */ uint64_t ofs = 0; /* current position within the object */ @@ -432,26 +420,18 @@ public: rgw_obj_select location; - map::iterator rule_iter; - map::iterator next_rule_iter; - - map::iterator explicit_iter; + map::const_iterator rule_iter; + map::const_iterator next_rule_iter; + map::const_iterator explicit_iter; void update_explicit_pos(); - - protected: - - void set_manifest(RGWObjManifest *m) { - manifest = m; - } - public: obj_iterator() = default; - explicit obj_iterator(RGWObjManifest *_m) - : obj_iterator(_m, 0) + explicit obj_iterator(const DoutPrefixProvider *_dpp, const RGWObjManifest *_m) + : obj_iterator(_dpp, _m, 0) {} - obj_iterator(RGWObjManifest *_m, uint64_t _ofs) : manifest(_m) { + obj_iterator(const DoutPrefixProvider *_dpp, const RGWObjManifest *_m, uint64_t _ofs) : dpp(_dpp), manifest(_m) { seek(_ofs); } void seek(uint64_t ofs); @@ -508,16 +488,14 @@ public: void update_location(); - friend class RGWObjManifest; void dump(Formatter *f) const; }; // class obj_iterator - const obj_iterator& obj_begin(); - const obj_iterator& obj_end(); - obj_iterator obj_find(uint64_t ofs); - - obj_iterator begin_iter; - obj_iterator end_iter; + obj_iterator obj_begin(const DoutPrefixProvider *dpp) const { return obj_iterator{dpp, this}; } + obj_iterator obj_end(const DoutPrefixProvider *dpp) const { return obj_iterator{dpp, this, obj_size}; } + obj_iterator obj_find(const DoutPrefixProvider *dpp, uint64_t ofs) const { + return obj_iterator{dpp, this, std::min(ofs, obj_size)}; + } /* * simple object generator. Using a simple single rule manifest. 
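
Note on the rgw_obj_manifest.h changes above: the cached begin_iter/end_iter members are gone, and obj_begin()/obj_end()/obj_find() now build const iterators on demand, each carrying a DoutPrefixProvider for its debug output. A minimal caller-side sketch of the resulting usage, assuming only what the header above declares (obj_begin()/obj_end() taking a dpp, operator++, operator!= and get_location()); the helper name count_stripes is illustrative and not part of this patch:

static uint64_t count_stripes(const DoutPrefixProvider *dpp,
                              const RGWObjManifest& manifest)
{
  uint64_t n = 0;
  // obj_end(dpp) is simply an iterator seeked to obj_size; comparison is by offset.
  auto end = manifest.obj_end(dpp);
  for (auto iter = manifest.obj_begin(dpp); iter != end; ++iter) {
    // iter.get_location() still yields the rgw_obj_select for the current
    // stripe, as before; only iterator construction and its logging
    // (ldpp_dout instead of dout) have changed.
    ++n;
  }
  return n;
}
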
diff --git a/src/rgw/rgw_object_expirer.cc b/src/rgw/rgw_object_expirer.cc index cf30693baa496..42aefea6712a1 100644 --- a/src/rgw/rgw_object_expirer.cc +++ b/src/rgw/rgw_object_expirer.cc @@ -80,7 +80,8 @@ int main(const int argc, const char **argv) common_init_finish(g_ceph_context); - store = RGWStoreManager::get_storage(g_ceph_context, false, false, false, false, false); + const DoutPrefix dp(cct.get(), dout_subsys, "rgw object expirer: "); + store = RGWStoreManager::get_storage(&dp, g_ceph_context, false, false, false, false, false); if (!store) { std::cerr << "couldn't init storage provider" << std::endl; return EIO; diff --git a/src/rgw/rgw_object_expirer_core.cc b/src/rgw/rgw_object_expirer_core.cc index d390d491013d0..52f241082cc70 100644 --- a/src/rgw/rgw_object_expirer_core.cc +++ b/src/rgw/rgw_object_expirer_core.cc @@ -85,7 +85,8 @@ static int objexp_hint_parse(CephContext *cct, cls_timeindex_entry &ti_entry, return 0; } -int RGWObjExpStore::objexp_hint_add(const ceph::real_time& delete_at, +int RGWObjExpStore::objexp_hint_add(const DoutPrefixProvider *dpp, + const ceph::real_time& delete_at, const string& tenant_name, const string& bucket_name, const string& bucket_id, @@ -106,15 +107,16 @@ int RGWObjExpStore::objexp_hint_add(const ceph::real_time& delete_at, string shard_name = objexp_hint_get_shardname(objexp_key_shard(obj_key, cct->_conf->rgw_objexp_hints_num_shards)); auto obj = rados_svc->obj(rgw_raw_obj(zone_svc->get_zone_params().log_pool, shard_name)); - int r = obj.open(); + int r = obj.open(dpp); if (r < 0) { - ldout(cct, 0) << "ERROR: " << __func__ << "(): failed to open obj=" << obj << " (r=" << r << ")" << dendl; + ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): failed to open obj=" << obj << " (r=" << r << ")" << dendl; return r; } - return obj.operate(&op, null_yield); + return obj.operate(dpp, &op, null_yield); } -int RGWObjExpStore::objexp_hint_list(const string& oid, +int RGWObjExpStore::objexp_hint_list(const DoutPrefixProvider *dpp, + const string& oid, const ceph::real_time& start_time, const ceph::real_time& end_time, const int max_entries, @@ -128,13 +130,13 @@ int RGWObjExpStore::objexp_hint_list(const string& oid, out_marker, truncated); auto obj = rados_svc->obj(rgw_raw_obj(zone_svc->get_zone_params().log_pool, oid)); - int r = obj.open(); + int r = obj.open(dpp); if (r < 0) { - ldout(cct, 0) << "ERROR: " << __func__ << "(): failed to open obj=" << obj << " (r=" << r << ")" << dendl; + ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): failed to open obj=" << obj << " (r=" << r << ")" << dendl; return r; } bufferlist obl; - int ret = obj.operate(&op, &obl, null_yield); + int ret = obj.operate(dpp, &op, &obl, null_yield); if ((ret < 0 ) && (ret != -ENOENT)) { return ret; @@ -147,7 +149,8 @@ int RGWObjExpStore::objexp_hint_list(const string& oid, return 0; } -static int cls_timeindex_trim_repeat(rgw_rados_ref ref, +static int cls_timeindex_trim_repeat(const DoutPrefixProvider *dpp, + rgw_rados_ref ref, const string& oid, const utime_t& from_time, const utime_t& to_time, @@ -158,7 +161,7 @@ static int cls_timeindex_trim_repeat(rgw_rados_ref ref, do { librados::ObjectWriteOperation op; cls_timeindex_trim(op, from_time, to_time, from_marker, to_marker); - int r = rgw_rados_operate(ref.pool.ioctx(), oid, &op, null_yield); + int r = rgw_rados_operate(dpp, ref.pool.ioctx(), oid, &op, null_yield); if (r == -ENODATA) done = true; else if (r < 0) @@ -168,20 +171,21 @@ static int cls_timeindex_trim_repeat(rgw_rados_ref ref, return 0; } -int 
RGWObjExpStore::objexp_hint_trim(const string& oid, +int RGWObjExpStore::objexp_hint_trim(const DoutPrefixProvider *dpp, + const string& oid, const ceph::real_time& start_time, const ceph::real_time& end_time, const string& from_marker, const string& to_marker) { auto obj = rados_svc->obj(rgw_raw_obj(zone_svc->get_zone_params().log_pool, oid)); - int r = obj.open(); + int r = obj.open(dpp); if (r < 0) { - ldout(cct, 0) << "ERROR: " << __func__ << "(): failed to open obj=" << obj << " (r=" << r << ")" << dendl; + ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): failed to open obj=" << obj << " (r=" << r << ")" << dendl; return r; } auto& ref = obj.get_ref(); - int ret = cls_timeindex_trim_repeat(ref, oid, utime_t(start_time), utime_t(end_time), + int ret = cls_timeindex_trim_repeat(dpp, ref, oid, utime_t(start_time), utime_t(end_time), from_marker, to_marker); if ((ret < 0 ) && (ret != -ENOENT)) { return ret; @@ -210,18 +214,18 @@ int RGWObjectExpirer::init_bucket_info(const string& tenant_name, } -int RGWObjectExpirer::garbage_single_object(objexp_hint_entry& hint) +int RGWObjectExpirer::garbage_single_object(const DoutPrefixProvider *dpp, objexp_hint_entry& hint) { RGWBucketInfo bucket_info; int ret = init_bucket_info(hint.tenant, hint.bucket_name, hint.bucket_id, bucket_info); if (-ENOENT == ret) { - ldout(store->ctx(), 15) << "NOTICE: cannot find bucket = " \ + ldpp_dout(dpp, 15) << "NOTICE: cannot find bucket = " \ << hint.bucket_name << ". The object must be already removed" << dendl; return -ERR_PRECONDITION_FAILED; } else if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: could not init bucket = " \ + ldpp_dout(dpp, 1) << "ERROR: could not init bucket = " \ << hint.bucket_name << "due to ret = " << ret << dendl; return ret; } @@ -235,13 +239,14 @@ int RGWObjectExpirer::garbage_single_object(objexp_hint_entry& hint) rgw_obj obj(bucket_info.bucket, key); store->getRados()->set_atomic(&rctx, obj); - ret = store->getRados()->delete_obj(rctx, bucket_info, obj, + ret = store->getRados()->delete_obj(dpp, rctx, bucket_info, obj, bucket_info.versioning_status(), 0, hint.exp_time); return ret; } -void RGWObjectExpirer::garbage_chunk(list& entries, /* in */ +void RGWObjectExpirer::garbage_chunk(const DoutPrefixProvider *dpp, + list& entries, /* in */ bool& need_trim) /* out */ { need_trim = false; @@ -251,22 +256,22 @@ void RGWObjectExpirer::garbage_chunk(list& entries, /* ++iter) { objexp_hint_entry hint; - ldout(store->ctx(), 15) << "got removal hint for: " << iter->key_ts.sec() \ + ldpp_dout(dpp, 15) << "got removal hint for: " << iter->key_ts.sec() \ << " - " << iter->key_ext << dendl; int ret = objexp_hint_parse(store->getRados()->ctx(), *iter, &hint); if (ret < 0) { - ldout(store->ctx(), 1) << "cannot parse removal hint for " << hint.obj_key << dendl; + ldpp_dout(dpp, 1) << "cannot parse removal hint for " << hint.obj_key << dendl; continue; } /* PRECOND_FAILED simply means that our hint is not valid. * We can silently ignore that and move forward. 
*/ - ret = garbage_single_object(hint); + ret = garbage_single_object(dpp, hint); if (ret == -ERR_PRECONDITION_FAILED) { - ldout(store->ctx(), 15) << "not actual hint for object: " << hint.obj_key << dendl; + ldpp_dout(dpp, 15) << "not actual hint for object: " << hint.obj_key << dendl; } else if (ret < 0) { - ldout(store->ctx(), 1) << "cannot remove expired object: " << hint.obj_key << dendl; + ldpp_dout(dpp, 1) << "cannot remove expired object: " << hint.obj_key << dendl; } need_trim = true; @@ -275,28 +280,30 @@ void RGWObjectExpirer::garbage_chunk(list& entries, /* return; } -void RGWObjectExpirer::trim_chunk(const string& shard, +void RGWObjectExpirer::trim_chunk(const DoutPrefixProvider *dpp, + const string& shard, const utime_t& from, const utime_t& to, const string& from_marker, const string& to_marker) { - ldout(store->ctx(), 20) << "trying to trim removal hints to=" << to + ldpp_dout(dpp, 20) << "trying to trim removal hints to=" << to << ", to_marker=" << to_marker << dendl; real_time rt_from = from.to_real_time(); real_time rt_to = to.to_real_time(); - int ret = exp_store.objexp_hint_trim(shard, rt_from, rt_to, + int ret = exp_store.objexp_hint_trim(dpp, shard, rt_from, rt_to, from_marker, to_marker); if (ret < 0) { - ldout(store->ctx(), 0) << "ERROR during trim: " << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR during trim: " << ret << dendl; } return; } -bool RGWObjectExpirer::process_single_shard(const string& shard, +bool RGWObjectExpirer::process_single_shard(const DoutPrefixProvider *dpp, + const string& shard, const utime_t& last_run, const utime_t& round_start) { @@ -319,7 +326,7 @@ bool RGWObjectExpirer::process_single_shard(const string& shard, int ret = l.lock_exclusive(&store->getRados()->objexp_pool_ctx, shard); if (ret == -EBUSY) { /* already locked by another processor */ - dout(5) << __func__ << "(): failed to acquire lock on " << shard << dendl; + ldpp_dout(dpp, 5) << __func__ << "(): failed to acquire lock on " << shard << dendl; return false; } @@ -328,20 +335,20 @@ bool RGWObjectExpirer::process_single_shard(const string& shard, real_time rt_start = round_start.to_real_time(); list entries; - ret = exp_store.objexp_hint_list(shard, rt_last, rt_start, + ret = exp_store.objexp_hint_list(dpp, shard, rt_last, rt_start, num_entries, marker, entries, &out_marker, &truncated); if (ret < 0) { - ldout(cct, 10) << "cannot get removal hints from shard: " << shard + ldpp_dout(dpp, 10) << "cannot get removal hints from shard: " << shard << dendl; continue; } bool need_trim; - garbage_chunk(entries, need_trim); + garbage_chunk(dpp, entries, need_trim); if (need_trim) { - trim_chunk(shard, last_run, round_start, marker, out_marker); + trim_chunk(dpp, shard, last_run, round_start, marker, out_marker); } utime_t now = ceph_clock_now(); @@ -358,7 +365,8 @@ bool RGWObjectExpirer::process_single_shard(const string& shard, } /* Returns true if all shards have been processed successfully. */ -bool RGWObjectExpirer::inspect_all_shards(const utime_t& last_run, +bool RGWObjectExpirer::inspect_all_shards(const DoutPrefixProvider *dpp, + const utime_t& last_run, const utime_t& round_start) { CephContext * const cct = store->ctx(); @@ -369,9 +377,9 @@ bool RGWObjectExpirer::inspect_all_shards(const utime_t& last_run, string shard; objexp_get_shard(i, &shard); - ldout(store->ctx(), 20) << "processing shard = " << shard << dendl; + ldpp_dout(dpp, 20) << "processing shard = " << shard << dendl; - if (! process_single_shard(shard, last_run, round_start)) { + if (! 
process_single_shard(dpp, shard, last_run, round_start)) { all_done = false; } } @@ -406,7 +414,7 @@ void *RGWObjectExpirer::OEWorker::entry() { do { utime_t start = ceph_clock_now(); ldout(cct, 2) << "object expiration: start" << dendl; - if (oe->inspect_all_shards(last_run, start)) { + if (oe->inspect_all_shards(this, last_run, start)) { /* All shards have been processed properly. Next time we can start * from this moment. */ last_run = start; @@ -439,3 +447,17 @@ void RGWObjectExpirer::OEWorker::stop() cond.notify_all(); } +CephContext *RGWObjectExpirer::OEWorker::get_cct() const +{ + return cct; +} + +unsigned RGWObjectExpirer::OEWorker::get_subsys() const +{ + return dout_subsys; +} + +std::ostream& RGWObjectExpirer::OEWorker::gen_prefix(std::ostream& out) const +{ + return out << "rgw object expirer Worker thread: "; +} diff --git a/src/rgw/rgw_object_expirer_core.h b/src/rgw/rgw_object_expirer_core.h index f76fe56cb6561..a17f447c4aae3 100644 --- a/src/rgw/rgw_object_expirer_core.h +++ b/src/rgw/rgw_object_expirer_core.h @@ -46,13 +46,15 @@ public: rados_svc(_rados_svc), zone_svc(_zone_svc) {} - int objexp_hint_add(const ceph::real_time& delete_at, + int objexp_hint_add(const DoutPrefixProvider *dpp, + const ceph::real_time& delete_at, const string& tenant_name, const string& bucket_name, const string& bucket_id, const rgw_obj_index_key& obj_key); - int objexp_hint_list(const string& oid, + int objexp_hint_list(const DoutPrefixProvider *dpp, + const string& oid, const ceph::real_time& start_time, const ceph::real_time& end_time, const int max_entries, @@ -61,7 +63,8 @@ public: string *out_marker, /* out */ bool *truncated); /* out */ - int objexp_hint_trim(const string& oid, + int objexp_hint_trim(const DoutPrefixProvider *dpp, + const string& oid, const ceph::real_time& start_time, const ceph::real_time& end_time, const string& from_marker, @@ -78,7 +81,7 @@ protected: const std::string& bucket_id, RGWBucketInfo& bucket_info); - class OEWorker : public Thread { + class OEWorker : public Thread, public DoutPrefixProvider { CephContext *cct; RGWObjectExpirer *oe; ceph::mutex lock = ceph::make_mutex("OEWorker"); @@ -93,6 +96,10 @@ protected: void *entry() override; void stop(); + + CephContext *get_cct() const override; + unsigned get_subsys() const; + std::ostream& gen_prefix(std::ostream& out) const; }; OEWorker *worker{nullptr}; @@ -108,31 +115,36 @@ public: stop_processor(); } - int hint_add(const ceph::real_time& delete_at, + int hint_add(const DoutPrefixProvider *dpp, + const ceph::real_time& delete_at, const string& tenant_name, const string& bucket_name, const string& bucket_id, const rgw_obj_index_key& obj_key) { - return exp_store.objexp_hint_add(delete_at, tenant_name, bucket_name, + return exp_store.objexp_hint_add(dpp, delete_at, tenant_name, bucket_name, bucket_id, obj_key); } - int garbage_single_object(objexp_hint_entry& hint); + int garbage_single_object(const DoutPrefixProvider *dpp, objexp_hint_entry& hint); - void garbage_chunk(std::list& entries, /* in */ + void garbage_chunk(const DoutPrefixProvider *dpp, + std::list& entries, /* in */ bool& need_trim); /* out */ - void trim_chunk(const std::string& shard, + void trim_chunk(const DoutPrefixProvider *dpp, + const std::string& shard, const utime_t& from, const utime_t& to, const string& from_marker, const string& to_marker); - bool process_single_shard(const std::string& shard, + bool process_single_shard(const DoutPrefixProvider *dpp, + const std::string& shard, const utime_t& last_run, const utime_t& round_start); 
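// Illustrative sketch only, not part of the patch (mock base classes, not the
// real Thread / DoutPrefixProvider declarations): the pattern used for
// OEWorker above is that a background worker implements the prefix-provider
// interface itself -- get_cct(), get_subsys(), gen_prefix() -- so it can
// simply pass `this` to every function that now takes a dpp, exactly as
// entry() does with oe->inspect_all_shards(this, ...).
#include <iostream>
#include <ostream>

struct MockContext {};                              // stands in for CephContext

struct MockDoutPrefixProvider {
  virtual MockContext* get_cct() const = 0;
  virtual unsigned get_subsys() const = 0;
  virtual std::ostream& gen_prefix(std::ostream& out) const = 0;
  virtual ~MockDoutPrefixProvider() = default;
};

constexpr unsigned mock_rgw_subsys = 1;             // stands in for dout_subsys

class MockWorker : public MockDoutPrefixProvider {  // OEWorker-like
  MockContext* cct;
public:
  explicit MockWorker(MockContext* c) : cct(c) {}

  MockContext* get_cct() const override { return cct; }
  unsigned get_subsys() const override { return mock_rgw_subsys; }
  std::ostream& gen_prefix(std::ostream& out) const override {
    return out << "rgw object expirer Worker thread: ";
  }

  // The worker is its own DoutPrefixProvider, so helpers just take `this`.
  void entry_like() { do_round(this); }

private:
  static void do_round(const MockDoutPrefixProvider* dpp) {
    dpp->gen_prefix(std::cout) << "object expiration: start" << std::endl;
  }
};

int main() {
  MockContext cct;
  MockWorker w(&cct);
  w.entry_like();
}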
- bool inspect_all_shards(const utime_t& last_run, + bool inspect_all_shards(const DoutPrefixProvider *dpp, + const utime_t& last_run, const utime_t& round_start); bool going_down(); diff --git a/src/rgw/rgw_oidc_provider.cc b/src/rgw/rgw_oidc_provider.cc index b9aceac83d04d..757ae90ca25fa 100644 --- a/src/rgw/rgw_oidc_provider.cc +++ b/src/rgw/rgw_oidc_provider.cc @@ -27,7 +27,7 @@ const string RGWOIDCProvider::oidc_url_oid_prefix = "oidc_url."; const string RGWOIDCProvider::oidc_arn_prefix = "arn:aws:iam::"; -int RGWOIDCProvider::store_url(const string& url, bool exclusive, +int RGWOIDCProvider::store_url(const DoutPrefixProvider *dpp, const string& url, bool exclusive, optional_yield y) { using ceph::encode; @@ -38,7 +38,7 @@ int RGWOIDCProvider::store_url(const string& url, bool exclusive, bufferlist bl; encode(*this, bl); auto obj_ctx = svc->sysobj->init_obj_ctx(); - return rgw_put_system_obj(obj_ctx, svc->zone->get_zone_params().oidc_pool, oid, + return rgw_put_system_obj(dpp, obj_ctx, svc->zone->get_zone_params().oidc_pool, oid, bl, exclusive, NULL, real_time(), y); } @@ -57,7 +57,7 @@ int RGWOIDCProvider::get_tenant_url_from_arn(string& tenant, string& url) return 0; } -int RGWOIDCProvider::create(bool exclusive, optional_yield y) +int RGWOIDCProvider::create(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y) { int ret; @@ -68,13 +68,13 @@ int RGWOIDCProvider::create(bool exclusive, optional_yield y) string idp_url = url_remove_prefix(provider_url); /* check to see the name is not used */ - ret = read_url(idp_url, tenant); + ret = read_url(dpp, idp_url, tenant); if (exclusive && ret == 0) { - ldout(cct, 0) << "ERROR: url " << provider_url << " already in use" + ldpp_dout(dpp, 0) << "ERROR: url " << provider_url << " already in use" << id << dendl; return -EEXIST; } else if ( ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << "failed reading provider url " << provider_url << ": " + ldpp_dout(dpp, 0) << "failed reading provider url " << provider_url << ": " << cpp_strerror(-ret) << dendl; return ret; } @@ -98,9 +98,9 @@ int RGWOIDCProvider::create(bool exclusive, optional_yield y) auto svc = ctl->svc; auto& pool = svc->zone->get_zone_params().oidc_pool; - ret = store_url(idp_url, exclusive, y); + ret = store_url(dpp, idp_url, exclusive, y); if (ret < 0) { - ldout(cct, 0) << "ERROR: storing role info in pool: " << pool.name << ": " + ldpp_dout(dpp, 0) << "ERROR: storing role info in pool: " << pool.name << ": " << provider_url << ": " << cpp_strerror(-ret) << dendl; return ret; } @@ -108,7 +108,7 @@ int RGWOIDCProvider::create(bool exclusive, optional_yield y) return 0; } -int RGWOIDCProvider::delete_obj(optional_yield y) +int RGWOIDCProvider::delete_obj(const DoutPrefixProvider *dpp, optional_yield y) { auto svc = ctl->svc; auto& pool = svc->zone->get_zone_params().oidc_pool; @@ -116,43 +116,43 @@ int RGWOIDCProvider::delete_obj(optional_yield y) string url, tenant; auto ret = get_tenant_url_from_arn(tenant, url); if (ret < 0) { - ldout(cct, 0) << "ERROR: failed to parse arn" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to parse arn" << dendl; return -EINVAL; } if (this->tenant != tenant) { - ldout(cct, 0) << "ERROR: tenant in arn doesn't match that of user " << this->tenant << ", " + ldpp_dout(dpp, 0) << "ERROR: tenant in arn doesn't match that of user " << this->tenant << ", " << tenant << ": " << dendl; return -EINVAL; } // Delete url string oid = tenant + get_url_oid_prefix() + url; - ret = rgw_delete_system_obj(svc->sysobj, pool, oid, NULL, y); + ret = 
rgw_delete_system_obj(dpp, svc->sysobj, pool, oid, NULL, y); if (ret < 0) { - ldout(cct, 0) << "ERROR: deleting oidc url from pool: " << pool.name << ": " + ldpp_dout(dpp, 0) << "ERROR: deleting oidc url from pool: " << pool.name << ": " << provider_url << ": " << cpp_strerror(-ret) << dendl; } return ret; } -int RGWOIDCProvider::get() +int RGWOIDCProvider::get(const DoutPrefixProvider *dpp) { string url, tenant; auto ret = get_tenant_url_from_arn(tenant, url); if (ret < 0) { - ldout(cct, 0) << "ERROR: failed to parse arn" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to parse arn" << dendl; return -EINVAL; } if (this->tenant != tenant) { - ldout(cct, 0) << "ERROR: tenant in arn doesn't match that of user " << this->tenant << ", " + ldpp_dout(dpp, 0) << "ERROR: tenant in arn doesn't match that of user " << this->tenant << ", " << tenant << ": " << dendl; return -EINVAL; } - ret = read_url(url, tenant); + ret = read_url(dpp, url, tenant); if (ret < 0) { return ret; } @@ -186,7 +186,7 @@ void RGWOIDCProvider::decode_json(JSONObj *obj) JSONDecoder::decode_json("OpenIDConnectProviderArn", arn, obj); } -int RGWOIDCProvider::read_url(const string& url, const string& tenant) +int RGWOIDCProvider::read_url(const DoutPrefixProvider *dpp, const string& url, const string& tenant) { auto svc = ctl->svc; auto& pool = svc->zone->get_zone_params().oidc_pool; @@ -194,7 +194,7 @@ int RGWOIDCProvider::read_url(const string& url, const string& tenant) bufferlist bl; auto obj_ctx = svc->sysobj->init_obj_ctx(); - int ret = rgw_get_system_obj(obj_ctx, pool, oid, bl, NULL, NULL, null_yield); + int ret = rgw_get_system_obj(obj_ctx, pool, oid, bl, NULL, NULL, null_yield, dpp); if (ret < 0) { return ret; } @@ -204,7 +204,7 @@ int RGWOIDCProvider::read_url(const string& url, const string& tenant) auto iter = bl.cbegin(); decode(*this, iter); } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: failed to decode oidc provider info from pool: " << pool.name << + ldpp_dout(dpp, 0) << "ERROR: failed to decode oidc provider info from pool: " << pool.name << ": " << url << dendl; return -EIO; } @@ -243,7 +243,7 @@ bool RGWOIDCProvider::validate_input() return true; } -int RGWOIDCProvider::get_providers(RGWRados *store, +int RGWOIDCProvider::get_providers(const DoutPrefixProvider *dpp, RGWRados *store, const string& tenant, vector& providers) { @@ -258,9 +258,9 @@ int RGWOIDCProvider::get_providers(RGWRados *store, RGWListRawObjsCtx ctx; do { list oids; - int r = store->list_raw_objects(pool, prefix, 1000, ctx, oids, &is_truncated); + int r = store->list_raw_objects(dpp, pool, prefix, 1000, ctx, oids, &is_truncated); if (r < 0) { - ldout(ctl->cct, 0) << "ERROR: listing filtered objects failed: " << pool.name << ": " + ldpp_dout(dpp, 0) << "ERROR: listing filtered objects failed: " << pool.name << ": " << prefix << ": " << cpp_strerror(-r) << dendl; return r; } @@ -269,7 +269,7 @@ int RGWOIDCProvider::get_providers(RGWRados *store, bufferlist bl; auto obj_ctx = svc->sysobj->init_obj_ctx(); - int ret = rgw_get_system_obj(obj_ctx, pool, iter, bl, NULL, NULL, null_yield); + int ret = rgw_get_system_obj(obj_ctx, pool, iter, bl, NULL, NULL, null_yield, dpp); if (ret < 0) { return ret; } @@ -279,7 +279,7 @@ int RGWOIDCProvider::get_providers(RGWRados *store, auto iter = bl.cbegin(); decode(provider, iter); } catch (buffer::error& err) { - ldout(ctl->cct, 0) << "ERROR: failed to decode oidc provider info from pool: " << pool.name << + ldpp_dout(dpp, 0) << "ERROR: failed to decode oidc provider info from pool: " << pool.name 
<< ": " << iter << dendl; return -EIO; } diff --git a/src/rgw/rgw_oidc_provider.h b/src/rgw/rgw_oidc_provider.h index f8ecb7b59a912..4b6ecda9d76c3 100644 --- a/src/rgw/rgw_oidc_provider.h +++ b/src/rgw/rgw_oidc_provider.h @@ -35,8 +35,8 @@ class RGWOIDCProvider vector thumbprints; int get_tenant_url_from_arn(string& tenant, string& url); - int store_url(const string& url, bool exclusive, optional_yield y); - int read_url(const string& url, const string& tenant); + int store_url(const DoutPrefixProvider *dpp, const string& url, bool exclusive, optional_yield y); + int read_url(const DoutPrefixProvider *dpp, const string& url, const string& tenant); bool validate_input(); public: @@ -110,15 +110,15 @@ public: const vector& get_client_ids() const { return client_ids;} const vector& get_thumbprints() const { return thumbprints; } - int create(bool exclusive, optional_yield y); - int delete_obj(optional_yield y); - int get(); + int create(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y); + int delete_obj(const DoutPrefixProvider *dpp, optional_yield y); + int get(const DoutPrefixProvider *dpp); void dump(Formatter *f) const; void dump_all(Formatter *f) const; void decode_json(JSONObj *obj); static const string& get_url_oid_prefix(); - static int get_providers(RGWRados *store, + static int get_providers(const DoutPrefixProvider *dpp, RGWRados *store, const string& tenant, vector& providers); }; diff --git a/src/rgw/rgw_op.cc b/src/rgw/rgw_op.cc index d6bc277e6dbd0..2165da1607a66 100644 --- a/src/rgw/rgw_op.cc +++ b/src/rgw/rgw_op.cc @@ -163,7 +163,8 @@ done: return r; } -static int decode_policy(CephContext *cct, +static int decode_policy(const DoutPrefixProvider *dpp, + CephContext *cct, bufferlist& bl, RGWAccessControlPolicy *policy) { @@ -171,11 +172,11 @@ static int decode_policy(CephContext *cct, try { policy->decode(iter); } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl; + ldpp_dout(dpp, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl; return -EIO; } if (cct->_conf->subsys.should_gather()) { - ldout(cct, 15) << __func__ << " Read AccessControlPolicy"; + ldpp_dout(dpp, 15) << __func__ << " Read AccessControlPolicy"; RGWAccessControlPolicy_S3 *s3policy = static_cast(policy); s3policy->to_xml(*_dout); *_dout << dendl; @@ -184,14 +185,15 @@ static int decode_policy(CephContext *cct, } -static int get_user_policy_from_attr(CephContext * const cct, +static int get_user_policy_from_attr(const DoutPrefixProvider *dpp, + CephContext * const cct, rgw::sal::RGWRadosStore * const store, map& attrs, RGWAccessControlPolicy& policy /* out */) { auto aiter = attrs.find(RGW_ATTR_ACL); if (aiter != attrs.end()) { - int ret = decode_policy(cct, aiter->second, &policy); + int ret = decode_policy(dpp, cct, aiter->second, &policy); if (ret < 0) { return ret; } @@ -209,7 +211,8 @@ static int get_user_policy_from_attr(CephContext * const cct, * object: name of the object to get the ACL for. * Returns: 0 on success, -ERR# otherwise. 
*/ -int rgw_op_get_bucket_policy_from_attr(CephContext *cct, +int rgw_op_get_bucket_policy_from_attr(const DoutPrefixProvider *dpp, + CephContext *cct, rgw::sal::RGWStore *store, RGWBucketInfo& bucket_info, map& bucket_attrs, @@ -219,14 +222,14 @@ int rgw_op_get_bucket_policy_from_attr(CephContext *cct, map::iterator aiter = bucket_attrs.find(RGW_ATTR_ACL); if (aiter != bucket_attrs.end()) { - int ret = decode_policy(cct, aiter->second, policy); + int ret = decode_policy(dpp, cct, aiter->second, policy); if (ret < 0) return ret; } else { - ldout(cct, 0) << "WARNING: couldn't find acl header for bucket, generating default" << dendl; + ldpp_dout(dpp, 0) << "WARNING: couldn't find acl header for bucket, generating default" << dendl; std::unique_ptr user = store->get_user(bucket_info.owner); /* object exists, but policy is broken */ - int r = user->load_by_id(y); + int r = user->load_by_id(dpp, y); if (r < 0) return r; @@ -235,7 +238,8 @@ int rgw_op_get_bucket_policy_from_attr(CephContext *cct, return 0; } -static int get_obj_policy_from_attr(CephContext *cct, +static int get_obj_policy_from_attr(const DoutPrefixProvider *dpp, + CephContext *cct, rgw::sal::RGWStore *store, RGWObjectCtx& obj_ctx, RGWBucketInfo& bucket_info, @@ -250,16 +254,16 @@ static int get_obj_policy_from_attr(CephContext *cct, std::unique_ptr rop = obj->get_read_op(&obj_ctx); - ret = rop->get_attr(RGW_ATTR_ACL, bl, y); + ret = rop->get_attr(dpp, RGW_ATTR_ACL, bl, y); if (ret >= 0) { - ret = decode_policy(cct, bl, policy); + ret = decode_policy(dpp, cct, bl, policy); if (ret < 0) return ret; } else if (ret == -ENODATA) { /* object exists, but policy is broken */ - ldout(cct, 0) << "WARNING: couldn't find acl header for object, generating default" << dendl; + ldpp_dout(dpp, 0) << "WARNING: couldn't find acl header for object, generating default" << dendl; std::unique_ptr user = store->get_user(bucket_info.owner); - ret = user->load_by_id(y); + ret = user->load_by_id(dpp, y); if (ret < 0) return ret; @@ -268,7 +272,7 @@ static int get_obj_policy_from_attr(CephContext *cct, if (storage_class) { bufferlist scbl; - int r = rop->get_attr(RGW_ATTR_STORAGE_CLASS, scbl, y); + int r = rop->get_attr(dpp, RGW_ATTR_STORAGE_CLASS, scbl, y); if (r >= 0) { *storage_class = scbl.to_str(); } else { @@ -326,14 +330,15 @@ vector get_iam_user_policy_from_attr(CephContext* cct, return policies; } -static int get_obj_head(struct req_state *s, +static int get_obj_head(const DoutPrefixProvider *dpp, + struct req_state *s, rgw::sal::RGWObject* obj, bufferlist *pbl) { std::unique_ptr read_op = obj->get_read_op(s->obj_ctx); obj->set_prefetch_data(s->obj_ctx); - int ret = read_op->prepare(s->yield); + int ret = read_op->prepare(s->yield, dpp); if (ret < 0) { return ret; } @@ -342,7 +347,7 @@ static int get_obj_head(struct req_state *s, return 0; } - ret = read_op->read(0, s->cct->_conf->rgw_max_chunk_size, *pbl, s->yield); + ret = read_op->read(0, s->cct->_conf->rgw_max_chunk_size, *pbl, s->yield, dpp); return 0; } @@ -365,7 +370,7 @@ struct multipart_upload_info }; WRITE_CLASS_ENCODER(multipart_upload_info) -static int get_multipart_info(struct req_state *s, +static int get_multipart_info(const DoutPrefixProvider *dpp, struct req_state *s, rgw::sal::RGWObject* obj, multipart_upload_info *upload_info) { @@ -374,7 +379,7 @@ static int get_multipart_info(struct req_state *s, bufferlist headbl; bufferlist *pheadbl = (upload_info ? 
&headbl : nullptr); - int op_ret = get_obj_head(s, obj, pheadbl); + int op_ret = get_obj_head(dpp, s, obj, pheadbl); if (op_ret < 0) { if (op_ret == -ENOENT) { return -ERR_NO_SUCH_UPLOAD; @@ -395,7 +400,7 @@ static int get_multipart_info(struct req_state *s, return 0; } -static int get_multipart_info(struct req_state *s, +static int get_multipart_info(const DoutPrefixProvider *dpp, struct req_state *s, const string& meta_oid, multipart_upload_info *upload_info) { @@ -406,10 +411,11 @@ static int get_multipart_info(struct req_state *s, meta_obj = s->bucket->get_object(rgw_obj_key(meta_oid, string(), mp_ns)); meta_obj->set_in_extra_data(true); - return get_multipart_info(s, meta_obj.get(), upload_info); + return get_multipart_info(dpp, s, meta_obj.get(), upload_info); } -static int read_bucket_policy(rgw::sal::RGWStore *store, +static int read_bucket_policy(const DoutPrefixProvider *dpp, + rgw::sal::RGWStore *store, struct req_state *s, RGWBucketInfo& bucket_info, map& bucket_attrs, @@ -418,7 +424,7 @@ static int read_bucket_policy(rgw::sal::RGWStore *store, optional_yield y) { if (!s->system_request && bucket_info.flags & BUCKET_SUSPENDED) { - ldpp_dout(s, 0) << "NOTICE: bucket " << bucket_info.bucket.name + ldpp_dout(dpp, 0) << "NOTICE: bucket " << bucket_info.bucket.name << " is suspended" << dendl; return -ERR_USER_SUSPENDED; } @@ -427,7 +433,7 @@ static int read_bucket_policy(rgw::sal::RGWStore *store, return 0; } - int ret = rgw_op_get_bucket_policy_from_attr(s->cct, store, bucket_info, bucket_attrs, policy, y); + int ret = rgw_op_get_bucket_policy_from_attr(dpp, s->cct, store, bucket_info, bucket_attrs, policy, y); if (ret == -ENOENT) { ret = -ERR_NO_SUCH_BUCKET; } @@ -435,7 +441,8 @@ static int read_bucket_policy(rgw::sal::RGWStore *store, return ret; } -static int read_obj_policy(rgw::sal::RGWStore *store, +static int read_obj_policy(const DoutPrefixProvider *dpp, + rgw::sal::RGWStore *store, struct req_state *s, RGWBucketInfo& bucket_info, map& bucket_attrs, @@ -453,7 +460,7 @@ static int read_obj_policy(rgw::sal::RGWStore *store, rgw_obj obj; if (!s->system_request && bucket_info.flags & BUCKET_SUSPENDED) { - ldpp_dout(s, 0) << "NOTICE: bucket " << bucket_info.bucket.name + ldpp_dout(dpp, 0) << "NOTICE: bucket " << bucket_info.bucket.name << " is suspended" << dendl; return -ERR_USER_SUSPENDED; } @@ -471,13 +478,13 @@ static int read_obj_policy(rgw::sal::RGWStore *store, policy = get_iam_policy_from_attr(s->cct, bucket_attrs, bucket->get_tenant()); RGWObjectCtx *obj_ctx = static_cast(s->obj_ctx); - int ret = get_obj_policy_from_attr(s->cct, store, *obj_ctx, + int ret = get_obj_policy_from_attr(dpp, s->cct, store, *obj_ctx, bucket_info, bucket_attrs, acl, storage_class, object, s->yield); if (ret == -ENOENT) { /* object does not exist checking the bucket's ACL to make sure that we send a proper error code */ RGWAccessControlPolicy bucket_policy(s->cct); - ret = rgw_op_get_bucket_policy_from_attr(s->cct, store, bucket_info, bucket_attrs, &bucket_policy, y); + ret = rgw_op_get_bucket_policy_from_attr(dpp, s->cct, store, bucket_info, bucket_attrs, &bucket_policy, y); if (ret < 0) { return ret; } @@ -509,7 +516,7 @@ static int read_obj_policy(rgw::sal::RGWStore *store, * only_bucket: If true, reads the user and bucket ACLs rather than the object ACL. * Returns: 0 on success, -ERR# otherwise. 
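// Illustrative sketch only, not part of the patch (mock callables, not the
// real read_obj_policy()): it shows the fallback visible in the hunk above --
// when reading the object's ACL comes back -ENOENT, the bucket ACL is
// consulted so the caller can return "no such key" to requesters allowed to
// read the bucket and a plain permission error to everyone else.
#include <cerrno>
#include <functional>

struct MockACL { bool allows_read = false; };

int read_obj_policy_like(
    const std::function<int(MockACL*)>& read_object_acl,   // -ENOENT if missing
    const std::function<int(MockACL*)>& read_bucket_acl) {
  MockACL obj_acl;
  int ret = read_object_acl(&obj_acl);
  if (ret != -ENOENT)
    return ret;                      // object ACL found, or a hard error

  MockACL bucket_acl;
  ret = read_bucket_acl(&bucket_acl);
  if (ret < 0)
    return ret;

  // Object is gone: surface -ENOENT only to requesters who may read the
  // bucket; everyone else just gets -EACCES.
  return bucket_acl.allows_read ? -ENOENT : -EACCES;
}

int main() {
  auto missing_obj = [](MockACL*) { return -ENOENT; };
  auto public_bucket = [](MockACL* acl) { acl->allows_read = true; return 0; };
  return read_obj_policy_like(missing_obj, public_bucket) == -ENOENT ? 0 : 1;
}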
*/ -int rgw_build_bucket_policies(rgw::sal::RGWRadosStore* store, struct req_state* s, optional_yield y) +int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore* store, struct req_state* s, optional_yield y) { int ret = 0; auto obj_ctx = store->svc()->sysobj->init_obj_ctx(); @@ -540,9 +547,9 @@ int rgw_build_bucket_policies(rgw::sal::RGWRadosStore* store, struct req_state* /* check if copy source is within the current domain */ if (!s->src_bucket_name.empty()) { std::unique_ptr src_bucket; - ret = store->get_bucket(nullptr, s->src_tenant_name, s->src_bucket_name, &src_bucket, y); + ret = store->get_bucket(dpp, nullptr, s->src_tenant_name, s->src_bucket_name, &src_bucket, y); if (ret == 0) { - ret = src_bucket->load_by_name(s->src_tenant_name, s->src_bucket_name, + ret = src_bucket->load_by_name(dpp, s->src_tenant_name, s->src_bucket_name, s->bucket_instance_id, &obj_ctx, s->yield); } if (ret == 0) { @@ -562,12 +569,12 @@ int rgw_build_bucket_policies(rgw::sal::RGWRadosStore* store, struct req_state* if (!s->bucket_name.empty()) { s->bucket_exists = true; - ret = store->get_bucket(s->user.get(), rgw_bucket(rgw_bucket_key(s->bucket_tenant, s->bucket_name, s->bucket_instance_id)), &s->bucket, y); + ret = store->get_bucket(dpp, s->user.get(), rgw_bucket(rgw_bucket_key(s->bucket_tenant, s->bucket_name, s->bucket_instance_id)), &s->bucket, y); if (ret < 0) { if (ret != -ENOENT) { string bucket_log; bucket_log = rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name); - ldpp_dout(s, 0) << "NOTICE: couldn't get bucket from bucket_name (name=" + ldpp_dout(dpp, 0) << "NOTICE: couldn't get bucket from bucket_name (name=" << bucket_log << ")" << dendl; return ret; } @@ -577,7 +584,7 @@ int rgw_build_bucket_policies(rgw::sal::RGWRadosStore* store, struct req_state* s->bucket_mtime = s->bucket->get_modification_time(); s->bucket_attrs = s->bucket->get_attrs(); - ret = read_bucket_policy(store, s, s->bucket->get_info(), + ret = read_bucket_policy(dpp, store, s, s->bucket->get_info(), s->bucket->get_attrs(), s->bucket_acl.get(), s->bucket->get_key(), y); acct_acl_user = { @@ -606,7 +613,7 @@ int rgw_build_bucket_policies(rgw::sal::RGWRadosStore* store, struct req_state* } if (!store->get_zonegroup().equals(s->bucket->get_info().zonegroup)) { - ldpp_dout(s, 0) << "NOTICE: request for data in a different zonegroup (" + ldpp_dout(dpp, 0) << "NOTICE: request for data in a different zonegroup (" << s->bucket->get_info().zonegroup << " != " << store->get_zonegroup().get_id() << ")" << dendl; /* we now need to make sure that the operation actually requires copy source, that is @@ -628,7 +635,7 @@ int rgw_build_bucket_policies(rgw::sal::RGWRadosStore* store, struct req_state* s->dest_placement.inherit_from(s->bucket->get_placement_rule()); if (!store->svc()->zone->get_zone_params().valid_placement(s->dest_placement)) { - ldpp_dout(s, 0) << "NOTICE: invalid dest placement: " << s->dest_placement.to_str() << dendl; + ldpp_dout(dpp, 0) << "NOTICE: invalid dest placement: " << s->dest_placement.to_str() << dendl; return -EINVAL; } @@ -638,9 +645,9 @@ int rgw_build_bucket_policies(rgw::sal::RGWRadosStore* store, struct req_state* /* handle user ACL only for those APIs which support it */ if (s->user_acl) { map uattrs; - ret = store->ctl()->user->get_attrs_by_uid(acct_acl_user.uid, &uattrs, s->yield); + ret = store->ctl()->user->get_attrs_by_uid(dpp, acct_acl_user.uid, &uattrs, s->yield); if (!ret) { - ret = get_user_policy_from_attr(s->cct, store, uattrs, *s->user_acl); + ret = 
get_user_policy_from_attr(dpp, s->cct, store, uattrs, *s->user_acl); } if (-ENOENT == ret) { /* In already existing clusters users won't have ACL. In such case @@ -654,7 +661,7 @@ int rgw_build_bucket_policies(rgw::sal::RGWRadosStore* store, struct req_state* acct_acl_user.display_name); ret = 0; } else if (ret < 0) { - ldpp_dout(s, 0) << "NOTICE: couldn't get user attrs for handling ACL " + ldpp_dout(dpp, 0) << "NOTICE: couldn't get user attrs for handling ACL " "(user_id=" << s->user->get_id() << ", ret=" << ret << ")" << dendl; return ret; } @@ -664,7 +671,7 @@ int rgw_build_bucket_policies(rgw::sal::RGWRadosStore* store, struct req_state* if (! s->user->get_id().empty() && s->auth.identity->get_identity_type() != TYPE_ROLE) { try { map uattrs; - if (ret = store->ctl()->user->get_attrs_by_uid(s->user->get_id(), &uattrs, s->yield); ! ret) { + if (ret = store->ctl()->user->get_attrs_by_uid(dpp, s->user->get_id(), &uattrs, s->yield); ! ret) { auto user_policies = get_iam_user_policy_from_attr(s->cct, store, uattrs, s->user->get_tenant()); s->iam_user_policies.insert(s->iam_user_policies.end(), std::make_move_iterator(user_policies.begin()), @@ -675,7 +682,7 @@ int rgw_build_bucket_policies(rgw::sal::RGWRadosStore* store, struct req_state* else ret = -EACCES; } } catch (const std::exception& e) { - lderr(s->cct) << "Error reading IAM User Policy: " << e.what() << dendl; + ldpp_dout(dpp, -1) << "Error reading IAM User Policy: " << e.what() << dendl; ret = -EACCES; } } @@ -686,13 +693,13 @@ int rgw_build_bucket_policies(rgw::sal::RGWRadosStore* store, struct req_state* // Really this is a can't happen condition. We parse the policy // when it's given to us, so perhaps we should abort or otherwise // raise bloody murder. - ldpp_dout(s, 0) << "Error reading IAM Policy: " << e.what() << dendl; + ldpp_dout(dpp, 0) << "Error reading IAM Policy: " << e.what() << dendl; ret = -EACCES; } bool success = store->svc()->zone->get_redirect_zone_endpoint(&s->redirect_zone_endpoint); if (success) { - ldpp_dout(s, 20) << "redirect_zone_endpoint=" << s->redirect_zone_endpoint << dendl; + ldpp_dout(dpp, 20) << "redirect_zone_endpoint=" << s->redirect_zone_endpoint << dendl; } return ret; @@ -704,7 +711,7 @@ int rgw_build_bucket_policies(rgw::sal::RGWRadosStore* store, struct req_state* * only_bucket: If true, reads the bucket ACL rather than the object ACL. * Returns: 0 on success, -ERR# otherwise. 
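// Illustrative sketch only, not part of the patch, anticipating the
// retry_raced_bucket_write() hunk a bit further below: a bucket-metadata
// write that races with another writer fails with -ECANCELED, so the helper
// refreshes the cached bucket info (a call that now also takes the dpp) and
// retries the same operation a bounded number of times. Mock callables stand
// in for the real rgw::sal::RGWBucket API.
#include <cerrno>
#include <functional>

int retry_raced_write_like(const std::function<int()>& refresh_info,
                           const std::function<int()>& write_op,
                           unsigned max_retries = 15) {
  int r = write_op();
  for (unsigned i = 0; i < max_retries && r == -ECANCELED; ++i) {
    r = refresh_info();        // reload bucket info invalidated by the race
    if (r >= 0)
      r = write_op();          // and try the same mutation again
  }
  return r;                    // still -ECANCELED if every attempt raced
}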
*/ -int rgw_build_object_policies(rgw::sal::RGWRadosStore *store, struct req_state *s, +int rgw_build_object_policies(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, struct req_state *s, bool prefetch_data, optional_yield y) { int ret = 0; @@ -721,7 +728,7 @@ int rgw_build_object_policies(rgw::sal::RGWRadosStore *store, struct req_state * if (prefetch_data) { s->object->set_prefetch_data(s->obj_ctx); } - ret = read_obj_policy(store, s, s->bucket->get_info(), s->bucket_attrs, + ret = read_obj_policy(dpp, store, s, s->bucket->get_info(), s->bucket_attrs, s->object_acl.get(), nullptr, s->iam_policy, s->bucket.get(), s->object.get(), y); } @@ -752,9 +759,9 @@ static int rgw_iam_add_tags_from_bl(struct req_state* s, bufferlist& bl){ return 0; } -static int rgw_iam_add_existing_objtags(rgw::sal::RGWRadosStore* store, struct req_state* s, std::uint64_t action) { +static int rgw_iam_add_existing_objtags(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore* store, struct req_state* s, std::uint64_t action) { s->object->set_atomic(s->obj_ctx); - int op_ret = s->object->get_obj_attrs(s->obj_ctx, s->yield); + int op_ret = s->object->get_obj_attrs(s->obj_ctx, s->yield, dpp); if (op_ret < 0) return op_ret; rgw::sal::RGWAttrs attrs = s->object->get_attrs(); @@ -871,10 +878,10 @@ void rgw_bucket_object_pre_exec(struct req_state *s) // general, they should just return op_ret. namespace { template -int retry_raced_bucket_write(rgw::sal::RGWBucket* b, const F& f) { +int retry_raced_bucket_write(const DoutPrefixProvider *dpp, rgw::sal::RGWBucket* b, const F& f) { auto r = f(); for (auto i = 0u; i < 15u && r == -ECANCELED; ++i) { - r = b->try_refresh_info(nullptr); + r = b->try_refresh_info(dpp, nullptr); if (r >= 0) { r = f(); } @@ -905,11 +912,11 @@ int RGWGetObj::verify_permission(optional_yield y) action = rgw::IAM::s3GetObjectVersion; } if (s->iam_policy && s->iam_policy->has_partial_conditional(S3_EXISTING_OBJTAG)) - rgw_iam_add_existing_objtags(store, s, action); + rgw_iam_add_existing_objtags(this, store, s, action); if (! s->iam_user_policies.empty()) { for (auto& user_policy : s->iam_user_policies) { if (user_policy.has_partial_conditional(S3_EXISTING_OBJTAG)) - rgw_iam_add_existing_objtags(store, s, action); + rgw_iam_add_existing_objtags(this, store, s, action); } } } @@ -954,12 +961,12 @@ int RGWGetObjTags::verify_permission(optional_yield y) // TODO since we are parsing the bl now anyway, we probably change // the send_response function to accept RGWObjTag instead of a bl if (s->iam_policy && s->iam_policy->has_partial_conditional(S3_EXISTING_OBJTAG)){ - rgw_iam_add_existing_objtags(store, s, iam_action); + rgw_iam_add_existing_objtags(this, store, s, iam_action); } if (! 
s->iam_user_policies.empty()) { for (auto& user_policy : s->iam_user_policies) { if (user_policy.has_partial_conditional(S3_EXISTING_OBJTAG)) { - rgw_iam_add_existing_objtags(store, s, iam_action); + rgw_iam_add_existing_objtags(this, store, s, iam_action); } } } @@ -980,7 +987,7 @@ void RGWGetObjTags::execute(optional_yield y) s->object->set_atomic(s->obj_ctx); - op_ret = s->object->get_obj_attrs(s->obj_ctx, y); + op_ret = s->object->get_obj_attrs(s->obj_ctx, y, this); if (op_ret < 0) { ldpp_dout(this, 0) << "ERROR: failed to get obj attrs, obj=" << s->object << " ret=" << op_ret << dendl; @@ -1003,12 +1010,12 @@ int RGWPutObjTags::verify_permission(optional_yield y) rgw::IAM::s3PutObjectVersionTagging; if(s->iam_policy && s->iam_policy->has_partial_conditional(S3_EXISTING_OBJTAG)){ - rgw_iam_add_existing_objtags(store, s, iam_action); + rgw_iam_add_existing_objtags(this, store, s, iam_action); } if (! s->iam_user_policies.empty()) { for (auto& user_policy : s->iam_user_policies) { if (user_policy.has_partial_conditional(S3_EXISTING_OBJTAG)) { - rgw_iam_add_existing_objtags(store, s, iam_action); + rgw_iam_add_existing_objtags(this, store, s, iam_action); } } } @@ -1029,7 +1036,7 @@ void RGWPutObjTags::execute(optional_yield y) } s->object->set_atomic(s->obj_ctx); - op_ret = s->object->modify_obj_attrs(s->obj_ctx, RGW_ATTR_TAGS, tags_bl, y); + op_ret = s->object->modify_obj_attrs(s->obj_ctx, RGW_ATTR_TAGS, tags_bl, y, this); if (op_ret == -ECANCELED){ op_ret = -ERR_TAG_CONFLICT; } @@ -1049,12 +1056,12 @@ int RGWDeleteObjTags::verify_permission(optional_yield y) rgw::IAM::s3DeleteObjectVersionTagging; if (s->iam_policy && s->iam_policy->has_partial_conditional(S3_EXISTING_OBJTAG)){ - rgw_iam_add_existing_objtags(store, s, iam_action); + rgw_iam_add_existing_objtags(this, store, s, iam_action); } if (! 
s->iam_user_policies.empty()) { for (auto& user_policy : s->iam_user_policies) { if (user_policy.has_partial_conditional(S3_EXISTING_OBJTAG)) { - rgw_iam_add_existing_objtags(store, s, iam_action); + rgw_iam_add_existing_objtags(this, store, s, iam_action); } } } @@ -1069,7 +1076,7 @@ void RGWDeleteObjTags::execute(optional_yield y) if (rgw::sal::RGWObject::empty(s->object.get())) return; - op_ret = s->object->delete_obj_attrs(s->obj_ctx, RGW_ATTR_TAGS, y); + op_ret = s->object->delete_obj_attrs(this, s->obj_ctx, RGW_ATTR_TAGS, y); } int RGWGetBucketTags::verify_permission(optional_yield y) @@ -1106,19 +1113,19 @@ int RGWPutBucketTags::verify_permission(optional_yield y) { void RGWPutBucketTags::execute(optional_yield y) { - op_ret = get_params(y); + op_ret = get_params(this, y); if (op_ret < 0) return; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; } - op_ret = retry_raced_bucket_write(s->bucket.get(), [this, y] { + op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this, y] { rgw::sal::RGWAttrs attrs = s->bucket->get_attrs(); attrs[RGW_ATTR_TAGS] = tags_bl; - return s->bucket->set_instance_attrs(attrs, y); + return s->bucket->set_instance_attrs(this, attrs, y); }); } @@ -1136,16 +1143,16 @@ int RGWDeleteBucketTags::verify_permission(optional_yield y) void RGWDeleteBucketTags::execute(optional_yield y) { bufferlist in_data; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; } - op_ret = retry_raced_bucket_write(s->bucket.get(), [this, y] { + op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this, y] { rgw::sal::RGWAttrs attrs = s->bucket->get_attrs(); attrs.erase(RGW_ATTR_TAGS); - op_ret = s->bucket->set_instance_attrs(attrs, y); + op_ret = s->bucket->set_instance_attrs(this, attrs, y); if (op_ret < 0) { ldpp_dout(this, 0) << "RGWDeleteBucketTags() failed to remove RGW_ATTR_TAGS on bucket=" << s->bucket->get_name() @@ -1184,13 +1191,13 @@ void RGWPutBucketReplication::execute(optional_yield y) { if (op_ret < 0) return; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; } - op_ret = retry_raced_bucket_write(s->bucket.get(), [this] { + op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this] { auto sync_policy = (s->bucket->get_info().sync_policy ? 
*s->bucket->get_info().sync_policy : rgw_sync_policy_info()); for (auto& group : sync_policy_groups) { @@ -1199,7 +1206,7 @@ void RGWPutBucketReplication::execute(optional_yield y) { s->bucket->get_info().set_sync_policy(std::move(sync_policy)); - int ret = s->bucket->put_instance_info(false, real_time()); + int ret = s->bucket->put_instance_info(this, false, real_time()); if (ret < 0) { ldpp_dout(this, 0) << "ERROR: put_bucket_instance_info (bucket=" << s->bucket << ") returned ret=" << ret << dendl; return ret; @@ -1222,13 +1229,13 @@ int RGWDeleteBucketReplication::verify_permission(optional_yield y) void RGWDeleteBucketReplication::execute(optional_yield y) { bufferlist in_data; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; } - op_ret = retry_raced_bucket_write(s->bucket.get(), [this] { + op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this] { if (!s->bucket->get_info().sync_policy) { return 0; } @@ -1239,7 +1246,7 @@ void RGWDeleteBucketReplication::execute(optional_yield y) s->bucket->get_info().set_sync_policy(std::move(sync_policy)); - int ret = s->bucket->put_instance_info(false, real_time()); + int ret = s->bucket->put_instance_info(this, false, real_time()); if (ret < 0) { ldpp_dout(this, 0) << "ERROR: put_bucket_instance_info (bucket=" << s->bucket << ") returned ret=" << ret << dendl; return ret; @@ -1291,7 +1298,7 @@ int RGWOp::init_quota() if (s->user->get_id() == s->bucket_owner.get_id()) { user = s->user.get(); } else { - int r = owner_user.load_by_id(s->yield); + int r = owner_user.load_by_id(this, s->yield); if (r < 0) return r; user = &owner_user; @@ -1506,7 +1513,7 @@ int RGWGetObj::read_user_manifest_part(rgw::sal::RGWBucket* bucket, read_op->params.if_match = ent.meta.etag.c_str(); } - op_ret = read_op->prepare(s->yield); + op_ret = read_op->prepare(s->yield, this); if (op_ret < 0) return op_ret; op_ret = part->range_to_ofs(ent.meta.accounted_size, cur_ofs, cur_end); @@ -1540,7 +1547,7 @@ int RGWGetObj::read_user_manifest_part(rgw::sal::RGWBucket* bucket, } } - op_ret = rgw_policy_from_attrset(s->cct, part->get_attrs(), &obj_policy); + op_ret = rgw_policy_from_attrset(s, s->cct, part->get_attrs(), &obj_policy); if (op_ret < 0) return op_ret; @@ -1561,13 +1568,14 @@ int RGWGetObj::read_user_manifest_part(rgw::sal::RGWBucket* bucket, perfcounter->inc(l_rgw_get_b, cur_end - cur_ofs); filter->fixup_range(cur_ofs, cur_end); - op_ret = read_op->iterate(cur_ofs, cur_end, filter, s->yield); + op_ret = read_op->iterate(this, cur_ofs, cur_end, filter, s->yield); if (op_ret >= 0) op_ret = filter->flush(); return op_ret; } -static int iterate_user_manifest_parts(CephContext * const cct, +static int iterate_user_manifest_parts(const DoutPrefixProvider *dpp, + CephContext * const cct, rgw::sal::RGWStore* const store, const off_t ofs, const off_t end, @@ -1603,7 +1611,7 @@ static int iterate_user_manifest_parts(CephContext * const cct, MD5 etag_sum; do { static constexpr auto MAX_LIST_OBJS = 100u; - int r = bucket->list(params, MAX_LIST_OBJS, results, y); + int r = bucket->list(dpp, params, MAX_LIST_OBJS, results, y); if (r < 0) { return r; } @@ -1790,14 +1798,14 @@ int RGWGetObj::handle_user_manifest(const char *prefix, optional_yield y) if (bucket_name.compare(s->bucket->get_name()) != 0) { map 
bucket_attrs; - r = store->get_bucket(s->user.get(), s->user->get_tenant(), bucket_name, &ubucket, y); + r = store->get_bucket(this, s->user.get(), s->user->get_tenant(), bucket_name, &ubucket, y); if (r < 0) { ldpp_dout(this, 0) << "could not get bucket info for bucket=" << bucket_name << dendl; return r; } bucket_acl = &_bucket_acl; - r = read_bucket_policy(store, s, ubucket->get_info(), bucket_attrs, bucket_acl, ubucket->get_key(), y); + r = read_bucket_policy(this, store, s, ubucket->get_info(), bucket_attrs, bucket_acl, ubucket->get_key(), y); if (r < 0) { ldpp_dout(this, 0) << "failed to read bucket policy" << dendl; return r; @@ -1815,7 +1823,7 @@ int RGWGetObj::handle_user_manifest(const char *prefix, optional_yield y) * - total length (of the parts we are going to send to client), * - overall DLO's content size, * - md5 sum of overall DLO's content (for etag of Swift API). */ - r = iterate_user_manifest_parts(s->cct, store, ofs, end, + r = iterate_user_manifest_parts(this, s->cct, store, ofs, end, pbucket, obj_prefix, bucket_acl, *bucket_policy, nullptr, &s->obj_size, &lo_etag, nullptr /* cb */, nullptr /* cb arg */, y); @@ -1829,7 +1837,7 @@ int RGWGetObj::handle_user_manifest(const char *prefix, optional_yield y) return r; } - r = iterate_user_manifest_parts(s->cct, store, ofs, end, + r = iterate_user_manifest_parts(this, s->cct, store, ofs, end, pbucket, obj_prefix, bucket_acl, *bucket_policy, &total_len, nullptr, nullptr, nullptr, nullptr, y); @@ -1843,7 +1851,7 @@ int RGWGetObj::handle_user_manifest(const char *prefix, optional_yield y) return 0; } - r = iterate_user_manifest_parts(s->cct, store, ofs, end, + r = iterate_user_manifest_parts(this, s->cct, store, ofs, end, pbucket, obj_prefix, bucket_acl, *bucket_policy, nullptr, nullptr, nullptr, get_obj_user_manifest_iterate_cb, (void *)this, y); @@ -1919,7 +1927,7 @@ int RGWGetObj::handle_slo_manifest(bufferlist& bl, optional_yield y) std::unique_ptr tmp_bucket; auto obj_ctx = store->svc()->sysobj->init_obj_ctx(); - int r = store->get_bucket(s->user.get(), s->user->get_tenant(), bucket_name, &tmp_bucket, y); + int r = store->get_bucket(this, s->user.get(), s->user->get_tenant(), bucket_name, &tmp_bucket, y); if (r < 0) { ldpp_dout(this, 0) << "could not get bucket info for bucket=" << bucket_name << dendl; @@ -1927,7 +1935,7 @@ int RGWGetObj::handle_slo_manifest(bufferlist& bl, optional_yield y) } bucket = tmp_bucket.get(); bucket_acl = &_bucket_acl; - r = read_bucket_policy(store, s, tmp_bucket->get_info(), tmp_bucket->get_attrs(), bucket_acl, + r = read_bucket_policy(this, store, s, tmp_bucket->get_info(), tmp_bucket->get_attrs(), bucket_acl, tmp_bucket->get_key(), y); if (r < 0) { ldpp_dout(this, 0) << "failed to read bucket ACL for bucket " @@ -2031,7 +2039,7 @@ static inline void rgw_cond_decode_objtags( bufferlist::const_iterator iter{&tags->second}; s->tagset.decode(iter); } catch (buffer::error& err) { - ldout(s->cct, 0) + ldpp_dout(s, 0) << "ERROR: caught buffer::error, couldn't decode TagSet" << dendl; } } @@ -2073,7 +2081,7 @@ void RGWGetObj::execute(optional_yield y) read_op->params.if_nomatch = if_nomatch; read_op->params.lastmod = &lastmod; - op_ret = read_op->prepare(s->yield); + op_ret = read_op->prepare(s->yield, this); if (op_ret < 0) goto done_err; version_id = s->object->get_instance(); @@ -2119,7 +2127,7 @@ void RGWGetObj::execute(optional_yield y) op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info); if (op_ret < 0) { - ldpp_dout(s, 0) << "ERROR: failed to decode compression info, 
cannot decompress" << dendl; + ldpp_dout(this, 0) << "ERROR: failed to decode compression info, cannot decompress" << dendl; goto done_err; } if (need_decompress) { @@ -2196,7 +2204,7 @@ void RGWGetObj::execute(optional_yield y) ofs_x = ofs; end_x = end; filter->fixup_range(ofs_x, end_x); - op_ret = read_op->iterate(ofs_x, end_x, filter, s->yield); + op_ret = read_op->iterate(this, ofs_x, end_x, filter, s->yield); if (op_ret >= 0) op_ret = filter->flush(); @@ -2283,7 +2291,7 @@ void RGWListBuckets::execute(optional_yield y) } if (supports_account_metadata()) { - op_ret = store->ctl()->user->get_attrs_by_uid(s->user->get_id(), &attrs, s->yield); + op_ret = store->ctl()->user->get_attrs_by_uid(this, s->user->get_id(), &attrs, s->yield); if (op_ret < 0) { goto send_end; } @@ -2299,7 +2307,7 @@ void RGWListBuckets::execute(optional_yield y) read_count = max_buckets; } - op_ret = s->user->list_buckets(marker, end_marker, read_count, should_get_stats(), buckets, y); + op_ret = s->user->list_buckets(this, marker, end_marker, read_count, should_get_stats(), buckets, y); if (op_ret < 0) { /* hmm.. something wrong here.. the user was authenticated, so it @@ -2390,7 +2398,7 @@ void RGWGetUsage::execute(optional_yield y) RGWUsageIter usage_iter; while (s->bucket && is_truncated) { - op_ret = s->bucket->read_usage(start_epoch, end_epoch, max_entries, &is_truncated, + op_ret = s->bucket->read_usage(this, start_epoch, end_epoch, max_entries, &is_truncated, usage_iter, usage); if (op_ret == -ENOENT) { op_ret = 0; @@ -2402,19 +2410,19 @@ void RGWGetUsage::execute(optional_yield y) } } - op_ret = rgw_user_sync_all_stats(store, s->user->get_id(), y); + op_ret = rgw_user_sync_all_stats(this, store, s->user->get_id(), y); if (op_ret < 0) { ldpp_dout(this, 0) << "ERROR: failed to sync user stats" << dendl; return; } - op_ret = rgw_user_get_all_buckets_stats(store, s->user->get_id(), buckets_usage, y); + op_ret = rgw_user_get_all_buckets_stats(this, store, s->user->get_id(), buckets_usage, y); if (op_ret < 0) { ldpp_dout(this, 0) << "ERROR: failed to get user's buckets stats" << dendl; return; } - op_ret = store->ctl()->user->read_stats(s->user->get_id(), &stats, y); + op_ret = store->ctl()->user->read_stats(this, s->user->get_id(), &stats, y); if (op_ret < 0) { ldpp_dout(this, 0) << "ERROR: can't read user header" << dendl; return; @@ -2442,7 +2450,7 @@ void RGWStatAccount::execute(optional_yield y) do { lastmarker = nullptr; - op_ret = rgw_read_user_buckets(store, s->user->get_id(), buckets, marker, + op_ret = rgw_read_user_buckets(this, store, s->user->get_id(), buckets, marker, string(), max_buckets, true, y); if (op_ret < 0) { /* hmm.. something wrong here.. 
the user was authenticated, so it @@ -2480,7 +2488,7 @@ void RGWStatAccount::execute(optional_yield y) } if (!lastmarker) { - lderr(s->cct) << "ERROR: rgw_read_user_buckets, stasis at marker=" + ldpp_dout(this, -1) << "ERROR: rgw_read_user_buckets, stasis at marker=" << marker << " uid=" << s->user->get_id() << dendl; break; } @@ -2560,7 +2568,7 @@ void RGWSetBucketVersioning::execute(optional_yield y) } } - op_ret = store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -2568,7 +2576,7 @@ void RGWSetBucketVersioning::execute(optional_yield y) bool modified = mfa_set_status; - op_ret = retry_raced_bucket_write(s->bucket.get(), [&] { + op_ret = retry_raced_bucket_write(this, s->bucket.get(), [&] { if (mfa_set_status) { if (mfa_status) { s->bucket->get_info().flags |= BUCKET_MFA_ENABLED; @@ -2588,7 +2596,7 @@ void RGWSetBucketVersioning::execute(optional_yield y) return op_ret; } s->bucket->set_attrs(rgw::sal::RGWAttrs(s->bucket_attrs)); - return s->bucket->put_instance_info(false, real_time()); + return s->bucket->put_instance_info(this, false, real_time()); }); if (!modified) { @@ -2636,16 +2644,16 @@ void RGWSetBucketWebsite::execute(optional_yield y) if (op_ret < 0) return; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << " forward_request_to_master returned ret=" << op_ret << dendl; return; } - op_ret = retry_raced_bucket_write(s->bucket.get(), [this] { + op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this] { s->bucket->get_info().has_website = true; s->bucket->get_info().website_conf = website_conf; - op_ret = s->bucket->put_instance_info(false, real_time()); + op_ret = s->bucket->put_instance_info(this, false, real_time()); return op_ret; }); @@ -2670,16 +2678,16 @@ void RGWDeleteBucketWebsite::execute(optional_yield y) { bufferlist in_data; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "NOTICE: forward_to_master failed on bucket=" << s->bucket->get_name() << "returned err=" << op_ret << dendl; return; } - op_ret = retry_raced_bucket_write(s->bucket.get(), [this] { + op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this] { s->bucket->get_info().has_website = false; s->bucket->get_info().website_conf = RGWBucketWebsiteConf(); - op_ret = s->bucket->put_instance_info(false, real_time()); + op_ret = s->bucket->put_instance_info(this, false, real_time()); return op_ret; }); if (op_ret < 0) { @@ -2711,11 +2719,11 @@ void RGWStatBucket::execute(optional_yield y) return; } - op_ret = store->get_bucket(s->user.get(), s->bucket->get_key(), &bucket, y); + op_ret = store->get_bucket(this, s->user.get(), s->bucket->get_key(), &bucket, y); if (op_ret) { return; } - op_ret = bucket->update_container_stats(); + op_ret = bucket->update_container_stats(s); } int RGWListBucket::verify_permission(optional_yield y) @@ -2774,7 +2782,7 @@ void RGWListBucket::execute(optional_yield y) } if (need_container_stats()) { - op_ret = 
s->bucket->update_container_stats(); + op_ret = s->bucket->update_container_stats(s); } rgw::sal::RGWBucket::ListParams params; @@ -2788,7 +2796,7 @@ void RGWListBucket::execute(optional_yield y) rgw::sal::RGWBucket::ListResults results; - op_ret = s->bucket->list(params, max, results, y); + op_ret = s->bucket->list(this, params, max, results, y); if (op_ret >= 0) { next_marker = results.next_marker; is_truncated = results.is_truncated; @@ -2842,7 +2850,7 @@ int RGWCreateBucket::verify_permission(optional_yield y) if (s->user->get_max_buckets()) { rgw::sal::RGWBucketList buckets; string marker; - op_ret = rgw_read_user_buckets(store, s->user->get_id(), buckets, + op_ret = rgw_read_user_buckets(this, store, s->user->get_id(), buckets, marker, string(), s->user->get_max_buckets(), false, y); if (op_ret < 0) { @@ -3072,7 +3080,7 @@ void RGWCreateBucket::execute(optional_yield y) /* we need to make sure we read bucket info, it's not read before for this * specific request */ - op_ret = store->get_bucket(s->user.get(), s->bucket_tenant, s->bucket_name, &s->bucket, y); + op_ret = store->get_bucket(this, s->user.get(), s->bucket_tenant, s->bucket_name, &s->bucket, y); if (op_ret < 0 && op_ret != -ENOENT) return; s->bucket_exists = (op_ret != -ENOENT); @@ -3117,7 +3125,7 @@ void RGWCreateBucket::execute(optional_yield y) if (need_metadata_upload()) { /* It's supposed that following functions WILL NOT change any special * attributes (like RGW_ATTR_ACL) if they are already present in attrs. */ - op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false); + op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs, false); if (op_ret < 0) { return; } @@ -3148,7 +3156,7 @@ void RGWCreateBucket::execute(optional_yield y) /* We're replacing bucket with the newly created one */ ldpp_dout(this, 10) << "user=" << s->user << " bucket=" << tmp_bucket << dendl; - op_ret = store->create_bucket(*s->user, tmp_bucket, zonegroup_id, + op_ret = store->create_bucket(this, *s->user, tmp_bucket, zonegroup_id, placement_rule, info.swift_ver_location, pquota_info, policy, attrs, info, ep_objv, @@ -3177,10 +3185,10 @@ void RGWCreateBucket::execute(optional_yield y) } op_ret = store->ctl()->bucket->link_bucket(s->user->get_id(), s->bucket->get_key(), - s->bucket->get_creation_time(), y, false); + s->bucket->get_creation_time(), y, s, false); if (op_ret && !existed && op_ret != -EEXIST) { /* if it exists (or previously existed), don't remove it! */ - op_ret = store->ctl()->bucket->unlink_bucket(s->user->get_id(), s->bucket->get_key(), y); + op_ret = store->ctl()->bucket->unlink_bucket(s->user->get_id(), s->bucket->get_key(), y, this); if (op_ret < 0) { ldpp_dout(this, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret << dendl; @@ -3198,7 +3206,7 @@ void RGWCreateBucket::execute(optional_yield y) do { map battrs; - op_ret = s->bucket->get_bucket_info(y); + op_ret = s->bucket->get_bucket_info(this, y); if (op_ret < 0) { return; } else if (!s->bucket->is_owner(s->user.get())) { @@ -3211,7 +3219,7 @@ void RGWCreateBucket::execute(optional_yield y) attrs.clear(); - op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false); + op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs, false); if (op_ret < 0) { return; } @@ -3235,7 +3243,7 @@ void RGWCreateBucket::execute(optional_yield y) /* This will also set the quota on the bucket. 
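// Illustrative sketch only, not part of the patch (mock callables, not the
// real RGWBucketCtl API): the RGWCreateBucket flow above creates the bucket
// instance first and then links it to the owning user; if the link step
// fails for a bucket that did not previously exist, the half-created bucket
// is unlinked again so the failure does not leak an orphaned entry.
#include <cerrno>
#include <functional>
#include <iostream>

int create_and_link_like(const std::function<int()>& create_bucket,
                         const std::function<int()>& link_to_user,
                         const std::function<int()>& unlink_from_user,
                         bool existed) {
  int ret = create_bucket();
  if (ret < 0 && ret != -EEXIST)
    return ret;                      // hard create failure

  int link_ret = link_to_user();
  if (link_ret && !existed && link_ret != -EEXIST) {
    // if it exists (or previously existed), don't remove it; otherwise undo
    if (int r = unlink_from_user(); r < 0) {
      std::cerr << "WARNING: failed to unlink bucket: ret=" << r << std::endl;
    }
  }
  return link_ret;
}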
*/ op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket->get_info(), attrs, &s->bucket->get_info().objv_tracker, - y); + y, this); } while (op_ret == -ECANCELED && tries++ < 20); /* Restore the proper return code. */ @@ -3291,18 +3299,18 @@ void RGWDeleteBucket::execute(optional_yield y) } } - op_ret = s->bucket->sync_user_stats(y); + op_ret = s->bucket->sync_user_stats(this, y); if ( op_ret < 0) { ldpp_dout(this, 1) << "WARNING: failed to sync user stats before bucket delete: op_ret= " << op_ret << dendl; } - op_ret = s->bucket->check_empty(y); + op_ret = s->bucket->check_empty(this, y); if (op_ret < 0) { return; } bufferlist in_data; - op_ret = store->forward_request_to_master(s->user.get(), &ot.read_version, in_data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), &ot.read_version, in_data, nullptr, s->info, y); if (op_ret < 0) { if (op_ret == -ENOENT) { /* adjust error, we want to return with NoSuchBucket and not @@ -3327,8 +3335,7 @@ void RGWDeleteBucket::execute(optional_yield y) } } - op_ret = s->bucket->remove_bucket(false, prefix, delimiter, false, nullptr, - y); + op_ret = s->bucket->remove_bucket(this, false, prefix, delimiter, false, nullptr, y); if (op_ret < 0 && op_ret == -ECANCELED) { // lost a race, either with mdlog sync or another delete bucket operation. // in either case, we've already called ctl.bucket->unlink_bucket() @@ -3382,14 +3389,14 @@ int RGWPutObj::init_processing(optional_yield y) { } } std::unique_ptr bucket; - ret = store->get_bucket(s->user.get(), copy_source_tenant_name, copy_source_bucket_name, + ret = store->get_bucket(this, s->user.get(), copy_source_tenant_name, copy_source_bucket_name, &bucket, y); if (ret < 0) { ldpp_dout(this, 5) << __func__ << "(): get_bucket() returned ret=" << ret << dendl; return ret; } - ret = bucket->get_bucket_info(y); + ret = bucket->get_bucket_info(this, y); if (ret < 0) { ldpp_dout(this, 5) << __func__ << "(): get_bucket_info() returned ret=" << ret << dendl; return ret; @@ -3453,7 +3460,7 @@ int RGWPutObj::verify_permission(optional_yield y) cs_object->set_prefetch_data(s->obj_ctx); /* check source object permissions */ - if (read_obj_policy(store, s, copy_source_bucket_info, cs_attrs, &cs_acl, nullptr, + if (read_obj_policy(this, store, s, copy_source_bucket_info, cs_attrs, &cs_acl, nullptr, policy, cs_bucket.get(), cs_object.get(), y, true) < 0) { return -EACCES; } @@ -3617,7 +3624,7 @@ int RGWPutObj::get_data(const off_t fst, const off_t lst, bufferlist& bl) std::unique_ptr obj = bucket->get_object(rgw_obj_key(copy_source_object_name, copy_source_version_id)); std::unique_ptr read_op(obj->get_read_op(s->obj_ctx)); - ret = read_op->prepare(s->yield); + ret = read_op->prepare(s->yield, this); if (ret < 0) return ret; @@ -3626,7 +3633,7 @@ int RGWPutObj::get_data(const off_t fst, const off_t lst, bufferlist& bl) bool need_decompress; op_ret = rgw_compression_info_from_attrset(obj->get_attrs(), need_decompress, cs_info); if (op_ret < 0) { - ldpp_dout(s, 0) << "ERROR: failed to decode compression info" << dendl; + ldpp_dout(this, 0) << "ERROR: failed to decode compression info" << dendl; return -EIO; } @@ -3655,7 +3662,7 @@ int RGWPutObj::get_data(const off_t fst, const off_t lst, bufferlist& bl) return ret; filter->fixup_range(new_ofs, new_end); - ret = read_op->iterate(new_ofs, new_end, filter, s->yield); + ret = read_op->iterate(this, new_ofs, new_end, filter, s->yield); if (ret >= 0) ret = filter->flush(); @@ -3770,9 +3777,9 @@ void RGWPutObj::execute(optional_yield 
y) } // make reservation for notification if needed - rgw::notify::reservation_t res(store, s, s->object.get()); + rgw::notify::reservation_t res(this, store, s, s->object.get()); const auto event_type = rgw::notify::ObjectCreatedPut; - op_ret = rgw::notify::publish_reserve(event_type, res, obj_tags.get()); + op_ret = rgw::notify::publish_reserve(this, event_type, res, obj_tags.get()); if (op_ret < 0) { return; } @@ -3792,7 +3799,7 @@ void RGWPutObj::execute(optional_yield y) if (multipart) { RGWMPObj mp(s->object->get_name(), multipart_upload_id); - op_ret = get_multipart_info(s, mp.get_meta(), &upload_info); + op_ret = get_multipart_info(this, s, mp.get_meta(), &upload_info); if (op_ret < 0) { if (op_ret != -ENOENT) { ldpp_dout(this, 0) << "ERROR: get_multipart_info returned " << op_ret << ": " << cpp_strerror(-op_ret) << dendl; @@ -3846,7 +3853,7 @@ void RGWPutObj::execute(optional_yield y) rgw::sal::RGWRadosBucket bucket(store, copy_source_bucket_info); RGWObjState *astate; - op_ret = obj.get_obj_state(&obj_ctx, bucket, &astate, s->yield); + op_ret = obj.get_obj_state(this, &obj_ctx, bucket, &astate, s->yield); if (op_ret < 0) { ldpp_dout(this, 0) << "ERROR: get copy source obj state returned with error" << op_ret << dendl; return; @@ -4008,7 +4015,7 @@ void RGWPutObj::execute(optional_yield y) emplace_attr(RGW_ATTR_ETAG, std::move(bl)); populate_with_generic_attrs(s, attrs); - op_ret = rgw_get_request_metadata(s->cct, s->info, attrs); + op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs); if (op_ret < 0) { return; } @@ -4056,7 +4063,7 @@ void RGWPutObj::execute(optional_yield y) } // send request to notification manager - const auto ret = rgw::notify::publish_commit(s->object.get(), s->obj_size, mtime, etag, event_type, res); + const auto ret = rgw::notify::publish_commit(s->object.get(), s->obj_size, mtime, etag, event_type, res, this); if (ret < 0) { ldpp_dout(this, 1) << "ERROR: publishing notification failed, with error: " << ret << dendl; // too late to rollback operation, hence op_ret is not set here @@ -4119,9 +4126,9 @@ void RGWPostObj::execute(optional_yield y) } // make reservation for notification if needed - rgw::notify::reservation_t res(store, s, s->object.get()); + rgw::notify::reservation_t res(this, store, s, s->object.get()); const auto event_type = rgw::notify::ObjectCreatedPost; - op_ret = rgw::notify::publish_reserve(event_type, res, nullptr); + op_ret = rgw::notify::publish_reserve(this, event_type, res, nullptr); if (op_ret < 0) { return; } @@ -4289,7 +4296,7 @@ void RGWPostObj::execute(optional_yield y) } while (is_next_file_to_upload()); // send request to notification manager - const auto ret = rgw::notify::publish_commit(s->object.get(), ofs, ceph::real_clock::now(), etag, event_type, res); + const auto ret = rgw::notify::publish_commit(s->object.get(), ofs, ceph::real_clock::now(), etag, event_type, res, this); if (ret < 0) { ldpp_dout(this, 1) << "ERROR: publishing notification failed, with error: " << ret << dendl; // too late to rollback operation, hence op_ret is not set here @@ -4341,7 +4348,7 @@ int RGWPutMetadataAccount::init_processing(optional_yield y) return op_ret; } - op_ret = store->ctl()->user->get_attrs_by_uid(s->user->get_id(), &orig_attrs, + op_ret = store->ctl()->user->get_attrs_by_uid(this, s->user->get_id(), &orig_attrs, s->yield, &acct_op_tracker); if (op_ret < 0) { @@ -4354,7 +4361,7 @@ int RGWPutMetadataAccount::init_processing(optional_yield y) attrs.emplace(RGW_ATTR_ACL, std::move(acl_bl)); } - op_ret = 
rgw_get_request_metadata(s->cct, s->info, attrs, false); + op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs, false); if (op_ret < 0) { return op_ret; } @@ -4404,7 +4411,7 @@ void RGWPutMetadataAccount::execute(optional_yield y) { /* Params have been extracted earlier. See init_processing(). */ RGWUserInfo new_uinfo; - op_ret = store->ctl()->user->get_info_by_uid(s->user->get_id(), &new_uinfo, s->yield, + op_ret = store->ctl()->user->get_info_by_uid(this, s->user->get_id(), &new_uinfo, s->yield, RGWUserCtl::GetParams() .set_objv_tracker(&acct_op_tracker)); if (op_ret < 0) { @@ -4425,7 +4432,7 @@ void RGWPutMetadataAccount::execute(optional_yield y) /* We are passing here the current (old) user info to allow the function * optimize-out some operations. */ - op_ret = store->ctl()->user->store_info(new_uinfo, s->yield, + op_ret = store->ctl()->user->store_info(this, new_uinfo, s->yield, RGWUserCtl::PutParams() .set_old_info(&s->user->get_info()) .set_objv_tracker(&acct_op_tracker) @@ -4453,7 +4460,7 @@ void RGWPutMetadataBucket::execute(optional_yield y) return; } - op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false); + op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs, false); if (op_ret < 0) { return; } @@ -4464,7 +4471,7 @@ void RGWPutMetadataBucket::execute(optional_yield y) return; } - op_ret = retry_raced_bucket_write(s->bucket.get(), [this] { + op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this] { /* Encode special metadata first as we're using std::map::emplace under * the hood. This method will add the new items only if the map doesn't * contain such keys yet. */ @@ -4515,7 +4522,7 @@ void RGWPutMetadataBucket::execute(optional_yield y) /* Setting attributes also stores the provided bucket info. Due * to this fact, the new quota settings can be serialized with * the same call. 
*/ - op_ret = s->bucket->set_instance_attrs(attrs, s->yield); + op_ret = s->bucket->set_instance_attrs(this, attrs, s->yield); return op_ret; }); } @@ -4548,13 +4555,13 @@ void RGWPutMetadataObject::execute(optional_yield y) return; } - op_ret = rgw_get_request_metadata(s->cct, s->info, attrs); + op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs); if (op_ret < 0) { return; } /* check if obj exists, read orig attrs */ - op_ret = s->object->get_obj_attrs(s->obj_ctx, s->yield, &target_obj); + op_ret = s->object->get_obj_attrs(s->obj_ctx, s->yield, s, &target_obj); if (op_ret < 0) { return; } @@ -4579,7 +4586,7 @@ void RGWPutMetadataObject::execute(optional_yield y) } } - op_ret = s->object->set_obj_attrs(s->obj_ctx, &attrs, &rmattrs, s->yield, &target_obj); + op_ret = s->object->set_obj_attrs(this, s->obj_ctx, &attrs, &rmattrs, s->yield, &target_obj); } int RGWDeleteObj::handle_slo_manifest(bufferlist& bl, optional_yield y) @@ -4709,7 +4716,7 @@ void RGWDeleteObj::execute(optional_yield y) bool check_obj_lock = s->object->have_instance() && s->bucket->get_info().obj_lock_enabled(); if (!rgw::sal::RGWObject::empty(s->object.get())) { - op_ret = s->object->get_obj_attrs(s->obj_ctx, s->yield); + op_ret = s->object->get_obj_attrs(s->obj_ctx, s->yield, this); if (op_ret < 0) { if (need_object_expiration() || multipart_delete) { return; @@ -4755,11 +4762,11 @@ void RGWDeleteObj::execute(optional_yield y) } // make reservation for notification if needed - rgw::notify::reservation_t res(store, s, s->object.get()); + rgw::notify::reservation_t res(this, store, s, s->object.get()); const auto versioned_object = s->bucket->versioning_enabled(); const auto event_type = versioned_object && s->object->get_instance().empty() ? rgw::notify::ObjectRemovedDeleteMarkerCreated : rgw::notify::ObjectRemovedDelete; - op_ret = rgw::notify::publish_reserve(event_type, res, nullptr); + op_ret = rgw::notify::publish_reserve(this, event_type, res, nullptr); if (op_ret < 0) { return; } @@ -4784,7 +4791,7 @@ void RGWDeleteObj::execute(optional_yield y) return; } - op_ret = s->object->delete_object(obj_ctx, s->owner, s->bucket_owner, unmod_since, + op_ret = s->object->delete_object(this, obj_ctx, s->owner, s->bucket_owner, unmod_since, s->system_request, epoch, version_id, s->yield); if (op_ret >= 0) { delete_marker = s->object->get_delete_marker(); @@ -4808,7 +4815,7 @@ void RGWDeleteObj::execute(optional_yield y) const auto obj_state = obj_ctx->get_state(s->object->get_obj()); // send request to notification manager - const auto ret = rgw::notify::publish_commit(s->object.get(), obj_state->size, obj_state->mtime, attrs[RGW_ATTR_ETAG].to_str(), event_type, res); + const auto ret = rgw::notify::publish_commit(s->object.get(), obj_state->size, obj_state->mtime, attrs[RGW_ATTR_ETAG].to_str(), event_type, res, this); if (ret < 0) { ldpp_dout(this, 1) << "ERROR: publishing notification failed, with error: " << ret << dendl; // too late to rollback operation, hence op_ret is not set here @@ -4820,7 +4827,8 @@ void RGWDeleteObj::execute(optional_yield y) bool RGWCopyObj::parse_copy_location(const std::string_view& url_src, string& bucket_name, - rgw_obj_key& key) + rgw_obj_key& key, + req_state* s) { std::string_view name_str; std::string_view params_str; @@ -4852,7 +4860,7 @@ bool RGWCopyObj::parse_copy_location(const std::string_view& url_src, if (! 
params_str.empty()) { RGWHTTPArgs args; args.set(std::string(params_str)); - args.parse(); + args.parse(s); key.instance = args.get("versionId", NULL); } @@ -4882,7 +4890,7 @@ int RGWCopyObj::verify_permission(optional_yield y) return op_ret; } - op_ret = src_bucket->load_by_name(src_tenant_name, src_bucket_name, s->bucket_instance_id, + op_ret = src_bucket->load_by_name(this, src_tenant_name, src_bucket_name, s->bucket_instance_id, s->sysobj_ctx, s->yield); if (op_ret < 0) { if (op_ret == -ENOENT) { @@ -4900,7 +4908,7 @@ int RGWCopyObj::verify_permission(optional_yield y) rgw_placement_rule src_placement; /* check source object permissions */ - op_ret = read_obj_policy(store, s, src_bucket->get_info(), src_bucket->get_attrs(), &src_acl, &src_placement.storage_class, + op_ret = read_obj_policy(this, store, s, src_bucket->get_info(), src_bucket->get_attrs(), &src_acl, &src_placement.storage_class, src_policy, src_bucket.get(), src_object.get(), y); if (op_ret < 0) { return op_ret; @@ -4952,7 +4960,7 @@ int RGWCopyObj::verify_permission(optional_yield y) } return op_ret; } - op_ret = dest_bucket->load_by_name(dest_tenant_name, dest_bucket_name, std::string(), + op_ret = dest_bucket->load_by_name(this, dest_tenant_name, dest_bucket_name, std::string(), s->sysobj_ctx, s->yield); if (op_ret < 0) { if (op_ret == -ENOENT) { @@ -4967,7 +4975,7 @@ int RGWCopyObj::verify_permission(optional_yield y) dest_object->set_atomic(s->obj_ctx); /* check dest bucket permissions */ - op_ret = read_bucket_policy(store, s, dest_bucket->get_info(), + op_ret = read_bucket_policy(this, store, s, dest_bucket->get_info(), dest_bucket->get_attrs(), &dest_bucket_policy, dest_bucket->get_key(), y); if (op_ret < 0) { @@ -5032,7 +5040,7 @@ int RGWCopyObj::init_common() dest_policy.encode(aclbl); emplace_attr(RGW_ATTR_ACL, std::move(aclbl)); - op_ret = rgw_get_request_metadata(s->cct, s->info, attrs); + op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs); if (op_ret < 0) { return op_ret; } @@ -5076,9 +5084,9 @@ void RGWCopyObj::execute(optional_yield y) } // make reservation for notification if needed - rgw::notify::reservation_t res(store, s, s->object.get()); + rgw::notify::reservation_t res(this, store, s, s->object.get()); const auto event_type = rgw::notify::ObjectCreatedCopy; - op_ret = rgw::notify::publish_reserve(event_type, res, nullptr); + op_ret = rgw::notify::publish_reserve(this, event_type, res, nullptr); if (op_ret < 0) { return; } @@ -5097,7 +5105,7 @@ void RGWCopyObj::execute(optional_yield y) if (!s->system_request) { // no quota enforcement for system requests // get src object size (cached in obj_ctx from verify_permission()) RGWObjState* astate = nullptr; - op_ret = src_object->get_obj_state(s->obj_ctx, *src_bucket, &astate, + op_ret = src_object->get_obj_state(this, s->obj_ctx, *src_bucket, &astate, s->yield, true); if (op_ret < 0) { return; @@ -5149,7 +5157,7 @@ void RGWCopyObj::execute(optional_yield y) s->yield); // send request to notification manager - const auto ret = rgw::notify::publish_commit(s->object.get(), s->obj_size, mtime, etag, event_type, res); + const auto ret = rgw::notify::publish_commit(s->object.get(), s->obj_size, mtime, etag, event_type, res, this); if (ret < 0) { ldpp_dout(this, 1) << "ERROR: publishing notification failed, with error: " << ret << dendl; // too late to rollback operation, hence op_ret is not set here @@ -5165,12 +5173,12 @@ int RGWGetACLs::verify_permission(optional_yield y) rgw::IAM::s3GetObjectVersionAcl; if (s->iam_policy && 
s->iam_policy->has_partial_conditional(S3_EXISTING_OBJTAG)){ - rgw_iam_add_existing_objtags(store, s, iam_action); + rgw_iam_add_existing_objtags(this, store, s, iam_action); } if (! s->iam_user_policies.empty()) { for (auto& user_policy : s->iam_user_policies) { if (user_policy.has_partial_conditional(S3_EXISTING_OBJTAG)) { - rgw_iam_add_existing_objtags(store, s, iam_action); + rgw_iam_add_existing_objtags(this, store, s, iam_action); } } } @@ -5214,7 +5222,7 @@ int RGWPutACLs::verify_permission(optional_yield y) rgw_add_grant_to_iam_environment(s->env, s); if (!rgw::sal::RGWObject::empty(s->object.get())) { auto iam_action = s->object->get_instance().empty() ? rgw::IAM::s3PutObjectAcl : rgw::IAM::s3PutObjectVersionAcl; - op_ret = rgw_iam_add_existing_objtags(store, s, iam_action); + op_ret = rgw_iam_add_existing_objtags(this, store, s, iam_action); perm = verify_object_permission(this, s, iam_action); } else { perm = verify_bucket_permission(this, s, rgw::IAM::s3PutBucketAcl); @@ -5363,7 +5371,7 @@ void RGWPutACLs::execute(optional_yield y) if (s->canned_acl.empty()) { in_data.append(data); } - op_ret = store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -5376,7 +5384,7 @@ void RGWPutACLs::execute(optional_yield y) *_dout << dendl; } - op_ret = policy->rebuild(store->ctl()->user, &owner, new_policy, s->err.message); + op_ret = policy->rebuild(this, store->ctl()->user, &owner, new_policy, s->err.message); if (op_ret < 0) return; @@ -5388,7 +5396,7 @@ void RGWPutACLs::execute(optional_yield y) if (s->bucket_access_conf && s->bucket_access_conf->block_public_acls() && - new_policy.is_public()) { + new_policy.is_public(this)) { op_ret = -EACCES; return; } @@ -5398,13 +5406,13 @@ void RGWPutACLs::execute(optional_yield y) if (!rgw::sal::RGWObject::empty(s->object.get())) { s->object->set_atomic(s->obj_ctx); //if instance is empty, we should modify the latest object - op_ret = s->object->modify_obj_attrs(s->obj_ctx, RGW_ATTR_ACL, bl, s->yield); + op_ret = s->object->modify_obj_attrs(s->obj_ctx, RGW_ATTR_ACL, bl, s->yield, this); } else { map attrs = s->bucket_attrs; attrs[RGW_ATTR_ACL] = bl; op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket->get_info(), attrs, &s->bucket->get_info().objv_tracker, - s->yield); + s->yield, this); } if (op_ret == -ECANCELED) { op_ret = 0; /* lost a race, but it's ok because acls are immutable */ @@ -5490,7 +5498,7 @@ void RGWPutLC::execute(optional_yield y) ldpp_dout(this, 15) << "New LifecycleConfiguration:" << ss.str() << dendl; } - op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -5506,7 +5514,7 @@ void RGWPutLC::execute(optional_yield y) void RGWDeleteLC::execute(optional_yield y) { bufferlist data; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -5550,16 +5558,16 @@ void 
RGWPutCORS::execute(optional_yield y) if (op_ret < 0) return; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; } - op_ret = retry_raced_bucket_write(s->bucket.get(), [this] { + op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this] { rgw::sal::RGWAttrs attrs(s->bucket_attrs); attrs[RGW_ATTR_CORS] = cors_bl; - return s->bucket->set_instance_attrs(attrs, s->yield); + return s->bucket->set_instance_attrs(this, attrs, s->yield); }); } @@ -5572,13 +5580,13 @@ int RGWDeleteCORS::verify_permission(optional_yield y) void RGWDeleteCORS::execute(optional_yield y) { bufferlist data; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; } - op_ret = retry_raced_bucket_write(s->bucket.get(), [this] { + op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this] { op_ret = read_bucket_cors(); if (op_ret < 0) return op_ret; @@ -5591,7 +5599,7 @@ void RGWDeleteCORS::execute(optional_yield y) rgw::sal::RGWAttrs attrs(s->bucket_attrs); attrs.erase(RGW_ATTR_CORS); - op_ret = s->bucket->set_instance_attrs(attrs, s->yield); + op_ret = s->bucket->set_instance_attrs(this, attrs, s->yield); if (op_ret < 0) { ldpp_dout(this, 0) << "RGWLC::RGWDeleteCORS() failed to set attrs on bucket=" << s->bucket->get_name() << " returned err=" << op_ret << dendl; @@ -5682,7 +5690,7 @@ void RGWSetRequestPayment::pre_exec() void RGWSetRequestPayment::execute(optional_yield y) { - op_ret = store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -5694,7 +5702,7 @@ void RGWSetRequestPayment::execute(optional_yield y) return; s->bucket->get_info().requester_pays = requester_pays; - op_ret = s->bucket->put_instance_info(false, real_time()); + op_ret = s->bucket->put_instance_info(this, false, real_time()); if (op_ret < 0) { ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket->get_name() << " returned err=" << op_ret << dendl; @@ -5762,15 +5770,15 @@ void RGWInitMultipart::execute(optional_yield y) if (op_ret != 0) return; - op_ret = rgw_get_request_metadata(s->cct, s->info, attrs); + op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs); if (op_ret < 0) { return; } // make reservation for notification if needed - rgw::notify::reservation_t res(store, s, s->object.get()); + rgw::notify::reservation_t res(this, store, s, s->object.get()); const auto event_type = rgw::notify::ObjectCreatedPost; - op_ret = rgw::notify::publish_reserve(event_type, res, nullptr); + op_ret = rgw::notify::publish_reserve(this, event_type, res, nullptr); if (op_ret < 0) { return; } @@ -5809,11 +5817,11 @@ void RGWInitMultipart::execute(optional_yield y) op_ret = obj_op->prepare(s->yield); - op_ret = obj_op->write_meta(bl.length(), 0, s->yield); + op_ret = obj_op->write_meta(this, bl.length(), 0, s->yield); } while (op_ret == -EEXIST); // send request to 
notification manager - const auto ret = rgw::notify::publish_commit(s->object.get(), s->obj_size, ceph::real_clock::now(), attrs[RGW_ATTR_ETAG].to_str(), event_type, res); + const auto ret = rgw::notify::publish_commit(s->object.get(), s->obj_size, ceph::real_clock::now(), attrs[RGW_ATTR_ETAG].to_str(), event_type, res, this); if (ret < 0) { ldpp_dout(this, 1) << "ERROR: publishing notification failed, with error: " << ret << dendl; // too late to rollback operation, hence op_ret is not set here @@ -5916,9 +5924,9 @@ void RGWCompleteMultipart::execute(optional_yield y) mp.init(s->object->get_name(), upload_id); // make reservation for notification if needed - rgw::notify::reservation_t res(store, s, s->object.get()); + rgw::notify::reservation_t res(this, store, s, s->object.get()); const auto event_type = rgw::notify::ObjectCreatedCompleteMultipartUpload; - op_ret = rgw::notify::publish_reserve(event_type, res, nullptr); + op_ret = rgw::notify::publish_reserve(this, event_type, res, nullptr); if (op_ret < 0) { return; } @@ -5952,8 +5960,8 @@ void RGWCompleteMultipart::execute(optional_yield y) s->cct->_conf.get_val("rgw_mp_lock_max_time"); utime_t dur(max_lock_secs_mp, 0); - serializer = meta_obj->get_serializer("RGWCompleteMultipart"); - op_ret = serializer->try_lock(dur, y); + serializer = meta_obj->get_serializer(this, "RGWCompleteMultipart"); + op_ret = serializer->try_lock(this, dur, y); if (op_ret < 0) { ldpp_dout(this, 0) << "failed to acquire lock" << dendl; op_ret = -ERR_INTERNAL_ERROR; @@ -5961,7 +5969,7 @@ void RGWCompleteMultipart::execute(optional_yield y) return; } - op_ret = meta_obj->get_obj_attrs(s->obj_ctx, s->yield); + op_ret = meta_obj->get_obj_attrs(s->obj_ctx, s->yield, this); if (op_ret < 0) { ldpp_dout(this, 0) << "ERROR: failed to get obj attrs, obj=" << meta_obj << " ret=" << op_ret << dendl; @@ -5970,7 +5978,7 @@ void RGWCompleteMultipart::execute(optional_yield y) attrs = meta_obj->get_attrs(); do { - op_ret = list_multipart_parts(store, s, upload_id, meta_oid, max_parts, + op_ret = list_multipart_parts(this, store, s, upload_id, meta_oid, max_parts, marker, obj_parts, &marker, &truncated); if (op_ret == -ENOENT) { op_ret = -ERR_NO_SUCH_UPLOAD; @@ -6027,7 +6035,7 @@ void RGWCompleteMultipart::execute(optional_yield y) op_ret = -ERR_INVALID_PART; return; } else { - manifest.append(obj_part.manifest, store->svc()->zone); + manifest.append(this, obj_part.manifest, store->svc()->zone); } bool part_compressed = (obj_part.cs_info.compression_type != "none"); @@ -6118,13 +6126,13 @@ void RGWCompleteMultipart::execute(optional_yield y) if (op_ret < 0) return; - op_ret = obj_op->write_meta(ofs, accounted_size, s->yield); + op_ret = obj_op->write_meta(this, ofs, accounted_size, s->yield); if (op_ret < 0) return; // remove the upload obj string version_id; - int r = meta_obj->delete_object(s->obj_ctx, ACLOwner(), ACLOwner(), ceph::real_time(), false, 0, version_id, null_yield); + int r = meta_obj->delete_object(this, s->obj_ctx, ACLOwner(), ACLOwner(), ceph::real_time(), false, 0, version_id, null_yield); if (r >= 0) { /* serializer's exclusive lock is released */ serializer->clear_locked(); @@ -6133,7 +6141,7 @@ void RGWCompleteMultipart::execute(optional_yield y) } // send request to notification manager - const auto ret = rgw::notify::publish_commit(s->object.get(), ofs, ceph::real_clock::now(), final_etag_str, event_type, res); + const auto ret = rgw::notify::publish_commit(s->object.get(), ofs, ceph::real_clock::now(), final_etag_str, event_type, res, this); if (ret 
< 0) { ldpp_dout(this, 1) << "ERROR: publishing notification failed, with error: " << ret << dendl; // too late to rollback operation, hence op_ret is not set here @@ -6204,12 +6212,12 @@ void RGWAbortMultipart::execute(optional_yield y) mp.init(s->object->get_name(), upload_id); meta_oid = mp.get_meta(); - op_ret = get_multipart_info(s, meta_oid, nullptr); + op_ret = get_multipart_info(this, s, meta_oid, nullptr); if (op_ret < 0) return; RGWObjectCtx *obj_ctx = static_cast(s->obj_ctx); - op_ret = abort_multipart_upload(store, s->cct, obj_ctx, s->bucket->get_info(), mp); + op_ret = abort_multipart_upload(this, store, s->cct, obj_ctx, s->bucket->get_info(), mp); } int RGWListMultipart::verify_permission(optional_yield y) @@ -6237,11 +6245,11 @@ void RGWListMultipart::execute(optional_yield y) mp.init(s->object->get_name(), upload_id); meta_oid = mp.get_meta(); - op_ret = get_multipart_info(s, meta_oid, nullptr); + op_ret = get_multipart_info(this, s, meta_oid, nullptr); if (op_ret < 0) return; - op_ret = list_multipart_parts(store, s, upload_id, meta_oid, max_parts, + op_ret = list_multipart_parts(this, store, s, upload_id, meta_oid, max_parts, marker, parts, NULL, &truncated); } @@ -6283,7 +6291,7 @@ void RGWListBucketMultiparts::execute(optional_yield y) } marker_meta = marker.get_meta(); - op_ret = list_bucket_multiparts(store, s->bucket->get_info(), prefix, marker_meta, delimiter, + op_ret = list_bucket_multiparts(this, store, s->bucket->get_info(), prefix, marker_meta, delimiter, max_uploads, &objs, &common_prefixes, &is_truncated); if (op_ret < 0) { return; @@ -6476,7 +6484,7 @@ void RGWDeleteMultiObj::execute(optional_yield y) // verify_object_lock bool check_obj_lock = obj->have_instance() && bucket->get_info().obj_lock_enabled(); if (check_obj_lock) { - int get_attrs_response = obj->get_obj_attrs(s->obj_ctx, s->yield); + int get_attrs_response = obj->get_obj_attrs(s->obj_ctx, s->yield, this); if (get_attrs_response < 0) { if (get_attrs_response == -ENOENT) { // object maybe delete_marker, skip check_obj_lock @@ -6498,10 +6506,10 @@ void RGWDeleteMultiObj::execute(optional_yield y) } // make reservation for notification if needed const auto versioned_object = s->bucket->versioning_enabled(); - rgw::notify::reservation_t res(store, s, obj.get()); + rgw::notify::reservation_t res(this, store, s, obj.get()); const auto event_type = versioned_object && obj->get_instance().empty() ? rgw::notify::ObjectRemovedDeleteMarkerCreated : rgw::notify::ObjectRemovedDelete; - op_ret = rgw::notify::publish_reserve(event_type, res, nullptr); + op_ret = rgw::notify::publish_reserve(this, event_type, res, nullptr); if (op_ret < 0) { send_partial_response(*iter, false, "", op_ret); continue; @@ -6509,7 +6517,7 @@ void RGWDeleteMultiObj::execute(optional_yield y) obj->set_atomic(obj_ctx); - op_ret = obj->delete_object(obj_ctx, s->owner, s->bucket_owner, ceph::real_time(), + op_ret = obj->delete_object(this, obj_ctx, s->owner, s->bucket_owner, ceph::real_time(), false, 0, version_id, s->yield); if (op_ret == -ENOENT) { op_ret = 0; @@ -6522,7 +6530,7 @@ void RGWDeleteMultiObj::execute(optional_yield y) const auto etag = obj_state->get_attr(RGW_ATTR_ETAG, etag_bl) ? 
etag_bl.to_str() : ""; // send request to notification manager - const auto ret = rgw::notify::publish_commit(obj.get(), obj_state->size, obj_state->mtime, etag, event_type, res); + const auto ret = rgw::notify::publish_commit(obj.get(), obj_state->size, obj_state->mtime, etag, event_type, res, this); if (ret < 0) { ldpp_dout(this, 1) << "ERROR: publishing notification failed, with error: " << ret << dendl; // too late to rollback operation, hence op_ret is not set here @@ -6550,7 +6558,7 @@ bool RGWBulkDelete::Deleter::verify_permission(RGWBucketInfo& binfo, optional_yield y) { RGWAccessControlPolicy bacl(store->ctx()); - int ret = read_bucket_policy(store, s, binfo, battrs, &bacl, binfo.bucket, y); + int ret = read_bucket_policy(dpp, store, s, binfo, battrs, &bacl, binfo.bucket, y); if (ret < 0) { return false; } @@ -6571,12 +6579,12 @@ bool RGWBulkDelete::Deleter::delete_single(const acct_path_t& path, optional_yie ACLOwner bowner; RGWObjVersionTracker ot; - int ret = store->get_bucket(s->user.get(), s->user->get_tenant(), path.bucket_name, &bucket, y); + int ret = store->get_bucket(dpp, s->user.get(), s->user->get_tenant(), path.bucket_name, &bucket, y); if (ret < 0) { goto binfo_fail; } - ret = bucket->get_bucket_info(s->yield); + ret = bucket->get_bucket_info(dpp, s->yield); if (ret < 0) { goto binfo_fail; } @@ -6594,12 +6602,12 @@ bool RGWBulkDelete::Deleter::delete_single(const acct_path_t& path, optional_yie std::unique_ptr obj = bucket->get_object(path.obj_key); obj->set_atomic(s->obj_ctx); - ret = obj->delete_object(s->obj_ctx, bowner, bucket_owner, ceph::real_time(), false, 0, version_id, s->yield); + ret = obj->delete_object(dpp, s->obj_ctx, bowner, bucket_owner, ceph::real_time(), false, 0, version_id, s->yield); if (ret < 0) { goto delop_fail; } } else { - ret = bucket->remove_bucket(false, string(), string(), true, &s->info, s->yield); + ret = bucket->remove_bucket(dpp, false, string(), string(), true, &s->info, s->yield); if (ret < 0) { goto delop_fail; } @@ -6611,10 +6619,10 @@ bool RGWBulkDelete::Deleter::delete_single(const acct_path_t& path, optional_yie binfo_fail: if (-ENOENT == ret) { - ldpp_dout(s, 20) << "cannot find bucket = " << path.bucket_name << dendl; + ldpp_dout(dpp, 20) << "cannot find bucket = " << path.bucket_name << dendl; num_unfound++; } else { - ldpp_dout(s, 20) << "cannot get bucket info, ret = " << ret << dendl; + ldpp_dout(dpp, 20) << "cannot get bucket info, ret = " << ret << dendl; fail_desc_t failed_item = { .err = ret, @@ -6625,7 +6633,7 @@ binfo_fail: return false; auth_fail: - ldpp_dout(s, 20) << "wrong auth for " << path << dendl; + ldpp_dout(dpp, 20) << "wrong auth for " << path << dendl; { fail_desc_t failed_item = { .err = ret, @@ -6637,7 +6645,7 @@ auth_fail: delop_fail: if (-ENOENT == ret) { - ldpp_dout(s, 20) << "cannot find entry " << path << dendl; + ldpp_dout(dpp, 20) << "cannot find entry " << path << dendl; num_unfound++; } else { fail_desc_t failed_item = { @@ -6651,9 +6659,9 @@ delop_fail: bool RGWBulkDelete::Deleter::delete_chunk(const std::list& paths, optional_yield y) { - ldpp_dout(s, 20) << "in delete_chunk" << dendl; + ldpp_dout(dpp, 20) << "in delete_chunk" << dendl; for (auto path : paths) { - ldpp_dout(s, 20) << "bulk deleting path: " << path << dendl; + ldpp_dout(dpp, 20) << "bulk deleting path: " << path << dendl; delete_single(path, y); } @@ -6775,7 +6783,7 @@ int RGWBulkUploadOp::handle_dir_verify_permission(optional_yield y) if (s->user->get_max_buckets() > 0) { rgw::sal::RGWBucketList buckets; std::string marker; 
- op_ret = rgw_read_user_buckets(store, s->user->get_user(), buckets, + op_ret = rgw_read_user_buckets(this, store, s->user->get_user(), buckets, marker, std::string(), s->user->get_max_buckets(), false, y); if (op_ret < 0) { @@ -6854,7 +6862,7 @@ int RGWBulkUploadOp::handle_dir(const std::string_view path, optional_yield y) placement_rule.storage_class = s->info.storage_class; forward_req_info(s->cct, info, bucket_name); - op_ret = store->create_bucket(*s->user, new_bucket, + op_ret = store->create_bucket(this, *s->user, new_bucket, store->get_zonegroup().get_id(), placement_rule, swift_ver_location, pquota_info, policy, attrs, @@ -6888,10 +6896,10 @@ int RGWBulkUploadOp::handle_dir(const std::string_view path, optional_yield y) op_ret = store->ctl()->bucket->link_bucket(s->user->get_id(), new_bucket, out_info.creation_time, - s->yield, false); + s->yield, s, false); if (op_ret && !existed && op_ret != -EEXIST) { /* if it exists (or previously existed), don't remove it! */ - op_ret = store->ctl()->bucket->unlink_bucket(s->user->get_id(), new_bucket, s->yield); + op_ret = store->ctl()->bucket->unlink_bucket(s->user->get_id(), new_bucket, s->yield, this); if (op_ret < 0) { ldpp_dout(this, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret << dendl; } @@ -6911,7 +6919,7 @@ bool RGWBulkUploadOp::handle_file_verify_permission(RGWBucketInfo& binfo, optional_yield y) { RGWAccessControlPolicy bacl(store->ctx()); - op_ret = read_bucket_policy(store, s, binfo, battrs, &bacl, binfo.bucket, y); + op_ret = read_bucket_policy(this, store, s, binfo, battrs, &bacl, binfo.bucket, y); if (op_ret < 0) { ldpp_dout(this, 20) << "cannot read_policy() for bucket" << dendl; return false; @@ -6962,7 +6970,7 @@ int RGWBulkUploadOp::handle_file(const std::string_view path, std::unique_ptr bucket; ACLOwner bowner; - op_ret = store->get_bucket(s->user.get(), rgw_bucket(rgw_bucket_key(s->user->get_tenant(), bucket_name)), &bucket, y); + op_ret = store->get_bucket(this, s->user.get(), rgw_bucket(rgw_bucket_key(s->user->get_tenant(), bucket_name)), &bucket, y); if (op_ret == -ENOENT) { ldpp_dout(this, 20) << "non existent directory=" << bucket_name << dendl; } else if (op_ret < 0) { @@ -7260,7 +7268,7 @@ void RGWGetAttrs::execute(optional_yield y) s->object->set_atomic(s->obj_ctx); - op_ret = s->object->get_obj_attrs(s->obj_ctx, s->yield); + op_ret = s->object->get_obj_attrs(s->obj_ctx, s->yield, this); if (op_ret < 0) { ldpp_dout(this, 0) << "ERROR: failed to get obj attrs, obj=" << s->object << " ret=" << op_ret << dendl; @@ -7316,7 +7324,7 @@ void RGWRMAttrs::execute(optional_yield y) s->object->set_atomic(s->obj_ctx); - op_ret = s->object->set_obj_attrs(s->obj_ctx, nullptr, &attrs, y); + op_ret = s->object->set_obj_attrs(this, s->obj_ctx, nullptr, &attrs, y); if (op_ret < 0) { ldpp_dout(this, 0) << "ERROR: failed to delete obj attrs, obj=" << s->object << " ret=" << op_ret << dendl; @@ -7353,14 +7361,14 @@ void RGWSetAttrs::execute(optional_yield y) if (!rgw::sal::RGWObject::empty(s->object.get())) { rgw::sal::RGWAttrs a(attrs); - op_ret = s->object->set_obj_attrs(s->obj_ctx, &a, nullptr, y); + op_ret = s->object->set_obj_attrs(this, s->obj_ctx, &a, nullptr, y); } else { for (auto& iter : attrs) { s->bucket_attrs[iter.first] = std::move(iter.second); } op_ret = store->ctl()->bucket->set_bucket_instance_attrs( s->bucket->get_info(), attrs, &s->bucket->get_info().objv_tracker, - s->yield); + s->yield, this); } } /* RGWSetAttrs::execute() */ @@ -7378,14 +7386,14 @@ void RGWGetObjLayout::execute(optional_yield y) 
std::unique_ptr stat_op(s->object->get_read_op(s->obj_ctx)); - op_ret = stat_op->prepare(y); + op_ret = stat_op->prepare(y, this); if (op_ret < 0) { return; } head_obj = stat_op->result.head_obj; - op_ret = stat_op->get_manifest(&manifest, y); + op_ret = stat_op->get_manifest(this, &manifest, y); } @@ -7413,7 +7421,7 @@ void RGWConfigBucketMetaSearch::execute(optional_yield y) s->bucket->get_info().mdsearch_config = mdsearch_config; - op_ret = s->bucket->put_instance_info(false, real_time()); + op_ret = s->bucket->put_instance_info(this, false, real_time()); if (op_ret < 0) { ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket->get_name() << " returned err=" << op_ret << dendl; @@ -7454,7 +7462,7 @@ void RGWDelBucketMetaSearch::execute(optional_yield y) { s->bucket->get_info().mdsearch_config.clear(); - op_ret = s->bucket->put_instance_info(false, real_time()); + op_ret = s->bucket->put_instance_info(this, false, real_time()); if (op_ret < 0) { ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket->get_name() << " returned err=" << op_ret << dendl; @@ -7478,11 +7486,11 @@ int RGWHandler::init(rgw::sal::RGWRadosStore *_store, return 0; } -int RGWHandler::do_init_permissions(optional_yield y) +int RGWHandler::do_init_permissions(const DoutPrefixProvider *dpp, optional_yield y) { - int ret = rgw_build_bucket_policies(store, s, y); + int ret = rgw_build_bucket_policies(dpp, store, s, y); if (ret < 0) { - ldpp_dout(s, 10) << "init_permissions on " << s->bucket + ldpp_dout(dpp, 10) << "init_permissions on " << s->bucket << " failed, ret=" << ret << dendl; return ret==-ENODATA ? -EACCES : ret; } @@ -7497,7 +7505,7 @@ int RGWHandler::do_read_permissions(RGWOp *op, bool only_bucket, optional_yield /* already read bucket info */ return 0; } - int ret = rgw_build_object_policies(store, s, op->prefetch_data(), y); + int ret = rgw_build_object_policies(op, store, s, op->prefetch_data(), y); if (ret < 0) { ldpp_dout(op, 10) << "read_permissions on " << s->bucket << ":" @@ -7575,7 +7583,7 @@ void RGWPutBucketPolicy::execute(optional_yield y) return; } - op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 20) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -7591,10 +7599,10 @@ void RGWPutBucketPolicy::execute(optional_yield y) return; } - op_ret = retry_raced_bucket_write(s->bucket.get(), [&p, this, &attrs] { + op_ret = retry_raced_bucket_write(this, s->bucket.get(), [&p, this, &attrs] { attrs[RGW_ATTR_IAM_POLICY].clear(); attrs[RGW_ATTR_IAM_POLICY].append(p.text); - op_ret = s->bucket->set_instance_attrs(attrs, s->yield); + op_ret = s->bucket->set_instance_attrs(this, attrs, s->yield); return op_ret; }); } catch (rgw::IAM::PolicyParseException& e) { @@ -7665,10 +7673,10 @@ int RGWDeleteBucketPolicy::verify_permission(optional_yield y) void RGWDeleteBucketPolicy::execute(optional_yield y) { - op_ret = retry_raced_bucket_write(s->bucket.get(), [this] { + op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this] { rgw::sal::RGWAttrs attrs(s->bucket_attrs); attrs.erase(RGW_ATTR_IAM_POLICY); - op_ret = s->bucket->set_instance_attrs(attrs, s->yield); + op_ret = s->bucket->set_instance_attrs(this, attrs, s->yield); return op_ret; }); } @@ -7709,7 +7717,7 @@ void RGWPutBucketObjectLock::execute(optional_yield y) try { 
RGWXMLDecoder::decode_xml("ObjectLockConfiguration", obj_lock, &parser, true); } catch (RGWXMLDecoder::err& err) { - ldout(s->cct, 5) << "unexpected xml:" << err << dendl; + ldpp_dout(this, 5) << "unexpected xml:" << err << dendl; op_ret = -ERR_MALFORMED_XML; return; } @@ -7719,15 +7727,15 @@ void RGWPutBucketObjectLock::execute(optional_yield y) return; } - op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { - ldout(s->cct, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl; + ldpp_dout(this, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl; return; } - op_ret = retry_raced_bucket_write(s->bucket.get(), [this] { + op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this] { s->bucket->get_info().obj_lock = obj_lock; - op_ret = s->bucket->put_instance_info(false, real_time()); + op_ret = s->bucket->put_instance_info(this, false, real_time()); return op_ret; }); return; @@ -7808,7 +7816,7 @@ void RGWPutObjRetention::execute(optional_yield y) obj_retention.encode(bl); //check old retention - op_ret = s->object->get_obj_attrs(s->obj_ctx, s->yield); + op_ret = s->object->get_obj_attrs(s->obj_ctx, s->yield, this); if (op_ret < 0) { ldpp_dout(this, 0) << "ERROR: get obj attr error"<< dendl; return; @@ -7832,7 +7840,7 @@ void RGWPutObjRetention::execute(optional_yield y) } } - op_ret = s->object->modify_obj_attrs(s->obj_ctx, RGW_ATTR_OBJECT_RETENTION, bl, s->yield); + op_ret = s->object->modify_obj_attrs(s->obj_ctx, RGW_ATTR_OBJECT_RETENTION, bl, s->yield, this); return; } @@ -7857,7 +7865,7 @@ void RGWGetObjRetention::execute(optional_yield y) op_ret = -ERR_INVALID_REQUEST; return; } - op_ret = s->object->get_obj_attrs(s->obj_ctx, s->yield); + op_ret = s->object->get_obj_attrs(s->obj_ctx, s->yield, this); if (op_ret < 0) { ldpp_dout(this, 0) << "ERROR: failed to get obj attrs, obj=" << s->object << " ret=" << op_ret << dendl; @@ -7874,7 +7882,7 @@ void RGWGetObjRetention::execute(optional_yield y) try { obj_retention.decode(iter); } catch (const buffer::error& e) { - ldout(s->cct, 0) << __func__ << "decode object retention config failed" << dendl; + ldpp_dout(this, 0) << __func__ << "decode object retention config failed" << dendl; op_ret = -EIO; return; } @@ -7920,14 +7928,14 @@ void RGWPutObjLegalHold::execute(optional_yield y) { try { RGWXMLDecoder::decode_xml("LegalHold", obj_legal_hold, &parser, true); } catch (RGWXMLDecoder::err &err) { - ldout(s->cct, 5) << "unexpected xml:" << err << dendl; + ldpp_dout(this, 5) << "unexpected xml:" << err << dendl; op_ret = -ERR_MALFORMED_XML; return; } bufferlist bl; obj_legal_hold.encode(bl); //if instance is empty, we should modify the latest object - op_ret = s->object->modify_obj_attrs(s->obj_ctx, RGW_ATTR_OBJECT_LEGAL_HOLD, bl, s->yield); + op_ret = s->object->modify_obj_attrs(s->obj_ctx, RGW_ATTR_OBJECT_LEGAL_HOLD, bl, s->yield, this); return; } @@ -7952,7 +7960,7 @@ void RGWGetObjLegalHold::execute(optional_yield y) return; } map attrs; - op_ret = s->object->get_obj_attrs(s->obj_ctx, s->yield); + op_ret = s->object->get_obj_attrs(s->obj_ctx, s->yield, this); if (op_ret < 0) { ldpp_dout(this, 0) << "ERROR: failed to get obj attrs, obj=" << s->object << " ret=" << op_ret << dendl; @@ -7968,7 +7976,7 @@ void RGWGetObjLegalHold::execute(optional_yield y) try { obj_legal_hold.decode(iter); } catch (const buffer::error& e) { - 
ldout(s->cct, 0) << __func__ << "decode object legal hold config failed" << dendl; + ldpp_dout(this, 0) << __func__ << "decode object legal hold config failed" << dendl; op_ret = -EIO; return; } @@ -7992,7 +8000,7 @@ int RGWGetBucketPolicyStatus::verify_permission(optional_yield y) void RGWGetBucketPolicyStatus::execute(optional_yield y) { - isPublic = (s->iam_policy && rgw::IAM::is_public(*s->iam_policy)) || s->bucket_acl->is_public(); + isPublic = (s->iam_policy && rgw::IAM::is_public(*s->iam_policy)) || s->bucket_acl->is_public(this); } int RGWPutBucketPublicAccessBlock::verify_permission(optional_yield y) @@ -8038,7 +8046,7 @@ void RGWPutBucketPublicAccessBlock::execute(optional_yield y) return; } - op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -8046,10 +8054,10 @@ void RGWPutBucketPublicAccessBlock::execute(optional_yield y) bufferlist bl; access_conf.encode(bl); - op_ret = retry_raced_bucket_write(s->bucket.get(), [this, &bl] { + op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this, &bl] { rgw::sal::RGWAttrs attrs(s->bucket_attrs); attrs[RGW_ATTR_PUBLIC_ACCESS] = bl; - return s->bucket->set_instance_attrs(attrs, s->yield); + return s->bucket->set_instance_attrs(this, attrs, s->yield); }); } @@ -8105,10 +8113,10 @@ int RGWDeleteBucketPublicAccessBlock::verify_permission(optional_yield y) void RGWDeleteBucketPublicAccessBlock::execute(optional_yield y) { - op_ret = retry_raced_bucket_write(s->bucket.get(), [this] { + op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this] { rgw::sal::RGWAttrs attrs(s->bucket_attrs); attrs.erase(RGW_ATTR_PUBLIC_ACCESS); - op_ret = s->bucket->set_instance_attrs(attrs, s->yield); + op_ret = s->bucket->set_instance_attrs(this, attrs, s->yield); return op_ret; }); } diff --git a/src/rgw/rgw_op.h b/src/rgw/rgw_op.h index 33ed580782dff..db73d65cf5420 100644 --- a/src/rgw/rgw_op.h +++ b/src/rgw/rgw_op.h @@ -74,7 +74,8 @@ class StrategyRegistry; } } -int rgw_op_get_bucket_policy_from_attr(CephContext *cct, +int rgw_op_get_bucket_policy_from_attr(const DoutPrefixProvider *dpp, + CephContext *cct, rgw::sal::RGWStore *store, RGWBucketInfo& bucket_info, map& bucket_attrs, @@ -86,7 +87,7 @@ protected: rgw::sal::RGWRadosStore* store{nullptr}; struct req_state *s{nullptr}; - int do_init_permissions(optional_yield y); + int do_init_permissions(const DoutPrefixProvider *dpp, optional_yield y); int do_read_permissions(RGWOp* op, bool only_bucket, optional_yield y); public: @@ -450,7 +451,7 @@ public: void execute(optional_yield y) override; virtual void send_response() override = 0; - virtual int get_params(optional_yield y) = 0; + virtual int get_params(const DoutPrefixProvider *dpp, optional_yield y) = 0; const char* name() const override { return "put_bucket_tags"; } virtual uint32_t op_mask() override { return RGW_OP_TYPE_WRITE; } RGWOpType get_type() override { return RGW_OP_PUT_BUCKET_TAGGING; } @@ -1475,7 +1476,8 @@ public: static bool parse_copy_location(const std::string_view& src, string& bucket_name, - rgw_obj_key& object); + rgw_obj_key& object, + struct req_state *s); void emplace_attr(std::string&& key, buffer::list&& bl) { attrs.emplace(std::move(key), std::move(bl)); @@ -1945,8 +1947,8 @@ public: uint32_t op_mask() override { return RGW_OP_TYPE_READ; } }; -extern int 
rgw_build_bucket_policies(rgw::sal::RGWRadosStore* store, struct req_state* s, optional_yield y); -extern int rgw_build_object_policies(rgw::sal::RGWRadosStore *store, struct req_state *s, +extern int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore* store, struct req_state* s, optional_yield y); +extern int rgw_build_object_policies(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, struct req_state *s, bool prefetch_data, optional_yield y); extern void rgw_build_iam_environment(rgw::sal::RGWRadosStore* store, struct req_state* s); @@ -1969,7 +1971,7 @@ inline int get_system_versioning_params(req_state *s, string err; *olh_epoch = strict_strtol(epoch_str.c_str(), 10, &err); if (!err.empty()) { - lsubdout(s->cct, rgw, 0) << "failed to parse versioned-epoch param" + ldpp_subdout(s, rgw, 0) << "failed to parse versioned-epoch param" << dendl; return -EINVAL; } @@ -2013,7 +2015,8 @@ static inline void format_xattr(std::string &xattr) * On failure returns a negative error code. * */ -inline int rgw_get_request_metadata(CephContext* const cct, +inline int rgw_get_request_metadata(const DoutPrefixProvider *dpp, + CephContext* const cct, struct req_info& info, std::map<std::string, ceph::bufferlist>& attrs, const bool allow_empty_attrs = true) @@ -2031,10 +2034,10 @@ inline int rgw_get_request_metadata(CephContext* const cct, std::string& xattr = kv.second; if (blocklisted_headers.count(name) == 1) { - lsubdout(cct, rgw, 10) << "skipping x>> " << name << dendl; + ldpp_subdout(dpp, rgw, 10) << "skipping x>> " << name << dendl; continue; } else if (allow_empty_attrs || !xattr.empty()) { - lsubdout(cct, rgw, 10) << "x>> " << name << ":" << xattr << dendl; + ldpp_subdout(dpp, rgw, 10) << "x>> " << name << ":" << xattr << dendl; format_xattr(xattr); std::string attr_name(RGW_ATTR_PREFIX); diff --git a/src/rgw/rgw_orphan.cc b/src/rgw/rgw_orphan.cc index 4ab8d2c2eec78..b4f67070f4ec0 100644 --- a/src/rgw/rgw_orphan.cc +++ b/src/rgw/rgw_orphan.cc @@ -146,10 +146,10 @@ int RGWOrphanStore::list_jobs(map & job_list) return 0; } -int RGWOrphanStore::init() +int RGWOrphanStore::init(const DoutPrefixProvider *dpp) { const rgw_pool& log_pool = store->svc()->zone->get_zone_params().log_pool; - int r = rgw_init_ioctx(store->getRados()->get_rados_handle(), log_pool, ioctx); + int r = rgw_init_ioctx(dpp, store->getRados()->get_rados_handle(), log_pool, ioctx); if (r < 0) { cerr << "ERROR: failed to open log pool (" << log_pool << " ret=" << r << std::endl; return r; @@ -158,18 +158,18 @@ int RGWOrphanStore::init() return 0; } -int RGWOrphanStore::store_entries(const string& oid, const map<string, bufferlist>& entries) +int RGWOrphanStore::store_entries(const DoutPrefixProvider *dpp, const string& oid, const map<string, bufferlist>& entries) { librados::ObjectWriteOperation op; op.omap_set(entries); cout << "storing " << entries.size() << " entries at " << oid << std::endl; - ldout(store->ctx(), 20) << "storing " << entries.size() << " entries at " << oid << ": " << dendl; + ldpp_dout(dpp, 20) << "storing " << entries.size() << " entries at " << oid << ": " << dendl; for (map<string, bufferlist>::const_iterator iter = entries.begin(); iter != entries.end(); ++iter) { - ldout(store->ctx(), 20) << " > " << iter->first << dendl; + ldpp_dout(dpp, 20) << " > " << iter->first << dendl; } - int ret = rgw_rados_operate(ioctx, oid, &op, null_yield); + int ret = rgw_rados_operate(dpp, ioctx, oid, &op, null_yield); if (ret < 0) { - lderr(store->ctx()) << "ERROR: " << __func__
<< "(" << oid << ") returned ret=" << ret << dendl; } return 0; @@ -188,9 +188,9 @@ int RGWOrphanStore::read_entries(const string& oid, const string& marker, mapctx()) << "ERROR: failed to read state ret=" << r << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to read state ret=" << r << dendl; return r; } @@ -220,11 +220,11 @@ int RGWOrphanSearch::init(const string& job_name, RGWOrphanSearchInfo *info, boo r = save_state(); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed to write state ret=" << r << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to write state ret=" << r << dendl; return r; } } else { - lderr(store->ctx()) << "ERROR: job not found" << dendl; + ldpp_dout(dpp, -1) << "ERROR: job not found" << dendl; return r; } @@ -246,7 +246,7 @@ int RGWOrphanSearch::init(const string& job_name, RGWOrphanSearchInfo *info, boo return 0; } -int RGWOrphanSearch::log_oids(map& log_shards, map >& oids) +int RGWOrphanSearch::log_oids(const DoutPrefixProvider *dpp, map& log_shards, map >& oids) { map >::iterator miter = oids.begin(); @@ -273,11 +273,11 @@ int RGWOrphanSearch::log_oids(map& log_shards, map entries; #define MAX_OMAP_SET_ENTRIES 100 for (int j = 0; cur != end && j != MAX_OMAP_SET_ENTRIES; ++cur, ++j) { - ldout(store->ctx(), 20) << "adding obj: " << *cur << dendl; + ldpp_dout(dpp, 20) << "adding obj: " << *cur << dendl; entries[*cur] = bufferlist(); } - int ret = orphan_store.store_entries(cur_info.oid, entries); + int ret = orphan_store.store_entries(dpp, cur_info.oid, entries); if (ret < 0) { return ret; } @@ -291,13 +291,13 @@ int RGWOrphanSearch::log_oids(map& log_shards, mapgetRados()->get_rados_handle(), search_info.pool, ioctx); + int ret = rgw_init_ioctx(dpp, store->getRados()->get_rados_handle(), search_info.pool, ioctx); if (ret < 0) { - lderr(store->ctx()) << __func__ << ": rgw_init_ioctx() returned ret=" << ret << dendl; + ldpp_dout(dpp, -1) << __func__ << ": rgw_init_ioctx() returned ret=" << ret << dendl; return ret; } @@ -351,7 +351,7 @@ int RGWOrphanSearch::build_all_oids_index() ++total; if (++count >= COUNT_BEFORE_FLUSH) { ldout(store->ctx(), 1) << "iterated through " << total << " objects" << dendl; - ret = log_oids(all_objs_index, oids); + ret = log_oids(dpp, all_objs_index, oids); if (ret < 0) { cerr << __func__ << ": ERROR: log_oids() returned ret=" << ret << std::endl; return ret; @@ -360,7 +360,7 @@ int RGWOrphanSearch::build_all_oids_index() oids.clear(); } } - ret = log_oids(all_objs_index, oids); + ret = log_oids(dpp, all_objs_index, oids); if (ret < 0) { cerr << __func__ << ": ERROR: log_oids() returned ret=" << ret << std::endl; return ret; @@ -369,14 +369,14 @@ int RGWOrphanSearch::build_all_oids_index() return 0; } -int RGWOrphanSearch::build_buckets_instance_index() +int RGWOrphanSearch::build_buckets_instance_index(const DoutPrefixProvider *dpp) { void *handle; int max = 1000; string section = "bucket.instance"; - int ret = store->ctl()->meta.mgr->list_keys_init(section, &handle); + int ret = store->ctl()->meta.mgr->list_keys_init(dpp, section, &handle); if (ret < 0) { - lderr(store->ctx()) << "ERROR: can't get key: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, -1) << "ERROR: can't get key: " << cpp_strerror(-ret) << dendl; return ret; } @@ -393,20 +393,20 @@ int RGWOrphanSearch::build_buckets_instance_index() list keys; ret = store->ctl()->meta.mgr->list_keys_next(handle, max, keys, &truncated); if (ret < 0) { - lderr(store->ctx()) << "ERROR: lists_keys_next(): " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, -1) << "ERROR: 
lists_keys_next(): " << cpp_strerror(-ret) << dendl; return ret; } for (list::iterator iter = keys.begin(); iter != keys.end(); ++iter) { ++total; - ldout(store->ctx(), 10) << "bucket_instance=" << *iter << " total=" << total << dendl; + ldpp_dout(dpp, 10) << "bucket_instance=" << *iter << " total=" << total << dendl; int shard = orphan_shard(*iter); instances[shard].push_back(*iter); if (++count >= COUNT_BEFORE_FLUSH) { - ret = log_oids(buckets_instance_index, instances); + ret = log_oids(dpp, buckets_instance_index, instances); if (ret < 0) { - lderr(store->ctx()) << __func__ << ": ERROR: log_oids() returned ret=" << ret << dendl; + ldpp_dout(dpp, -1) << __func__ << ": ERROR: log_oids() returned ret=" << ret << dendl; return ret; } count = 0; @@ -416,9 +416,9 @@ int RGWOrphanSearch::build_buckets_instance_index() } while (truncated); - ret = log_oids(buckets_instance_index, instances); + ret = log_oids(dpp, buckets_instance_index, instances); if (ret < 0) { - lderr(store->ctx()) << __func__ << ": ERROR: log_oids() returned ret=" << ret << dendl; + ldpp_dout(dpp, -1) << __func__ << ": ERROR: log_oids() returned ret=" << ret << dendl; return ret; } store->ctl()->meta.mgr->list_keys_complete(handle); @@ -426,7 +426,7 @@ int RGWOrphanSearch::build_buckets_instance_index() return 0; } -int RGWOrphanSearch::handle_stat_result(map >& oids, RGWRados::Object::Stat::Result& result) +int RGWOrphanSearch::handle_stat_result(const DoutPrefixProvider *dpp, map >& oids, RGWRados::Object::Stat::Result& result) { set obj_oids; rgw_bucket& bucket = result.obj.bucket; @@ -444,12 +444,12 @@ int RGWOrphanSearch::handle_stat_result(map >& oids, RGWRados: if (!detailed_mode && manifest.get_obj_size() <= manifest.get_head_size()) { - ldout(store->ctx(), 5) << "skipping object as it fits in a head" << dendl; + ldpp_dout(dpp, 5) << "skipping object as it fits in a head" << dendl; return 0; } RGWObjManifest::obj_iterator miter; - for (miter = manifest.obj_begin(); miter != manifest.obj_end(); ++miter) { + for (miter = manifest.obj_begin(dpp); miter != manifest.obj_end(dpp); ++miter) { const rgw_raw_obj& loc = miter.get_location().get_raw_obj(store); string s = loc.oid; obj_oids.insert(obj_fingerprint(s)); @@ -457,7 +457,7 @@ int RGWOrphanSearch::handle_stat_result(map >& oids, RGWRados: } for (set::iterator iter = obj_oids.begin(); iter != obj_oids.end(); ++iter) { - ldout(store->ctx(), 20) << __func__ << ": oid for obj=" << result.obj << ": " << *iter << dendl; + ldpp_dout(dpp, 20) << __func__ << ": oid for obj=" << result.obj << ": " << *iter << dendl; int shard = orphan_shard(*iter); oids[shard].push_back(*iter); @@ -466,27 +466,27 @@ int RGWOrphanSearch::handle_stat_result(map >& oids, RGWRados: return 0; } -int RGWOrphanSearch::pop_and_handle_stat_op(map >& oids, std::deque& ops) +int RGWOrphanSearch::pop_and_handle_stat_op(const DoutPrefixProvider *dpp, map >& oids, std::deque& ops) { RGWRados::Object::Stat& front_op = ops.front(); int ret = front_op.wait(); if (ret < 0) { if (ret != -ENOENT) { - lderr(store->ctx()) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, -1) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret) << dendl; } goto done; } - ret = handle_stat_result(oids, front_op.result); + ret = handle_stat_result(dpp, oids, front_op.result); if (ret < 0) { - lderr(store->ctx()) << "ERROR: handle_stat_response() returned error: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, -1) << "ERROR: handle_stat_response() returned error: " << 
cpp_strerror(-ret) << dendl; } done: ops.pop_front(); return ret; } -int RGWOrphanSearch::build_linked_oids_for_bucket(const string& bucket_instance_id, map >& oids) +int RGWOrphanSearch::build_linked_oids_for_bucket(const DoutPrefixProvider *dpp, const string& bucket_instance_id, map >& oids) { RGWObjectCtx obj_ctx(store); auto sysobj_ctx = store->svc()->sysobj->init_obj_ctx(); @@ -496,49 +496,49 @@ int RGWOrphanSearch::build_linked_oids_for_bucket(const string& bucket_instance_ int ret = rgw_bucket_parse_bucket_key(store->ctx(), bucket_instance_id, &orphan_bucket, &shard_id); if (ret < 0) { - ldout(store->ctx(),0) << __func__ << " failed to parse bucket instance: " + ldpp_dout(dpp, 0) << __func__ << " failed to parse bucket instance: " << bucket_instance_id << " skipping" << dendl; return ret; } RGWBucketInfo cur_bucket_info; ret = store->getRados()->get_bucket_info(store->svc(), orphan_bucket.tenant, - orphan_bucket.name, cur_bucket_info, nullptr, null_yield); + orphan_bucket.name, cur_bucket_info, nullptr, null_yield, dpp); if (ret < 0) { if (ret == -ENOENT) { /* probably raced with bucket removal */ return 0; } - lderr(store->ctx()) << __func__ << ": ERROR: RGWRados::get_bucket_instance_info() returned ret=" << ret << dendl; + ldpp_dout(dpp, -1) << __func__ << ": ERROR: RGWRados::get_bucket_instance_info() returned ret=" << ret << dendl; return ret; } if (cur_bucket_info.bucket.bucket_id != orphan_bucket.bucket_id) { - ldout(store->ctx(), 0) << __func__ << ": Skipping stale bucket instance: " + ldpp_dout(dpp, 0) << __func__ << ": Skipping stale bucket instance: " << orphan_bucket.name << ": " << orphan_bucket.bucket_id << dendl; return 0; } if (cur_bucket_info.reshard_status == cls_rgw_reshard_status::IN_PROGRESS) { - ldout(store->ctx(), 0) << __func__ << ": reshard in progress. Skipping " + ldpp_dout(dpp, 0) << __func__ << ": reshard in progress. 
Skipping " << orphan_bucket.name << ": " << orphan_bucket.bucket_id << dendl; return 0; } RGWBucketInfo bucket_info; - ret = store->getRados()->get_bucket_instance_info(sysobj_ctx, bucket_instance_id, bucket_info, nullptr, nullptr, null_yield); + ret = store->getRados()->get_bucket_instance_info(sysobj_ctx, bucket_instance_id, bucket_info, nullptr, nullptr, null_yield, dpp); if (ret < 0) { if (ret == -ENOENT) { /* probably raced with bucket removal */ return 0; } - lderr(store->ctx()) << __func__ << ": ERROR: RGWRados::get_bucket_instance_info() returned ret=" << ret << dendl; + ldpp_dout(dpp, -1) << __func__ << ": ERROR: RGWRados::get_bucket_instance_info() returned ret=" << ret << dendl; return ret; } - ldout(store->ctx(), 10) << "building linked oids for bucket instance: " << bucket_instance_id << dendl; + ldpp_dout(dpp, 10) << "building linked oids for bucket instance: " << bucket_instance_id << dendl; RGWRados::Bucket target(store->getRados(), bucket_info); RGWRados::Bucket::List list_op(&target); @@ -554,7 +554,7 @@ int RGWOrphanSearch::build_linked_oids_for_bucket(const string& bucket_instance_ do { vector result; - ret = list_op.list_objects(max_list_bucket_entries, + ret = list_op.list_objects(dpp, max_list_bucket_entries, &result, nullptr, &truncated, null_yield); if (ret < 0) { cerr << "ERROR: store->list_objects(): " << cpp_strerror(-ret) << std::endl; @@ -564,16 +564,16 @@ int RGWOrphanSearch::build_linked_oids_for_bucket(const string& bucket_instance_ for (vector::iterator iter = result.begin(); iter != result.end(); ++iter) { rgw_bucket_dir_entry& entry = *iter; if (entry.key.instance.empty()) { - ldout(store->ctx(), 20) << "obj entry: " << entry.key.name << dendl; + ldpp_dout(dpp, 20) << "obj entry: " << entry.key.name << dendl; } else { - ldout(store->ctx(), 20) << "obj entry: " << entry.key.name << " [" << entry.key.instance << "]" << dendl; + ldpp_dout(dpp, 20) << "obj entry: " << entry.key.name << " [" << entry.key.instance << "]" << dendl; } - ldout(store->ctx(), 20) << __func__ << ": entry.key.name=" << entry.key.name << " entry.key.instance=" << entry.key.instance << dendl; + ldpp_dout(dpp, 20) << __func__ << ": entry.key.name=" << entry.key.name << " entry.key.instance=" << entry.key.instance << dendl; if (!detailed_mode && entry.meta.accounted_size <= (uint64_t)store->ctx()->_conf->rgw_max_chunk_size) { - ldout(store->ctx(),5) << __func__ << "skipping stat as the object " << entry.key.name + ldpp_dout(dpp, 5) << __func__ << "skipping stat as the object " << entry.key.name << "fits in a head" << dendl; continue; } @@ -586,21 +586,21 @@ int RGWOrphanSearch::build_linked_oids_for_bucket(const string& bucket_instance_ RGWRados::Object::Stat& op = stat_ops.back(); - ret = op.stat_async(); + ret = op.stat_async(dpp); if (ret < 0) { - lderr(store->ctx()) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, -1) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret) << dendl; return ret; } if (stat_ops.size() >= max_concurrent_ios) { - ret = pop_and_handle_stat_op(oids, stat_ops); + ret = pop_and_handle_stat_op(dpp, oids, stat_ops); if (ret < 0) { if (ret != -ENOENT) { - lderr(store->ctx()) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, -1) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret) << dendl; } } } if (oids.size() >= COUNT_BEFORE_FLUSH) { - ret = log_oids(linked_objs_index, oids); + ret = log_oids(dpp, linked_objs_index, oids); if (ret < 0) { cerr << __func__ << 
": ERROR: log_oids() returned ret=" << ret << std::endl; return ret; @@ -611,10 +611,10 @@ int RGWOrphanSearch::build_linked_oids_for_bucket(const string& bucket_instance_ } while (truncated); while (!stat_ops.empty()) { - ret = pop_and_handle_stat_op(oids, stat_ops); + ret = pop_and_handle_stat_op(dpp, oids, stat_ops); if (ret < 0) { if (ret != -ENOENT) { - lderr(store->ctx()) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, -1) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret) << dendl; } } } @@ -622,12 +622,12 @@ int RGWOrphanSearch::build_linked_oids_for_bucket(const string& bucket_instance_ return 0; } -int RGWOrphanSearch::build_linked_oids_index() +int RGWOrphanSearch::build_linked_oids_index(const DoutPrefixProvider *dpp) { map > oids; map::iterator iter = buckets_instance_index.find(search_stage.shard); for (; iter != buckets_instance_index.end(); ++iter) { - ldout(store->ctx(), 0) << "building linked oids index: " << iter->first << "/" << buckets_instance_index.size() << dendl; + ldpp_dout(dpp, 0) << "building linked oids index: " << iter->first << "/" << buckets_instance_index.size() << dendl; bool truncated; string oid = iter->second; @@ -641,7 +641,7 @@ int RGWOrphanSearch::build_linked_oids_index() } if (ret < 0) { - lderr(store->ctx()) << __func__ << ": ERROR: read_entries() oid=" << oid << " returned ret=" << ret << dendl; + ldpp_dout(dpp, -1) << __func__ << ": ERROR: read_entries() oid=" << oid << " returned ret=" << ret << dendl; return ret; } @@ -650,10 +650,10 @@ int RGWOrphanSearch::build_linked_oids_index() } for (map::iterator eiter = entries.begin(); eiter != entries.end(); ++eiter) { - ldout(store->ctx(), 20) << " indexed entry: " << eiter->first << dendl; - ret = build_linked_oids_for_bucket(eiter->first, oids); + ldpp_dout(dpp, 20) << " indexed entry: " << eiter->first << dendl; + ret = build_linked_oids_for_bucket(dpp, eiter->first, oids); if (ret < 0) { - lderr(store->ctx()) << __func__ << ": ERROR: build_linked_oids_for_bucket() indexed entry=" << eiter->first + ldpp_dout(dpp, -1) << __func__ << ": ERROR: build_linked_oids_for_bucket() indexed entry=" << eiter->first << " returned ret=" << ret << dendl; return ret; } @@ -666,7 +666,7 @@ int RGWOrphanSearch::build_linked_oids_index() search_stage.marker.clear(); } - int ret = log_oids(linked_objs_index, oids); + int ret = log_oids(dpp, linked_objs_index, oids); if (ret < 0) { cerr << __func__ << ": ERROR: log_oids() returned ret=" << ret << std::endl; return ret; @@ -731,7 +731,7 @@ int OMAPReader::get_next(string *key, bufferlist *pbl, bool *done) return get_next(key, pbl, done); } -int RGWOrphanSearch::compare_oid_indexes() +int RGWOrphanSearch::compare_oid_indexes(const DoutPrefixProvider *dpp) { ceph_assert(linked_objs_index.size() == all_objs_index.size()); @@ -739,9 +739,9 @@ int RGWOrphanSearch::compare_oid_indexes() librados::IoCtx data_ioctx; - int ret = rgw_init_ioctx(store->getRados()->get_rados_handle(), search_info.pool, data_ioctx); + int ret = rgw_init_ioctx(dpp, store->getRados()->get_rados_handle(), search_info.pool, data_ioctx); if (ret < 0) { - lderr(store->ctx()) << __func__ << ": rgw_init_ioctx() returned ret=" << ret << dendl; + ldpp_dout(dpp, -1) << __func__ << ": rgw_init_ioctx() returned ret=" << ret << dendl; return ret; } @@ -780,7 +780,7 @@ int RGWOrphanSearch::compare_oid_indexes() } if (cur_linked == key_fp) { - ldout(store->ctx(), 20) << "linked: " << key << dendl; + ldpp_dout(dpp, 20) << "linked: " << key << dendl; continue; 
} @@ -788,15 +788,15 @@ int RGWOrphanSearch::compare_oid_indexes() r = data_ioctx.stat(key, NULL, &mtime); if (r < 0) { if (r != -ENOENT) { - lderr(store->ctx()) << "ERROR: ioctx.stat(" << key << ") returned ret=" << r << dendl; + ldpp_dout(dpp, -1) << "ERROR: ioctx.stat(" << key << ") returned ret=" << r << dendl; } continue; } if (stale_secs && (uint64_t)mtime >= time_threshold) { - ldout(store->ctx(), 20) << "skipping: " << key << " (mtime=" << mtime << " threshold=" << time_threshold << ")" << dendl; + ldpp_dout(dpp, 20) << "skipping: " << key << " (mtime=" << mtime << " threshold=" << time_threshold << ")" << dendl; continue; } - ldout(store->ctx(), 20) << "leaked: " << key << dendl; + ldpp_dout(dpp, 20) << "leaked: " << key << dendl; cout << "leaked: " << key << std::endl; } while (!done); } @@ -804,74 +804,74 @@ int RGWOrphanSearch::compare_oid_indexes() return 0; } -int RGWOrphanSearch::run() +int RGWOrphanSearch::run(const DoutPrefixProvider *dpp) { int r; switch (search_stage.stage) { case ORPHAN_SEARCH_STAGE_INIT: - ldout(store->ctx(), 0) << __func__ << "(): initializing state" << dendl; + ldpp_dout(dpp, 0) << __func__ << "(): initializing state" << dendl; search_stage = RGWOrphanSearchStage(ORPHAN_SEARCH_STAGE_LSPOOL); r = save_state(); if (r < 0) { - lderr(store->ctx()) << __func__ << ": ERROR: failed to save state, ret=" << r << dendl; + ldpp_dout(dpp, -1) << __func__ << ": ERROR: failed to save state, ret=" << r << dendl; return r; } // fall through case ORPHAN_SEARCH_STAGE_LSPOOL: - ldout(store->ctx(), 0) << __func__ << "(): building index of all objects in pool" << dendl; - r = build_all_oids_index(); + ldpp_dout(dpp, 0) << __func__ << "(): building index of all objects in pool" << dendl; + r = build_all_oids_index(dpp); if (r < 0) { - lderr(store->ctx()) << __func__ << ": ERROR: build_all_objs_index returned ret=" << r << dendl; + ldpp_dout(dpp, -1) << __func__ << ": ERROR: build_all_objs_index returned ret=" << r << dendl; return r; } search_stage = RGWOrphanSearchStage(ORPHAN_SEARCH_STAGE_LSBUCKETS); r = save_state(); if (r < 0) { - lderr(store->ctx()) << __func__ << ": ERROR: failed to save state, ret=" << r << dendl; + ldpp_dout(dpp, -1) << __func__ << ": ERROR: failed to save state, ret=" << r << dendl; return r; } // fall through case ORPHAN_SEARCH_STAGE_LSBUCKETS: - ldout(store->ctx(), 0) << __func__ << "(): building index of all bucket indexes" << dendl; - r = build_buckets_instance_index(); + ldpp_dout(dpp, 0) << __func__ << "(): building index of all bucket indexes" << dendl; + r = build_buckets_instance_index(dpp); if (r < 0) { - lderr(store->ctx()) << __func__ << ": ERROR: build_all_objs_index returned ret=" << r << dendl; + ldpp_dout(dpp, -1) << __func__ << ": ERROR: build_all_objs_index returned ret=" << r << dendl; return r; } search_stage = RGWOrphanSearchStage(ORPHAN_SEARCH_STAGE_ITERATE_BI); r = save_state(); if (r < 0) { - lderr(store->ctx()) << __func__ << ": ERROR: failed to save state, ret=" << r << dendl; + ldpp_dout(dpp, -1) << __func__ << ": ERROR: failed to save state, ret=" << r << dendl; return r; } // fall through case ORPHAN_SEARCH_STAGE_ITERATE_BI: - ldout(store->ctx(), 0) << __func__ << "(): building index of all linked objects" << dendl; - r = build_linked_oids_index(); + ldpp_dout(dpp, 0) << __func__ << "(): building index of all linked objects" << dendl; + r = build_linked_oids_index(dpp); if (r < 0) { - lderr(store->ctx()) << __func__ << ": ERROR: build_all_objs_index returned ret=" << r << dendl; + ldpp_dout(dpp, -1) << __func__ << ": 
ERROR: build_all_objs_index returned ret=" << r << dendl; return r; } search_stage = RGWOrphanSearchStage(ORPHAN_SEARCH_STAGE_COMPARE); r = save_state(); if (r < 0) { - lderr(store->ctx()) << __func__ << ": ERROR: failed to save state, ret=" << r << dendl; + ldpp_dout(dpp, -1) << __func__ << ": ERROR: failed to save state, ret=" << r << dendl; return r; } // fall through case ORPHAN_SEARCH_STAGE_COMPARE: - r = compare_oid_indexes(); + r = compare_oid_indexes(dpp); if (r < 0) { - lderr(store->ctx()) << __func__ << ": ERROR: build_all_objs_index returned ret=" << r << dendl; + ldpp_dout(dpp, -1) << __func__ << ": ERROR: build_all_objs_index returned ret=" << r << dendl; return r; } @@ -924,7 +924,8 @@ int RGWOrphanSearch::finish() } -int RGWRadosList::handle_stat_result(RGWRados::Object::Stat::Result& result, +int RGWRadosList::handle_stat_result(const DoutPrefixProvider *dpp, + RGWRados::Object::Stat::Result& result, std::string& bucket_name, rgw_obj_key& obj_key, std::set& obj_oids) @@ -933,7 +934,7 @@ int RGWRadosList::handle_stat_result(RGWRados::Object::Stat::Result& result, rgw_bucket& bucket = result.obj.bucket; - ldout(store->ctx(), 20) << "RGWRadosList::" << __func__ << + ldpp_dout(dpp, 20) << "RGWRadosList::" << __func__ << " bucket=" << bucket << ", has_manifest=" << result.manifest.has_value() << dendl; @@ -941,11 +942,11 @@ int RGWRadosList::handle_stat_result(RGWRados::Object::Stat::Result& result, // iterator to store result of dlo/slo attribute find decltype(result.attrs)::iterator attr_it = result.attrs.end(); const std::string oid = bucket.marker + "_" + result.obj.get_oid(); - ldout(store->ctx(), 20) << "radoslist processing object=\"" << + ldpp_dout(dpp, 20) << "radoslist processing object=\"" << oid << "\"" << dendl; if (visited_oids.find(oid) != visited_oids.end()) { // apparently we hit a loop; don't continue with this oid - ldout(store->ctx(), 15) << + ldpp_dout(dpp, 15) << "radoslist stopped loop at already visited object=\"" << oid << "\"" << dendl; return 0; @@ -969,7 +970,7 @@ int RGWRadosList::handle_stat_result(RGWRados::Object::Stat::Result& result, obj_oids.insert(oid); visited_oids.insert(oid); // prevent dlo loops - ldout(store->ctx(), 15) << "radoslist added to visited list DLO=\"" << + ldpp_dout(dpp, 15) << "radoslist added to visited list DLO=\"" << oid << "\"" << dendl; char* prefix_path_c = attr_it->second.c_str(); @@ -984,7 +985,7 @@ int RGWRadosList::handle_stat_result(RGWRados::Object::Stat::Result& result, const std::string prefix = prefix_path.substr(sep_pos + 1); add_bucket_prefix(bucket_name, prefix); - ldout(store->ctx(), 25) << "radoslist DLO oid=\"" << oid << + ldpp_dout(dpp, 25) << "radoslist DLO oid=\"" << oid << "\" added bucket=\"" << bucket_name << "\" prefix=\"" << prefix << "\" to process list" << dendl; } else if ((attr_it = result.attrs.find(RGW_ATTR_SLO_MANIFEST)) != @@ -993,7 +994,7 @@ int RGWRadosList::handle_stat_result(RGWRados::Object::Stat::Result& result, obj_oids.insert(oid); visited_oids.insert(oid); // prevent slo loops - ldout(store->ctx(), 15) << "radoslist added to visited list SLO=\"" << + ldpp_dout(dpp, 15) << "radoslist added to visited list SLO=\"" << oid << "\"" << dendl; RGWSLOInfo slo_info; @@ -1001,7 +1002,7 @@ int RGWRadosList::handle_stat_result(RGWRados::Object::Stat::Result& result, try { ::decode(slo_info, bliter); } catch (buffer::error& err) { - ldout(store->ctx(), 0) << + ldpp_dout(dpp, 0) << "ERROR: failed to decode slo manifest for " << oid << dendl; return -EIO; } @@ -1022,7 +1023,7 @@ int 
RGWRadosList::handle_stat_result(RGWRados::Object::Stat::Result& result, const rgw_obj_key obj_key(obj_name); add_bucket_filter(bucket_name, obj_key); - ldout(store->ctx(), 25) << "radoslist SLO oid=\"" << oid << + ldpp_dout(dpp, 25) << "radoslist SLO oid=\"" << oid << "\" added bucket=\"" << bucket_name << "\" obj_key=\"" << obj_key << "\" to process list" << dendl; } @@ -1033,13 +1034,13 @@ int RGWRadosList::handle_stat_result(RGWRados::Object::Stat::Result& result, // manifest AND empty objects have no manifest, but they're // realized as empty rados objects if (0 == manifest.get_max_head_size() || - manifest.obj_begin() == manifest.obj_end()) { + manifest.obj_begin(dpp) == manifest.obj_end(dpp)) { obj_oids.insert(oid); // first_insert = true; } RGWObjManifest::obj_iterator miter; - for (miter = manifest.obj_begin(); miter != manifest.obj_end(); ++miter) { + for (miter = manifest.obj_begin(dpp); miter != manifest.obj_end(dpp); ++miter) { const rgw_raw_obj& loc = miter.get_location().get_raw_obj(store); string s = loc.oid; @@ -1051,6 +1052,7 @@ int RGWRadosList::handle_stat_result(RGWRados::Object::Stat::Result& result, } // RGWRadosList::handle_stat_result int RGWRadosList::pop_and_handle_stat_op( + const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx, std::deque& ops) { @@ -1062,15 +1064,15 @@ int RGWRadosList::pop_and_handle_stat_op( int ret = front_op.wait(); if (ret < 0) { if (ret != -ENOENT) { - lderr(store->ctx()) << "ERROR: stat_async() returned error: " << + ldpp_dout(dpp, -1) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret) << dendl; } goto done; } - ret = handle_stat_result(front_op.result, bucket_name, obj_key, obj_oids); + ret = handle_stat_result(dpp, front_op.result, bucket_name, obj_key, obj_oids); if (ret < 0) { - lderr(store->ctx()) << "ERROR: handle_stat_result() returned error: " << + ldpp_dout(dpp, -1) << "ERROR: handle_stat_result() returned error: " << cpp_strerror(-ret) << dendl; } @@ -1157,11 +1159,12 @@ int RGWRadosList::build_buckets_instance_index() int RGWRadosList::process_bucket( + const DoutPrefixProvider *dpp, const std::string& bucket_instance_id, const std::string& prefix, const std::set& entries_filter) { - ldout(store->ctx(), 10) << "RGWRadosList::" << __func__ << + ldpp_dout(dpp, 10) << "RGWRadosList::" << __func__ << " bucket_instance_id=" << bucket_instance_id << ", prefix=" << prefix << ", entries_filter.size=" << entries_filter.size() << dendl; @@ -1173,13 +1176,14 @@ int RGWRadosList::process_bucket( bucket_info, nullptr, nullptr, - null_yield); + null_yield, + dpp); if (ret < 0) { if (ret == -ENOENT) { // probably raced with bucket removal return 0; } - lderr(store->ctx()) << __func__ << + ldpp_dout(dpp, -1) << __func__ << ": ERROR: RGWRados::get_bucket_instance_info() returned ret=" << ret << dendl; return ret; @@ -1206,7 +1210,7 @@ int RGWRadosList::process_bucket( std::vector result; constexpr int64_t LIST_OBJS_MAX_ENTRIES = 100; - ret = list_op.list_objects(LIST_OBJS_MAX_ENTRIES, &result, + ret = list_op.list_objects(dpp, LIST_OBJS_MAX_ENTRIES, &result, NULL, &truncated, null_yield); if (ret == -ENOENT) { // race with bucket delete? 
@@ -1224,13 +1228,13 @@ int RGWRadosList::process_bucket( rgw_bucket_dir_entry& entry = *iter; if (entry.key.instance.empty()) { - ldout(store->ctx(), 20) << "obj entry: " << entry.key.name << dendl; + ldpp_dout(dpp, 20) << "obj entry: " << entry.key.name << dendl; } else { - ldout(store->ctx(), 20) << "obj entry: " << entry.key.name << + ldpp_dout(dpp, 20) << "obj entry: " << entry.key.name << " [" << entry.key.instance << "]" << dendl; } - ldout(store->ctx(), 20) << __func__ << ": entry.key.name=" << + ldpp_dout(dpp, 20) << __func__ << ": entry.key.name=" << entry.key.name << " entry.key.instance=" << entry.key.instance << dendl; @@ -1253,18 +1257,18 @@ int RGWRadosList::process_bucket( stat_ops.push_back(RGWRados::Object::Stat(&op_target)); RGWRados::Object::Stat& op = stat_ops.back(); - ret = op.stat_async(); + ret = op.stat_async(dpp); if (ret < 0) { - lderr(store->ctx()) << "ERROR: stat_async() returned error: " << + ldpp_dout(dpp, -1) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret) << dendl; return ret; } if (stat_ops.size() >= max_concurrent_ios) { - ret = pop_and_handle_stat_op(obj_ctx, stat_ops); + ret = pop_and_handle_stat_op(dpp, obj_ctx, stat_ops); if (ret < 0) { if (ret != -ENOENT) { - lderr(store->ctx()) << + ldpp_dout(dpp, -1) << "ERROR: pop_and_handle_stat_op() returned error: " << cpp_strerror(-ret) << dendl; } @@ -1303,10 +1307,10 @@ int RGWRadosList::process_bucket( } while (truncated); while (!stat_ops.empty()) { - ret = pop_and_handle_stat_op(obj_ctx, stat_ops); + ret = pop_and_handle_stat_op(dpp, obj_ctx, stat_ops); if (ret < 0) { if (ret != -ENOENT) { - lderr(store->ctx()) << "ERROR: stat_async() returned error: " << + ldpp_dout(dpp, -1) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret) << dendl; } } @@ -1316,14 +1320,14 @@ int RGWRadosList::process_bucket( } -int RGWRadosList::run() +int RGWRadosList::run(const DoutPrefixProvider *dpp) { int ret; void* handle = nullptr; - ret = store->ctl()->meta.mgr->list_keys_init("bucket", &handle); + ret = store->ctl()->meta.mgr->list_keys_init(dpp, "bucket", &handle); if (ret < 0) { - lderr(store->ctx()) << "RGWRadosList::" << __func__ << + ldpp_dout(dpp, -1) << "RGWRadosList::" << __func__ << " ERROR: list_keys_init returned " << cpp_strerror(-ret) << dendl; return ret; @@ -1338,7 +1342,7 @@ int RGWRadosList::run() buckets, &truncated); for (std::string& bucket_id : buckets) { - ret = run(bucket_id); + ret = run(dpp, bucket_id); if (ret == -ENOENT) { continue; } else if (ret < 0) { @@ -1351,7 +1355,7 @@ int RGWRadosList::run() } // RGWRadosList::run() -int RGWRadosList::run(const std::string& start_bucket_name) +int RGWRadosList::run(const DoutPrefixProvider *dpp, const std::string& start_bucket_name) { RGWSysObjectCtx sys_obj_ctx = store->svc()->sysobj->init_obj_ctx(); RGWObjectCtx obj_ctx(store); @@ -1374,7 +1378,8 @@ int RGWRadosList::run(const std::string& start_bucket_name) bucket_name, bucket_info, nullptr, - null_yield); + null_yield, + dpp); if (ret == -ENOENT) { std::cerr << "WARNING: bucket " << bucket_name << " does not exist; could it have been deleted very recently?" 
<< @@ -1392,15 +1397,15 @@ int RGWRadosList::run(const std::string& start_bucket_name) static const std::string empty_prefix; auto do_process_bucket = - [&bucket_id, this] + [dpp, &bucket_id, this] (const std::string& prefix, const std::set& entries_filter) -> int { - int ret = process_bucket(bucket_id, prefix, entries_filter); + int ret = process_bucket(dpp, bucket_id, prefix, entries_filter); if (ret == -ENOENT) { // bucket deletion race? return 0; } if (ret < 0) { - lderr(store->ctx()) << "RGWRadosList::" << __func__ << + ldpp_dout(dpp, -1) << "RGWRadosList::" << __func__ << ": ERROR: process_bucket(); bucket_id=" << bucket_id << " returned ret=" << ret << dendl; } @@ -1443,19 +1448,20 @@ int RGWRadosList::run(const std::string& start_bucket_name) start_bucket_name, bucket_info, nullptr, - null_yield); + null_yield, + dpp); if (ret == -ENOENT) { // bucket deletion race? return 0; } else if (ret < 0) { - lderr(store->ctx()) << "RGWRadosList::" << __func__ << + ldpp_dout(dpp, -1) << "RGWRadosList::" << __func__ << ": ERROR: get_bucket_info returned ret=" << ret << dendl; return ret; } - ret = do_incomplete_multipart(store, bucket_info); + ret = do_incomplete_multipart(dpp, store, bucket_info); if (ret < 0) { - lderr(store->ctx()) << "RGWRadosList::" << __func__ << + ldpp_dout(dpp, -1) << "RGWRadosList::" << __func__ << ": ERROR: do_incomplete_multipart returned ret=" << ret << dendl; return ret; } @@ -1467,6 +1473,7 @@ done: int RGWRadosList::do_incomplete_multipart( + const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore* store, RGWBucketInfo& bucket_info) { @@ -1489,16 +1496,16 @@ int RGWRadosList::do_incomplete_multipart( do { std::vector objs; std::map common_prefixes; - ret = list_op.list_objects(max_uploads, &objs, &common_prefixes, + ret = list_op.list_objects(dpp, max_uploads, &objs, &common_prefixes, &is_listing_truncated, null_yield); if (ret == -ENOENT) { // could bucket have been removed while this is running? 
- ldout(store->ctx(), 20) << "RGWRadosList::" << __func__ << + ldpp_dout(dpp, 20) << "RGWRadosList::" << __func__ << ": WARNING: call to list_objects of multipart namespace got ENOENT; " "assuming bucket removal race" << dendl; break; } else if (ret < 0) { - lderr(store->ctx()) << "RGWRadosList::" << __func__ << + ldpp_dout(dpp, -1) << "RGWRadosList::" << __func__ << ": ERROR: list_objects op returned ret=" << ret << dendl; return ret; } @@ -1514,7 +1521,7 @@ int RGWRadosList::do_incomplete_multipart( } entry.obj = obj; uploads.push_back(entry); - ldout(store->ctx(), 20) << "RGWRadosList::" << __func__ << + ldpp_dout(dpp, 20) << "RGWRadosList::" << __func__ << " processing incomplete multipart entry " << entry << dendl; } @@ -1527,22 +1534,22 @@ int RGWRadosList::do_incomplete_multipart( for (const auto& upload : uploads) { const RGWMPObj& mp = upload.mp; - ret = list_multipart_parts(store, bucket_info, store->ctx(), + ret = list_multipart_parts(dpp, store, bucket_info, store->ctx(), mp.get_upload_id(), mp.get_meta(), max_parts, parts_marker, parts, NULL, &is_parts_truncated); if (ret == -ENOENT) { continue; } else if (ret < 0) { - lderr(store->ctx()) << "RGWRadosList::" << __func__ << + ldpp_dout(dpp, -1) << "RGWRadosList::" << __func__ << ": ERROR: list_multipart_parts returned ret=" << ret << dendl; return ret; } for (auto& p : parts) { RGWObjManifest& manifest = p.second.manifest; - for (auto obj_it = manifest.obj_begin(); - obj_it != manifest.obj_end(); + for (auto obj_it = manifest.obj_begin(dpp); + obj_it != manifest.obj_end(dpp); ++obj_it) { const rgw_raw_obj& loc = obj_it.get_location().get_raw_obj(store); diff --git a/src/rgw/rgw_orphan.h b/src/rgw/rgw_orphan.h index 2339ae0448125..34680d25a467a 100644 --- a/src/rgw/rgw_orphan.h +++ b/src/rgw/rgw_orphan.h @@ -133,7 +133,7 @@ public: librados::IoCtx& get_ioctx() { return ioctx; } - int init(); + int init(const DoutPrefixProvider *dpp); int read_job(const string& job_name, RGWOrphanSearchState& state); int write_job(const string& job_name, const RGWOrphanSearchState& state); @@ -141,7 +141,7 @@ public: int list_jobs(map &job_list); - int store_entries(const string& oid, const map& entries); + int store_entries(const DoutPrefixProvider *dpp, const string& oid, const map& entries); int read_entries(const string& oid, const string& marker, map *entries, bool *truncated); }; @@ -172,15 +172,15 @@ class RGWOrphanSearch { list::iterator end; }; - int log_oids(map& log_shards, map >& oids); + int log_oids(const DoutPrefixProvider *dpp, map& log_shards, map >& oids); #define RGW_ORPHANSEARCH_HASH_PRIME 7877 int orphan_shard(const string& str) { return ceph_str_hash_linux(str.c_str(), str.size()) % RGW_ORPHANSEARCH_HASH_PRIME % search_info.num_shards; } - int handle_stat_result(map >& oids, RGWRados::Object::Stat::Result& result); - int pop_and_handle_stat_op(map >& oids, std::deque& ops); + int handle_stat_result(const DoutPrefixProvider *dpp, map >& oids, RGWRados::Object::Stat::Result& result); + int pop_and_handle_stat_op(const DoutPrefixProvider *dpp, map >& oids, std::deque& ops); int remove_index(map& index); @@ -194,17 +194,17 @@ public: return orphan_store.write_job(search_info.job_name, state); } - int init(const string& job_name, RGWOrphanSearchInfo *info, bool _detailed_mode=false); + int init(const DoutPrefixProvider *dpp, const string& job_name, RGWOrphanSearchInfo *info, bool _detailed_mode=false); int create(const string& job_name, int num_shards); - int build_all_oids_index(); - int build_buckets_instance_index(); - int 
build_linked_oids_for_bucket(const string& bucket_instance_id, map >& oids); - int build_linked_oids_index(); - int compare_oid_indexes(); + int build_all_oids_index(const DoutPrefixProvider *dpp); + int build_buckets_instance_index(const DoutPrefixProvider *dpp); + int build_linked_oids_for_bucket(const DoutPrefixProvider *dpp, const string& bucket_instance_id, map >& oids); + int build_linked_oids_index(const DoutPrefixProvider *dpp); + int compare_oid_indexes(const DoutPrefixProvider *dpp); - int run(); + int run(const DoutPrefixProvider *dpp); int finish(); }; @@ -260,11 +260,13 @@ class RGWRadosList { bool include_rgw_obj_name; std::string field_separator; - int handle_stat_result(RGWRados::Object::Stat::Result& result, + int handle_stat_result(const DoutPrefixProvider *dpp, + RGWRados::Object::Stat::Result& result, std::string& bucket_name, rgw_obj_key& obj_key, std::set& obj_oids); - int pop_and_handle_stat_op(RGWObjectCtx& obj_ctx, + int pop_and_handle_stat_op(const DoutPrefixProvider *dpp, + RGWObjectCtx& obj_ctx, std::deque& ops); public: @@ -280,17 +282,19 @@ public: include_rgw_obj_name(false) {} - int process_bucket(const std::string& bucket_instance_id, + int process_bucket(const DoutPrefixProvider *dpp, + const std::string& bucket_instance_id, const std::string& prefix, const std::set& entries_filter); - int do_incomplete_multipart(rgw::sal::RGWRadosStore* store, + int do_incomplete_multipart(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore* store, RGWBucketInfo& bucket_info); int build_linked_oids_index(); - int run(const std::string& bucket_id); - int run(); + int run(const DoutPrefixProvider *dpp, const std::string& bucket_id); + int run(const DoutPrefixProvider *dpp); // if there's a non-empty field separator, that means we'll display // bucket and object names diff --git a/src/rgw/rgw_os_lib.cc b/src/rgw/rgw_os_lib.cc index d9b58f24319dd..5742a9c6f555b 100644 --- a/src/rgw/rgw_os_lib.cc +++ b/src/rgw/rgw_os_lib.cc @@ -29,7 +29,7 @@ namespace rgw { } s->info.args.set(p); - s->info.args.parse(); + s->info.args.parse(s); if (*req_name != '/') return 0; diff --git a/src/rgw/rgw_otp.cc b/src/rgw/rgw_otp.cc index 30d825cb3ca3b..07cc14f113b5f 100644 --- a/src/rgw/rgw_otp.cc +++ b/src/rgw/rgw_otp.cc @@ -91,7 +91,7 @@ class RGWOTPMetadataHandler : public RGWOTPMetadataHandlerBase { return new RGWOTPMetadataObject(std::move(devices), objv, mtime); } - int do_get(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWMetadataObject **obj, optional_yield y) override { + int do_get(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWMetadataObject **obj, optional_yield y, const DoutPrefixProvider *dpp) override { RGWObjVersionTracker objv_tracker; std::unique_ptr mdo(new RGWOTPMetadataObject); @@ -104,7 +104,8 @@ class RGWOTPMetadataHandler : public RGWOTPMetadataHandlerBase { &mdo->get_devs(), &mdo->get_mtime(), &objv_tracker, - y); + y, + dpp); if (ret < 0) { return ret; } @@ -119,12 +120,13 @@ class RGWOTPMetadataHandler : public RGWOTPMetadataHandlerBase { int do_put(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWMetadataObject *_obj, RGWObjVersionTracker& objv_tracker, optional_yield y, + const DoutPrefixProvider *dpp, RGWMDLogSyncType type, bool from_remote_zone) override { RGWOTPMetadataObject *obj = static_cast(_obj); RGWSI_OTP_BE_Ctx be_ctx(op->ctx()); - int ret = svc.otp->store_all(be_ctx, + int ret = svc.otp->store_all(dpp, be_ctx, entry, obj->devices, obj->mtime, @@ -138,12 +140,12 @@ class RGWOTPMetadataHandler : public RGWOTPMetadataHandlerBase { } int 
do_remove(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWObjVersionTracker& objv_tracker, - optional_yield y) override { + optional_yield y, const DoutPrefixProvider *dpp) override { RGWSI_MBOTP_RemoveParams params; RGWSI_OTP_BE_Ctx be_ctx(op->ctx()); - return svc.otp->remove_all(be_ctx, + return svc.otp->remove_all(dpp, be_ctx, entry, &objv_tracker, y); @@ -173,29 +175,32 @@ void RGWOTPCtl::init(RGWOTPMetadataHandler *_meta_handler) int RGWOTPCtl::read_all(const rgw_user& uid, RGWOTPInfo *info, optional_yield y, + const DoutPrefixProvider *dpp, const GetParams& params) { info->uid = uid; return meta_handler->call([&](RGWSI_OTP_BE_Ctx& ctx) { - return svc.otp->read_all(ctx, uid, &info->devices, params.mtime, params.objv_tracker, y); + return svc.otp->read_all(ctx, uid, &info->devices, params.mtime, params.objv_tracker, y, dpp); }); } -int RGWOTPCtl::store_all(const RGWOTPInfo& info, +int RGWOTPCtl::store_all(const DoutPrefixProvider *dpp, + const RGWOTPInfo& info, optional_yield y, const PutParams& params) { return meta_handler->call([&](RGWSI_OTP_BE_Ctx& ctx) { - return svc.otp->store_all(ctx, info.uid, info.devices, params.mtime, params.objv_tracker, y); + return svc.otp->store_all(dpp, ctx, info.uid, info.devices, params.mtime, params.objv_tracker, y); }); } -int RGWOTPCtl::remove_all(const rgw_user& uid, +int RGWOTPCtl::remove_all(const DoutPrefixProvider *dpp, + const rgw_user& uid, optional_yield y, const RemoveParams& params) { return meta_handler->call([&](RGWSI_OTP_BE_Ctx& ctx) { - return svc.otp->remove_all(ctx, uid, params.objv_tracker, y); + return svc.otp->remove_all(dpp, ctx, uid, params.objv_tracker, y); }); } diff --git a/src/rgw/rgw_otp.h b/src/rgw/rgw_otp.h index ca323b5f1e79b..259e2152fafad 100644 --- a/src/rgw/rgw_otp.h +++ b/src/rgw/rgw_otp.h @@ -103,10 +103,13 @@ public: }; int read_all(const rgw_user& uid, RGWOTPInfo *info, optional_yield y, + const DoutPrefixProvider *dpp, const GetParams& params = {}); - int store_all(const RGWOTPInfo& info, optional_yield y, + int store_all(const DoutPrefixProvider *dpp, + const RGWOTPInfo& info, optional_yield y, const PutParams& params = {}); - int remove_all(const rgw_user& user, optional_yield y, + int remove_all(const DoutPrefixProvider *dpp, + const rgw_user& user, optional_yield y, const RemoveParams& params = {}); }; diff --git a/src/rgw/rgw_period_history.cc b/src/rgw/rgw_period_history.cc index 67c63e7beaa41..abbd998cfb96d 100644 --- a/src/rgw/rgw_period_history.cc +++ b/src/rgw/rgw_period_history.cc @@ -85,7 +85,7 @@ class RGWPeriodHistory::Impl final { ~Impl(); Cursor get_current() const { return current_cursor; } - Cursor attach(RGWPeriod&& period, optional_yield y); + Cursor attach(const DoutPrefixProvider *dpp, RGWPeriod&& period, optional_yield y); Cursor insert(RGWPeriod&& period); Cursor lookup(epoch_t realm_epoch); @@ -148,7 +148,7 @@ RGWPeriodHistory::Impl::~Impl() histories.clear_and_dispose(std::default_delete{}); } -Cursor RGWPeriodHistory::Impl::attach(RGWPeriod&& period, optional_yield y) +Cursor RGWPeriodHistory::Impl::attach(const DoutPrefixProvider *dpp, RGWPeriod&& period, optional_yield y) { if (current_history == histories.end()) { return Cursor{-EINVAL}; @@ -179,12 +179,12 @@ Cursor RGWPeriodHistory::Impl::attach(RGWPeriod&& period, optional_yield y) } if (predecessor_id.empty()) { - lderr(cct) << "reached a period with an empty predecessor id" << dendl; + ldpp_dout(dpp, -1) << "reached a period with an empty predecessor id" << dendl; return Cursor{-EINVAL}; } // pull the period outside of the 
lock - int r = puller->pull(predecessor_id, period, y); + int r = puller->pull(dpp, predecessor_id, period, y); if (r < 0) { return Cursor{r}; } @@ -339,9 +339,9 @@ Cursor RGWPeriodHistory::get_current() const { return impl->get_current(); } -Cursor RGWPeriodHistory::attach(RGWPeriod&& period, optional_yield y) +Cursor RGWPeriodHistory::attach(const DoutPrefixProvider *dpp, RGWPeriod&& period, optional_yield y) { - return impl->attach(std::move(period), y); + return impl->attach(dpp, std::move(period), y); } Cursor RGWPeriodHistory::insert(RGWPeriod&& period) { diff --git a/src/rgw/rgw_period_history.h b/src/rgw/rgw_period_history.h index 6004db2efef3a..0d412c76a3bed 100644 --- a/src/rgw/rgw_period_history.h +++ b/src/rgw/rgw_period_history.h @@ -11,6 +11,7 @@ #include "include/ceph_assert.h" #include "include/types.h" #include "common/async/yield_context.h" +#include "common/dout.h" namespace bi = boost::intrusive; @@ -42,7 +43,7 @@ class RGWPeriodHistory final { public: virtual ~Puller() = default; - virtual int pull(const std::string& period_id, RGWPeriod& period, + virtual int pull(const DoutPrefixProvider *dpp, const std::string& period_id, RGWPeriod& period, optional_yield y) = 0; }; @@ -100,7 +101,7 @@ class RGWPeriodHistory final { /// current_period and the given period, reading predecessor periods or /// fetching them from the master as necessary. returns a cursor at the /// given period that can be used to traverse the current_history - Cursor attach(RGWPeriod&& period, optional_yield y); + Cursor attach(const DoutPrefixProvider *dpp, RGWPeriod&& period, optional_yield y); /// insert the given period into an existing history, or create a new /// unconnected history. similar to attach(), but it doesn't try to fetch diff --git a/src/rgw/rgw_period_puller.cc b/src/rgw/rgw_period_puller.cc index 7f870cbab05c8..8e0df896957f9 100644 --- a/src/rgw/rgw_period_puller.cc +++ b/src/rgw/rgw_period_puller.cc @@ -24,7 +24,7 @@ RGWPeriodPuller::RGWPeriodPuller(RGWSI_Zone *zone_svc, RGWSI_SysObj *sysobj_svc) namespace { // pull the given period over the connection -int pull_period(RGWRESTConn* conn, const std::string& period_id, +int pull_period(const DoutPrefixProvider *dpp, RGWRESTConn* conn, const std::string& period_id, const std::string& realm_id, RGWPeriod& period, optional_yield y) { @@ -40,7 +40,7 @@ int pull_period(RGWRESTConn* conn, const std::string& period_id, bufferlist data; #define MAX_REST_RESPONSE (128 * 1024) - int r = conn->forward(user, info, nullptr, MAX_REST_RESPONSE, nullptr, &data, y); + int r = conn->forward(dpp, user, info, nullptr, MAX_REST_RESPONSE, nullptr, &data, y); if (r < 0) { return r; } @@ -64,59 +64,59 @@ int pull_period(RGWRESTConn* conn, const std::string& period_id, } // anonymous namespace -int RGWPeriodPuller::pull(const std::string& period_id, RGWPeriod& period, +int RGWPeriodPuller::pull(const DoutPrefixProvider *dpp, const std::string& period_id, RGWPeriod& period, optional_yield y) { // try to read the period from rados period.set_id(period_id); period.set_epoch(0); - int r = period.init(cct, svc.sysobj, y); + int r = period.init(dpp, cct, svc.sysobj, y); if (r < 0) { if (svc.zone->is_meta_master()) { // can't pull if we're the master - ldout(cct, 1) << "metadata master failed to read period " + ldpp_dout(dpp, 1) << "metadata master failed to read period " << period_id << " from local storage: " << cpp_strerror(r) << dendl; return r; } - ldout(cct, 14) << "pulling period " << period_id + ldpp_dout(dpp, 14) << "pulling period " << period_id << " from 
master" << dendl; // request the period from the master zone - r = pull_period(svc.zone->get_master_conn(), period_id, + r = pull_period(dpp, svc.zone->get_master_conn(), period_id, svc.zone->get_realm().get_id(), period, y); if (r < 0) { - lderr(cct) << "failed to pull period " << period_id << dendl; + ldpp_dout(dpp, -1) << "failed to pull period " << period_id << dendl; return r; } // write the period to rados - r = period.store_info(true, y); + r = period.store_info(dpp, true, y); if (r == -EEXIST) { r = 0; } else if (r < 0) { - lderr(cct) << "failed to store period " << period_id << dendl; + ldpp_dout(dpp, -1) << "failed to store period " << period_id << dendl; return r; } // update latest epoch - r = period.update_latest_epoch(period.get_epoch(), y); + r = period.update_latest_epoch(dpp, period.get_epoch(), y); if (r == -EEXIST) { // already have this epoch (or a more recent one) return 0; } if (r < 0) { - lderr(cct) << "failed to update latest_epoch for period " + ldpp_dout(dpp, -1) << "failed to update latest_epoch for period " << period_id << dendl; return r; } // reflect period objects if this is the latest version if (svc.zone->get_realm().get_current_period() == period_id) { - r = period.reflect(y); + r = period.reflect(dpp, y); if (r < 0) { return r; } } - ldout(cct, 14) << "period " << period_id + ldpp_dout(dpp, 14) << "period " << period_id << " pulled and written to local storage" << dendl; } else { - ldout(cct, 14) << "found period " << period_id + ldpp_dout(dpp, 14) << "found period " << period_id << " in local storage" << dendl; } return 0; diff --git a/src/rgw/rgw_period_puller.h b/src/rgw/rgw_period_puller.h index 7ac5cc8dce5a1..654029dd1c4ef 100644 --- a/src/rgw/rgw_period_puller.h +++ b/src/rgw/rgw_period_puller.h @@ -21,7 +21,7 @@ class RGWPeriodPuller : public RGWPeriodHistory::Puller { public: explicit RGWPeriodPuller(RGWSI_Zone *zone_svc, RGWSI_SysObj *sysobj_svc); - int pull(const std::string& period_id, RGWPeriod& period, optional_yield y) override; + int pull(const DoutPrefixProvider *dpp, const std::string& period_id, RGWPeriod& period, optional_yield y) override; }; #endif // CEPH_RGW_PERIOD_PULLER_H diff --git a/src/rgw/rgw_period_pusher.cc b/src/rgw/rgw_period_pusher.cc index 0eb394ed9c7ad..57a98f6b7e272 100644 --- a/src/rgw/rgw_period_pusher.cc +++ b/src/rgw/rgw_period_pusher.cc @@ -45,15 +45,15 @@ class PushAndRetryCR : public RGWCoroutine { counter(0) {} - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int PushAndRetryCR::operate() +int PushAndRetryCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { for (;;) { yield { - ldout(cct, 10) << "pushing period " << period.get_id() + ldpp_dout(dpp, 10) << "pushing period " << period.get_id() << " to " << zone << dendl; // initialize the http params rgw_http_param_pair params[] = { @@ -67,7 +67,7 @@ int PushAndRetryCR::operate() // stop on success if (get_ret_status() == 0) { - ldout(cct, 10) << "push to " << zone << " succeeded" << dendl; + ldpp_dout(dpp, 10) << "push to " << zone << " succeeded" << dendl; return set_cr_done(); } @@ -81,7 +81,7 @@ int PushAndRetryCR::operate() utime_t dur; dur.set_from_double(timeout); - ldout(cct, 10) << "waiting " << dur << "s for retry.." << dendl; + ldpp_dout(dpp, 10) << "waiting " << dur << "s for retry.." 
<< dendl; wait(dur); timeout *= 2; @@ -110,15 +110,15 @@ class PushAllCR : public RGWCoroutine { conns(std::move(conns)) {} - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int PushAllCR::operate() +int PushAllCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { // spawn a coroutine to push the period over each connection yield { - ldout(cct, 4) << "sending " << conns.size() << " periods" << dendl; + ldpp_dout(dpp, 4) << "sending " << conns.size() << " periods" << dendl; for (auto& c : conns) spawn(new PushAndRetryCR(cct, c.first, &c.second, http, period), false); } @@ -130,7 +130,8 @@ int PushAllCR::operate() } /// A background thread to run the PushAllCR coroutine and exit. -class RGWPeriodPusher::CRThread { +class RGWPeriodPusher::CRThread : public DoutPrefixProvider { + CephContext* cct; RGWCoroutinesManager coroutines; RGWHTTPManager http; boost::intrusive_ptr push_all; @@ -139,13 +140,13 @@ class RGWPeriodPusher::CRThread { public: CRThread(CephContext* cct, RGWPeriod&& period, std::map&& conns) - : coroutines(cct, NULL), + : cct(cct), coroutines(cct, NULL), http(cct, coroutines.get_completion_mgr()), push_all(new PushAllCR(cct, &http, std::move(period), std::move(conns))) { http.start(); // must spawn the CR thread after start - thread = std::thread([this] { coroutines.run(push_all.get()); }); + thread = std::thread([this] { coroutines.run(this, push_all.get()); }); } ~CRThread() { @@ -155,10 +156,14 @@ class RGWPeriodPusher::CRThread { if (thread.joinable()) thread.join(); } + + CephContext *get_cct() const override { return cct; } + unsigned get_subsys() const override { return dout_subsys; } + std::ostream& gen_prefix(std::ostream& out) const override { return out << "rgw period pusher CR thread: "; } }; -RGWPeriodPusher::RGWPeriodPusher(rgw::sal::RGWRadosStore* store, +RGWPeriodPusher::RGWPeriodPusher(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore* store, optional_yield y) : cct(store->ctx()), store(store) { @@ -169,9 +174,9 @@ RGWPeriodPusher::RGWPeriodPusher(rgw::sal::RGWRadosStore* store, // always send out the current period on startup RGWPeriod period; - int r = period.init(cct, store->svc()->sysobj, realm_id, y, realm.get_name()); + int r = period.init(dpp, cct, store->svc()->sysobj, realm_id, y, realm.get_name()); if (r < 0) { - lderr(cct) << "failed to load period for realm " << realm_id << dendl; + ldpp_dout(dpp, -1) << "failed to load period for realm " << realm_id << dendl; return; } diff --git a/src/rgw/rgw_period_pusher.h b/src/rgw/rgw_period_pusher.h index fded5095c1b22..0f6d43b01db6b 100644 --- a/src/rgw/rgw_period_pusher.h +++ b/src/rgw/rgw_period_pusher.h @@ -29,7 +29,7 @@ using RGWZonesNeedPeriod = RGWPeriod; class RGWPeriodPusher final : public RGWRealmWatcher::Watcher, public RGWRealmReloader::Pauser { public: - explicit RGWPeriodPusher(rgw::sal::RGWRadosStore* store, optional_yield y); + explicit RGWPeriodPusher(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore* store, optional_yield y); ~RGWPeriodPusher() override; /// respond to realm notifications by pushing new periods to other zones diff --git a/src/rgw/rgw_process.cc b/src/rgw/rgw_process.cc index fb8c06824c71e..02c3acc43eebe 100644 --- a/src/rgw/rgw_process.cc +++ b/src/rgw/rgw_process.cc @@ -82,7 +82,7 @@ RGWRequest* RGWProcess::RGWWQ::_dequeue() { void RGWProcess::RGWWQ::_process(RGWRequest *req, ThreadPool::TPHandle &) { perfcounter->inc(l_rgw_qactive); - process->handle_request(req); + process->handle_request(this, req); 
process->req_throttle.put(1); perfcounter->inc(l_rgw_qactive, -1); } @@ -232,7 +232,7 @@ int process_request(rgw::sal::RGWRadosStore* const store, abort_early(s, nullptr, init_error, nullptr, yield); goto done; } - dout(10) << "handler=" << typeid(*handler).name() << dendl; + ldpp_dout(s, 10) << "handler=" << typeid(*handler).name() << dendl; should_log = mgr->get_logging(); @@ -266,7 +266,7 @@ int process_request(rgw::sal::RGWRadosStore* const store, goto done; } req->op = op; - dout(10) << "op=" << typeid(*op).name() << dendl; + ldpp_dout(op, 10) << "op=" << typeid(*op).name() << dendl; s->op_type = op->get_type(); diff --git a/src/rgw/rgw_process.h b/src/rgw/rgw_process.h index 0afea681c2c10..598da0b120dec 100644 --- a/src/rgw/rgw_process.h +++ b/src/rgw/rgw_process.h @@ -57,7 +57,7 @@ protected: int sock_fd; std::string uri_prefix; - struct RGWWQ : public ThreadPool::WorkQueue { + struct RGWWQ : public DoutPrefixProvider, public ThreadPool::WorkQueue { RGWProcess* process; RGWWQ(RGWProcess* p, ceph::timespan timeout, ceph::timespan suicide_timeout, ThreadPool* tp) @@ -85,6 +85,11 @@ protected: void _clear() override { ceph_assert(process->m_req_queue.empty()); } + + CephContext *get_cct() const override { return process->cct; } + unsigned get_subsys() const { return ceph_subsys_rgw; } + std::ostream& gen_prefix(std::ostream& out) const { return out << "rgw request work queue: ";} + } req_wq; public: @@ -111,7 +116,7 @@ public: virtual ~RGWProcess() = default; virtual void run() = 0; - virtual void handle_request(RGWRequest *req) = 0; + virtual void handle_request(const DoutPrefixProvider *dpp, RGWRequest *req) = 0; void pause() { m_tp.pause(); @@ -147,7 +152,7 @@ public: } void run() override; - void handle_request(RGWRequest* req) override; + void handle_request(const DoutPrefixProvider *dpp, RGWRequest* req) override; }; class RGWProcessControlThread : public Thread { @@ -169,7 +174,7 @@ public: RGWProcess(cct, pe, num_threads, _conf) {} void run() override; void checkpoint(); - void handle_request(RGWRequest* req) override; + void handle_request(const DoutPrefixProvider *dpp, RGWRequest* req) override; void gen_request(const string& method, const string& resource, int content_length, std::atomic* fail_flag); diff --git a/src/rgw/rgw_pubsub.cc b/src/rgw/rgw_pubsub.cc index f1bacc83710eb..00d8ffdc8642e 100644 --- a/src/rgw/rgw_pubsub.cc +++ b/src/rgw/rgw_pubsub.cc @@ -431,11 +431,12 @@ RGWPubSub::RGWPubSub(rgw::sal::RGWRadosStore* _store, const std::string& _tenant get_meta_obj(&meta_obj); } -int RGWPubSub::remove(const rgw_raw_obj& obj, +int RGWPubSub::remove(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, RGWObjVersionTracker *objv_tracker, optional_yield y) { - int ret = rgw_delete_system_obj(store->svc()->sysobj, obj.pool, obj.oid, objv_tracker, y); + int ret = rgw_delete_system_obj(dpp, store->svc()->sysobj, obj.pool, obj.oid, objv_tracker, y); if (ret < 0) { return ret; } @@ -453,12 +454,12 @@ int RGWPubSub::read_topics(rgw_pubsub_topics *result, RGWObjVersionTracker *objv return 0; } -int RGWPubSub::write_topics(const rgw_pubsub_topics& topics, +int RGWPubSub::write_topics(const DoutPrefixProvider *dpp, const rgw_pubsub_topics& topics, RGWObjVersionTracker *objv_tracker, optional_yield y) { - int ret = write(meta_obj, topics, objv_tracker, y); + int ret = write(dpp, meta_obj, topics, objv_tracker, y); if (ret < 0 && ret != -ENOENT) { - ldout(store->ctx(), 1) << "ERROR: failed to write topics info: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to write 
topics info: ret=" << ret << dendl; return ret; } return 0; @@ -479,11 +480,11 @@ int RGWPubSub::Bucket::read_topics(rgw_pubsub_bucket_topics *result, RGWObjVersi return 0; } -int RGWPubSub::Bucket::write_topics(const rgw_pubsub_bucket_topics& topics, +int RGWPubSub::Bucket::write_topics(const DoutPrefixProvider *dpp, const rgw_pubsub_bucket_topics& topics, RGWObjVersionTracker *objv_tracker, optional_yield y) { - int ret = ps->write(bucket_meta_obj, topics, objv_tracker, y); + int ret = ps->write(dpp, bucket_meta_obj, topics, objv_tracker, y); if (ret < 0) { ldout(ps->store->ctx(), 1) << "ERROR: failed to write bucket topics info: ret=" << ret << dendl; return ret; @@ -535,31 +536,30 @@ int RGWPubSub::get_topic(const string& name, rgw_pubsub_topic *result) return 0; } -int RGWPubSub::Bucket::create_notification(const string& topic_name, const rgw::notify::EventTypeList& events, optional_yield y) { - return create_notification(topic_name, events, std::nullopt, "", y); +int RGWPubSub::Bucket::create_notification(const DoutPrefixProvider *dpp, const string& topic_name, const rgw::notify::EventTypeList& events, optional_yield y) { + return create_notification(dpp, topic_name, events, std::nullopt, "", y); } -int RGWPubSub::Bucket::create_notification(const string& topic_name,const rgw::notify::EventTypeList& events, OptionalFilter s3_filter, const std::string& notif_name, optional_yield y) { +int RGWPubSub::Bucket::create_notification(const DoutPrefixProvider *dpp, const string& topic_name,const rgw::notify::EventTypeList& events, OptionalFilter s3_filter, const std::string& notif_name, optional_yield y) { rgw_pubsub_topic_subs topic_info; - rgw::sal::RGWRadosStore *store = ps->store; int ret = ps->get_topic(topic_name, &topic_info); if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: failed to read topic '" << topic_name << "' info: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to read topic '" << topic_name << "' info: ret=" << ret << dendl; return ret; } - ldout(store->ctx(), 20) << "successfully read topic '" << topic_name << "' info" << dendl; + ldpp_dout(dpp, 20) << "successfully read topic '" << topic_name << "' info" << dendl; RGWObjVersionTracker objv_tracker; rgw_pubsub_bucket_topics bucket_topics; ret = read_topics(&bucket_topics, &objv_tracker); if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: failed to read topics from bucket '" << + ldpp_dout(dpp, 1) << "ERROR: failed to read topics from bucket '" << bucket.name << "': ret=" << ret << dendl; return ret; } - ldout(store->ctx(), 20) << "successfully read " << bucket_topics.topics.size() << " topics from bucket '" << + ldpp_dout(dpp, 20) << "successfully read " << bucket_topics.topics.size() << " topics from bucket '" << bucket.name << "'" << dendl; auto& topic_filter = bucket_topics.topics[topic_name]; @@ -570,25 +570,24 @@ int RGWPubSub::Bucket::create_notification(const string& topic_name,const rgw::n topic_filter.s3_filter = *s3_filter; } - ret = write_topics(bucket_topics, &objv_tracker, y); + ret = write_topics(dpp, bucket_topics, &objv_tracker, y); if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: failed to write topics to bucket '" << bucket.name << "': ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to write topics to bucket '" << bucket.name << "': ret=" << ret << dendl; return ret; } - ldout(store->ctx(), 20) << "successfully wrote " << bucket_topics.topics.size() << " topics to bucket '" << bucket.name << "'" << dendl; + ldpp_dout(dpp, 20) << "successfully wrote " << 
bucket_topics.topics.size() << " topics to bucket '" << bucket.name << "'" << dendl; return 0; } -int RGWPubSub::Bucket::remove_notification(const string& topic_name, optional_yield y) +int RGWPubSub::Bucket::remove_notification(const DoutPrefixProvider *dpp, const string& topic_name, optional_yield y) { rgw_pubsub_topic_subs topic_info; - rgw::sal::RGWRadosStore *store = ps->store; int ret = ps->get_topic(topic_name, &topic_info); if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: failed to read topic info: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to read topic info: ret=" << ret << dendl; return ret; } @@ -597,62 +596,62 @@ int RGWPubSub::Bucket::remove_notification(const string& topic_name, optional_yi ret = read_topics(&bucket_topics, &objv_tracker); if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: failed to read bucket topics info: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to read bucket topics info: ret=" << ret << dendl; return ret; } bucket_topics.topics.erase(topic_name); - ret = write_topics(bucket_topics, &objv_tracker, y); + ret = write_topics(dpp, bucket_topics, &objv_tracker, y); if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: failed to write topics info: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to write topics info: ret=" << ret << dendl; return ret; } return 0; } -int RGWPubSub::Bucket::remove_notifications(optional_yield y) +int RGWPubSub::Bucket::remove_notifications(const DoutPrefixProvider *dpp, optional_yield y) { // get all topics on a bucket rgw_pubsub_bucket_topics bucket_topics; auto ret = get_topics(&bucket_topics); if (ret < 0 && ret != -ENOENT) { - ldout(ps->store->ctx(), 1) << "ERROR: failed to get list of topics from bucket '" << bucket.name << "', ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to get list of topics from bucket '" << bucket.name << "', ret=" << ret << dendl; return ret ; } // remove all auto-genrated topics for (const auto& topic : bucket_topics.topics) { const auto& topic_name = topic.first; - ret = ps->remove_topic(topic_name, y); + ret = ps->remove_topic(dpp, topic_name, y); if (ret < 0 && ret != -ENOENT) { - ldout(ps->store->ctx(), 5) << "WARNING: failed to remove auto-generated topic '" << topic_name << "', ret=" << ret << dendl; + ldpp_dout(dpp, 5) << "WARNING: failed to remove auto-generated topic '" << topic_name << "', ret=" << ret << dendl; } } // delete all notification of on a bucket - ret = ps->remove(bucket_meta_obj, nullptr, y); + ret = ps->remove(dpp, bucket_meta_obj, nullptr, y); if (ret < 0 && ret != -ENOENT) { - ldout(ps->store->ctx(), 1) << "ERROR: failed to remove bucket topics: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to remove bucket topics: ret=" << ret << dendl; return ret; } return 0; } -int RGWPubSub::create_topic(const string& name, optional_yield y) { - return create_topic(name, rgw_pubsub_sub_dest(), "", "", y); +int RGWPubSub::create_topic(const DoutPrefixProvider *dpp, const string& name, optional_yield y) { + return create_topic(dpp, name, rgw_pubsub_sub_dest(), "", "", y); } -int RGWPubSub::create_topic(const string& name, const rgw_pubsub_sub_dest& dest, const std::string& arn, const std::string& opaque_data, optional_yield y) { +int RGWPubSub::create_topic(const DoutPrefixProvider *dpp, const string& name, const rgw_pubsub_sub_dest& dest, const std::string& arn, const std::string& opaque_data, optional_yield y) { RGWObjVersionTracker objv_tracker; rgw_pubsub_topics topics; int ret = read_topics(&topics, 
&objv_tracker); if (ret < 0 && ret != -ENOENT) { // its not an error if not topics exist, we create one - ldout(store->ctx(), 1) << "ERROR: failed to read topics info: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to read topics info: ret=" << ret << dendl; return ret; } @@ -663,35 +662,35 @@ int RGWPubSub::create_topic(const string& name, const rgw_pubsub_sub_dest& dest, new_topic.topic.arn = arn; new_topic.topic.opaque_data = opaque_data; - ret = write_topics(topics, &objv_tracker, y); + ret = write_topics(dpp, topics, &objv_tracker, y); if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: failed to write topics info: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to write topics info: ret=" << ret << dendl; return ret; } return 0; } -int RGWPubSub::remove_topic(const string& name, optional_yield y) +int RGWPubSub::remove_topic(const DoutPrefixProvider *dpp, const string& name, optional_yield y) { RGWObjVersionTracker objv_tracker; rgw_pubsub_topics topics; int ret = read_topics(&topics, &objv_tracker); if (ret < 0 && ret != -ENOENT) { - ldout(store->ctx(), 1) << "ERROR: failed to read topics info: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to read topics info: ret=" << ret << dendl; return ret; } else if (ret == -ENOENT) { // its not an error if no topics exist, just a no-op - ldout(store->ctx(), 10) << "WARNING: failed to read topics info, deletion is a no-op: ret=" << ret << dendl; + ldpp_dout(dpp, 10) << "WARNING: failed to read topics info, deletion is a no-op: ret=" << ret << dendl; return 0; } topics.topics.erase(name); - ret = write_topics(topics, &objv_tracker, y); + ret = write_topics(dpp, topics, &objv_tracker, y); if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: failed to remove topics info: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to remove topics info: ret=" << ret << dendl; return ret; } @@ -708,25 +707,26 @@ int RGWPubSub::Sub::read_sub(rgw_pubsub_sub_config *result, RGWObjVersionTracker return 0; } -int RGWPubSub::Sub::write_sub(const rgw_pubsub_sub_config& sub_conf, +int RGWPubSub::Sub::write_sub(const DoutPrefixProvider *dpp, + const rgw_pubsub_sub_config& sub_conf, RGWObjVersionTracker *objv_tracker, optional_yield y) { - int ret = ps->write(sub_meta_obj, sub_conf, objv_tracker, y); + int ret = ps->write(dpp, sub_meta_obj, sub_conf, objv_tracker, y); if (ret < 0) { - ldout(ps->store->ctx(), 1) << "ERROR: failed to write subscription info: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to write subscription info: ret=" << ret << dendl; return ret; } return 0; } -int RGWPubSub::Sub::remove_sub(RGWObjVersionTracker *objv_tracker, +int RGWPubSub::Sub::remove_sub(const DoutPrefixProvider *dpp, RGWObjVersionTracker *objv_tracker, optional_yield y) { - int ret = ps->remove(sub_meta_obj, objv_tracker, y); + int ret = ps->remove(dpp, sub_meta_obj, objv_tracker, y); if (ret < 0) { - ldout(ps->store->ctx(), 1) << "ERROR: failed to remove subscription info: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to remove subscription info: ret=" << ret << dendl; return ret; } @@ -738,21 +738,20 @@ int RGWPubSub::Sub::get_conf(rgw_pubsub_sub_config *result) return read_sub(result, nullptr); } -int RGWPubSub::Sub::subscribe(const string& topic, const rgw_pubsub_sub_dest& dest, optional_yield y, const std::string& s3_id) +int RGWPubSub::Sub::subscribe(const DoutPrefixProvider *dpp, const string& topic, const rgw_pubsub_sub_dest& dest, optional_yield y, const std::string& s3_id) { 
RGWObjVersionTracker objv_tracker; rgw_pubsub_topics topics; - rgw::sal::RGWRadosStore *store = ps->store; int ret = ps->read_topics(&topics, &objv_tracker); if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: failed to read topics info: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to read topics info: ret=" << ret << dendl; return ret != -ENOENT ? ret : -EINVAL; } auto iter = topics.topics.find(topic); if (iter == topics.topics.end()) { - ldout(store->ctx(), 1) << "ERROR: cannot add subscription to topic: topic not found" << dendl; + ldpp_dout(dpp, 1) << "ERROR: cannot add subscription to topic: topic not found" << dendl; return -EINVAL; } @@ -768,31 +767,30 @@ int RGWPubSub::Sub::subscribe(const string& topic, const rgw_pubsub_sub_dest& de t.subs.insert(sub); - ret = ps->write_topics(topics, &objv_tracker, y); + ret = ps->write_topics(dpp, topics, &objv_tracker, y); if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: failed to write topics info: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to write topics info: ret=" << ret << dendl; return ret; } - ret = write_sub(sub_conf, nullptr, y); + ret = write_sub(dpp, sub_conf, nullptr, y); if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: failed to write subscription info: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to write subscription info: ret=" << ret << dendl; return ret; } return 0; } -int RGWPubSub::Sub::unsubscribe(const string& _topic, optional_yield y) +int RGWPubSub::Sub::unsubscribe(const DoutPrefixProvider *dpp, const string& _topic, optional_yield y) { string topic = _topic; RGWObjVersionTracker sobjv_tracker; - rgw::sal::RGWRadosStore *store = ps->store; if (topic.empty()) { rgw_pubsub_sub_config sub_conf; int ret = read_sub(&sub_conf, &sobjv_tracker); if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: failed to read subscription info: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to read subscription info: ret=" << ret << dendl; return ret; } topic = sub_conf.topic; @@ -804,7 +802,7 @@ int RGWPubSub::Sub::unsubscribe(const string& _topic, optional_yield y) int ret = ps->read_topics(&topics, &objv_tracker); if (ret < 0) { // not an error - could be that topic was already deleted - ldout(store->ctx(), 10) << "WARNING: failed to read topics info: ret=" << ret << dendl; + ldpp_dout(dpp, 10) << "WARNING: failed to read topics info: ret=" << ret << dendl; } else { auto iter = topics.topics.find(topic); if (iter != topics.topics.end()) { @@ -812,17 +810,17 @@ int RGWPubSub::Sub::unsubscribe(const string& _topic, optional_yield y) t.subs.erase(sub); - ret = ps->write_topics(topics, &objv_tracker, y); + ret = ps->write_topics(dpp, topics, &objv_tracker, y); if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: failed to write topics info: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to write topics info: ret=" << ret << dendl; return ret; } } } - ret = remove_sub(&sobjv_tracker, y); + ret = remove_sub(dpp, &sobjv_tracker, y); if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: failed to delete subscription info: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to delete subscription info: ret=" << ret << dendl; return ret; } return 0; @@ -841,13 +839,13 @@ void RGWPubSub::SubWithEvents::list_events_result::dump(Formatter *f) } template -int RGWPubSub::SubWithEvents::list_events(const string& marker, int max_events) +int RGWPubSub::SubWithEvents::list_events(const DoutPrefixProvider *dpp, const string& marker, int max_events) { RGWRados *store = 
ps->store->getRados(); rgw_pubsub_sub_config sub_conf; int ret = get_conf(&sub_conf); if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: failed to read sub config: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to read sub config: ret=" << ret << dendl; return ret; } @@ -859,7 +857,7 @@ int RGWPubSub::SubWithEvents::list_events(const string& marker, int m return 0; } if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: failed to read bucket info for events bucket: bucket=" << sub_conf.dest.bucket_name << " ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to read bucket info for events bucket: bucket=" << sub_conf.dest.bucket_name << " ret=" << ret << dendl; return ret; } @@ -871,9 +869,9 @@ int RGWPubSub::SubWithEvents::list_events(const string& marker, int m std::vector objs; - ret = list_op.list_objects(max_events, &objs, nullptr, &list.is_truncated, null_yield); + ret = list_op.list_objects(dpp, max_events, &objs, nullptr, &list.is_truncated, null_yield); if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: failed to list bucket: bucket=" << sub_conf.dest.bucket_name << " ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to list bucket: bucket=" << sub_conf.dest.bucket_name << " ret=" << ret << dendl; return ret; } if (list.is_truncated) { @@ -887,7 +885,7 @@ int RGWPubSub::SubWithEvents::list_events(const string& marker, int m try { bl.decode_base64(bl64); } catch (buffer::error& err) { - ldout(store->ctx(), 1) << "ERROR: failed to event (not a valid base64)" << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to event (not a valid base64)" << dendl; continue; } EventType event; @@ -896,7 +894,7 @@ int RGWPubSub::SubWithEvents::list_events(const string& marker, int m try { decode(event, iter); } catch (buffer::error& err) { - ldout(store->ctx(), 1) << "ERROR: failed to decode event" << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to decode event" << dendl; continue; }; @@ -906,13 +904,13 @@ int RGWPubSub::SubWithEvents::list_events(const string& marker, int m } template -int RGWPubSub::SubWithEvents::remove_event(const string& event_id) +int RGWPubSub::SubWithEvents::remove_event(const DoutPrefixProvider *dpp, const string& event_id) { rgw::sal::RGWRadosStore *store = ps->store; rgw_pubsub_sub_config sub_conf; int ret = get_conf(&sub_conf); if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: failed to read sub config: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to read sub config: ret=" << ret << dendl; return ret; } @@ -920,7 +918,7 @@ int RGWPubSub::SubWithEvents::remove_event(const string& event_id) string tenant; ret = store->getRados()->get_bucket_info(store->svc(), tenant, sub_conf.dest.bucket_name, bucket_info, nullptr, null_yield, nullptr); if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: failed to read bucket info for events bucket: bucket=" << sub_conf.dest.bucket_name << " ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to read bucket info for events bucket: bucket=" << sub_conf.dest.bucket_name << " ret=" << ret << dendl; return ret; } @@ -937,9 +935,9 @@ int RGWPubSub::SubWithEvents::remove_event(const string& event_id) del_op.params.bucket_owner = bucket_info.owner; del_op.params.versioning_status = bucket_info.versioning_status(); - ret = del_op.delete_obj(null_yield); + ret = del_op.delete_obj(null_yield, dpp); if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: failed to remove event (obj=" << obj << "): ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to remove event (obj=" << obj << "): ret=" << 
ret << dendl; } return 0; } diff --git a/src/rgw/rgw_pubsub.h b/src/rgw/rgw_pubsub.h index 6d4d382affe95..b82384e64ec54 100644 --- a/src/rgw/rgw_pubsub.h +++ b/src/rgw/rgw_pubsub.h @@ -614,14 +614,14 @@ class RGWPubSub int read(const rgw_raw_obj& obj, T* data, RGWObjVersionTracker* objv_tracker); template - int write(const rgw_raw_obj& obj, const T& info, + int write(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, const T& info, RGWObjVersionTracker* obj_tracker, optional_yield y); - int remove(const rgw_raw_obj& obj, RGWObjVersionTracker* objv_tracker, + int remove(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, RGWObjVersionTracker* objv_tracker, optional_yield y); int read_topics(rgw_pubsub_topics *result, RGWObjVersionTracker* objv_tracker); - int write_topics(const rgw_pubsub_topics& topics, + int write_topics(const DoutPrefixProvider *dpp, const rgw_pubsub_topics& topics, RGWObjVersionTracker* objv_tracker, optional_yield y); public: @@ -640,7 +640,7 @@ public: // set the list of topics associated with a bucket // use version tacker to enforce atomicity between read/write // return 0 on success, error code otherwise - int write_topics(const rgw_pubsub_bucket_topics& topics, + int write_topics(const DoutPrefixProvider *dpp, const rgw_pubsub_bucket_topics& topics, RGWObjVersionTracker* objv_tracker, optional_yield y); public: Bucket(RGWPubSub *_ps, const rgw_bucket& _bucket) : ps(_ps), bucket(_bucket) { @@ -656,16 +656,16 @@ public: // for S3 compliant notifications the version with: s3_filter and notif_name should be used // return -ENOENT if the topic does not exists // return 0 on success, error code otherwise - int create_notification(const string& topic_name, const rgw::notify::EventTypeList& events, optional_yield y); - int create_notification(const string& topic_name, const rgw::notify::EventTypeList& events, OptionalFilter s3_filter, const std::string& notif_name, optional_yield y); + int create_notification(const DoutPrefixProvider *dpp, const string& topic_name, const rgw::notify::EventTypeList& events, optional_yield y); + int create_notification(const DoutPrefixProvider *dpp, const string& topic_name, const rgw::notify::EventTypeList& events, OptionalFilter s3_filter, const std::string& notif_name, optional_yield y); // remove a topic and filter from bucket // if the topic does not exists on the bucket it is a no-op (considered success) // return -ENOENT if the topic does not exists // return 0 on success, error code otherwise - int remove_notification(const string& topic_name, optional_yield y); + int remove_notification(const DoutPrefixProvider *dpp, const string& topic_name, optional_yield y); // remove all notifications (and autogenerated topics) associated with the bucket // return 0 on success or if no topic was associated with the bucket, error code otherwise - int remove_notifications(optional_yield y); + int remove_notifications(const DoutPrefixProvider *dpp, optional_yield y); }; // base class for subscription @@ -677,9 +677,9 @@ public: rgw_raw_obj sub_meta_obj; int read_sub(rgw_pubsub_sub_config *result, RGWObjVersionTracker* objv_tracker); - int write_sub(const rgw_pubsub_sub_config& sub_conf, + int write_sub(const DoutPrefixProvider *dpp, const rgw_pubsub_sub_config& sub_conf, RGWObjVersionTracker* objv_tracker, optional_yield y); - int remove_sub(RGWObjVersionTracker* objv_tracker, optional_yield y); + int remove_sub(const DoutPrefixProvider *dpp, RGWObjVersionTracker* objv_tracker, optional_yield y); public: Sub(RGWPubSub *_ps, const 
std::string& _sub) : ps(_ps), sub(_sub) { ps->get_sub_meta_obj(sub, &sub_meta_obj); @@ -687,15 +687,15 @@ public: virtual ~Sub() = default; - int subscribe(const string& topic_name, const rgw_pubsub_sub_dest& dest, optional_yield y, + int subscribe(const DoutPrefixProvider *dpp, const string& topic_name, const rgw_pubsub_sub_dest& dest, optional_yield y, const std::string& s3_id=""); - int unsubscribe(const string& topic_name, optional_yield y); + int unsubscribe(const DoutPrefixProvider *dpp, const string& topic_name, optional_yield y); int get_conf(rgw_pubsub_sub_config* result); static const int DEFAULT_MAX_EVENTS = 100; // following virtual methods should only be called in derived - virtual int list_events(const string& marker, int max_events) {ceph_assert(false);} - virtual int remove_event(const string& event_id) {ceph_assert(false);} + virtual int list_events(const DoutPrefixProvider *dpp, const string& marker, int max_events) {ceph_assert(false);} + virtual int remove_event(const DoutPrefixProvider *dpp, const string& event_id) {ceph_assert(false);} virtual void dump(Formatter* f) const {ceph_assert(false);} }; @@ -715,8 +715,8 @@ public: virtual ~SubWithEvents() = default; - int list_events(const string& marker, int max_events) override; - int remove_event(const string& event_id) override; + int list_events(const DoutPrefixProvider *dpp, const string& marker, int max_events) override; + int remove_event(const DoutPrefixProvider *dpp, const string& event_id) override; void dump(Formatter* f) const override; }; @@ -762,15 +762,15 @@ public: // create a topic with a name only // if the topic already exists it is a no-op (considered success) // return 0 on success, error code otherwise - int create_topic(const string& name, optional_yield y); + int create_topic(const DoutPrefixProvider *dpp, const string& name, optional_yield y); // create a topic with push destination information and ARN // if the topic already exists the destination and ARN values may be updated (considered success) // return 0 on success, error code otherwise - int create_topic(const string& name, const rgw_pubsub_sub_dest& dest, const std::string& arn, const std::string& opaque_data, optional_yield y); + int create_topic(const DoutPrefixProvider *dpp, const string& name, const rgw_pubsub_sub_dest& dest, const std::string& arn, const std::string& opaque_data, optional_yield y); // remove a topic according to its name // if the topic does not exist it is a no-op (considered success) // return 0 on success, error code otherwise - int remove_topic(const string& name, optional_yield y); + int remove_topic(const DoutPrefixProvider *dpp, const string& name, optional_yield y); }; @@ -798,13 +798,13 @@ int RGWPubSub::read(const rgw_raw_obj& obj, T* result, RGWObjVersionTracker* obj } template -int RGWPubSub::write(const rgw_raw_obj& obj, const T& info, +int RGWPubSub::write(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, const T& info, RGWObjVersionTracker* objv_tracker, optional_yield y) { bufferlist bl; encode(info, bl); - int ret = rgw_put_system_obj(obj_ctx, obj.pool, obj.oid, + int ret = rgw_put_system_obj(dpp, obj_ctx, obj.pool, obj.oid, bl, false, objv_tracker, real_time(), y); if (ret < 0) { diff --git a/src/rgw/rgw_pubsub_push.cc b/src/rgw/rgw_pubsub_push.cc index 3b5b926661051..ca1b43b588d4d 100644 --- a/src/rgw/rgw_pubsub_push.cc +++ b/src/rgw/rgw_pubsub_push.cc @@ -73,7 +73,7 @@ private: } // send message to endpoint - int send_request() override { + int send_request(const DoutPrefixProvider *dpp)
override { init_new_io(this); const auto rc = sync_env->http_manager->add_request(this); if (rc < 0) { @@ -232,7 +232,7 @@ private: topic(_topic), conn(_conn), message(_message) {} // send message to endpoint, without waiting for reply - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { const auto rc = amqp::publish(conn, topic, message); if (rc < 0) { @@ -262,7 +262,7 @@ private: topic(_topic), conn(_conn), message(_message) {} // send message to endpoint, waiting for reply - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { yield { init_new_io(this); @@ -504,7 +504,7 @@ private: topic(_topic), conn(_conn), message(_message) {} // send message to endpoint, without waiting for reply - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { const auto rc = kafka::publish(conn, topic, message); if (rc < 0) { @@ -534,7 +534,7 @@ private: topic(_topic), conn(_conn), message(_message) {} // send message to endpoint, waiting for reply - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { yield { init_new_io(this); diff --git a/src/rgw/rgw_putobj_processor.cc b/src/rgw/rgw_putobj_processor.cc index 11b8db58499fb..acfe1c1641298 100644 --- a/src/rgw/rgw_putobj_processor.cc +++ b/src/rgw/rgw_putobj_processor.cc @@ -77,7 +77,7 @@ static int process_completed(const AioResultList& completed, RawObjSet *written) int RadosWriter::set_stripe_obj(const rgw_raw_obj& raw_obj) { stripe_obj = store->svc()->rados->obj(raw_obj); - return stripe_obj.open(); + return stripe_obj.open(dpp); } int RadosWriter::process(bufferlist&& bl, uint64_t offset) @@ -149,7 +149,7 @@ RadosWriter::~RadosWriter() continue; } - int r = store->delete_raw_obj(obj); + int r = store->delete_raw_obj(dpp, obj); if (r < 0 && r != -ENOENT) { ldpp_dout(dpp, 0) << "WARNING: failed to remove obj (" << obj << "), leaked" << dendl; } @@ -158,7 +158,7 @@ RadosWriter::~RadosWriter() if (need_to_remove_head) { std::string version_id; ldpp_dout(dpp, 5) << "NOTE: we are going to process the head obj (" << *raw_head << ")" << dendl; - int r = head_obj->delete_object(&obj_ctx, ACLOwner(), bucket->get_acl_owner(), ceph::real_time(), + int r = head_obj->delete_object(dpp, &obj_ctx, ACLOwner(), bucket->get_acl_owner(), ceph::real_time(), false, 0, version_id, null_yield); if (r < 0 && r != -ENOENT) { ldpp_dout(dpp, 0) << "WARNING: failed to remove obj (" << *raw_head << "), leaked" << dendl; @@ -179,7 +179,7 @@ int ManifestObjectProcessor::next(uint64_t offset, uint64_t *pstripe_size) rgw_raw_obj stripe_obj = manifest_gen.get_cur_obj(store); uint64_t chunk_size = 0; - r = store->get_raw_chunk_size(stripe_obj, &chunk_size); + r = store->get_raw_chunk_size(dpp, stripe_obj, &chunk_size); if (r < 0) { return r; } @@ -210,7 +210,7 @@ int AtomicObjectProcessor::prepare(optional_yield y) uint64_t chunk_size = 0; uint64_t alignment; - int r = head_obj->get_max_chunk_size(bucket->get_placement_rule(), + int r = head_obj->get_max_chunk_size(dpp, bucket->get_placement_rule(), &max_head_chunk_size, &alignment); if (r < 0) { return r; @@ -220,7 +220,7 @@ int AtomicObjectProcessor::prepare(optional_yield y) if (bucket->get_placement_rule() != tail_placement_rule) { if (!head_obj->placement_rules_match(bucket->get_placement_rule(), tail_placement_rule)) { same_pool = false; - r = head_obj->get_max_chunk_size(tail_placement_rule, &chunk_size); + r = head_obj->get_max_chunk_size(dpp, 
tail_placement_rule, &chunk_size); if (r < 0) { return r; } @@ -313,7 +313,7 @@ int AtomicObjectProcessor::complete(size_t accounted_size, return r; } - r = obj_op->write_meta(actual_size, accounted_size, y); + r = obj_op->write_meta(dpp, actual_size, accounted_size, y); if (r < 0) { return r; } @@ -362,7 +362,7 @@ int MultipartObjectProcessor::prepare_head() uint64_t stripe_size; uint64_t alignment; - int r = target_obj->get_max_chunk_size(tail_placement_rule, &chunk_size, &alignment); + int r = target_obj->get_max_chunk_size(dpp, tail_placement_rule, &chunk_size, &alignment); if (r < 0) { ldpp_dout(dpp, 0) << "ERROR: unexpected: get_max_chunk_size(): placement_rule=" << tail_placement_rule.to_str() << " obj=" << target_obj << " returned r=" << r << dendl; return r; @@ -440,7 +440,7 @@ int MultipartObjectProcessor::complete(size_t accounted_size, return r; } - r = obj_op->write_meta(actual_size, accounted_size, y); + r = obj_op->write_meta(dpp, actual_size, accounted_size, y); if (r < 0) return r; @@ -476,7 +476,7 @@ int MultipartObjectProcessor::complete(size_t accounted_size, bucket->get_object(rgw_obj_key(mp.get_meta(), std::string(), RGW_OBJ_NS_MULTIPART)); meta_obj->set_in_extra_data(true); - r = meta_obj->omap_set_val_by_key(p, bl, true, null_yield); + r = meta_obj->omap_set_val_by_key(dpp, p, bl, true, null_yield); if (r < 0) { return r == -ENOENT ? -ERR_NO_SUCH_UPLOAD : r; } @@ -504,7 +504,7 @@ int AppendObjectProcessor::process_first_chunk(bufferlist &&data, rgw::putobj::D int AppendObjectProcessor::prepare(optional_yield y) { RGWObjState *astate; - int r = head_obj->get_obj_state(&obj_ctx, *bucket, &astate, y); + int r = head_obj->get_obj_state(dpp, &obj_ctx, *bucket, &astate, y); if (r < 0) { return r; } @@ -571,7 +571,7 @@ int AppendObjectProcessor::prepare(optional_yield y) rgw_raw_obj stripe_obj = manifest_gen.get_cur_obj(store); uint64_t chunk_size = 0; - r = store->get_raw_chunk_size(stripe_obj, &chunk_size); + r = store->get_raw_chunk_size(dpp, stripe_obj, &chunk_size); if (r < 0) { return r; } @@ -611,7 +611,7 @@ int AppendObjectProcessor::complete(size_t accounted_size, const string &etag, c //For Append obj, disable versioning obj_op->params.versioning_disabled = true; if (cur_manifest) { - cur_manifest->append(manifest, store->svc()->zone); + cur_manifest->append(dpp, manifest, store->svc()->zone); obj_op->params.manifest = cur_manifest; } else { obj_op->params.manifest = &manifest; @@ -654,7 +654,7 @@ int AppendObjectProcessor::complete(size_t accounted_size, const string &etag, c if (r < 0) { return r; } - r = obj_op->write_meta(actual_size + cur_size, accounted_size + *cur_accounted_size, y); + r = obj_op->write_meta(dpp, actual_size + cur_size, accounted_size + *cur_accounted_size, y); if (r < 0) { return r; } diff --git a/src/rgw/rgw_quota.cc b/src/rgw/rgw_quota.cc index 49ec8f10517a7..6d821d8998451 100644 --- a/src/rgw/rgw_quota.cc +++ b/src/rgw/rgw_quota.cc @@ -65,7 +65,7 @@ protected: } }; - virtual int fetch_stats_from_storage(const rgw_user& user, const rgw_bucket& bucket, RGWStorageStats& stats, optional_yield y) = 0; + virtual int fetch_stats_from_storage(const rgw_user& user, const rgw_bucket& bucket, RGWStorageStats& stats, optional_yield y, const DoutPrefixProvider *dpp) = 0; virtual bool map_find(const rgw_user& user, const rgw_bucket& bucket, RGWQuotaCacheStats& qs) = 0; @@ -81,7 +81,7 @@ public: async_refcount->put_wait(); /* wait for all pending async requests to complete */ } - int get_stats(const rgw_user& user, const rgw_bucket& bucket, 
RGWStorageStats& stats, RGWQuotaInfo& quota, optional_yield y); + int get_stats(const rgw_user& user, const rgw_bucket& bucket, RGWStorageStats& stats, RGWQuotaInfo& quota, optional_yield y, const DoutPrefixProvider *dpp); void adjust_stats(const rgw_user& user, rgw_bucket& bucket, int objs_delta, uint64_t added_bytes, uint64_t removed_bytes); virtual bool can_use_cached_stats(RGWQuotaInfo& quota, RGWStorageStats& stats); @@ -196,7 +196,7 @@ void RGWQuotaCache::set_stats(const rgw_user& user, const rgw_bucket& bucket, } template -int RGWQuotaCache::get_stats(const rgw_user& user, const rgw_bucket& bucket, RGWStorageStats& stats, RGWQuotaInfo& quota, optional_yield y) { +int RGWQuotaCache::get_stats(const rgw_user& user, const rgw_bucket& bucket, RGWStorageStats& stats, RGWQuotaInfo& quota, optional_yield y, const DoutPrefixProvider *dpp) { RGWQuotaCacheStats qs; utime_t now = ceph_clock_now(); if (map_find(user, bucket, qs)) { @@ -216,7 +216,7 @@ int RGWQuotaCache::get_stats(const rgw_user& user, const rgw_bucket& bucket, } } - int ret = fetch_stats_from_storage(user, bucket, stats, y); + int ret = fetch_stats_from_storage(user, bucket, stats, y, dpp); if (ret < 0 && ret != -ENOENT) return ret; @@ -297,17 +297,18 @@ int BucketAsyncRefreshHandler::init_fetch() auto obj_ctx = store->svc()->sysobj->init_obj_ctx(); - int r = store->getRados()->get_bucket_instance_info(obj_ctx, bucket, bucket_info, NULL, NULL, null_yield); + const DoutPrefix dp(store->ctx(), dout_subsys, "rgw bucket async refresh handler: "); + int r = store->getRados()->get_bucket_instance_info(obj_ctx, bucket, bucket_info, NULL, NULL, null_yield, &dp); if (r < 0) { - ldout(store->ctx(), 0) << "could not get bucket info for bucket=" << bucket << " r=" << r << dendl; + ldpp_dout(&dp, 0) << "could not get bucket info for bucket=" << bucket << " r=" << r << dendl; return r; } - ldout(store->ctx(), 20) << "initiating async quota refresh for bucket=" << bucket << dendl; + ldpp_dout(&dp, 20) << "initiating async quota refresh for bucket=" << bucket << dendl; - r = store->getRados()->get_bucket_stats_async(bucket_info, RGW_NO_SHARD, this); + r = store->getRados()->get_bucket_stats_async(&dp, bucket_info, RGW_NO_SHARD, this); if (r < 0) { - ldout(store->ctx(), 0) << "could not get bucket info for bucket=" << bucket.name << dendl; + ldpp_dout(&dp, 0) << "could not get bucket info for bucket=" << bucket.name << dendl; /* get_bucket_stats_async() dropped our reference already */ return r; @@ -351,7 +352,7 @@ protected: stats_map.add(bucket, qs); } - int fetch_stats_from_storage(const rgw_user& user, const rgw_bucket& bucket, RGWStorageStats& stats, optional_yield y) override; + int fetch_stats_from_storage(const rgw_user& user, const rgw_bucket& bucket, RGWStorageStats& stats, optional_yield y, const DoutPrefixProvider *dpp) override; public: explicit RGWBucketStatsCache(rgw::sal::RGWRadosStore *_store) : RGWQuotaCache(_store, _store->ctx()->_conf->rgw_bucket_quota_cache_size) { @@ -362,15 +363,15 @@ public: } }; -int RGWBucketStatsCache::fetch_stats_from_storage(const rgw_user& user, const rgw_bucket& bucket, RGWStorageStats& stats, optional_yield y) +int RGWBucketStatsCache::fetch_stats_from_storage(const rgw_user& user, const rgw_bucket& bucket, RGWStorageStats& stats, optional_yield y, const DoutPrefixProvider *dpp) { RGWBucketInfo bucket_info; RGWSysObjectCtx obj_ctx = store->svc()->sysobj->init_obj_ctx(); - int r = store->getRados()->get_bucket_instance_info(obj_ctx, bucket, bucket_info, NULL, NULL, y); + int r = 
store->getRados()->get_bucket_instance_info(obj_ctx, bucket, bucket_info, NULL, NULL, y, dpp); if (r < 0) { - ldout(store->ctx(), 0) << "could not get bucket info for bucket=" << bucket << " r=" << r << dendl; + ldpp_dout(dpp, 0) << "could not get bucket info for bucket=" << bucket << " r=" << r << dendl; return r; } @@ -378,10 +379,10 @@ int RGWBucketStatsCache::fetch_stats_from_storage(const rgw_user& user, const rg string master_ver; map bucket_stats; - r = store->getRados()->get_bucket_stats(bucket_info, RGW_NO_SHARD, &bucket_ver, + r = store->getRados()->get_bucket_stats(dpp, bucket_info, RGW_NO_SHARD, &bucket_ver, &master_ver, bucket_stats, nullptr); if (r < 0) { - ldout(store->ctx(), 0) << "could not get bucket stats for bucket=" + ldpp_dout(dpp, 0) << "could not get bucket stats for bucket=" << bucket.name << dendl; return r; } @@ -401,12 +402,14 @@ int RGWBucketStatsCache::fetch_stats_from_storage(const rgw_user& user, const rg class UserAsyncRefreshHandler : public RGWQuotaCache::AsyncRefreshHandler, public RGWGetUserStats_CB { + const DoutPrefixProvider *dpp; rgw_bucket bucket; public: - UserAsyncRefreshHandler(rgw::sal::RGWRadosStore *_store, RGWQuotaCache *_cache, + UserAsyncRefreshHandler(const DoutPrefixProvider *_dpp, rgw::sal::RGWRadosStore *_store, RGWQuotaCache *_cache, const rgw_user& _user, const rgw_bucket& _bucket) : RGWQuotaCache::AsyncRefreshHandler(_store, _cache), RGWGetUserStats_CB(_user), + dpp(_dpp), bucket(_bucket) {} void drop_reference() override { put(); } @@ -416,10 +419,10 @@ public: int UserAsyncRefreshHandler::init_fetch() { - ldout(store->ctx(), 20) << "initiating async quota refresh for user=" << user << dendl; - int r = store->ctl()->user->read_stats_async(user, this); + ldpp_dout(dpp, 20) << "initiating async quota refresh for user=" << user << dendl; + int r = store->ctl()->user->read_stats_async(dpp, user, this); if (r < 0) { - ldout(store->ctx(), 0) << "could not get bucket info for user=" << user << dendl; + ldpp_dout(dpp, 0) << "could not get bucket info for user=" << user << dendl; /* get_bucket_stats_async() dropped our reference already */ return r; @@ -440,6 +443,7 @@ void UserAsyncRefreshHandler::handle_response(int r) } class RGWUserStatsCache : public RGWQuotaCache { + const DoutPrefixProvider *dpp; std::atomic down_flag = { false }; ceph::shared_mutex mutex = ceph::make_shared_mutex("RGWUserStatsCache"); map modified_buckets; @@ -466,7 +470,8 @@ class RGWUserStatsCache : public RGWQuotaCache { rgw_bucket bucket = iter->first; rgw_user& user = iter->second; ldout(cct, 20) << "BucketsSyncThread: sync user=" << user << " bucket=" << bucket << dendl; - int r = stats->sync_bucket(user, bucket, null_yield); + const DoutPrefix dp(cct, dout_subsys, "rgw bucket sync thread: "); + int r = stats->sync_bucket(user, bucket, null_yield, &dp); if (r < 0) { ldout(cct, 0) << "WARNING: sync_bucket() returned r=" << r << dendl; } @@ -511,7 +516,8 @@ class RGWUserStatsCache : public RGWQuotaCache { void *entry() override { ldout(cct, 20) << "UserSyncThread: start" << dendl; do { - int ret = stats->sync_all_users(null_yield); + const DoutPrefix dp(cct, dout_subsys, "rgw user sync thread: "); + int ret = stats->sync_all_users(&dp, null_yield); if (ret < 0) { ldout(cct, 5) << "ERROR: sync_all_users() returned ret=" << ret << dendl; } @@ -548,10 +554,10 @@ protected: stats_map.add(user, qs); } - int fetch_stats_from_storage(const rgw_user& user, const rgw_bucket& bucket, RGWStorageStats& stats, optional_yield y) override; - int sync_bucket(const rgw_user& 
rgw_user, rgw_bucket& bucket, optional_yield y); - int sync_user(const rgw_user& user, optional_yield y); - int sync_all_users(optional_yield y); + int fetch_stats_from_storage(const rgw_user& user, const rgw_bucket& bucket, RGWStorageStats& stats, optional_yield y, const DoutPrefixProvider *dpp) override; + int sync_bucket(const rgw_user& rgw_user, rgw_bucket& bucket, optional_yield y, const DoutPrefixProvider *dpp); + int sync_user(const DoutPrefixProvider *dpp, const rgw_user& user, optional_yield y); + int sync_all_users(const DoutPrefixProvider *dpp, optional_yield y); void data_modified(const rgw_user& user, rgw_bucket& bucket) override; @@ -573,8 +579,8 @@ protected: } public: - RGWUserStatsCache(rgw::sal::RGWRadosStore *_store, bool quota_threads) - : RGWQuotaCache(_store, _store->ctx()->_conf->rgw_bucket_quota_cache_size) + RGWUserStatsCache(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *_store, bool quota_threads) + : RGWQuotaCache(_store, _store->ctx()->_conf->rgw_bucket_quota_cache_size), dpp(dpp) { if (quota_threads) { buckets_sync_thread = new BucketsSyncThread(store->ctx(), this); @@ -591,7 +597,7 @@ public: } AsyncRefreshHandler *allocate_refresh_handler(const rgw_user& user, const rgw_bucket& bucket) override { - return new UserAsyncRefreshHandler(store, this, user, bucket); + return new UserAsyncRefreshHandler(dpp, store, this, user, bucket); } bool can_use_cached_stats(RGWQuotaInfo& quota, RGWStorageStats& stats) override { @@ -618,9 +624,10 @@ public: int RGWUserStatsCache::fetch_stats_from_storage(const rgw_user& user, const rgw_bucket& bucket, RGWStorageStats& stats, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { - int r = store->ctl()->user->read_stats(user, &stats, y); + int r = store->ctl()->user->read_stats(dpp, user, &stats, y); if (r < 0) { ldout(store->ctx(), 0) << "could not get user stats for user=" << user << dendl; return r; @@ -629,34 +636,34 @@ int RGWUserStatsCache::fetch_stats_from_storage(const rgw_user& user, return 0; } -int RGWUserStatsCache::sync_bucket(const rgw_user& user, rgw_bucket& bucket, optional_yield y) +int RGWUserStatsCache::sync_bucket(const rgw_user& user, rgw_bucket& bucket, optional_yield y, const DoutPrefixProvider *dpp) { RGWBucketInfo bucket_info; - int r = store->ctl()->bucket->read_bucket_instance_info(bucket, &bucket_info, y); + int r = store->ctl()->bucket->read_bucket_instance_info(bucket, &bucket_info, y, dpp); if (r < 0) { ldout(store->ctx(), 0) << "could not get bucket info for bucket=" << bucket << " r=" << r << dendl; return r; } RGWBucketEnt ent; - r = store->ctl()->bucket->sync_user_stats(user, bucket_info, y, &ent); + r = store->ctl()->bucket->sync_user_stats(dpp, user, bucket_info, y, &ent); if (r < 0) { ldout(store->ctx(), 0) << "ERROR: sync_user_stats() for user=" << user << ", bucket=" << bucket << " returned " << r << dendl; return r; } - return store->getRados()->check_bucket_shards(bucket_info, bucket, ent.count); + return store->getRados()->check_bucket_shards(bucket_info, bucket, ent.count, dpp); } -int RGWUserStatsCache::sync_user(const rgw_user& user, optional_yield y) +int RGWUserStatsCache::sync_user(const DoutPrefixProvider *dpp, const rgw_user& user, optional_yield y) { string user_str = user.to_str(); RGWStorageStats stats; ceph::real_time last_stats_sync; ceph::real_time last_stats_update; - int ret = store->ctl()->user->read_stats(rgw_user(user_str), &stats, y, &last_stats_sync, &last_stats_update); + int ret = store->ctl()->user->read_stats(dpp, 
rgw_user(user_str), &stats, y, &last_stats_sync, &last_stats_update); if (ret < 0) { ldout(store->ctx(), 5) << "ERROR: can't read user header: ret=" << ret << dendl; return ret; @@ -674,7 +681,7 @@ int RGWUserStatsCache::sync_user(const rgw_user& user, optional_yield y) // check if enough time passed since last full sync /* FIXME: missing check? */ - ret = rgw_user_sync_all_stats(store, user, y); + ret = rgw_user_sync_all_stats(dpp, store, user, y); if (ret < 0) { ldout(store->ctx(), 0) << "ERROR: failed user stats sync, ret=" << ret << dendl; return ret; @@ -683,14 +690,14 @@ int RGWUserStatsCache::sync_user(const rgw_user& user, optional_yield y) return 0; } -int RGWUserStatsCache::sync_all_users(optional_yield y) +int RGWUserStatsCache::sync_all_users(const DoutPrefixProvider *dpp, optional_yield y) { string key = "user"; void *handle; - int ret = store->ctl()->meta.mgr->list_keys_init(key, &handle); + int ret = store->ctl()->meta.mgr->list_keys_init(dpp, key, &handle); if (ret < 0) { - ldout(store->ctx(), 10) << "ERROR: can't get key: ret=" << ret << dendl; + ldpp_dout(dpp, 10) << "ERROR: can't get key: ret=" << ret << dendl; return ret; } @@ -701,17 +708,17 @@ int RGWUserStatsCache::sync_all_users(optional_yield y) list keys; ret = store->ctl()->meta.mgr->list_keys_next(handle, max, keys, &truncated); if (ret < 0) { - ldout(store->ctx(), 0) << "ERROR: lists_keys_next(): ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: lists_keys_next(): ret=" << ret << dendl; goto done; } for (list::iterator iter = keys.begin(); iter != keys.end() && !going_down(); ++iter) { rgw_user user(*iter); - ldout(store->ctx(), 20) << "RGWUserStatsCache: sync user=" << user << dendl; - int ret = sync_user(user, y); + ldpp_dout(dpp, 20) << "RGWUserStatsCache: sync user=" << user << dendl; + int ret = sync_user(dpp, user, y); if (ret < 0) { - ldout(store->ctx(), 5) << "ERROR: sync_user() failed, user=" << user << " ret=" << ret << dendl; + ldpp_dout(dpp, 5) << "ERROR: sync_user() failed, user=" << user << " ret=" << ret << dendl; /* continuing to next user */ continue; @@ -922,9 +929,9 @@ class RGWQuotaHandlerImpl : public RGWQuotaHandler { return 0; } public: - RGWQuotaHandlerImpl(rgw::sal::RGWRadosStore *_store, bool quota_threads) : store(_store), + RGWQuotaHandlerImpl(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *_store, bool quota_threads) : store(_store), bucket_stats_cache(_store), - user_stats_cache(_store, quota_threads) {} + user_stats_cache(dpp, _store, quota_threads) {} int check_quota(const rgw_user& user, rgw_bucket& bucket, @@ -944,10 +951,11 @@ public: * fetch that info and not rely on cached data */ + const DoutPrefix dp(store->ctx(), dout_subsys, "rgw quota handler: "); if (bucket_quota.enabled) { RGWStorageStats bucket_stats; int ret = bucket_stats_cache.get_stats(user, bucket, bucket_stats, - bucket_quota, y); + bucket_quota, y, &dp); if (ret < 0) { return ret; } @@ -960,7 +968,7 @@ public: if (user_quota.enabled) { RGWStorageStats user_stats; int ret = user_stats_cache.get_stats(user, bucket, user_stats, - user_quota, y); + user_quota, y, &dp); if (ret < 0) { return ret; } @@ -994,9 +1002,9 @@ public: }; -RGWQuotaHandler *RGWQuotaHandler::generate_handler(rgw::sal::RGWRadosStore *store, bool quota_threads) +RGWQuotaHandler *RGWQuotaHandler::generate_handler(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, bool quota_threads) { - return new RGWQuotaHandlerImpl(store, quota_threads); + return new RGWQuotaHandlerImpl(dpp, store, quota_threads); } void 
RGWQuotaHandler::free_handler(RGWQuotaHandler *handler) diff --git a/src/rgw/rgw_quota.h b/src/rgw/rgw_quota.h index 6901aa913a561..a1e446db16a6c 100644 --- a/src/rgw/rgw_quota.h +++ b/src/rgw/rgw_quota.h @@ -115,7 +115,7 @@ public: virtual void update_stats(const rgw_user& bucket_owner, rgw_bucket& bucket, int obj_delta, uint64_t added_bytes, uint64_t removed_bytes) = 0; - static RGWQuotaHandler *generate_handler(rgw::sal::RGWRadosStore *store, bool quota_threads); + static RGWQuotaHandler *generate_handler(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, bool quota_threads); static void free_handler(RGWQuotaHandler *handler); }; diff --git a/src/rgw/rgw_rados.cc b/src/rgw/rgw_rados.cc index 64587f0a6f89d..fbd6e81331219 100644 --- a/src/rgw/rgw_rados.cc +++ b/src/rgw/rgw_rados.cc @@ -301,7 +301,7 @@ public: http_manager.start(); } - int notify_all(map& conn_map, set& shards) { + int notify_all(const DoutPrefixProvider *dpp, map& conn_map, set& shards) { rgw_http_param_pair pairs[] = { { "type", "metadata" }, { "notify", NULL }, { NULL, NULL } }; @@ -314,7 +314,7 @@ public: stacks.push_back(stack); } - return run(stacks); + return run(dpp, stacks); } }; @@ -328,7 +328,7 @@ public: http_manager.start(); } - int notify_all(map& conn_map, + int notify_all(const DoutPrefixProvider *dpp, map& conn_map, bc::flat_map >& shards) { rgw_http_param_pair pairs[] = { { "type", "data" }, { "notify", NULL }, @@ -343,7 +343,7 @@ public: stacks.push_back(stack); } - return run(stacks); + return run(dpp, stacks); } }; @@ -373,9 +373,9 @@ void *RGWRadosThread::Worker::entry() { do { auto start = ceph::real_clock::now(); - int r = processor->process(); + int r = processor->process(this); if (r < 0) { - dout(0) << "ERROR: processor->process() returned error r=" << r << dendl; + ldpp_dout(this, 0) << "ERROR: processor->process() returned error r=" << r << dendl; } if (processor->going_down()) @@ -417,10 +417,10 @@ public: RGWMetaNotifier(RGWRados *_store, RGWMetadataLog* log) : RGWRadosThread(_store, "meta-notifier"), notify_mgr(_store), log(log) {} - int process() override; + int process(const DoutPrefixProvider *dpp) override; }; -int RGWMetaNotifier::process() +int RGWMetaNotifier::process(const DoutPrefixProvider *dpp) { set shards; @@ -431,10 +431,10 @@ int RGWMetaNotifier::process() } for (set::iterator iter = shards.begin(); iter != shards.end(); ++iter) { - ldout(cct, 20) << __func__ << "(): notifying mdlog change, shard_id=" << *iter << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): notifying mdlog change, shard_id=" << *iter << dendl; } - notify_mgr.notify_all(store->svc.zone->get_zone_conn_map(), shards); + notify_mgr.notify_all(dpp, store->svc.zone->get_zone_conn_map(), shards); return 0; } @@ -451,10 +451,10 @@ class RGWDataNotifier : public RGWRadosThread { public: RGWDataNotifier(RGWRados *_store) : RGWRadosThread(_store, "data-notifier"), notify_mgr(_store) {} - int process() override; + int process(const DoutPrefixProvider *dpp) override; }; -int RGWDataNotifier::process() +int RGWDataNotifier::process(const DoutPrefixProvider *dpp) { auto data_log = store->svc.datalog_rados; if (!data_log) { @@ -468,11 +468,11 @@ int RGWDataNotifier::process() } for (const auto& [shard_id, keys] : shards) { - ldout(cct, 20) << __func__ << "(): notifying datalog change, shard_id=" + ldpp_dout(dpp, 20) << __func__ << "(): notifying datalog change, shard_id=" << shard_id << ": " << keys << dendl; } - notify_mgr.notify_all(store->svc.zone->get_zone_data_notify_to_map(), shards); + 
notify_mgr.notify_all(dpp, store->svc.zone->get_zone_data_notify_to_map(), shards); return 0; } @@ -482,8 +482,8 @@ public: RGWSyncProcessorThread(RGWRados *_store, const string& thread_name = "radosgw") : RGWRadosThread(_store, thread_name) {} RGWSyncProcessorThread(RGWRados *_store) : RGWRadosThread(_store) {} ~RGWSyncProcessorThread() override {} - int init() override = 0 ; - int process() override = 0; + int init(const DoutPrefixProvider *dpp) override = 0 ; + int process(const DoutPrefixProvider *dpp) override = 0; }; class RGWMetaSyncProcessorThread : public RGWSyncProcessorThread @@ -507,17 +507,17 @@ public: } RGWMetaSyncStatusManager* get_manager() { return &sync; } - int init() override { - int ret = sync.init(); + int init(const DoutPrefixProvider *dpp) override { + int ret = sync.init(dpp); if (ret < 0) { - ldout(store->ctx(), 0) << "ERROR: sync.init() returned " << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: sync.init() returned " << ret << dendl; return ret; } return 0; } - int process() override { - sync.run(null_yield); + int process(const DoutPrefixProvider *dpp) override { + sync.run(dpp, null_yield); return 0; } }; @@ -554,16 +554,16 @@ public: } RGWDataSyncStatusManager* get_manager() { return &sync; } - int init() override { + int init(const DoutPrefixProvider *dpp) override { return 0; } - int process() override { + int process(const DoutPrefixProvider *dpp) override { while (!initialized) { if (going_down()) { return 0; } - int ret = sync.init(); + int ret = sync.init(dpp); if (ret >= 0) { initialized = true; break; @@ -571,7 +571,7 @@ public: /* we'll be back! */ return 0; } - sync.run(); + sync.run(dpp); return 0; } }; @@ -596,10 +596,10 @@ public: trim_interval(interval, 0) {} - int init() override { + int init(const DoutPrefixProvider *dpp) override { return http.start(); } - int process() override { + int process(const DoutPrefixProvider *dpp) override { list stacks; auto meta = new RGWCoroutinesStack(store->ctx(), &crs); meta->call(create_meta_log_trim_cr(this, store, &http, @@ -609,7 +609,7 @@ public: if (store->svc()->zone->sync_module_exports_data()) { auto data = new RGWCoroutinesStack(store->ctx(), &crs); - data->call(create_data_log_trim_cr(store, &http, + data->call(create_data_log_trim_cr(this, store, &http, cct->_conf->rgw_data_log_num_shards, trim_interval)); stacks.push_back(data); @@ -619,7 +619,7 @@ public: stacks.push_back(bucket); } - crs.run(stacks); + crs.run(dpp, stacks); return 0; } @@ -679,10 +679,10 @@ RGWDataSyncStatusManager* RGWRados::get_data_sync_manager(const rgw_zone_id& sou return thread->second->get_manager(); } -int RGWRados::get_required_alignment(const rgw_pool& pool, uint64_t *alignment) +int RGWRados::get_required_alignment(const DoutPrefixProvider *dpp, const rgw_pool& pool, uint64_t *alignment) { IoCtx ioctx; - int r = open_pool_ctx(pool, ioctx, false); + int r = open_pool_ctx(dpp, pool, ioctx, false); if (r < 0) { ldout(cct, 0) << "ERROR: open_pool_ctx() returned " << r << dendl; return r; @@ -730,10 +730,10 @@ void RGWRados::get_max_aligned_size(uint64_t size, uint64_t alignment, uint64_t *max_size = size - (size % alignment); } -int RGWRados::get_max_chunk_size(const rgw_pool& pool, uint64_t *max_chunk_size, uint64_t *palignment) +int RGWRados::get_max_chunk_size(const rgw_pool& pool, uint64_t *max_chunk_size, const DoutPrefixProvider *dpp, uint64_t *palignment) { uint64_t alignment; - int r = get_required_alignment(pool, &alignment); + int r = get_required_alignment(dpp, pool, &alignment); if (r < 0) { return r; } @@ 
-746,20 +746,20 @@ int RGWRados::get_max_chunk_size(const rgw_pool& pool, uint64_t *max_chunk_size, get_max_aligned_size(config_chunk_size, alignment, max_chunk_size); - ldout(cct, 20) << "max_chunk_size=" << *max_chunk_size << dendl; + ldpp_dout(dpp, 20) << "max_chunk_size=" << *max_chunk_size << dendl; return 0; } int RGWRados::get_max_chunk_size(const rgw_placement_rule& placement_rule, const rgw_obj& obj, - uint64_t *max_chunk_size, uint64_t *palignment) + uint64_t *max_chunk_size, const DoutPrefixProvider *dpp, uint64_t *palignment) { rgw_pool pool; if (!get_obj_data_pool(placement_rule, obj, &pool)) { - ldout(cct, 0) << "ERROR: failed to get data pool for object " << obj << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to get data pool for object " << obj << dendl; return -EIO; } - return get_max_chunk_size(pool, max_chunk_size, palignment); + return get_max_chunk_size(pool, max_chunk_size, dpp, palignment); } class RGWIndexCompletionManager; @@ -788,7 +788,7 @@ struct complete_op_data { } }; -class RGWIndexCompletionThread : public RGWRadosThread { +class RGWIndexCompletionThread : public RGWRadosThread, public DoutPrefixProvider { RGWRados *store; uint64_t interval_msec() override { @@ -803,7 +803,7 @@ public: RGWIndexCompletionThread(RGWRados *_store) : RGWRadosThread(_store, "index-complete"), store(_store) {} - int process() override; + int process(const DoutPrefixProvider *dpp) override; void add_completion(complete_op_data *completion) { { @@ -813,9 +813,13 @@ public: signal(); } + + CephContext *get_cct() const override { return store->ctx(); } + unsigned get_subsys() const { return dout_subsys; } + std::ostream& gen_prefix(std::ostream& out) const { return out << "rgw index completion thread: "; } }; -int RGWIndexCompletionThread::process() +int RGWIndexCompletionThread::process(const DoutPrefixProvider *dpp) { list comps; @@ -830,34 +834,34 @@ int RGWIndexCompletionThread::process() if (going_down()) { continue; } - ldout(store->ctx(), 20) << __func__ << "(): handling completion for key=" << c->key << dendl; + ldpp_dout(this, 20) << __func__ << "(): handling completion for key=" << c->key << dendl; RGWRados::BucketShard bs(store); RGWBucketInfo bucket_info; - int r = bs.init(c->obj.bucket, c->obj, &bucket_info); + int r = bs.init(c->obj.bucket, c->obj, &bucket_info, this); if (r < 0) { - ldout(cct, 0) << "ERROR: " << __func__ << "(): failed to initialize BucketShard, obj=" << c->obj << " r=" << r << dendl; + ldpp_dout(this, 0) << "ERROR: " << __func__ << "(): failed to initialize BucketShard, obj=" << c->obj << " r=" << r << dendl; /* not much to do */ continue; } - r = store->guard_reshard(&bs, c->obj, bucket_info, + r = store->guard_reshard(this, &bs, c->obj, bucket_info, [&](RGWRados::BucketShard *bs) -> int { librados::ObjectWriteOperation o; cls_rgw_guard_bucket_resharding(o, -ERR_BUSY_RESHARDING); cls_rgw_bucket_complete_op(o, c->op, c->tag, c->ver, c->key, c->dir_meta, &c->remove_objs, c->log_op, c->bilog_op, &c->zones_trace); - return bs->bucket_obj.operate(&o, null_yield); + return bs->bucket_obj.operate(this, &o, null_yield); }); if (r < 0) { - ldout(cct, 0) << "ERROR: " << __func__ << "(): bucket index completion failed, obj=" << c->obj << " r=" << r << dendl; + ldpp_dout(this, 0) << "ERROR: " << __func__ << "(): bucket index completion failed, obj=" << c->obj << " r=" << r << dendl; /* ignoring error, can't do anything about it */ continue; } - r = store->svc.datalog_rados->add_entry(bucket_info, bs.shard_id); + r = store->svc.datalog_rados->add_entry(this, 
bucket_info, bs.shard_id); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed writing data log" << dendl; + ldpp_dout(this, -1) << "ERROR: failed writing data log" << dendl; } } @@ -910,9 +914,9 @@ public: complete_op_data **result); bool handle_completion(completion_t cb, complete_op_data *arg); - int start() { + int start(const DoutPrefixProvider *dpp) { completion_thread = new RGWIndexCompletionThread(store); - int ret = completion_thread->init(); + int ret = completion_thread->init(dpp); if (ret < 0) { return ret; } @@ -1160,7 +1164,7 @@ int RGWRados::update_service_map(std::map&& status) * Initialize the RADOS instance and prepare to do other ops * Returns 0 on success, -ERR# on failure. */ -int RGWRados::init_complete() +int RGWRados::init_complete(const DoutPrefixProvider *dpp) { int ret; @@ -1169,27 +1173,27 @@ int RGWRados::init_complete() */ sync_module = svc.sync_modules->get_sync_module(); - ret = open_root_pool_ctx(); + ret = open_root_pool_ctx(dpp); if (ret < 0) return ret; - ret = open_gc_pool_ctx(); + ret = open_gc_pool_ctx(dpp); if (ret < 0) return ret; - ret = open_lc_pool_ctx(); + ret = open_lc_pool_ctx(dpp); if (ret < 0) return ret; - ret = open_objexp_pool_ctx(); + ret = open_objexp_pool_ctx(dpp); if (ret < 0) return ret; - ret = open_reshard_pool_ctx(); + ret = open_reshard_pool_ctx(dpp); if (ret < 0) return ret; - ret = open_notif_pool_ctx(); + ret = open_notif_pool_ctx(dpp); if (ret < 0) return ret; @@ -1234,16 +1238,16 @@ int RGWRados::init_complete() for (const auto &pt: zonegroup.placement_targets) { if (zone_params.placement_pools.find(pt.second.name) == zone_params.placement_pools.end()){ - ldout(cct, 0) << "WARNING: This zone does not contain the placement target " + ldpp_dout(dpp, 0) << "WARNING: This zone does not contain the placement target " << pt.second.name << " present in zonegroup" << dendl; } } auto async_processor = svc.rados->get_async_processor(); std::lock_guard l{meta_sync_thread_lock}; meta_sync_processor_thread = new RGWMetaSyncProcessorThread(this->store, async_processor); - ret = meta_sync_processor_thread->init(); + ret = meta_sync_processor_thread->init(dpp); if (ret < 0) { - ldout(cct, 0) << "ERROR: failed to initialize meta sync thread" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to initialize meta sync thread" << dendl; return ret; } meta_sync_processor_thread->start(); @@ -1255,18 +1259,18 @@ int RGWRados::init_complete() bucket_trim.emplace(this->store, config); ret = bucket_trim->init(); if (ret < 0) { - ldout(cct, 0) << "ERROR: failed to start bucket trim manager" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to start bucket trim manager" << dendl; return ret; } svc.datalog_rados->set_observer(&*bucket_trim); std::lock_guard dl{data_sync_thread_lock}; for (auto source_zone : svc.zone->get_data_sync_source_zones()) { - ldout(cct, 5) << "starting data sync thread for zone " << source_zone->name << dendl; + ldpp_dout(dpp, 5) << "starting data sync thread for zone " << source_zone->name << dendl; auto *thread = new RGWDataSyncProcessorThread(this->store, svc.rados->get_async_processor(), source_zone); - ret = thread->init(); + ret = thread->init(dpp); if (ret < 0) { - ldout(cct, 0) << "ERROR: failed to initialize data sync thread" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to initialize data sync thread" << dendl; return ret; } thread->start(); @@ -1275,9 +1279,9 @@ int RGWRados::init_complete() auto interval = cct->_conf->rgw_sync_log_trim_interval; if (interval > 0) { sync_log_trimmer = new 
RGWSyncLogTrimThread(this->store, &*bucket_trim, interval); - ret = sync_log_trimmer->init(); + ret = sync_log_trimmer->init(dpp); if (ret < 0) { - ldout(cct, 0) << "ERROR: failed to initialize sync log trim thread" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to initialize sync log trim thread" << dendl; return ret; } sync_log_trimmer->start(); @@ -1295,16 +1299,16 @@ int RGWRados::init_complete() if (use_lc_thread) lc->start_processor(); - quota_handler = RGWQuotaHandler::generate_handler(this->store, quota_threads); + quota_handler = RGWQuotaHandler::generate_handler(dpp, this->store, quota_threads); bucket_index_max_shards = (cct->_conf->rgw_override_bucket_index_max_shards ? cct->_conf->rgw_override_bucket_index_max_shards : zone.bucket_index_max_shards); if (bucket_index_max_shards > get_max_bucket_shards()) { bucket_index_max_shards = get_max_bucket_shards(); - ldout(cct, 1) << __func__ << " bucket index max shards is too large, reset to value: " + ldpp_dout(dpp, 1) << __func__ << " bucket index max shards is too large, reset to value: " << get_max_bucket_shards() << dendl; } - ldout(cct, 20) << __func__ << " bucket index max shards: " << bucket_index_max_shards << dendl; + ldpp_dout(dpp, 20) << __func__ << " bucket index max shards: " << bucket_index_max_shards << dendl; bool need_tombstone_cache = !svc.zone->get_zone_data_notify_to_map().empty(); /* have zones syncing from us */ @@ -1323,37 +1327,37 @@ int RGWRados::init_complete() } index_completion_manager = new RGWIndexCompletionManager(this); - ret = index_completion_manager->start(); + ret = index_completion_manager->start(dpp); if (ret < 0) { return ret; } - ret = rgw::notify::init(cct, store); + ret = rgw::notify::init(cct, store, dpp); if (ret < 0 ) { - ldout(cct, 1) << "ERROR: failed to initialize notification manager" << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to initialize notification manager" << dendl; } return ret; } -int RGWRados::init_svc(bool raw) +int RGWRados::init_svc(bool raw, const DoutPrefixProvider *dpp) { if (raw) { - return svc.init_raw(cct, use_cache, null_yield); + return svc.init_raw(cct, use_cache, null_yield, dpp); } - return svc.init(cct, use_cache, run_sync_thread, null_yield); + return svc.init(cct, use_cache, run_sync_thread, null_yield, dpp); } -int RGWRados::init_ctl() +int RGWRados::init_ctl(const DoutPrefixProvider *dpp) { - return ctl.init(&svc); + return ctl.init(&svc, dpp); } /** * Initialize the RADOS instance and prepare to do other ops * Returns 0 on success, -ERR# on failure. 
*/ -int RGWRados::initialize() +int RGWRados::initialize(const DoutPrefixProvider *dpp) { int ret; @@ -1361,15 +1365,15 @@ int RGWRados::initialize() cct->_conf.get_val("rgw_inject_notify_timeout_probability"); max_notify_retries = cct->_conf.get_val("rgw_max_notify_retries"); - ret = init_svc(false); + ret = init_svc(false, dpp); if (ret < 0) { - ldout(cct, 0) << "ERROR: failed to init services (ret=" << cpp_strerror(-ret) << ")" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to init services (ret=" << cpp_strerror(-ret) << ")" << dendl; return ret; } - ret = init_ctl(); + ret = init_ctl(dpp); if (ret < 0) { - ldout(cct, 0) << "ERROR: failed to init ctls (ret=" << cpp_strerror(-ret) << ")" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to init ctls (ret=" << cpp_strerror(-ret) << ")" << dendl; return ret; } @@ -1379,48 +1383,48 @@ int RGWRados::initialize() if (ret < 0) return ret; - return init_complete(); + return init_complete(dpp); } /** * Open the pool used as root for this gateway * Returns: 0 on success, -ERR# otherwise. */ -int RGWRados::open_root_pool_ctx() +int RGWRados::open_root_pool_ctx(const DoutPrefixProvider *dpp) { - return rgw_init_ioctx(get_rados_handle(), svc.zone->get_zone_params().domain_root, root_pool_ctx, true, true); + return rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().domain_root, root_pool_ctx, true, true); } -int RGWRados::open_gc_pool_ctx() +int RGWRados::open_gc_pool_ctx(const DoutPrefixProvider *dpp) { - return rgw_init_ioctx(get_rados_handle(), svc.zone->get_zone_params().gc_pool, gc_pool_ctx, true, true); + return rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().gc_pool, gc_pool_ctx, true, true); } -int RGWRados::open_lc_pool_ctx() +int RGWRados::open_lc_pool_ctx(const DoutPrefixProvider *dpp) { - return rgw_init_ioctx(get_rados_handle(), svc.zone->get_zone_params().lc_pool, lc_pool_ctx, true, true); + return rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().lc_pool, lc_pool_ctx, true, true); } -int RGWRados::open_objexp_pool_ctx() +int RGWRados::open_objexp_pool_ctx(const DoutPrefixProvider *dpp) { - return rgw_init_ioctx(get_rados_handle(), svc.zone->get_zone_params().log_pool, objexp_pool_ctx, true, true); + return rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().log_pool, objexp_pool_ctx, true, true); } -int RGWRados::open_reshard_pool_ctx() +int RGWRados::open_reshard_pool_ctx(const DoutPrefixProvider *dpp) { - return rgw_init_ioctx(get_rados_handle(), svc.zone->get_zone_params().reshard_pool, reshard_pool_ctx, true, true); + return rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().reshard_pool, reshard_pool_ctx, true, true); } -int RGWRados::open_notif_pool_ctx() +int RGWRados::open_notif_pool_ctx(const DoutPrefixProvider *dpp) { - return rgw_init_ioctx(get_rados_handle(), svc.zone->get_zone_params().notif_pool, notif_pool_ctx, true, true); + return rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().notif_pool, notif_pool_ctx, true, true); } -int RGWRados::open_pool_ctx(const rgw_pool& pool, librados::IoCtx& io_ctx, +int RGWRados::open_pool_ctx(const DoutPrefixProvider *dpp, const rgw_pool& pool, librados::IoCtx& io_ctx, bool mostly_omap) { constexpr bool create = true; // create the pool if it doesn't exist - return rgw_init_ioctx(get_rados_handle(), pool, io_ctx, create, mostly_omap); + return rgw_init_ioctx(dpp, get_rados_handle(), pool, io_ctx, create, mostly_omap); } /**** logs ****/ @@ -1431,10 +1435,10 @@ struct log_list_state 
{ librados::NObjectIterator obit; }; -int RGWRados::log_list_init(const string& prefix, RGWAccessHandle *handle) +int RGWRados::log_list_init(const DoutPrefixProvider *dpp, const string& prefix, RGWAccessHandle *handle) { log_list_state *state = new log_list_state; - int r = rgw_init_ioctx(get_rados_handle(), svc.zone->get_zone_params().log_pool, state->io_ctx); + int r = rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().log_pool, state->io_ctx); if (r < 0) { delete state; return r; @@ -1465,10 +1469,10 @@ int RGWRados::log_list_next(RGWAccessHandle handle, string *name) return 0; } -int RGWRados::log_remove(const string& name) +int RGWRados::log_remove(const DoutPrefixProvider *dpp, const string& name) { librados::IoCtx io_ctx; - int r = rgw_init_ioctx(get_rados_handle(), svc.zone->get_zone_params().log_pool, io_ctx); + int r = rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().log_pool, io_ctx); if (r < 0) return r; return io_ctx.remove(name); @@ -1484,10 +1488,10 @@ struct log_show_state { log_show_state() : pos(0), eof(false) {} }; -int RGWRados::log_show_init(const string& name, RGWAccessHandle *handle) +int RGWRados::log_show_init(const DoutPrefixProvider *dpp, const string& name, RGWAccessHandle *handle) { log_show_state *state = new log_show_state; - int r = rgw_init_ioctx(get_rados_handle(), svc.zone->get_zone_params().log_pool, state->io_ctx); + int r = rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().log_pool, state->io_ctx); if (r < 0) { delete state; return r; @@ -1569,7 +1573,7 @@ static void usage_log_hash(CephContext *cct, const string& name, string& hash, u hash = buf; } -int RGWRados::log_usage(map& usage_info) +int RGWRados::log_usage(const DoutPrefixProvider *dpp, map& usage_info) { uint32_t index = 0; @@ -1585,7 +1589,7 @@ int RGWRados::log_usage(map& usage_info) RGWUsageBatch& info = iter->second; if (ub.user.empty()) { - ldout(cct, 0) << "WARNING: RGWRados::log_usage(): user name empty (bucket=" << ub.bucket << "), skipping" << dendl; + ldpp_dout(dpp, 0) << "WARNING: RGWRados::log_usage(): user name empty (bucket=" << ub.bucket << "), skipping" << dendl; continue; } @@ -1606,14 +1610,14 @@ int RGWRados::log_usage(map& usage_info) map::iterator liter; for (liter = log_objs.begin(); liter != log_objs.end(); ++liter) { - int r = cls_obj_usage_log_add(liter->first, liter->second); + int r = cls_obj_usage_log_add(dpp, liter->first, liter->second); if (r < 0) return r; } return 0; } -int RGWRados::read_usage(const rgw_user& user, const string& bucket_name, uint64_t start_epoch, uint64_t end_epoch, +int RGWRados::read_usage(const DoutPrefixProvider *dpp, const rgw_user& user, const string& bucket_name, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, bool *is_truncated, RGWUsageIter& usage_iter, map& usage) { @@ -1634,7 +1638,7 @@ int RGWRados::read_usage(const rgw_user& user, const string& bucket_name, uint64 map ret_usage; map::iterator iter; - int ret = cls_obj_usage_log_read(hash, user_str, bucket_name, start_epoch, end_epoch, num, + int ret = cls_obj_usage_log_read(dpp, hash, user_str, bucket_name, start_epoch, end_epoch, num, usage_iter.read_iter, ret_usage, is_truncated); if (ret == -ENOENT) goto next; @@ -1657,7 +1661,7 @@ next: return 0; } -int RGWRados::trim_usage(const rgw_user& user, const string& bucket_name, uint64_t start_epoch, uint64_t end_epoch) +int RGWRados::trim_usage(const DoutPrefixProvider *dpp, const rgw_user& user, const string& bucket_name, uint64_t start_epoch, uint64_t 
end_epoch) { uint32_t index = 0; string hash, first_hash; @@ -1666,7 +1670,7 @@ int RGWRados::trim_usage(const rgw_user& user, const string& bucket_name, uint64 hash = first_hash; do { - int ret = cls_obj_usage_log_trim(hash, user_str, bucket_name, start_epoch, end_epoch); + int ret = cls_obj_usage_log_trim(dpp, hash, user_str, bucket_name, start_epoch, end_epoch); if (ret < 0 && ret != -ENOENT) return ret; @@ -1678,15 +1682,15 @@ int RGWRados::trim_usage(const rgw_user& user, const string& bucket_name, uint64 } -int RGWRados::clear_usage() +int RGWRados::clear_usage(const DoutPrefixProvider *dpp) { auto max_shards = cct->_conf->rgw_usage_max_shards; int ret=0; for (unsigned i=0; i < max_shards; i++){ string oid = RGW_USAGE_OBJ_PREFIX + to_string(i); - ret = cls_obj_usage_log_clear(oid); + ret = cls_obj_usage_log_clear(dpp, oid); if (ret < 0){ - ldout(cct,0) << "usage clear on oid="<< oid << "failed with ret=" << ret << dendl; + ldpp_dout(dpp,0) << "usage clear on oid="<< oid << "failed with ret=" << ret << dendl; return ret; } } @@ -1707,7 +1711,7 @@ int RGWRados::decode_policy(bufferlist& bl, ACLOwner *owner) return 0; } -int rgw_policy_from_attrset(CephContext *cct, map& attrset, RGWAccessControlPolicy *policy) +int rgw_policy_from_attrset(const DoutPrefixProvider *dpp, CephContext *cct, map& attrset, RGWAccessControlPolicy *policy) { map::iterator aiter = attrset.find(RGW_ATTR_ACL); if (aiter == attrset.end()) @@ -1718,12 +1722,12 @@ int rgw_policy_from_attrset(CephContext *cct, map& attrset, try { policy->decode(iter); } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl; + ldpp_dout(dpp, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl; return -EIO; } if (cct->_conf->subsys.should_gather()) { RGWAccessControlPolicy_S3 *s3policy = static_cast(policy); - ldout(cct, 15) << __func__ << " Read AccessControlPolicy"; + ldpp_dout(dpp, 15) << __func__ << " Read AccessControlPolicy"; s3policy->to_xml(*_dout); *_dout << dendl; } @@ -1731,7 +1735,7 @@ int rgw_policy_from_attrset(CephContext *cct, map& attrset, } -int RGWRados::Bucket::update_bucket_id(const string& new_bucket_id) +int RGWRados::Bucket::update_bucket_id(const string& new_bucket_id, const DoutPrefixProvider *dpp) { rgw_bucket bucket = bucket_info.bucket; bucket.update_bucket_id(new_bucket_id); @@ -1739,7 +1743,7 @@ int RGWRados::Bucket::update_bucket_id(const string& new_bucket_id) auto obj_ctx = store->svc.sysobj->init_obj_ctx(); bucket_info.objv_tracker.clear(); - int ret = store->get_bucket_instance_info(obj_ctx, bucket, bucket_info, nullptr, nullptr, null_yield); + int ret = store->get_bucket_instance_info(obj_ctx, bucket, bucket_info, nullptr, nullptr, null_yield, dpp); if (ret < 0) { return ret; } @@ -1766,6 +1770,7 @@ int RGWRados::Bucket::update_bucket_id(const string& new_bucket_id) * max, then truncated. 
*/ int RGWRados::Bucket::List::list_objects_ordered( + const DoutPrefixProvider *dpp, int64_t max_p, vector *result, map *common_prefixes, @@ -1819,12 +1824,12 @@ int RGWRados::Bucket::List::list_objects_ordered( rgw_obj_index_key prev_marker; for (uint16_t attempt = 1; /* empty */; ++attempt) { - ldout(cct, 20) << "RGWRados::Bucket::List::" << __func__ << + ldpp_dout(dpp, 20) << "RGWRados::Bucket::List::" << __func__ << " starting attempt " << attempt << dendl; if (attempt > 1 && !(prev_marker < cur_marker)) { // we've failed to make forward progress - ldout(cct, 0) << "RGWRados::Bucket::List::" << __func__ << + ldpp_dout(dpp, 0) << "RGWRados::Bucket::List::" << __func__ << ": ERROR marker failed to make forward progress; attempt=" << attempt << ", prev_marker=" << prev_marker << ", cur_marker=" << cur_marker << dendl; @@ -1834,7 +1839,8 @@ int RGWRados::Bucket::List::list_objects_ordered( ent_map_t ent_map; ent_map.reserve(read_ahead); - int r = store->cls_bucket_list_ordered(target->get_bucket_info(), + int r = store->cls_bucket_list_ordered(dpp, + target->get_bucket_info(), shard_id, cur_marker, cur_prefix, @@ -1856,7 +1862,7 @@ int RGWRados::Bucket::List::list_objects_ordered( rgw_obj_index_key index_key = entry.key; rgw_obj_key obj(index_key); - ldout(cct, 20) << "RGWRados::Bucket::List::" << __func__ << + ldpp_dout(dpp, 20) << "RGWRados::Bucket::List::" << __func__ << " considering entry " << entry.key << dendl; /* note that parse_raw_oid() here will not set the correct @@ -1867,7 +1873,7 @@ int RGWRados::Bucket::List::list_objects_ordered( */ bool valid = rgw_obj_key::parse_raw_oid(index_key.name, &obj); if (!valid) { - ldout(cct, 0) << "ERROR: could not parse object name: " << + ldpp_dout(dpp, 0) << "ERROR: could not parse object name: " << obj.name << dendl; continue; } @@ -1921,7 +1927,7 @@ int RGWRados::Bucket::List::list_objects_ordered( // after the prefix if (delim_pos != int(obj.name.length() - params.delim.length())) { - ldout(cct, 0) << + ldpp_dout(dpp, 0) << "WARNING: found delimiter in place other than the end of " "the prefix; obj.name=" << obj.name << ", prefix=" << params.prefix << dendl; @@ -1970,7 +1976,7 @@ int RGWRados::Bucket::List::list_objects_ordered( goto done; } - ldout(cct, 20) << "RGWRados::Bucket::List::" << __func__ << + ldpp_dout(dpp, 20) << "RGWRados::Bucket::List::" << __func__ << " adding entry " << entry.key << " to result" << dendl; result->emplace_back(std::move(entry)); @@ -1989,11 +1995,11 @@ int RGWRados::Bucket::List::list_objects_ordered( cur_marker.name.substr(0, marker_delim_pos); skip_after_delim.append(after_delim_s); - ldout(cct, 20) << "skip_after_delim=" << skip_after_delim << dendl; + ldpp_dout(dpp, 20) << "skip_after_delim=" << skip_after_delim << dendl; if (skip_after_delim > cur_marker.name) { cur_marker = skip_after_delim; - ldout(cct, 20) << "setting cur_marker=" + ldpp_dout(dpp, 20) << "setting cur_marker=" << cur_marker.name << "[" << cur_marker.instance << "]" << dendl; @@ -2001,7 +2007,7 @@ int RGWRados::Bucket::List::list_objects_ordered( } } // if older osd didn't do delimiter filtering - ldout(cct, 20) << "RGWRados::Bucket::List::" << __func__ << + ldpp_dout(dpp, 20) << "RGWRados::Bucket::List::" << __func__ << " INFO end of outer loop, truncated=" << truncated << ", count=" << count << ", attempt=" << attempt << dendl; @@ -2046,14 +2052,14 @@ done: * is_truncated: if number of objects in the bucket is bigger than max, then * truncated. 
*/ -int RGWRados::Bucket::List::list_objects_unordered(int64_t max_p, +int RGWRados::Bucket::List::list_objects_unordered(const DoutPrefixProvider *dpp, + int64_t max_p, vector *result, map *common_prefixes, bool *is_truncated, optional_yield y) { RGWRados *store = target->get_store(); - CephContext *cct = store->ctx(); int shard_id = target->get_shard_id(); int count = 0; @@ -2093,7 +2099,8 @@ int RGWRados::Bucket::List::list_objects_unordered(int64_t max_p, std::vector ent_list; ent_list.reserve(read_ahead); - int r = store->cls_bucket_list_unordered(target->get_bucket_info(), + int r = store->cls_bucket_list_unordered(dpp, + target->get_bucket_info(), shard_id, cur_marker, cur_prefix, @@ -2127,7 +2134,7 @@ int RGWRados::Bucket::List::list_objects_unordered(int64_t max_p, */ bool valid = rgw_obj_key::parse_raw_oid(index_key.name, &obj); if (!valid) { - ldout(cct, 0) << "ERROR: could not parse object name: " << + ldpp_dout(dpp, 0) << "ERROR: could not parse object name: " << obj.name << dendl; continue; } @@ -2175,11 +2182,11 @@ done: * create a rados pool, associated meta info * returns 0 on success, -ERR# otherwise. */ -int RGWRados::create_pool(const rgw_pool& pool) +int RGWRados::create_pool(const DoutPrefixProvider *dpp, const rgw_pool& pool) { librados::IoCtx io_ctx; constexpr bool create = true; - return rgw_init_ioctx(get_rados_handle(), pool, io_ctx, create); + return rgw_init_ioctx(dpp, get_rados_handle(), pool, io_ctx, create); } void RGWRados::create_bucket_id(string *bucket_id) @@ -2205,6 +2212,7 @@ int RGWRados::create_bucket(const RGWUserInfo& owner, rgw_bucket& bucket, rgw_bucket *pmaster_bucket, uint32_t *pmaster_num_shards, optional_yield y, + const DoutPrefixProvider *dpp, bool exclusive) { #define MAX_CREATE_RETRIES 20 /* need to bound retries */ @@ -2213,7 +2221,7 @@ int RGWRados::create_bucket(const RGWUserInfo& owner, rgw_bucket& bucket, for (int i = 0; i < MAX_CREATE_RETRIES; i++) { int ret = 0; - ret = svc.zone->select_bucket_placement(owner, zonegroup_id, placement_rule, + ret = svc.zone->select_bucket_placement(dpp, owner, zonegroup_id, placement_rule, &selected_placement_rule, &rule_info, y); if (ret < 0) return ret; @@ -2259,12 +2267,12 @@ int RGWRados::create_bucket(const RGWUserInfo& owner, rgw_bucket& bucket, info.quota = *pquota_info; } - int r = svc.bi->init_index(info); + int r = svc.bi->init_index(dpp, info); if (r < 0) { return r; } - ret = put_linked_bucket_info(info, exclusive, ceph::real_time(), pep_objv, &attrs, true); + ret = put_linked_bucket_info(info, exclusive, ceph::real_time(), pep_objv, &attrs, true, dpp); if (ret == -ECANCELED) { ret = -EEXIST; } @@ -2276,19 +2284,19 @@ int RGWRados::create_bucket(const RGWUserInfo& owner, rgw_bucket& bucket, if (r == -ENOENT) { continue; } - ldout(cct, 0) << "get_bucket_info returned " << r << dendl; + ldpp_dout(dpp, 0) << "get_bucket_info returned " << r << dendl; return r; } /* only remove it if it's a different bucket instance */ if (orig_info.bucket.bucket_id != bucket.bucket_id) { - int r = svc.bi->clean_index(info); + int r = svc.bi->clean_index(dpp, info); if (r < 0) { - ldout(cct, 0) << "WARNING: could not remove bucket index (r=" << r << ")" << dendl; + ldpp_dout(dpp, 0) << "WARNING: could not remove bucket index (r=" << r << ")" << dendl; } - r = ctl.bucket->remove_bucket_instance_info(info.bucket, info, null_yield); + r = ctl.bucket->remove_bucket_instance_info(info.bucket, info, null_yield, dpp); if (r < 0) { - ldout(cct, 0) << "WARNING: " << __func__ << "(): failed to remove bucket instance 
info: bucket instance=" << info.bucket.get_key() << ": r=" << r << dendl; + ldpp_dout(dpp, 0) << "WARNING: " << __func__ << "(): failed to remove bucket instance info: bucket instance=" << info.bucket.get_key() << ": r=" << r << dendl; /* continue anyway */ } } @@ -2300,7 +2308,7 @@ int RGWRados::create_bucket(const RGWUserInfo& owner, rgw_bucket& bucket, } /* this is highly unlikely */ - ldout(cct, 0) << "ERROR: could not create bucket, continuously raced with bucket creation and removal" << dendl; + ldpp_dout(dpp, 0) << "ERROR: could not create bucket, continuously raced with bucket creation and removal" << dendl; return -ENOENT; } @@ -2316,18 +2324,18 @@ bool RGWRados::obj_to_raw(const rgw_placement_rule& placement_rule, const rgw_ob return get_obj_data_pool(placement_rule, obj, &raw_obj->pool); } -int RGWRados::get_obj_head_ioctx(const RGWBucketInfo& bucket_info, const rgw_obj& obj, librados::IoCtx *ioctx) +int RGWRados::get_obj_head_ioctx(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, librados::IoCtx *ioctx) { string oid, key; get_obj_bucket_and_oid_loc(obj, oid, key); rgw_pool pool; if (!get_obj_data_pool(bucket_info.placement_rule, obj, &pool)) { - ldout(cct, 0) << "ERROR: cannot get data pool for obj=" << obj << ", probably misconfiguration" << dendl; + ldpp_dout(dpp, 0) << "ERROR: cannot get data pool for obj=" << obj << ", probably misconfiguration" << dendl; return -EIO; } - int r = open_pool_ctx(pool, *ioctx, false); + int r = open_pool_ctx(dpp, pool, *ioctx, false); if (r < 0) { return r; } @@ -2337,22 +2345,22 @@ int RGWRados::get_obj_head_ioctx(const RGWBucketInfo& bucket_info, const rgw_obj return 0; } -int RGWRados::get_obj_head_ref(const RGWBucketInfo& bucket_info, const rgw_obj& obj, rgw_rados_ref *ref) +int RGWRados::get_obj_head_ref(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, rgw_rados_ref *ref) { get_obj_bucket_and_oid_loc(obj, ref->obj.oid, ref->obj.loc); rgw_pool pool; if (!get_obj_data_pool(bucket_info.placement_rule, obj, &pool)) { - ldout(cct, 0) << "ERROR: cannot get data pool for obj=" << obj << ", probably misconfiguration" << dendl; + ldpp_dout(dpp, 0) << "ERROR: cannot get data pool for obj=" << obj << ", probably misconfiguration" << dendl; return -EIO; } ref->pool = svc.rados->pool(pool); - int r = ref->pool.open(RGWSI_RADOS::OpenParams() + int r = ref->pool.open(dpp, RGWSI_RADOS::OpenParams() .set_mostly_omap(false)); if (r < 0) { - ldout(cct, 0) << "ERROR: failed opening data pool (pool=" << pool << "); r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed opening data pool (pool=" << pool << "); r=" << r << dendl; return r; } @@ -2361,7 +2369,7 @@ int RGWRados::get_obj_head_ref(const RGWBucketInfo& bucket_info, const rgw_obj& return 0; } -int RGWRados::get_raw_obj_ref(const rgw_raw_obj& obj, rgw_rados_ref *ref) +int RGWRados::get_raw_obj_ref(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, rgw_rados_ref *ref) { ref->obj = obj; @@ -2370,10 +2378,10 @@ int RGWRados::get_raw_obj_ref(const rgw_raw_obj& obj, rgw_rados_ref *ref) ref->obj.pool = svc.zone->get_zone_params().domain_root; } ref->pool = svc.rados->pool(obj.pool); - int r = ref->pool.open(RGWSI_RADOS::OpenParams() + int r = ref->pool.open(dpp, RGWSI_RADOS::OpenParams() .set_mostly_omap(false)); if (r < 0) { - ldout(cct, 0) << "ERROR: failed opening pool (pool=" << obj.pool << "); r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed opening pool (pool=" << obj.pool << "); r=" << r << dendl; return r; } @@ 
-2382,16 +2390,16 @@ int RGWRados::get_raw_obj_ref(const rgw_raw_obj& obj, rgw_rados_ref *ref) return 0; } -int RGWRados::get_system_obj_ref(const rgw_raw_obj& obj, rgw_rados_ref *ref) +int RGWRados::get_system_obj_ref(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, rgw_rados_ref *ref) { - return get_raw_obj_ref(obj, ref); + return get_raw_obj_ref(dpp, obj, ref); } /* * fixes an issue where head objects were supposed to have a locator created, but ended * up without one */ -int RGWRados::fix_head_obj_locator(const RGWBucketInfo& bucket_info, bool copy_obj, bool remove_bad, rgw_obj_key& key) +int RGWRados::fix_head_obj_locator(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, bool copy_obj, bool remove_bad, rgw_obj_key& key) { const rgw_bucket& bucket = bucket_info.bucket; string oid; @@ -2402,13 +2410,13 @@ int RGWRados::fix_head_obj_locator(const RGWBucketInfo& bucket_info, bool copy_o get_obj_bucket_and_oid_loc(obj, oid, locator); if (locator.empty()) { - ldout(cct, 20) << "object does not have a locator, nothing to fix" << dendl; + ldpp_dout(dpp, 20) << "object does not have a locator, nothing to fix" << dendl; return 0; } librados::IoCtx ioctx; - int ret = get_obj_head_ioctx(bucket_info, obj, &ioctx); + int ret = get_obj_head_ioctx(dpp, bucket_info, obj, &ioctx); if (ret < 0) { cerr << "ERROR: get_obj_head_ioctx() returned ret=" << ret << std::endl; return ret; @@ -2426,19 +2434,19 @@ int RGWRados::fix_head_obj_locator(const RGWBucketInfo& bucket_info, bool copy_o #define HEAD_SIZE 512 * 1024 op.read(0, HEAD_SIZE, &data, NULL); - ret = rgw_rados_operate(ioctx, oid, &op, &data, null_yield); + ret = rgw_rados_operate(dpp, ioctx, oid, &op, &data, null_yield); if (ret < 0) { - lderr(cct) << "ERROR: rgw_rados_operate(oid=" << oid << ") returned ret=" << ret << dendl; + ldpp_dout(dpp, -1) << "ERROR: rgw_rados_operate(oid=" << oid << ") returned ret=" << ret << dendl; return ret; } if (size > HEAD_SIZE) { - lderr(cct) << "ERROR: returned object size (" << size << ") > HEAD_SIZE (" << HEAD_SIZE << ")" << dendl; + ldpp_dout(dpp, -1) << "ERROR: returned object size (" << size << ") > HEAD_SIZE (" << HEAD_SIZE << ")" << dendl; return -EIO; } if (size != data.length()) { - lderr(cct) << "ERROR: returned object size (" << size << ") != data.length() (" << data.length() << ")" << dendl; + ldpp_dout(dpp, -1) << "ERROR: returned object size (" << size << ") != data.length() (" << data.length() << ")" << dendl; return -EIO; } @@ -2455,7 +2463,7 @@ int RGWRados::fix_head_obj_locator(const RGWBucketInfo& bucket_info, bool copy_o wop.write(0, data); ioctx.locator_set_key(locator); - rgw_rados_operate(ioctx, oid, &wop, null_yield); + rgw_rados_operate(dpp, ioctx, oid, &wop, null_yield); } if (remove_bad) { @@ -2463,7 +2471,7 @@ int RGWRados::fix_head_obj_locator(const RGWBucketInfo& bucket_info, bool copy_o ret = ioctx.remove(oid); if (ret < 0) { - lderr(cct) << "ERROR: failed to remove original bad object" << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to remove original bad object" << dendl; return ret; } } @@ -2471,7 +2479,8 @@ int RGWRados::fix_head_obj_locator(const RGWBucketInfo& bucket_info, bool copy_o return 0; } -int RGWRados::move_rados_obj(librados::IoCtx& src_ioctx, +int RGWRados::move_rados_obj(const DoutPrefixProvider *dpp, + librados::IoCtx& src_ioctx, const string& src_oid, const string& src_locator, librados::IoCtx& dst_ioctx, const string& dst_oid, const string& dst_locator) @@ -2503,7 +2512,7 @@ int RGWRados::move_rados_obj(librados::IoCtx& src_ioctx, mtime = 
real_clock::from_timespec(mtime_ts); } rop.read(ofs, chunk_size, &data, NULL); - ret = rgw_rados_operate(src_ioctx, src_oid, &rop, &data, null_yield); + ret = rgw_rados_operate(dpp, src_ioctx, src_oid, &rop, &data, null_yield); if (ret < 0) { goto done_err; } @@ -2518,7 +2527,7 @@ int RGWRados::move_rados_obj(librados::IoCtx& src_ioctx, mtime = real_clock::from_timespec(mtime_ts); } wop.write(ofs, data); - ret = rgw_rados_operate(dst_ioctx, dst_oid, &wop, null_yield); + ret = rgw_rados_operate(dpp, dst_ioctx, dst_oid, &wop, null_yield); if (ret < 0) { goto done_err; } @@ -2527,7 +2536,7 @@ int RGWRados::move_rados_obj(librados::IoCtx& src_ioctx, } while (!done); if (ofs != size) { - lderr(cct) << "ERROR: " << __func__ << ": copying " << src_oid << " -> " << dst_oid + ldpp_dout(dpp, -1) << "ERROR: " << __func__ << ": copying " << src_oid << " -> " << dst_oid << ": expected " << size << " bytes to copy, ended up with " << ofs << dendl; ret = -EIO; goto done_err; @@ -2539,7 +2548,7 @@ int RGWRados::move_rados_obj(librados::IoCtx& src_ioctx, done_err: // TODO: clean up dst_oid if we created it - lderr(cct) << "ERROR: failed to copy " << src_oid << " -> " << dst_oid << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to copy " << src_oid << " -> " << dst_oid << dendl; return ret; } @@ -2547,7 +2556,7 @@ done_err: * fixes an issue where head objects were supposed to have a locator created, but ended * up without one */ -int RGWRados::fix_tail_obj_locator(const RGWBucketInfo& bucket_info, rgw_obj_key& key, bool fix, bool *need_fix, optional_yield y) +int RGWRados::fix_tail_obj_locator(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, rgw_obj_key& key, bool fix, bool *need_fix, optional_yield y) { const rgw_bucket& bucket = bucket_info.bucket; rgw_obj obj(bucket, key); @@ -2557,21 +2566,21 @@ int RGWRados::fix_tail_obj_locator(const RGWBucketInfo& bucket_info, rgw_obj_key } rgw_rados_ref ref; - int r = get_obj_head_ref(bucket_info, obj, &ref); + int r = get_obj_head_ref(dpp, bucket_info, obj, &ref); if (r < 0) { return r; } RGWObjState *astate = NULL; RGWObjectCtx rctx(this->store); - r = get_obj_state(&rctx, bucket_info, obj, &astate, false, y); + r = get_obj_state(dpp, &rctx, bucket_info, obj, &astate, false, y); if (r < 0) return r; if (astate->manifest) { RGWObjManifest::obj_iterator miter; RGWObjManifest& manifest = *astate->manifest; - for (miter = manifest.obj_begin(); miter != manifest.obj_end(); ++miter) { + for (miter = manifest.obj_begin(dpp); miter != manifest.obj_end(dpp); ++miter) { rgw_raw_obj raw_loc = miter.get_location().get_raw_obj(store); rgw_obj loc; string oid; @@ -2589,7 +2598,7 @@ int RGWRados::fix_tail_obj_locator(const RGWBucketInfo& bucket_info, rgw_obj_key get_obj_bucket_and_oid_loc(loc, oid, locator); ref.pool.ioctx().locator_set_key(locator); - ldout(cct, 20) << __func__ << ": key=" << key << " oid=" << oid << " locator=" << locator << dendl; + ldpp_dout(dpp, 20) << __func__ << ": key=" << key << " oid=" << oid << " locator=" << locator << dendl; r = ioctx.stat(oid, NULL, NULL); if (r != -ENOENT) { @@ -2609,14 +2618,14 @@ int RGWRados::fix_tail_obj_locator(const RGWBucketInfo& bucket_info, rgw_obj_key /* cannot find a broken part */ continue; } - ldout(cct, 20) << __func__ << ": found bad object part: " << loc << dendl; + ldpp_dout(dpp, 20) << __func__ << ": found bad object part: " << loc << dendl; if (need_fix) { *need_fix = true; } if (fix) { - r = move_rados_obj(src_ioctx, oid, bad_loc, ioctx, oid, locator); + r = move_rados_obj(dpp, src_ioctx, oid, 
bad_loc, ioctx, oid, locator); if (r < 0) { - lderr(cct) << "ERROR: copy_rados_obj() on oid=" << oid << " returned r=" << r << dendl; + ldpp_dout(dpp, -1) << "ERROR: copy_rados_obj() on oid=" << oid << " returned r=" << r << dendl; } } } @@ -2627,7 +2636,8 @@ int RGWRados::fix_tail_obj_locator(const RGWBucketInfo& bucket_info, rgw_obj_key int RGWRados::BucketShard::init(const rgw_bucket& _bucket, const rgw_obj& obj, - RGWBucketInfo* bucket_info_out) + RGWBucketInfo* bucket_info_out, + const DoutPrefixProvider *dpp) { bucket = _bucket; @@ -2637,26 +2647,27 @@ int RGWRados::BucketShard::init(const rgw_bucket& _bucket, RGWBucketInfo* bucket_info_p = bucket_info_out ? bucket_info_out : &bucket_info; - int ret = store->get_bucket_instance_info(obj_ctx, bucket, *bucket_info_p, NULL, NULL, null_yield); + int ret = store->get_bucket_instance_info(obj_ctx, bucket, *bucket_info_p, NULL, NULL, null_yield, dpp); if (ret < 0) { return ret; } string oid; - ret = store->svc.bi_rados->open_bucket_index_shard(*bucket_info_p, obj.get_hash_object(), &bucket_obj, &shard_id); + ret = store->svc.bi_rados->open_bucket_index_shard(dpp, *bucket_info_p, obj.get_hash_object(), &bucket_obj, &shard_id); if (ret < 0) { - ldout(store->ctx(), 0) << "ERROR: open_bucket_index_shard() returned ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: open_bucket_index_shard() returned ret=" << ret << dendl; return ret; } - ldout(store->ctx(), 20) << " bucket index object: " << bucket_obj.get_raw_obj() << dendl; + ldpp_dout(dpp, 20) << " bucket index object: " << bucket_obj.get_raw_obj() << dendl; return 0; } int RGWRados::BucketShard::init(const rgw_bucket& _bucket, int sid, const rgw::bucket_index_layout_generation& idx_layout, - RGWBucketInfo* bucket_info_out) + RGWBucketInfo* bucket_info_out, + const DoutPrefixProvider *dpp) { bucket = _bucket; shard_id = sid; @@ -2667,52 +2678,52 @@ int RGWRados::BucketShard::init(const rgw_bucket& _bucket, RGWBucketInfo bucket_info; RGWBucketInfo* bucket_info_p = bucket_info_out ? 
bucket_info_out : &bucket_info; - int ret = store->get_bucket_instance_info(obj_ctx, bucket, *bucket_info_p, NULL, NULL, null_yield); + int ret = store->get_bucket_instance_info(obj_ctx, bucket, *bucket_info_p, NULL, NULL, null_yield, dpp); if (ret < 0) { return ret; } string oid; - ret = store->svc.bi_rados->open_bucket_index_shard(*bucket_info_p, shard_id, idx_layout, &bucket_obj); + ret = store->svc.bi_rados->open_bucket_index_shard(dpp, *bucket_info_p, shard_id, idx_layout, &bucket_obj); if (ret < 0) { - ldout(store->ctx(), 0) << "ERROR: open_bucket_index_shard() returned ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: open_bucket_index_shard() returned ret=" << ret << dendl; return ret; } - ldout(store->ctx(), 20) << " bucket index oid: " << bucket_obj.get_raw_obj() << dendl; + ldpp_dout(dpp, 20) << " bucket index oid: " << bucket_obj.get_raw_obj() << dendl; return 0; } -int RGWRados::BucketShard::init(const RGWBucketInfo& bucket_info, +int RGWRados::BucketShard::init(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj) { bucket = bucket_info.bucket; - int ret = store->svc.bi_rados->open_bucket_index_shard(bucket_info, + int ret = store->svc.bi_rados->open_bucket_index_shard(dpp, bucket_info, obj.get_hash_object(), &bucket_obj, &shard_id); if (ret < 0) { - ldout(store->ctx(), 0) << "ERROR: open_bucket_index_shard() returned ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: open_bucket_index_shard() returned ret=" << ret << dendl; return ret; } - ldout(store->ctx(), 20) << " bucket index object: " << bucket_obj << dendl; + ldpp_dout(dpp, 20) << " bucket index object: " << bucket_obj << dendl; return 0; } -int RGWRados::BucketShard::init(const RGWBucketInfo& bucket_info, const rgw::bucket_index_layout_generation& idx_layout, int sid) +int RGWRados::BucketShard::init(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw::bucket_index_layout_generation& idx_layout, int sid) { bucket = bucket_info.bucket; shard_id = sid; - int ret = store->svc.bi_rados->open_bucket_index_shard(bucket_info, shard_id, idx_layout, &bucket_obj); + int ret = store->svc.bi_rados->open_bucket_index_shard(dpp, bucket_info, shard_id, idx_layout, &bucket_obj); if (ret < 0) { - ldout(store->ctx(), 0) << "ERROR: open_bucket_index_shard() returned ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: open_bucket_index_shard() returned ret=" << ret << dendl; return ret; } - ldout(store->ctx(), 20) << " bucket index object: " << bucket_obj << dendl; + ldpp_dout(dpp, 20) << " bucket index object: " << bucket_obj << dendl; return 0; } @@ -2721,7 +2732,8 @@ int RGWRados::BucketShard::init(const RGWBucketInfo& bucket_info, const rgw::buc /* Execute @handler on last item in bucket listing for bucket specified * in @bucket_info. @obj_prefix and @obj_delim narrow down the listing * to objects matching these criterias. 
*/ -int RGWRados::on_last_entry_in_listing(RGWBucketInfo& bucket_info, +int RGWRados::on_last_entry_in_listing(const DoutPrefixProvider *dpp, + RGWBucketInfo& bucket_info, const std::string& obj_prefix, const std::string& obj_delim, std::function handler) @@ -2732,7 +2744,7 @@ int RGWRados::on_last_entry_in_listing(RGWBucketInfo& bucket_info, list_op.params.prefix = obj_prefix; list_op.params.delim = obj_delim; - ldout(cct, 20) << "iterating listing for bucket=" << bucket_info.bucket.name + ldpp_dout(dpp, 20) << "iterating listing for bucket=" << bucket_info.bucket.name << ", obj_prefix=" << obj_prefix << ", obj_delim=" << obj_delim << dendl; @@ -2746,7 +2758,7 @@ int RGWRados::on_last_entry_in_listing(RGWBucketInfo& bucket_info, static constexpr int MAX_LIST_OBJS = 100; std::vector entries(MAX_LIST_OBJS); - int ret = list_op.list_objects(MAX_LIST_OBJS, &entries, nullptr, + int ret = list_op.list_objects(dpp, MAX_LIST_OBJS, &entries, nullptr, &is_truncated, null_yield); if (ret < 0) { return ret; @@ -2783,7 +2795,7 @@ int RGWRados::swift_versioning_copy(RGWObjectCtx& obj_ctx, obj->set_atomic(&obj_ctx); RGWObjState * state = nullptr; - int r = get_obj_state(&obj_ctx, bucket->get_info(), obj->get_obj(), &state, false, y); + int r = get_obj_state(dpp, &obj_ctx, bucket->get_info(), obj->get_obj(), &state, false, y); if (r < 0) { return r; } @@ -2802,7 +2814,7 @@ int RGWRados::swift_versioning_copy(RGWObjectCtx& obj_ctx, r = get_bucket_info(&svc, bucket->get_tenant(), bucket->get_info().swift_ver_location, dest_bucket_info, NULL, null_yield, NULL); if (r < 0) { - ldout(cct, 10) << "failed to read dest bucket info: r=" << r << dendl; + ldpp_dout(dpp, 10) << "failed to read dest bucket info: r=" << r << dendl; if (r == -ENOENT) { return -ERR_PRECONDITION_FAILED; } @@ -2959,7 +2971,7 @@ int RGWRados::swift_versioning_restore(RGWObjectCtx& obj_ctx, } /* Need to remove the archived copy. 
*/ - ret = delete_obj(obj_ctx, archive_binfo, archive_obj.get_obj(), + ret = delete_obj(dpp, obj_ctx, archive_binfo, archive_obj.get_obj(), archive_binfo.versioning_status()); return ret; @@ -2969,11 +2981,12 @@ int RGWRados::swift_versioning_restore(RGWObjectCtx& obj_ctx, const auto prefix = boost::str(boost::format("%03x%s") % obj_name.size() % obj_name); - return on_last_entry_in_listing(archive_binfo, prefix, std::string(), + return on_last_entry_in_listing(dpp, archive_binfo, prefix, std::string(), handler); } -int RGWRados::Object::Write::_do_write_meta(uint64_t size, uint64_t accounted_size, +int RGWRados::Object::Write::_do_write_meta(const DoutPrefixProvider *dpp, + uint64_t size, uint64_t accounted_size, map& attrs, bool assume_noent, bool modify_tail, void *_index_op, optional_yield y) @@ -2994,19 +3007,19 @@ int RGWRados::Object::Write::_do_write_meta(uint64_t size, uint64_t accounted_si #endif RGWObjState *state; - int r = target->get_state(&state, false, y, assume_noent); + int r = target->get_state(dpp, &state, false, y, assume_noent); if (r < 0) return r; rgw_obj& obj = target->get_obj(); if (obj.get_oid().empty()) { - ldout(store->ctx(), 0) << "ERROR: " << __func__ << "(): cannot write object with empty name" << dendl; + ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): cannot write object with empty name" << dendl; return -EIO; } rgw_rados_ref ref; - r = store->get_obj_head_ref(target->get_bucket_info(), obj, &ref); + r = store->get_obj_head_ref(dpp, target->get_bucket_info(), obj, &ref); if (r < 0) return r; @@ -3018,7 +3031,7 @@ int RGWRados::Object::Write::_do_write_meta(uint64_t size, uint64_t accounted_si if (!ptag && !index_op->get_optag()->empty()) { ptag = index_op->get_optag(); } - r = target->prepare_atomic_modification(op, reset_obj, ptag, meta.if_match, meta.if_nomatch, false, modify_tail, y); + r = target->prepare_atomic_modification(dpp, op, reset_obj, ptag, meta.if_match, meta.if_nomatch, false, modify_tail, y); if (r < 0) return r; @@ -3137,7 +3150,7 @@ int RGWRados::Object::Write::_do_write_meta(uint64_t size, uint64_t accounted_si if (!index_op->is_prepared()) { tracepoint(rgw_rados, prepare_enter, req_id.c_str()); - r = index_op->prepare(CLS_RGW_OP_ADD, &state->write_tag, y); + r = index_op->prepare(dpp, CLS_RGW_OP_ADD, &state->write_tag, y); tracepoint(rgw_rados, prepare_exit, req_id.c_str()); if (r < 0) return r; @@ -3146,7 +3159,7 @@ int RGWRados::Object::Write::_do_write_meta(uint64_t size, uint64_t accounted_si auto& ioctx = ref.pool.ioctx(); tracepoint(rgw_rados, operate_enter, req_id.c_str()); - r = rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield); + r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield); tracepoint(rgw_rados, operate_exit, req_id.c_str()); if (r < 0) { /* we can expect to get -ECANCELED if object was replaced under, or -ENOENT if was removed, or -EEXIST if it did not exist @@ -3161,13 +3174,13 @@ int RGWRados::Object::Write::_do_write_meta(uint64_t size, uint64_t accounted_si epoch = ioctx.get_last_version(); poolid = ioctx.get_id(); - r = target->complete_atomic_modification(); + r = target->complete_atomic_modification(dpp); if (r < 0) { - ldout(store->ctx(), 0) << "ERROR: complete_atomic_modification returned r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: complete_atomic_modification returned r=" << r << dendl; } tracepoint(rgw_rados, complete_enter, req_id.c_str()); - r = index_op->complete(poolid, epoch, size, accounted_size, + r = index_op->complete(dpp, poolid, epoch, size, 
accounted_size, meta.set_mtime, etag, content_type, storage_class, &acl_bl, meta.category, meta.remove_objs, meta.user_data, meta.appendable); @@ -3184,7 +3197,7 @@ int RGWRados::Object::Write::_do_write_meta(uint64_t size, uint64_t accounted_si state = NULL; if (versioned_op && meta.olh_epoch) { - r = store->set_olh(target->get_ctx(), target->get_bucket_info(), obj, false, NULL, *meta.olh_epoch, real_time(), false, y, meta.zones_trace); + r = store->set_olh(dpp, target->get_ctx(), target->get_bucket_info(), obj, false, NULL, *meta.olh_epoch, real_time(), false, y, meta.zones_trace); if (r < 0) { return r; } @@ -3194,10 +3207,10 @@ int RGWRados::Object::Write::_do_write_meta(uint64_t size, uint64_t accounted_si rgw_obj_index_key obj_key; obj.key.get_index_key(&obj_key); - r = store->obj_expirer->hint_add(meta.delete_at, obj.bucket.tenant, obj.bucket.name, + r = store->obj_expirer->hint_add(dpp, meta.delete_at, obj.bucket.tenant, obj.bucket.name, obj.bucket.bucket_id, obj_key); if (r < 0) { - ldout(store->ctx(), 0) << "ERROR: objexp_hint_add() returned r=" << r << ", object will not get removed" << dendl; + ldpp_dout(dpp, 0) << "ERROR: objexp_hint_add() returned r=" << r << ", object will not get removed" << dendl; /* ignoring error, nothing we can do at this point */ } } @@ -3215,9 +3228,9 @@ int RGWRados::Object::Write::_do_write_meta(uint64_t size, uint64_t accounted_si return 0; done_cancel: - int ret = index_op->cancel(); + int ret = index_op->cancel(dpp); if (ret < 0) { - ldout(store->ctx(), 0) << "ERROR: index_op.cancel()() returned ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: index_op.cancel()() returned ret=" << ret << dendl; } meta.canceled = true; @@ -3259,7 +3272,7 @@ done_cancel: return r; } -int RGWRados::Object::Write::write_meta(uint64_t size, uint64_t accounted_size, +int RGWRados::Object::Write::write_meta(const DoutPrefixProvider *dpp, uint64_t size, uint64_t accounted_size, map& attrs, optional_yield y) { RGWBucketInfo& bucket_info = target->get_bucket_info(); @@ -3271,19 +3284,20 @@ int RGWRados::Object::Write::write_meta(uint64_t size, uint64_t accounted_size, bool assume_noent = (meta.if_match == NULL && meta.if_nomatch == NULL); int r; if (assume_noent) { - r = _do_write_meta(size, accounted_size, attrs, assume_noent, meta.modify_tail, (void *)&index_op, y); + r = _do_write_meta(dpp, size, accounted_size, attrs, assume_noent, meta.modify_tail, (void *)&index_op, y); if (r == -EEXIST) { assume_noent = false; } } if (!assume_noent) { - r = _do_write_meta(size, accounted_size, attrs, assume_noent, meta.modify_tail, (void *)&index_op, y); + r = _do_write_meta(dpp, size, accounted_size, attrs, assume_noent, meta.modify_tail, (void *)&index_op, y); } return r; } class RGWRadosPutObj : public RGWHTTPStreamRWRequest::ReceiveCB { + const DoutPrefixProvider *dpp; CephContext* cct; rgw_obj obj; rgw::putobj::DataProcessor *filter; @@ -3305,13 +3319,15 @@ class RGWRadosPutObj : public RGWHTTPStreamRWRequest::ReceiveCB uint64_t lofs{0}; /* logical ofs */ std::function&)> attrs_handler; public: - RGWRadosPutObj(CephContext* cct, + RGWRadosPutObj(const DoutPrefixProvider *dpp, + CephContext* cct, CompressorRef& plugin, boost::optional& compressor, rgw::putobj::ObjectProcessor *p, void (*_progress_cb)(off_t, void *), void *_progress_data, std::function&)> _attrs_handler) : + dpp(dpp), cct(cct), filter(p), compressor(compressor), @@ -3326,7 +3342,7 @@ public: if (extra_data_bl.length()) { JSONParser jp; if (!jp.parse(extra_data_bl.c_str(), extra_data_bl.length())) { - 
ldout(cct, 0) << "failed to parse response extra data. len=" << extra_data_bl.length() << " data=" << extra_data_bl.c_str() << dendl; + ldpp_dout(dpp, 0) << "failed to parse response extra data. len=" << extra_data_bl.length() << " data=" << extra_data_bl.c_str() << dendl; return -EIO; } @@ -3344,7 +3360,7 @@ public: bool compressed = false; int r = rgw_compression_info_from_attr(bl, compressed, info); if (r < 0) { - ldout(cct, 4) << "failed to decode compression info, " + ldpp_dout(dpp, 4) << "failed to decode compression info, " "disabling etag verification" << dendl; try_etag_verify = false; } else if (compressed) { @@ -3391,11 +3407,11 @@ public: * to know the sequence in which the filters must be applied. */ if (try_etag_verify && src_attrs.find(RGW_ATTR_CRYPT_MODE) == src_attrs.end()) { - ret = rgw::putobj::create_etag_verifier(cct, filter, manifest_bl, + ret = rgw::putobj::create_etag_verifier(dpp, cct, filter, manifest_bl, compression_info, etag_verifier); if (ret < 0) { - ldout(cct, 4) << "failed to initial etag verifier, " + ldpp_dout(dpp, 4) << "failed to initial etag verifier, " "disabling etag verification" << dendl; } else { filter = etag_verifier.get(); @@ -3613,7 +3629,8 @@ public: } }; -int RGWRados::stat_remote_obj(RGWObjectCtx& obj_ctx, +int RGWRados::stat_remote_obj(const DoutPrefixProvider *dpp, + RGWObjectCtx& obj_ctx, const rgw_user& user_id, req_info *info, const rgw_zone_id& source_zone, @@ -3678,7 +3695,7 @@ int RGWRados::stat_remote_obj(RGWObjectCtx& obj_ctx, constexpr bool rgwx_stat = true; constexpr bool sync_manifest = true; constexpr bool skip_decrypt = true; - int ret = conn->get_obj(user_id, info, src_obj, pmod, unmod_ptr, + int ret = conn->get_obj(dpp, user_id, info, src_obj, pmod, unmod_ptr, dest_mtime_weight.zone_short_id, dest_mtime_weight.pg_ver, prepend_meta, get_op, rgwx_stat, sync_manifest, skip_decrypt, @@ -3807,7 +3824,7 @@ int RGWRados::fetch_remote_obj(RGWObjectCtx& obj_ctx, } else { map::iterator iter = zonegroup_conn_map.find(src_bucket->get_info().zonegroup); if (iter == zonegroup_conn_map.end()) { - ldout(cct, 0) << "could not find zonegroup connection to zonegroup: " << source_zone << dendl; + ldpp_dout(dpp, 0) << "could not find zonegroup connection to zonegroup: " << source_zone << dendl; return -ENOENT; } conn = iter->second; @@ -3815,7 +3832,7 @@ int RGWRados::fetch_remote_obj(RGWObjectCtx& obj_ctx, } else { auto iter = zone_conn_map.find(source_zone); if (iter == zone_conn_map.end()) { - ldout(cct, 0) << "could not find zone connection to zone: " << source_zone << dendl; + ldpp_dout(dpp, 0) << "could not find zone connection to zone: " << source_zone << dendl; return -ENOENT; } conn = iter->second; @@ -3831,7 +3848,7 @@ int RGWRados::fetch_remote_obj(RGWObjectCtx& obj_ctx, std::optional override_owner; - RGWRadosPutObj cb(cct, plugin, compressor, &processor, progress_cb, progress_data, + RGWRadosPutObj cb(dpp, cct, plugin, compressor, &processor, progress_cb, progress_data, [&](map& obj_attrs) { const rgw_placement_rule *ptail_rule; @@ -3843,7 +3860,7 @@ int RGWRados::fetch_remote_obj(RGWObjectCtx& obj_ctx, &override_owner, &ptail_rule); if (ret < 0) { - ldout(cct, 5) << "Aborting fetch: source object filter returned ret=" << ret << dendl; + ldpp_dout(dpp, 5) << "Aborting fetch: source object filter returned ret=" << ret << dendl; return ret; } @@ -3853,7 +3870,7 @@ int RGWRados::fetch_remote_obj(RGWObjectCtx& obj_ctx, if (compression_type != "none") { plugin = Compressor::create(cct, compression_type); if (!plugin) { - ldout(cct, 1) 
<< "Cannot load plugin for compression type " + ldpp_dout(dpp, 1) << "Cannot load plugin for compression type " << compression_type << dendl; } } @@ -3877,7 +3894,7 @@ int RGWRados::fetch_remote_obj(RGWObjectCtx& obj_ctx, if (copy_if_newer) { /* need to get mtime for destination */ - ret = get_obj_state(&obj_ctx, dest_bucket->get_info(), dest_obj->get_obj(), &dest_state, false, null_yield); + ret = get_obj_state(dpp, &obj_ctx, dest_bucket->get_info(), dest_obj->get_obj(), &dest_state, false, null_yield); if (ret < 0) goto set_err_state; @@ -3892,7 +3909,7 @@ int RGWRados::fetch_remote_obj(RGWObjectCtx& obj_ctx, static constexpr bool rgwx_stat = false; static constexpr bool sync_manifest = true; static constexpr bool skip_decrypt = true; - ret = conn->get_obj(user_id, info, src_obj, pmod, unmod_ptr, + ret = conn->get_obj(dpp, user_id, info, src_obj, pmod, unmod_ptr, dest_mtime_weight.zone_short_id, dest_mtime_weight.pg_ver, prepend_meta, get_op, rgwx_stat, sync_manifest, skip_decrypt, @@ -3913,7 +3930,7 @@ int RGWRados::fetch_remote_obj(RGWObjectCtx& obj_ctx, } if (cb.get_data_len() != expected_size) { ret = -EIO; - ldout(cct, 0) << "ERROR: object truncated during fetching, expected " + ldpp_dout(dpp, 0) << "ERROR: object truncated during fetching, expected " << expected_size << " bytes but received " << cb.get_data_len() << dendl; goto set_err_state; } @@ -3934,8 +3951,8 @@ int RGWRados::fetch_remote_obj(RGWObjectCtx& obj_ctx, auto& obj_attrs = cb.get_attrs(); RGWUserInfo owner_info; - if (ctl.user->get_info_by_uid(*override_owner, &owner_info, null_yield) < 0) { - ldout(cct, 10) << "owner info does not exist" << dendl; + if (ctl.user->get_info_by_uid(dpp, *override_owner, &owner_info, null_yield) < 0) { + ldpp_dout(dpp, 10) << "owner info does not exist" << dendl; return -EINVAL; } @@ -3943,14 +3960,14 @@ int RGWRados::fetch_remote_obj(RGWObjectCtx& obj_ctx, auto aiter = obj_attrs.find(RGW_ATTR_ACL); if (aiter == obj_attrs.end()) { - ldout(cct, 0) << "WARNING: " << __func__ << "(): object doesn't have ACL attribute, setting default ACLs" << dendl; + ldpp_dout(dpp, 0) << "WARNING: " << __func__ << "(): object doesn't have ACL attribute, setting default ACLs" << dendl; acl.create_default(owner_info.user_id, owner_info.display_name); } else { auto iter = aiter->second.cbegin(); try { acl.decode(iter); } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: " << __func__ << "(): could not decode policy, caught buffer::error" << dendl; + ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): could not decode policy, caught buffer::error" << dendl; return -EIO; } } @@ -3974,7 +3991,7 @@ int RGWRados::fetch_remote_obj(RGWObjectCtx& obj_ctx, try { decode(delete_at, iter->second); } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: failed to decode delete_at field in intra zone copy" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to decode delete_at field in intra zone copy" << dendl; } } } @@ -4007,7 +4024,7 @@ int RGWRados::fetch_remote_obj(RGWObjectCtx& obj_ctx, try { decode(pg_ver, iter); } catch (buffer::error& err) { - ldout(ctx(), 0) << "ERROR: failed to decode pg ver attribute, ignoring" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to decode pg ver attribute, ignoring" << dendl; /* non critical error */ } } @@ -4025,7 +4042,7 @@ int RGWRados::fetch_remote_obj(RGWObjectCtx& obj_ctx, if (verifier_etag != trimmed_etag) { ret = -EIO; - ldout(cct, 0) << "ERROR: source and destination objects don't match. 
Expected etag:" + ldpp_dout(dpp, 0) << "ERROR: source and destination objects don't match. Expected etag:" << trimmed_etag << " Computed etag:" << verifier_etag << dendl; goto set_err_state; } @@ -4042,28 +4059,28 @@ int RGWRados::fetch_remote_obj(RGWObjectCtx& obj_ctx, } if (copy_if_newer && canceled) { - ldout(cct, 20) << "raced with another write of obj: " << dest_obj << dendl; + ldpp_dout(dpp, 20) << "raced with another write of obj: " << dest_obj << dendl; obj_ctx.invalidate(dest_obj->get_obj()); /* object was overwritten */ - ret = get_obj_state(&obj_ctx, dest_bucket->get_info(), dest_obj->get_obj(), &dest_state, false, null_yield); + ret = get_obj_state(dpp, &obj_ctx, dest_bucket->get_info(), dest_obj->get_obj(), &dest_state, false, null_yield); if (ret < 0) { - ldout(cct, 0) << "ERROR: " << __func__ << ": get_err_state() returned ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: " << __func__ << ": get_err_state() returned ret=" << ret << dendl; goto set_err_state; } dest_mtime_weight.init(dest_state); dest_mtime_weight.high_precision = high_precision_time; if (!dest_state->exists || dest_mtime_weight < set_mtime_weight) { - ldout(cct, 20) << "retrying writing object mtime=" << set_mtime << " dest_state->mtime=" << dest_state->mtime << " dest_state->exists=" << dest_state->exists << dendl; + ldpp_dout(dpp, 20) << "retrying writing object mtime=" << set_mtime << " dest_state->mtime=" << dest_state->mtime << " dest_state->exists=" << dest_state->exists << dendl; continue; } else { - ldout(cct, 20) << "not retrying writing object mtime=" << set_mtime << " dest_state->mtime=" << dest_state->mtime << " dest_state->exists=" << dest_state->exists << dendl; + ldpp_dout(dpp, 20) << "not retrying writing object mtime=" << set_mtime << " dest_state->mtime=" << dest_state->mtime << " dest_state->exists=" << dest_state->exists << dendl; } } break; } if (i == MAX_COMPLETE_RETRY) { - ldout(cct, 0) << "ERROR: retried object completion too many times, something is wrong!" << dendl; + ldpp_dout(dpp, 0) << "ERROR: retried object completion too many times, something is wrong!" 
<< dendl; ret = -EIO; goto set_err_state; } @@ -4078,7 +4095,7 @@ set_err_state: // for OP_LINK_OLH to call set_olh() with a real olh_epoch if (olh_epoch && *olh_epoch > 0) { constexpr bool log_data_change = true; - ret = set_olh(obj_ctx, dest_bucket->get_info(), dest_obj->get_obj(), false, nullptr, + ret = set_olh(dpp, obj_ctx, dest_bucket->get_info(), dest_obj->get_obj(), false, nullptr, *olh_epoch, real_time(), false, null_yield, zones_trace, log_data_change); } else { // we already have the latest copy @@ -4089,7 +4106,8 @@ set_err_state: } -int RGWRados::copy_obj_to_remote_dest(RGWObjState *astate, +int RGWRados::copy_obj_to_remote_dest(const DoutPrefixProvider *dpp, + RGWObjState *astate, map& src_attrs, RGWRados::Object::Read& read_op, const rgw_user& user_id, @@ -4102,12 +4120,12 @@ int RGWRados::copy_obj_to_remote_dest(RGWObjState *astate, auto rest_master_conn = svc.zone->get_master_conn(); - int ret = rest_master_conn->put_obj_async(user_id, dest_obj, astate->size, src_attrs, true, &out_stream_req); + int ret = rest_master_conn->put_obj_async(dpp, user_id, dest_obj, astate->size, src_attrs, true, &out_stream_req); if (ret < 0) { return ret; } - ret = read_op.iterate(0, astate->size - 1, out_stream_req->get_out_cb(), null_yield); + ret = read_op.iterate(dpp, 0, astate->size - 1, out_stream_req->get_out_cb(), null_yield); if (ret < 0) { delete out_stream_req; return ret; @@ -4211,7 +4229,7 @@ int RGWRados::copy_obj(RGWObjectCtx& obj_ctx, read_op.params.lastmod = src_mtime; read_op.params.obj_size = &obj_size; - ret = read_op.prepare(y); + ret = read_op.prepare(y, dpp); if (ret < 0) { return ret; } @@ -4239,7 +4257,7 @@ int RGWRados::copy_obj(RGWObjectCtx& obj_ctx, RGWObjManifest manifest; RGWObjState *astate = NULL; - ret = get_obj_state(&obj_ctx, src_bucket->get_info(), src_obj->get_obj(), &astate, y); + ret = get_obj_state(dpp, &obj_ctx, src_bucket->get_info(), src_obj->get_obj(), &astate, y); if (ret < 0) { return ret; } @@ -4248,11 +4266,11 @@ int RGWRados::copy_obj(RGWObjectCtx& obj_ctx, if (remote_dest) { /* dest is in a different zonegroup, copy it there */ - return copy_obj_to_remote_dest(astate, attrs, read_op, user_id, dest_obj, mtime); + return copy_obj_to_remote_dest(dpp, astate, attrs, read_op, user_id, dest_obj, mtime); } uint64_t max_chunk_size; - ret = get_max_chunk_size(dest_bucket->get_placement_rule(), dest_obj->get_obj(), &max_chunk_size); + ret = get_max_chunk_size(dest_bucket->get_placement_rule(), dest_obj->get_obj(), &max_chunk_size, dpp); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: failed to get max_chunk_size() for bucket " << dest_obj->get_bucket() << dendl; return ret; @@ -4319,14 +4337,14 @@ int RGWRados::copy_obj(RGWObjectCtx& obj_ctx, mtime, real_time(), attrs, olh_epoch, delete_at, petag, dpp, y); } - RGWObjManifest::obj_iterator miter = astate->manifest->obj_begin(); + RGWObjManifest::obj_iterator miter = astate->manifest->obj_begin(dpp); if (copy_first) { // we need to copy first chunk, not increase refcount ++miter; } rgw_rados_ref ref; - ret = get_raw_obj_ref(miter.get_location().get_raw_obj(store), &ref); + ret = get_raw_obj_ref(dpp, miter.get_location().get_raw_obj(store), &ref); if (ret < 0) { return ret; } @@ -4358,7 +4376,7 @@ int RGWRados::copy_obj(RGWObjectCtx& obj_ctx, manifest.set_tail_placement(tail_placement.placement_rule, src_obj->get_bucket()->get_key()); } string ref_tag; - for (; miter != astate->manifest->obj_end(); ++miter) { + for (; miter != astate->manifest->obj_end(dpp); ++miter) { ObjectWriteOperation op; ref_tag = tag + 
'\0'; cls_refcount_get(op, ref_tag, true); @@ -4367,7 +4385,7 @@ int RGWRados::copy_obj(RGWObjectCtx& obj_ctx, auto& ioctx = ref.pool.ioctx(); ioctx.locator_set_key(loc.loc); - ret = rgw_rados_operate(ioctx, loc.oid, &op, null_yield); + ret = rgw_rados_operate(dpp, ioctx, loc.oid, &op, null_yield); if (ret < 0) { goto done_ret; } @@ -4383,7 +4401,7 @@ int RGWRados::copy_obj(RGWObjectCtx& obj_ctx, } if (copy_first) { - ret = read_op.read(0, max_chunk_size, first_chunk, y); + ret = read_op.read(0, max_chunk_size, first_chunk, y, dpp); if (ret < 0) { goto done_ret; } @@ -4404,7 +4422,7 @@ int RGWRados::copy_obj(RGWObjectCtx& obj_ctx, write_op.meta.delete_at = delete_at; write_op.meta.modify_tail = !copy_itself; - ret = write_op.write_meta(obj_size, astate->accounted_size, attrs, y); + ret = write_op.write_meta(dpp, obj_size, astate->accounted_size, attrs, y); if (ret < 0) { goto done_ret; } @@ -4423,7 +4441,7 @@ done_ret: ref.pool.ioctx().locator_set_key(riter->loc); - int r = rgw_rados_operate(ref.pool.ioctx(), riter->oid, &op, null_yield); + int r = rgw_rados_operate(dpp, ref.pool.ioctx(), riter->oid, &op, null_yield); if (r < 0) { ldpp_dout(dpp, 0) << "ERROR: cleanup after error failed to drop reference on obj=" << *riter << dendl; } @@ -4466,7 +4484,7 @@ int RGWRados::copy_obj_data(RGWObjectCtx& obj_ctx, do { bufferlist bl; - ret = read_op.read(ofs, end, bl, y); + ret = read_op.read(ofs, end, bl, y, dpp); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: fail to read object data, ret = " << ret << dendl; return ret; @@ -4535,7 +4553,7 @@ int RGWRados::transition_obj(RGWObjectCtx& obj_ctx, read_op.params.lastmod = &read_mtime; read_op.params.obj_size = &obj_size; - int ret = read_op.prepare(y); + int ret = read_op.prepare(y, dpp); if (ret < 0) { return ret; } @@ -4569,7 +4587,7 @@ int RGWRados::transition_obj(RGWObjectCtx& obj_ctx, return 0; } -int RGWRados::check_bucket_empty(RGWBucketInfo& bucket_info, optional_yield y) +int RGWRados::check_bucket_empty(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, optional_yield y) { constexpr uint NUM_ENTRIES = 1000u; @@ -4581,7 +4599,8 @@ int RGWRados::check_bucket_empty(RGWBucketInfo& bucket_info, optional_yield y) std::vector ent_list; ent_list.reserve(NUM_ENTRIES); - int r = cls_bucket_list_unordered(bucket_info, + int r = cls_bucket_list_unordered(dpp, + bucket_info, RGW_NO_SHARD, marker, prefix, @@ -4613,17 +4632,17 @@ int RGWRados::check_bucket_empty(RGWBucketInfo& bucket_info, optional_yield y) * bucket: the name of the bucket to delete * Returns 0 on success, -ERR# otherwise. 
*/ -int RGWRados::delete_bucket(RGWBucketInfo& bucket_info, RGWObjVersionTracker& objv_tracker, optional_yield y, bool check_empty) +int RGWRados::delete_bucket(RGWBucketInfo& bucket_info, RGWObjVersionTracker& objv_tracker, optional_yield y, const DoutPrefixProvider *dpp, bool check_empty) { const rgw_bucket& bucket = bucket_info.bucket; RGWSI_RADOS::Pool index_pool; map bucket_objs; - int r = svc.bi_rados->open_bucket_index(bucket_info, std::nullopt, &index_pool, &bucket_objs, nullptr); + int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, std::nullopt, &index_pool, &bucket_objs, nullptr); if (r < 0) return r; if (check_empty) { - r = check_bucket_empty(bucket_info, y); + r = check_bucket_empty(dpp, bucket_info, y); if (r < 0) { return r; } @@ -4636,13 +4655,14 @@ int RGWRados::delete_bucket(RGWBucketInfo& bucket_info, RGWObjVersionTracker& ob r = ctl.bucket->read_bucket_entrypoint_info(bucket_info.bucket, &ep, null_yield, + dpp, RGWBucketCtl::Bucket::GetParams() .set_objv_tracker(&objv_tracker)); if (r < 0 || (!bucket_info.bucket.bucket_id.empty() && ep.bucket.bucket_id != bucket_info.bucket.bucket_id)) { if (r != -ENOENT) { - ldout(cct, 0) << "ERROR: read_bucket_entrypoint_info() bucket=" << bucket_info.bucket << " returned error: r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: read_bucket_entrypoint_info() bucket=" << bucket_info.bucket << " returned error: r=" << r << dendl; /* we have no idea what caused the error, will not try to remove it */ } /* @@ -4654,7 +4674,7 @@ int RGWRados::delete_bucket(RGWBucketInfo& bucket_info, RGWObjVersionTracker& ob } if (remove_ep) { - r = ctl.bucket->remove_bucket_entrypoint_info(bucket_info.bucket, null_yield, + r = ctl.bucket->remove_bucket_entrypoint_info(bucket_info.bucket, null_yield, dpp, RGWBucketCtl::Bucket::RemoveParams() .set_objv_tracker(&objv_tracker)); if (r < 0) @@ -4664,7 +4684,7 @@ int RGWRados::delete_bucket(RGWBucketInfo& bucket_info, RGWObjVersionTracker& ob /* if the bucket is not synced we can remove the meta file */ if (!svc.zone->is_syncing_bucket_meta(bucket)) { RGWObjVersionTracker objv_tracker; - r = ctl.bucket->remove_bucket_instance_info(bucket, bucket_info, null_yield); + r = ctl.bucket->remove_bucket_instance_info(bucket, bucket_info, null_yield, dpp); if (r < 0) { return r; } @@ -4678,7 +4698,7 @@ int RGWRados::delete_bucket(RGWBucketInfo& bucket_info, RGWObjVersionTracker& ob return 0; } -int RGWRados::set_bucket_owner(rgw_bucket& bucket, ACLOwner& owner) +int RGWRados::set_bucket_owner(rgw_bucket& bucket, ACLOwner& owner, const DoutPrefixProvider *dpp) { RGWBucketInfo info; map attrs; @@ -4686,20 +4706,20 @@ int RGWRados::set_bucket_owner(rgw_bucket& bucket, ACLOwner& owner) auto obj_ctx = svc.sysobj->init_obj_ctx(); if (bucket.bucket_id.empty()) { - r = get_bucket_info(&svc, bucket.tenant, bucket.name, info, NULL, null_yield, &attrs); + r = get_bucket_info(&svc, bucket.tenant, bucket.name, info, NULL, null_yield, dpp, &attrs); } else { - r = get_bucket_instance_info(obj_ctx, bucket, info, nullptr, &attrs, null_yield); + r = get_bucket_instance_info(obj_ctx, bucket, info, nullptr, &attrs, null_yield, dpp); } if (r < 0) { - ldout(cct, 0) << "NOTICE: get_bucket_info on bucket=" << bucket.name << " returned err=" << r << dendl; + ldpp_dout(dpp, 0) << "NOTICE: get_bucket_info on bucket=" << bucket.name << " returned err=" << r << dendl; return r; } info.owner = owner.get_id(); - r = put_bucket_instance_info(info, false, real_time(), &attrs); + r = put_bucket_instance_info(info, false, real_time(), &attrs, dpp); 
if (r < 0) { - ldout(cct, 0) << "NOTICE: put_bucket_info on bucket=" << bucket.name << " returned err=" << r << dendl; + ldpp_dout(dpp, 0) << "NOTICE: put_bucket_info on bucket=" << bucket.name << " returned err=" << r << dendl; return r; } @@ -4707,7 +4727,7 @@ int RGWRados::set_bucket_owner(rgw_bucket& bucket, ACLOwner& owner) } -int RGWRados::set_buckets_enabled(vector& buckets, bool enabled) +int RGWRados::set_buckets_enabled(vector& buckets, bool enabled, const DoutPrefixProvider *dpp) { int ret = 0; @@ -4715,16 +4735,17 @@ int RGWRados::set_buckets_enabled(vector& buckets, bool enabled) for (iter = buckets.begin(); iter != buckets.end(); ++iter) { rgw_bucket& bucket = *iter; - if (enabled) - ldout(cct, 20) << "enabling bucket name=" << bucket.name << dendl; - else - ldout(cct, 20) << "disabling bucket name=" << bucket.name << dendl; + if (enabled) { + ldpp_dout(dpp, 20) << "enabling bucket name=" << bucket.name << dendl; + } else { + ldpp_dout(dpp, 20) << "disabling bucket name=" << bucket.name << dendl; + } RGWBucketInfo info; map attrs; - int r = get_bucket_info(&svc, bucket.tenant, bucket.name, info, NULL, null_yield, &attrs); + int r = get_bucket_info(&svc, bucket.tenant, bucket.name, info, NULL, null_yield, dpp, &attrs); if (r < 0) { - ldout(cct, 0) << "NOTICE: get_bucket_info on bucket=" << bucket.name << " returned err=" << r << ", skipping bucket" << dendl; + ldpp_dout(dpp, 0) << "NOTICE: get_bucket_info on bucket=" << bucket.name << " returned err=" << r << ", skipping bucket" << dendl; ret = r; continue; } @@ -4734,9 +4755,9 @@ int RGWRados::set_buckets_enabled(vector& buckets, bool enabled) info.flags |= BUCKET_SUSPENDED; } - r = put_bucket_instance_info(info, false, real_time(), &attrs); + r = put_bucket_instance_info(info, false, real_time(), &attrs, dpp); if (r < 0) { - ldout(cct, 0) << "NOTICE: put_bucket_info on bucket=" << bucket.name << " returned err=" << r << ", skipping bucket" << dendl; + ldpp_dout(dpp, 0) << "NOTICE: put_bucket_info on bucket=" << bucket.name << " returned err=" << r << ", skipping bucket" << dendl; ret = r; continue; } @@ -4744,10 +4765,10 @@ int RGWRados::set_buckets_enabled(vector& buckets, bool enabled) return ret; } -int RGWRados::bucket_suspended(rgw_bucket& bucket, bool *suspended) +int RGWRados::bucket_suspended(const DoutPrefixProvider *dpp, rgw_bucket& bucket, bool *suspended) { RGWBucketInfo bucket_info; - int ret = get_bucket_info(&svc, bucket.tenant, bucket.name, bucket_info, NULL, null_yield); + int ret = get_bucket_info(&svc, bucket.tenant, bucket.name, bucket_info, NULL, null_yield, dpp); if (ret < 0) { return ret; } @@ -4756,13 +4777,13 @@ int RGWRados::bucket_suspended(rgw_bucket& bucket, bool *suspended) return 0; } -int RGWRados::Object::complete_atomic_modification() +int RGWRados::Object::complete_atomic_modification(const DoutPrefixProvider *dpp) { if ((!state->manifest)|| state->keep_tail) return 0; cls_rgw_obj_chain chain; - store->update_gc_chain(obj, *state->manifest, &chain); + store->update_gc_chain(dpp, obj, *state->manifest, &chain); if (chain.empty()) { return 0; @@ -4772,17 +4793,17 @@ int RGWRados::Object::complete_atomic_modification() auto ret = store->gc->send_chain(chain, tag); // do it synchronously if (ret < 0) { //Delete objects inline if send chain to gc fails - store->delete_objs_inline(chain, tag); + store->delete_objs_inline(dpp, chain, tag); } return 0; } -void RGWRados::update_gc_chain(rgw_obj& head_obj, RGWObjManifest& manifest, cls_rgw_obj_chain *chain) +void RGWRados::update_gc_chain(const 
DoutPrefixProvider *dpp, rgw_obj& head_obj, RGWObjManifest& manifest, cls_rgw_obj_chain *chain) { RGWObjManifest::obj_iterator iter; rgw_raw_obj raw_head; obj_to_raw(manifest.get_head_placement_rule(), head_obj, &raw_head); - for (iter = manifest.obj_begin(); iter != manifest.obj_end(); ++iter) { + for (iter = manifest.obj_begin(dpp); iter != manifest.obj_end(dpp); ++iter) { const rgw_raw_obj& mobj = iter.get_location().get_raw_obj(store); if (mobj == raw_head) continue; @@ -4800,7 +4821,7 @@ int RGWRados::send_chain_to_gc(cls_rgw_obj_chain& chain, const string& tag) return gc->send_chain(chain, tag); } -void RGWRados::delete_objs_inline(cls_rgw_obj_chain& chain, const string& tag) +void RGWRados::delete_objs_inline(const DoutPrefixProvider *dpp, cls_rgw_obj_chain& chain, const string& tag) { string last_pool; std::unique_ptr ctx(new IoCtx); @@ -4809,10 +4830,10 @@ void RGWRados::delete_objs_inline(cls_rgw_obj_chain& chain, const string& tag) cls_rgw_obj& obj = *liter; if (obj.pool != last_pool) { ctx.reset(new IoCtx); - ret = rgw_init_ioctx(get_rados_handle(), obj.pool, *ctx); + ret = rgw_init_ioctx(dpp, get_rados_handle(), obj.pool, *ctx); if (ret < 0) { last_pool = ""; - ldout(cct, 0) << "ERROR: failed to create ioctx pool=" << + ldpp_dout(dpp, 0) << "ERROR: failed to create ioctx pool=" << obj.pool << dendl; continue; } @@ -4820,13 +4841,13 @@ void RGWRados::delete_objs_inline(cls_rgw_obj_chain& chain, const string& tag) } ctx->locator_set_key(obj.loc); const string& oid = obj.key.name; /* just stored raw oid there */ - ldout(cct, 5) << "delete_objs_inline: removing " << obj.pool << + ldpp_dout(dpp, 5) << "delete_objs_inline: removing " << obj.pool << ":" << obj.key.name << dendl; ObjectWriteOperation op; cls_refcount_put(op, tag, true); ret = ctx->operate(oid, &op); if (ret < 0) { - ldout(cct, 5) << "delete_objs_inline: refcount put returned error " << ret << dendl; + ldpp_dout(dpp, 5) << "delete_objs_inline: refcount put returned error " << ret << dendl; } } } @@ -4848,7 +4869,7 @@ static void accumulate_raw_stats(const rgw_bucket_dir_header& header, } } -int RGWRados::bucket_check_index(RGWBucketInfo& bucket_info, +int RGWRados::bucket_check_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, map *existing_stats, map *calculated_stats) { @@ -4858,7 +4879,7 @@ int RGWRados::bucket_check_index(RGWBucketInfo& bucket_info, map oids; map bucket_objs_ret; - int ret = svc.bi_rados->open_bucket_index(bucket_info, std::nullopt, &index_pool, &oids, nullptr); + int ret = svc.bi_rados->open_bucket_index(dpp, bucket_info, std::nullopt, &index_pool, &oids, nullptr); if (ret < 0) { return ret; } @@ -4882,12 +4903,12 @@ int RGWRados::bucket_check_index(RGWBucketInfo& bucket_info, return 0; } -int RGWRados::bucket_rebuild_index(RGWBucketInfo& bucket_info) +int RGWRados::bucket_rebuild_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info) { RGWSI_RADOS::Pool index_pool; map bucket_objs; - int r = svc.bi_rados->open_bucket_index(bucket_info, std::nullopt, &index_pool, &bucket_objs, nullptr); + int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, std::nullopt, &index_pool, &bucket_objs, nullptr); if (r < 0) { return r; } @@ -4895,12 +4916,12 @@ int RGWRados::bucket_rebuild_index(RGWBucketInfo& bucket_info) return CLSRGWIssueBucketRebuild(index_pool.ioctx(), bucket_objs, cct->_conf->rgw_bucket_index_max_aio)(); } -int RGWRados::bucket_set_reshard(const RGWBucketInfo& bucket_info, const cls_rgw_bucket_instance_entry& entry) +int RGWRados::bucket_set_reshard(const 
DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const cls_rgw_bucket_instance_entry& entry) { RGWSI_RADOS::Pool index_pool; map bucket_objs; - int r = svc.bi_rados->open_bucket_index(bucket_info, std::nullopt, &index_pool, &bucket_objs, nullptr); + int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, std::nullopt, &index_pool, &bucket_objs, nullptr); if (r < 0) { return r; } @@ -4908,7 +4929,7 @@ int RGWRados::bucket_set_reshard(const RGWBucketInfo& bucket_info, const cls_rgw return CLSRGWIssueSetBucketResharding(index_pool.ioctx(), bucket_objs, entry, cct->_conf->rgw_bucket_index_max_aio)(); } -int RGWRados::defer_gc(void *ctx, const RGWBucketInfo& bucket_info, const rgw_obj& obj, optional_yield y) +int RGWRados::defer_gc(const DoutPrefixProvider *dpp, void *ctx, const RGWBucketInfo& bucket_info, const rgw_obj& obj, optional_yield y) { RGWObjectCtx *rctx = static_cast(ctx); std::string oid, key; @@ -4918,12 +4939,12 @@ int RGWRados::defer_gc(void *ctx, const RGWBucketInfo& bucket_info, const rgw_ob RGWObjState *state = NULL; - int r = get_obj_state(rctx, bucket_info, obj, &state, false, y); + int r = get_obj_state(dpp, rctx, bucket_info, obj, &state, false, y); if (r < 0) return r; if (!state->is_atomic) { - ldout(cct, 20) << "state for obj=" << obj << " is not atomic, not deferring gc operation" << dendl; + ldpp_dout(dpp, 20) << "state for obj=" << obj << " is not atomic, not deferring gc operation" << dendl; return -EINVAL; } @@ -4934,14 +4955,14 @@ int RGWRados::defer_gc(void *ctx, const RGWBucketInfo& bucket_info, const rgw_ob } else if (state->obj_tag.length() > 0) { tag = state->obj_tag.c_str(); } else { - ldout(cct, 20) << "state->obj_tag is empty, not deferring gc operation" << dendl; + ldpp_dout(dpp, 20) << "state->obj_tag is empty, not deferring gc operation" << dendl; return -EINVAL; } - ldout(cct, 0) << "defer chain tag=" << tag << dendl; + ldpp_dout(dpp, 0) << "defer chain tag=" << tag << dendl; cls_rgw_obj_chain chain; - update_gc_chain(state->obj, *state->manifest, &chain); + update_gc_chain(dpp, state->obj, *state->manifest, &chain); return gc->async_defer_chain(tag, chain); } @@ -4979,7 +5000,7 @@ struct tombstone_entry { * obj: name of the object to delete * Returns: 0 on success, -ERR# otherwise. 
*/ -int RGWRados::Object::Delete::delete_obj(optional_yield y) +int RGWRados::Object::Delete::delete_obj(optional_yield y, const DoutPrefixProvider *dpp) { RGWRados *store = target->get_store(); rgw_obj& src_obj = target->get_obj(); @@ -5020,19 +5041,19 @@ int RGWRados::Object::Delete::delete_obj(optional_yield y) meta.mtime = params.mtime; } - int r = store->set_olh(target->get_ctx(), target->get_bucket_info(), marker, true, &meta, params.olh_epoch, params.unmod_since, params.high_precision_time, y, params.zones_trace); + int r = store->set_olh(dpp, target->get_ctx(), target->get_bucket_info(), marker, true, &meta, params.olh_epoch, params.unmod_since, params.high_precision_time, y, params.zones_trace); if (r < 0) { return r; } } else { rgw_bucket_dir_entry dirent; - int r = store->bi_get_instance(target->get_bucket_info(), obj, &dirent); + int r = store->bi_get_instance(dpp, target->get_bucket_info(), obj, &dirent); if (r < 0) { return r; } result.delete_marker = dirent.is_delete_marker(); - r = store->unlink_obj_instance(target->get_ctx(), target->get_bucket_info(), obj, params.olh_epoch, y, params.zones_trace); + r = store->unlink_obj_instance(dpp, target->get_ctx(), target->get_bucket_info(), obj, params.olh_epoch, y, params.zones_trace); if (r < 0) { return r; } @@ -5040,15 +5061,15 @@ int RGWRados::Object::Delete::delete_obj(optional_yield y) } BucketShard *bs; - int r = target->get_bucket_shard(&bs); + int r = target->get_bucket_shard(&bs, dpp); if (r < 0) { - ldout(store->ctx(), 5) << "failed to get BucketShard object: r=" << r << dendl; + ldpp_dout(dpp, 5) << "failed to get BucketShard object: r=" << r << dendl; return r; } - r = store->svc.datalog_rados->add_entry(target->bucket_info, bs->shard_id); + r = store->svc.datalog_rados->add_entry(dpp, target->bucket_info, bs->shard_id); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed writing data log" << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed writing data log" << dendl; return r; } @@ -5056,13 +5077,13 @@ int RGWRados::Object::Delete::delete_obj(optional_yield y) } rgw_rados_ref ref; - int r = store->get_obj_head_ref(target->get_bucket_info(), obj, &ref); + int r = store->get_obj_head_ref(dpp, target->get_bucket_info(), obj, &ref); if (r < 0) { return r; } RGWObjState *state; - r = target->get_state(&state, false, y); + r = target->get_state(dpp, &state, false, y); if (r < 0) return r; @@ -5076,7 +5097,7 @@ int RGWRados::Object::Delete::delete_obj(optional_yield y) unmod.tv_nsec = 0; } - ldout(store->ctx(), 10) << "If-UnModified-Since: " << params.unmod_since << " Last-Modified: " << ctime << dendl; + ldpp_dout(dpp, 10) << "If-UnModified-Since: " << params.unmod_since << " Last-Modified: " << ctime << dendl; if (ctime > unmod) { return -ERR_PRECONDITION_FAILED; } @@ -5099,7 +5120,7 @@ int RGWRados::Object::Delete::delete_obj(optional_yield y) auto iter = bl.cbegin(); decode(delete_at, iter); } catch (buffer::error& err) { - ldout(store->ctx(), 0) << "ERROR: couldn't decode RGW_ATTR_DELETE_AT" << dendl; + ldpp_dout(dpp, 0) << "ERROR: couldn't decode RGW_ATTR_DELETE_AT" << dendl; return -EIO; } @@ -5116,7 +5137,7 @@ int RGWRados::Object::Delete::delete_obj(optional_yield y) return -ENOENT; } - r = target->prepare_atomic_modification(op, false, NULL, NULL, NULL, true, false, y); + r = target->prepare_atomic_modification(dpp, op, false, NULL, NULL, NULL, true, false, y); if (r < 0) return r; @@ -5128,14 +5149,14 @@ int RGWRados::Object::Delete::delete_obj(optional_yield y) index_op.set_zones_trace(params.zones_trace); 
index_op.set_bilog_flags(params.bilog_flags); - r = index_op.prepare(CLS_RGW_OP_DEL, &state->write_tag, y); + r = index_op.prepare(dpp, CLS_RGW_OP_DEL, &state->write_tag, y); if (r < 0) return r; store->remove_rgw_head_obj(op); auto& ioctx = ref.pool.ioctx(); - r = rgw_rados_operate(ioctx, ref.obj.oid, &op, null_yield); + r = rgw_rados_operate(dpp, ioctx, ref.obj.oid, &op, null_yield); /* raced with another operation, object state is indeterminate */ const bool need_invalidate = (r == -ECANCELED); @@ -5147,17 +5168,17 @@ int RGWRados::Object::Delete::delete_obj(optional_yield y) tombstone_entry entry{*state}; obj_tombstone_cache->add(obj, entry); } - r = index_op.complete_del(poolid, ioctx.get_last_version(), state->mtime, params.remove_objs); + r = index_op.complete_del(dpp, poolid, ioctx.get_last_version(), state->mtime, params.remove_objs); - int ret = target->complete_atomic_modification(); + int ret = target->complete_atomic_modification(dpp); if (ret < 0) { - ldout(store->ctx(), 0) << "ERROR: complete_atomic_modification returned ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: complete_atomic_modification returned ret=" << ret << dendl; } /* other than that, no need to propagate error */ } else { - int ret = index_op.cancel(); + int ret = index_op.cancel(dpp); if (ret < 0) { - ldout(store->ctx(), 0) << "ERROR: index_op.cancel() returned ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: index_op.cancel() returned ret=" << ret << dendl; } } @@ -5174,7 +5195,8 @@ int RGWRados::Object::Delete::delete_obj(optional_yield y) return 0; } -int RGWRados::delete_obj(RGWObjectCtx& obj_ctx, +int RGWRados::delete_obj(const DoutPrefixProvider *dpp, + RGWObjectCtx& obj_ctx, const RGWBucketInfo& bucket_info, const rgw_obj& obj, int versioning_status, @@ -5191,13 +5213,13 @@ int RGWRados::delete_obj(RGWObjectCtx& obj_ctx, del_op.params.expiration_time = expiration_time; del_op.params.zones_trace = zones_trace; - return del_op.delete_obj(null_yield); + return del_op.delete_obj(null_yield, dpp); } -int RGWRados::delete_raw_obj(const rgw_raw_obj& obj) +int RGWRados::delete_raw_obj(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj) { rgw_rados_ref ref; - int r = get_raw_obj_ref(obj, &ref); + int r = get_raw_obj_ref(dpp, obj, &ref); if (r < 0) { return r; } @@ -5205,14 +5227,14 @@ int RGWRados::delete_raw_obj(const rgw_raw_obj& obj) ObjectWriteOperation op; op.remove(); - r = rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield); + r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield); if (r < 0) return r; return 0; } -int RGWRados::delete_obj_index(const rgw_obj& obj, ceph::real_time mtime) +int RGWRados::delete_obj_index(const rgw_obj& obj, ceph::real_time mtime, const DoutPrefixProvider *dpp) { std::string oid, key; get_obj_bucket_and_oid_loc(obj, oid, key); @@ -5220,24 +5242,24 @@ int RGWRados::delete_obj_index(const rgw_obj& obj, ceph::real_time mtime) auto obj_ctx = svc.sysobj->init_obj_ctx(); RGWBucketInfo bucket_info; - int ret = get_bucket_instance_info(obj_ctx, obj.bucket, bucket_info, NULL, NULL, null_yield); + int ret = get_bucket_instance_info(obj_ctx, obj.bucket, bucket_info, NULL, NULL, null_yield, dpp); if (ret < 0) { - ldout(cct, 0) << "ERROR: " << __func__ << "() get_bucket_instance_info(bucket=" << obj.bucket << ") returned ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "() get_bucket_instance_info(bucket=" << obj.bucket << ") returned ret=" << ret << dendl; return ret; } RGWRados::Bucket bop(this, bucket_info); 
RGWRados::Bucket::UpdateIndex index_op(&bop, obj);
-  return index_op.complete_del(-1 /* pool */, 0, mtime, NULL);
+  return index_op.complete_del(dpp, -1 /* pool */, 0, mtime, NULL);
 }

-static void generate_fake_tag(rgw::sal::RGWStore* store, map<string, bufferlist>& attrset, RGWObjManifest& manifest, bufferlist& manifest_bl, bufferlist& tag_bl)
+static void generate_fake_tag(const DoutPrefixProvider *dpp, rgw::sal::RGWStore* store, map<string, bufferlist>& attrset, RGWObjManifest& manifest, bufferlist& manifest_bl, bufferlist& tag_bl)
 {
   string tag;
-  RGWObjManifest::obj_iterator mi = manifest.obj_begin();
-  if (mi != manifest.obj_end()) {
+  RGWObjManifest::obj_iterator mi = manifest.obj_begin(dpp);
+  if (mi != manifest.obj_end(dpp)) {
     if (manifest.has_tail()) // first object usually points at the head, let's skip to a more unique part
       ++mi;
     tag = mi.get_location().get_raw_obj(store).oid;
@@ -5276,17 +5298,17 @@ static bool has_olh_tag(map<string, bufferlist>& attrs)
   return (iter != attrs.end());
 }

-int RGWRados::get_olh_target_state(RGWObjectCtx& obj_ctx, const RGWBucketInfo& bucket_info, const rgw_obj& obj,
+int RGWRados::get_olh_target_state(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx, const RGWBucketInfo& bucket_info, const rgw_obj& obj,
                                    RGWObjState *olh_state, RGWObjState **target_state, optional_yield y)
 {
   ceph_assert(olh_state->is_olh);
   rgw_obj target;
-  int r = RGWRados::follow_olh(bucket_info, obj_ctx, olh_state, obj, &target); /* might return -EAGAIN */
+  int r = RGWRados::follow_olh(dpp, bucket_info, obj_ctx, olh_state, obj, &target); /* might return -EAGAIN */
   if (r < 0) {
     return r;
   }
-  r = get_obj_state(&obj_ctx, bucket_info, target, target_state, false, y);
+  r = get_obj_state(dpp, &obj_ctx, bucket_info, target, target_state, false, y);
   if (r < 0) {
     return r;
   }
@@ -5294,7 +5316,7 @@ int RGWRados::get_olh_target_state(RGWObjectCtx& obj_ctx, const RGWBucketInfo& b
   return 0;
 }

-int RGWRados::get_obj_state_impl(RGWObjectCtx *rctx, const RGWBucketInfo& bucket_info, const rgw_obj& obj,
+int RGWRados::get_obj_state_impl(const DoutPrefixProvider *dpp, RGWObjectCtx *rctx, const RGWBucketInfo& bucket_info, const rgw_obj& obj,
                                  RGWObjState **state, bool follow_olh, optional_yield y, bool assume_noent)
 {
   if (obj.empty()) {
@@ -5304,11 +5326,11 @@ int RGWRados::get_obj_state_impl(RGWObjectCtx *rctx, const RGWBucketInfo& bucket
   bool need_follow_olh = follow_olh && obj.key.instance.empty();
   RGWObjState *s = rctx->get_state(obj);
-  ldout(cct, 20) << "get_obj_state: rctx=" << (void *)rctx << " obj=" << obj << " state=" << (void *)s << " s->prefetch_data=" << s->prefetch_data << dendl;
+  ldpp_dout(dpp, 20) << "get_obj_state: rctx=" << (void *)rctx << " obj=" << obj << " state=" << (void *)s << " s->prefetch_data=" << s->prefetch_data << dendl;
   *state = s;
   if (s->has_attrs) {
     if (s->is_olh && need_follow_olh) {
-      return get_olh_target_state(*rctx, bucket_info, obj, s, state, y);
+      return get_olh_target_state(dpp, *rctx, bucket_info, obj, s, state, y);
     }
     return 0;
   }
@@ -5321,7 +5343,7 @@ int RGWRados::get_obj_state_impl(RGWObjectCtx *rctx, const RGWBucketInfo& bucket
   int r = -ENOENT;
   if (!assume_noent) {
-    r = RGWRados::raw_obj_stat(raw_obj, &s->size, &s->mtime, &s->epoch, &s->attrset, (s->prefetch_data ? &s->data : NULL), NULL, y);
+    r = RGWRados::raw_obj_stat(dpp, raw_obj, &s->size, &s->mtime, &s->epoch, &s->attrset, (s->prefetch_data ?
&s->data : NULL), NULL, y); } if (r == -ENOENT) { @@ -5332,7 +5354,7 @@ int RGWRados::get_obj_state_impl(RGWObjectCtx *rctx, const RGWBucketInfo& bucket s->mtime = entry.mtime; s->zone_short_id = entry.zone_short_id; s->pg_ver = entry.pg_ver; - ldout(cct, 20) << __func__ << "(): found obj in tombstone cache: obj=" << obj + ldpp_dout(dpp, 20) << __func__ << "(): found obj in tombstone cache: obj=" << obj << " mtime=" << s->mtime << " pgv=" << s->pg_ver << dendl; } else { s->mtime = real_time(); @@ -5367,7 +5389,7 @@ int RGWRados::get_obj_state_impl(RGWObjectCtx *rctx, const RGWBucketInfo& bucket decode(info, p); s->accounted_size = info.orig_size; } catch (buffer::error&) { - dout(0) << "ERROR: could not decode compression info for object: " << obj << dendl; + ldpp_dout(dpp, 0) << "ERROR: could not decode compression info for object: " << obj << dendl; return -EIO; } } @@ -5397,15 +5419,15 @@ int RGWRados::get_obj_state_impl(RGWObjectCtx *rctx, const RGWBucketInfo& bucket if (!compressed) s->accounted_size = s->size; } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: couldn't decode manifest" << dendl; + ldpp_dout(dpp, 0) << "ERROR: couldn't decode manifest" << dendl; return -EIO; } - ldout(cct, 10) << "manifest: total_size = " << s->manifest->get_obj_size() << dendl; + ldpp_dout(dpp, 10) << "manifest: total_size = " << s->manifest->get_obj_size() << dendl; if (cct->_conf->subsys.should_gather() && \ s->manifest->has_explicit_objs()) { RGWObjManifest::obj_iterator mi; - for (mi = s->manifest->obj_begin(); mi != s->manifest->obj_end(); ++mi) { - ldout(cct, 20) << "manifest: ofs=" << mi.get_ofs() << " loc=" << mi.get_location().get_raw_obj(store) << dendl; + for (mi = s->manifest->obj_begin(dpp); mi != s->manifest->obj_end(dpp); ++mi) { + ldpp_dout(dpp, 20) << "manifest: ofs=" << mi.get_ofs() << " loc=" << mi.get_location().get_raw_obj(store) << dendl; } } @@ -5414,7 +5436,7 @@ int RGWRados::get_obj_state_impl(RGWObjectCtx *rctx, const RGWBucketInfo& bucket * Uh oh, something's wrong, object with manifest should have tag. 
Let's * create one out of the manifest, would be unique */ - generate_fake_tag(store, s->attrset, *s->manifest, manifest_bl, s->obj_tag); + generate_fake_tag(dpp, store, s->attrset, *s->manifest, manifest_bl, s->obj_tag); s->fake_tag = true; } } @@ -5426,7 +5448,7 @@ int RGWRados::get_obj_state_impl(RGWObjectCtx *rctx, const RGWBucketInfo& bucket try { decode(s->pg_ver, pgbl); } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: couldn't decode pg ver attr for object " << s->obj << ", non-critical error, ignoring" << dendl; + ldpp_dout(dpp, 0) << "ERROR: couldn't decode pg ver attr for object " << s->obj << ", non-critical error, ignoring" << dendl; } } } @@ -5438,14 +5460,15 @@ int RGWRados::get_obj_state_impl(RGWObjectCtx *rctx, const RGWBucketInfo& bucket try { decode(s->zone_short_id, zbl); } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: couldn't decode zone short id attr for object " << s->obj << ", non-critical error, ignoring" << dendl; + ldpp_dout(dpp, 0) << "ERROR: couldn't decode zone short id attr for object " << s->obj << ", non-critical error, ignoring" << dendl; } } } - if (s->obj_tag.length()) - ldout(cct, 20) << "get_obj_state: setting s->obj_tag to " << s->obj_tag.c_str() << dendl; - else - ldout(cct, 20) << "get_obj_state: s->obj_tag was set empty" << dendl; + if (s->obj_tag.length()) { + ldpp_dout(dpp, 20) << "get_obj_state: setting s->obj_tag to " << s->obj_tag.c_str() << dendl; + } else { + ldpp_dout(dpp, 20) << "get_obj_state: s->obj_tag was set empty" << dendl; + } /* an object might not be olh yet, but could have olh id tag, so we should set it anyway if * it exist, and not only if is_olh() returns true @@ -5458,10 +5481,10 @@ int RGWRados::get_obj_state_impl(RGWObjectCtx *rctx, const RGWBucketInfo& bucket if (is_olh(s->attrset)) { s->is_olh = true; - ldout(cct, 20) << __func__ << ": setting s->olh_tag to " << string(s->olh_tag.c_str(), s->olh_tag.length()) << dendl; + ldpp_dout(dpp, 20) << __func__ << ": setting s->olh_tag to " << string(s->olh_tag.c_str(), s->olh_tag.length()) << dendl; if (need_follow_olh) { - return get_olh_target_state(*rctx, bucket_info, obj, s, state, y); + return get_olh_target_state(dpp, *rctx, bucket_info, obj, s, state, y); } else if (obj.key.have_null_instance() && !s->manifest) { // read null version, and the head object only have olh info s->exists = false; @@ -5472,22 +5495,22 @@ int RGWRados::get_obj_state_impl(RGWObjectCtx *rctx, const RGWBucketInfo& bucket return 0; } -int RGWRados::get_obj_state(RGWObjectCtx *rctx, const RGWBucketInfo& bucket_info, const rgw_obj& obj, RGWObjState **state, +int RGWRados::get_obj_state(const DoutPrefixProvider *dpp, RGWObjectCtx *rctx, const RGWBucketInfo& bucket_info, const rgw_obj& obj, RGWObjState **state, bool follow_olh, optional_yield y, bool assume_noent) { int ret; do { - ret = get_obj_state_impl(rctx, bucket_info, obj, state, follow_olh, y, assume_noent); + ret = get_obj_state_impl(dpp, rctx, bucket_info, obj, state, follow_olh, y, assume_noent); } while (ret == -EAGAIN); return ret; } -int RGWRados::Object::get_manifest(RGWObjManifest **pmanifest, optional_yield y) +int RGWRados::Object::get_manifest(const DoutPrefixProvider *dpp, RGWObjManifest **pmanifest, optional_yield y) { RGWObjState *astate; - int r = get_state(&astate, true, y); + int r = get_state(dpp, &astate, true, y); if (r < 0) { return r; } @@ -5497,10 +5520,10 @@ int RGWRados::Object::get_manifest(RGWObjManifest **pmanifest, optional_yield y) return 0; } -int RGWRados::Object::Read::get_attr(const char *name, 
bufferlist& dest, optional_yield y) +int RGWRados::Object::Read::get_attr(const DoutPrefixProvider *dpp, const char *name, bufferlist& dest, optional_yield y) { RGWObjState *state; - int r = source->get_state(&state, true, y); + int r = source->get_state(dpp, &state, true, y); if (r < 0) return r; if (!state->exists) @@ -5511,7 +5534,7 @@ int RGWRados::Object::Read::get_attr(const char *name, bufferlist& dest, optiona return 0; } -int RGWRados::Object::Stat::stat_async() +int RGWRados::Object::Stat::stat_async(const DoutPrefixProvider *dpp) { RGWObjectCtx& ctx = source->get_ctx(); rgw_obj& obj = source->get_obj(); @@ -5532,7 +5555,7 @@ int RGWRados::Object::Stat::stat_async() string loc; get_obj_bucket_and_oid_loc(obj, oid, loc); - int r = store->get_obj_head_ioctx(source->get_bucket_info(), obj, &state.io_ctx); + int r = store->get_obj_head_ioctx(dpp, source->get_bucket_info(), obj, &state.io_ctx); if (r < 0) { return r; } @@ -5544,7 +5567,7 @@ int RGWRados::Object::Stat::stat_async() state.io_ctx.locator_set_key(loc); r = state.io_ctx.aio_operate(oid, state.completion, &op, NULL); if (r < 0) { - ldout(store->ctx(), 5) << __func__ + ldpp_dout(dpp, 5) << __func__ << ": ERROR: aio_operate() returned ret=" << r << dendl; return r; @@ -5590,39 +5613,40 @@ int RGWRados::Object::Stat::finish() return 0; } -int RGWRados::append_atomic_test(RGWObjectCtx *rctx, +int RGWRados::append_atomic_test(const DoutPrefixProvider *dpp, RGWObjectCtx *rctx, const RGWBucketInfo& bucket_info, const rgw_obj& obj, ObjectOperation& op, RGWObjState **pstate, optional_yield y) { if (!rctx) return 0; - int r = get_obj_state(rctx, bucket_info, obj, pstate, false, y); + int r = get_obj_state(dpp, rctx, bucket_info, obj, pstate, false, y); if (r < 0) return r; - return append_atomic_test(*pstate, op); + return append_atomic_test(dpp, *pstate, op); } -int RGWRados::append_atomic_test(const RGWObjState* state, +int RGWRados::append_atomic_test(const DoutPrefixProvider *dpp, + const RGWObjState* state, librados::ObjectOperation& op) { if (!state->is_atomic) { - ldout(cct, 20) << "state for obj=" << state->obj << " is not atomic, not appending atomic test" << dendl; + ldpp_dout(dpp, 20) << "state for obj=" << state->obj << " is not atomic, not appending atomic test" << dendl; return 0; } if (state->obj_tag.length() > 0 && !state->fake_tag) {// check for backward compatibility op.cmpxattr(RGW_ATTR_ID_TAG, LIBRADOS_CMPXATTR_OP_EQ, state->obj_tag); } else { - ldout(cct, 20) << "state->obj_tag is empty, not appending atomic test" << dendl; + ldpp_dout(dpp, 20) << "state->obj_tag is empty, not appending atomic test" << dendl; } return 0; } -int RGWRados::Object::get_state(RGWObjState **pstate, bool follow_olh, optional_yield y, bool assume_noent) +int RGWRados::Object::get_state(const DoutPrefixProvider *dpp, RGWObjState **pstate, bool follow_olh, optional_yield y, bool assume_noent) { - return store->get_obj_state(&ctx, bucket_info, obj, pstate, follow_olh, y, assume_noent); + return store->get_obj_state(dpp, &ctx, bucket_info, obj, pstate, follow_olh, y, assume_noent); } void RGWRados::Object::invalidate_state() @@ -5630,11 +5654,12 @@ void RGWRados::Object::invalidate_state() ctx.invalidate(obj); } -int RGWRados::Object::prepare_atomic_modification(ObjectWriteOperation& op, bool reset_obj, const string *ptag, +int RGWRados::Object::prepare_atomic_modification(const DoutPrefixProvider *dpp, + ObjectWriteOperation& op, bool reset_obj, const string *ptag, const char *if_match, const char *if_nomatch, bool removal_op, bool 
modify_tail, optional_yield y) { - int r = get_state(&state, false, y); + int r = get_state(dpp, &state, false, y); if (r < 0) return r; @@ -5643,7 +5668,7 @@ int RGWRados::Object::prepare_atomic_modification(ObjectWriteOperation& op, bool (!state->fake_tag); if (!state->is_atomic) { - ldout(store->ctx(), 20) << "prepare_atomic_modification: state is not atomic. state=" << (void *)state << dendl; + ldpp_dout(dpp, 20) << "prepare_atomic_modification: state is not atomic. state=" << (void *)state << dendl; if (reset_obj) { op.create(false); @@ -5713,7 +5738,7 @@ int RGWRados::Object::prepare_atomic_modification(ObjectWriteOperation& op, bool bufferlist bl; bl.append(state->write_tag.c_str(), state->write_tag.size() + 1); - ldout(store->ctx(), 10) << "setting object write_tag=" << state->write_tag << dendl; + ldpp_dout(dpp, 10) << "setting object write_tag=" << state->write_tag << dendl; op.setxattr(RGW_ATTR_ID_TAG, bl); if (modify_tail) { @@ -5731,14 +5756,14 @@ int RGWRados::Object::prepare_atomic_modification(ObjectWriteOperation& op, bool * bl: the contents of the attr * Returns: 0 on success, -ERR# otherwise. */ -int RGWRados::set_attr(void *ctx, const RGWBucketInfo& bucket_info, rgw_obj& obj, const char *name, bufferlist& bl) +int RGWRados::set_attr(const DoutPrefixProvider *dpp, void *ctx, const RGWBucketInfo& bucket_info, rgw_obj& obj, const char *name, bufferlist& bl) { map attrs; attrs[name] = bl; - return set_attrs(ctx, bucket_info, obj, attrs, NULL, null_yield); + return set_attrs(dpp, ctx, bucket_info, obj, attrs, NULL, null_yield); } -int RGWRados::set_attrs(void *ctx, const RGWBucketInfo& bucket_info, rgw_obj& src_obj, +int RGWRados::set_attrs(const DoutPrefixProvider *dpp, void *ctx, const RGWBucketInfo& bucket_info, rgw_obj& src_obj, map& attrs, map* rmattrs, optional_yield y) @@ -5749,7 +5774,7 @@ int RGWRados::set_attrs(void *ctx, const RGWBucketInfo& bucket_info, rgw_obj& sr } rgw_rados_ref ref; - int r = get_obj_head_ref(bucket_info, obj, &ref); + int r = get_obj_head_ref(dpp, bucket_info, obj, &ref); if (r < 0) { return r; } @@ -5758,7 +5783,7 @@ int RGWRados::set_attrs(void *ctx, const RGWBucketInfo& bucket_info, rgw_obj& sr ObjectWriteOperation op; RGWObjState *state = NULL; - r = append_atomic_test(rctx, bucket_info, obj, op, &state, y); + r = append_atomic_test(dpp, rctx, bucket_info, obj, op, &state, y); if (r < 0) return r; @@ -5794,9 +5819,9 @@ int RGWRados::set_attrs(void *ctx, const RGWBucketInfo& bucket_info, rgw_obj& sr rgw_obj_index_key obj_key; obj.key.get_index_key(&obj_key); - obj_expirer->hint_add(ts, bucket.tenant, bucket.name, bucket.bucket_id, obj_key); + obj_expirer->hint_add(dpp, ts, bucket.tenant, bucket.name, bucket.bucket_id, obj_key); } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: failed to decode " RGW_ATTR_DELETE_AT << " attr" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to decode " RGW_ATTR_DELETE_AT << " attr" << dendl; } } } @@ -5814,7 +5839,7 @@ int RGWRados::set_attrs(void *ctx, const RGWBucketInfo& bucket_info, rgw_obj& sr string tag; append_rand_alpha(cct, tag, tag, 32); state->write_tag = tag; - r = index_op.prepare(CLS_RGW_OP_ADD, &state->write_tag, y); + r = index_op.prepare(dpp, CLS_RGW_OP_ADD, &state->write_tag, y); if (r < 0) return r; @@ -5828,7 +5853,7 @@ int RGWRados::set_attrs(void *ctx, const RGWBucketInfo& bucket_info, rgw_obj& sr struct timespec mtime_ts = real_clock::to_timespec(mtime); op.mtime2(&mtime_ts); auto& ioctx = ref.pool.ioctx(); - r = rgw_rados_operate(ioctx, ref.obj.oid, &op, null_yield); + r = 
rgw_rados_operate(dpp, ioctx, ref.obj.oid, &op, null_yield); if (state) { if (r >= 0) { bufferlist acl_bl = attrs[RGW_ATTR_ACL]; @@ -5843,13 +5868,13 @@ int RGWRados::set_attrs(void *ctx, const RGWBucketInfo& bucket_info, rgw_obj& sr } uint64_t epoch = ioctx.get_last_version(); int64_t poolid = ioctx.get_id(); - r = index_op.complete(poolid, epoch, state->size, state->accounted_size, + r = index_op.complete(dpp, poolid, epoch, state->size, state->accounted_size, mtime, etag, content_type, storage_class, &acl_bl, RGWObjCategory::Main, NULL); } else { - int ret = index_op.cancel(); + int ret = index_op.cancel(dpp); if (ret < 0) { - ldout(cct, 0) << "ERROR: complete_update_index_cancel() returned ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: complete_update_index_cancel() returned ret=" << ret << dendl; } } } @@ -5877,7 +5902,7 @@ int RGWRados::set_attrs(void *ctx, const RGWBucketInfo& bucket_info, rgw_obj& sr return 0; } -int RGWRados::Object::Read::prepare(optional_yield y) +int RGWRados::Object::Read::prepare(optional_yield y, const DoutPrefixProvider *dpp) { RGWRados *store = source->get_store(); CephContext *cct = store->ctx(); @@ -5887,7 +5912,7 @@ int RGWRados::Object::Read::prepare(optional_yield y) map::iterator iter; RGWObjState *astate; - int r = source->get_state(&astate, true, y); + int r = source->get_state(dpp, &astate, true, y); if (r < 0) return r; @@ -5903,7 +5928,7 @@ int RGWRados::Object::Read::prepare(optional_yield y) state.cur_pool = state.head_obj.pool; state.cur_ioctx = &state.io_ctxs[state.cur_pool]; - r = store->get_obj_head_ioctx(bucket_info, state.obj, state.cur_ioctx); + r = store->get_obj_head_ioctx(dpp, bucket_info, state.obj, state.cur_ioctx); if (r < 0) { return r; } @@ -5914,7 +5939,7 @@ int RGWRados::Object::Read::prepare(optional_yield y) *params.attrs = astate->attrset; if (cct->_conf->subsys.should_gather()) { for (iter = params.attrs->begin(); iter != params.attrs->end(); ++iter) { - ldout(cct, 20) << "Read xattr: " << iter->first << dendl; + ldpp_dout(dpp, 20) << "Read xattr rgw_rados: " << iter->first << dendl; } } } @@ -5930,7 +5955,7 @@ int RGWRados::Object::Read::prepare(optional_yield y) if (conds.mod_ptr && !conds.if_nomatch) { dest_weight.init(*conds.mod_ptr, conds.mod_zone_id, conds.mod_pg_ver); - ldout(cct, 10) << "If-Modified-Since: " << dest_weight << " Last-Modified: " << src_weight << dendl; + ldpp_dout(dpp, 10) << "If-Modified-Since: " << dest_weight << " Last-Modified: " << src_weight << dendl; if (!(dest_weight < src_weight)) { return -ERR_NOT_MODIFIED; } @@ -5938,20 +5963,20 @@ int RGWRados::Object::Read::prepare(optional_yield y) if (conds.unmod_ptr && !conds.if_match) { dest_weight.init(*conds.unmod_ptr, conds.mod_zone_id, conds.mod_pg_ver); - ldout(cct, 10) << "If-UnModified-Since: " << dest_weight << " Last-Modified: " << src_weight << dendl; + ldpp_dout(dpp, 10) << "If-UnModified-Since: " << dest_weight << " Last-Modified: " << src_weight << dendl; if (dest_weight < src_weight) { return -ERR_PRECONDITION_FAILED; } } } if (conds.if_match || conds.if_nomatch) { - r = get_attr(RGW_ATTR_ETAG, etag, y); + r = get_attr(dpp, RGW_ATTR_ETAG, etag, y); if (r < 0) return r; if (conds.if_match) { string if_match_str = rgw_string_unquote(conds.if_match); - ldout(cct, 10) << "ETag: " << string(etag.c_str(), etag.length()) << " " << " If-Match: " << if_match_str << dendl; + ldpp_dout(dpp, 10) << "ETag: " << string(etag.c_str(), etag.length()) << " " << " If-Match: " << if_match_str << dendl; if (if_match_str.compare(0, etag.length(), 
etag.c_str(), etag.length()) != 0) {
         return -ERR_PRECONDITION_FAILED;
       }
@@ -5959,7 +5984,7 @@ int RGWRados::Object::Read::prepare(optional_yield y)
     if (conds.if_nomatch) {
       string if_nomatch_str = rgw_string_unquote(conds.if_nomatch);
-      ldout(cct, 10) << "ETag: " << string(etag.c_str(), etag.length()) << " " << " If-NoMatch: " << if_nomatch_str << dendl;
+      ldpp_dout(dpp, 10) << "ETag: " << string(etag.c_str(), etag.length()) << " " << " If-NoMatch: " << if_nomatch_str << dendl;
       if (if_nomatch_str.compare(0, etag.length(), etag.c_str(), etag.length()) == 0) {
         return -ERR_NOT_MODIFIED;
       }
@@ -5996,7 +6021,7 @@ int RGWRados::Object::Read::range_to_ofs(uint64_t obj_size, int64_t &ofs, int64_
   return 0;
 }

-int RGWRados::Bucket::UpdateIndex::guard_reshard(BucketShard **pbs, std::function<int(BucketShard *)> call)
+int RGWRados::Bucket::UpdateIndex::guard_reshard(const DoutPrefixProvider *dpp, BucketShard **pbs, std::function<int(BucketShard *)> call)
 {
   RGWRados *store = target->get_store();
   BucketShard *bs;
@@ -6004,30 +6029,30 @@ int RGWRados::Bucket::UpdateIndex::guard_reshard(BucketShard **pbs, std::functio
 #define NUM_RESHARD_RETRIES 10
   for (int i = 0; i < NUM_RESHARD_RETRIES; ++i) {
-    int ret = get_bucket_shard(&bs);
+    int ret = get_bucket_shard(&bs, dpp);
     if (ret < 0) {
-      ldout(store->ctx(), 5) << "failed to get BucketShard object: ret=" << ret << dendl;
+      ldpp_dout(dpp, 5) << "failed to get BucketShard object: ret=" << ret << dendl;
       return ret;
     }
     r = call(bs);
     if (r != -ERR_BUSY_RESHARDING) {
       break;
     }
-    ldout(store->ctx(), 0) << "NOTICE: resharding operation on bucket index detected, blocking" << dendl;
+    ldpp_dout(dpp, 0) << "NOTICE: resharding operation on bucket index detected, blocking" << dendl;
     string new_bucket_id;
     r = store->block_while_resharding(bs, &new_bucket_id,
-                                      target->bucket_info, null_yield);
+                                      target->bucket_info, null_yield, dpp);
     if (r == -ERR_BUSY_RESHARDING) {
       continue;
     }
     if (r < 0) {
       return r;
     }
-    ldout(store->ctx(), 20) << "reshard completion identified, new_bucket_id=" << new_bucket_id << dendl;
+    ldpp_dout(dpp, 20) << "reshard completion identified, new_bucket_id=" << new_bucket_id << dendl;
     i = 0; /* resharding is finished, make sure we can retry */
-    r = target->update_bucket_id(new_bucket_id);
+    r = target->update_bucket_id(new_bucket_id, dpp);
     if (r < 0) {
-      ldout(store->ctx(), 0) << "ERROR: update_bucket_id() new_bucket_id=" << new_bucket_id << " returned r=" << r << dendl;
+      ldpp_dout(dpp, 0) << "ERROR: update_bucket_id() new_bucket_id=" << new_bucket_id << " returned r=" << r << dendl;
       return r;
     }
     invalidate_bs();
@@ -6044,7 +6069,7 @@ int RGWRados::Bucket::UpdateIndex::guard_reshard(BucketShard **pbs, std::functio
   return 0;
 }

-int RGWRados::Bucket::UpdateIndex::prepare(RGWModifyOp op, const string *write_tag, optional_yield y)
+int RGWRados::Bucket::UpdateIndex::prepare(const DoutPrefixProvider *dpp, RGWModifyOp op, const string *write_tag, optional_yield y)
 {
   if (blind) {
     return 0;
@@ -6059,8 +6084,8 @@ int RGWRados::Bucket::UpdateIndex::prepare(RGWModifyOp op, const string *write_t
     }
   }
-  int r = guard_reshard(nullptr, [&](BucketShard *bs) -> int {
-    return store->cls_obj_prepare_op(*bs, op, optag, obj, bilog_flags, y, zones_trace);
+  int r = guard_reshard(dpp, nullptr, [&](BucketShard *bs) -> int {
+    return store->cls_obj_prepare_op(dpp, *bs, op, optag, obj, bilog_flags, y, zones_trace);
   });
   if (r < 0) {
@@ -6071,7 +6096,7 @@ int RGWRados::Bucket::UpdateIndex::prepare(RGWModifyOp op, const string *write_t
   return 0;
 }

-int RGWRados::Bucket::UpdateIndex::complete(int64_t poolid, uint64_t epoch,
+int RGWRados::Bucket::UpdateIndex::complete(const DoutPrefixProvider *dpp, int64_t poolid, uint64_t epoch, uint64_t size, uint64_t accounted_size, ceph::real_time& ut, const string& etag, const string& content_type, const string& storage_class, @@ -6086,9 +6111,9 @@ int RGWRados::Bucket::UpdateIndex::complete(int64_t poolid, uint64_t epoch, RGWRados *store = target->get_store(); BucketShard *bs; - int ret = get_bucket_shard(&bs); + int ret = get_bucket_shard(&bs, dpp); if (ret < 0) { - ldout(store->ctx(), 5) << "failed to get BucketShard object: ret=" << ret << dendl; + ldpp_dout(dpp, 5) << "failed to get BucketShard object: ret=" << ret << dendl; return ret; } @@ -6106,7 +6131,7 @@ int RGWRados::Bucket::UpdateIndex::complete(int64_t poolid, uint64_t epoch, if (acl_bl && acl_bl->length()) { int ret = store->decode_policy(*acl_bl, &owner); if (ret < 0) { - ldout(store->ctx(), 0) << "WARNING: could not decode policy ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "WARNING: could not decode policy ret=" << ret << dendl; } } ent.meta.owner = owner.get_id().to_str(); @@ -6116,15 +6141,16 @@ int RGWRados::Bucket::UpdateIndex::complete(int64_t poolid, uint64_t epoch, ret = store->cls_obj_complete_add(*bs, obj, optag, poolid, epoch, ent, category, remove_objs, bilog_flags, zones_trace); - int r = store->svc.datalog_rados->add_entry(target->bucket_info, bs->shard_id); + int r = store->svc.datalog_rados->add_entry(dpp, target->bucket_info, bs->shard_id); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed writing data log" << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed writing data log" << dendl; } return ret; } -int RGWRados::Bucket::UpdateIndex::complete_del(int64_t poolid, uint64_t epoch, +int RGWRados::Bucket::UpdateIndex::complete_del(const DoutPrefixProvider *dpp, + int64_t poolid, uint64_t epoch, real_time& removed_mtime, list *remove_objs) { @@ -6134,24 +6160,24 @@ int RGWRados::Bucket::UpdateIndex::complete_del(int64_t poolid, uint64_t epoch, RGWRados *store = target->get_store(); BucketShard *bs; - int ret = get_bucket_shard(&bs); + int ret = get_bucket_shard(&bs, dpp); if (ret < 0) { - ldout(store->ctx(), 5) << "failed to get BucketShard object: ret=" << ret << dendl; + ldpp_dout(dpp, 5) << "failed to get BucketShard object: ret=" << ret << dendl; return ret; } ret = store->cls_obj_complete_del(*bs, optag, poolid, epoch, obj, removed_mtime, remove_objs, bilog_flags, zones_trace); - int r = store->svc.datalog_rados->add_entry(target->bucket_info, bs->shard_id); + int r = store->svc.datalog_rados->add_entry(dpp, target->bucket_info, bs->shard_id); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed writing data log" << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed writing data log" << dendl; } return ret; } -int RGWRados::Bucket::UpdateIndex::cancel() +int RGWRados::Bucket::UpdateIndex::cancel(const DoutPrefixProvider *dpp) { if (blind) { return 0; @@ -6159,7 +6185,7 @@ int RGWRados::Bucket::UpdateIndex::cancel() RGWRados *store = target->get_store(); BucketShard *bs; - int ret = guard_reshard(&bs, [&](BucketShard *bs) -> int { + int ret = guard_reshard(dpp, &bs, [&](BucketShard *bs) -> int { return store->cls_obj_complete_cancel(*bs, optag, obj, bilog_flags, zones_trace); }); @@ -6168,18 +6194,17 @@ int RGWRados::Bucket::UpdateIndex::cancel() * for following the specific bucket shard log. 
Otherwise they end up staying behind, and users * have no way to tell that they're all caught up */ - int r = store->svc.datalog_rados->add_entry(target->bucket_info, bs->shard_id); + int r = store->svc.datalog_rados->add_entry(dpp, target->bucket_info, bs->shard_id); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed writing data log" << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed writing data log" << dendl; } return ret; } -int RGWRados::Object::Read::read(int64_t ofs, int64_t end, bufferlist& bl, optional_yield y) +int RGWRados::Object::Read::read(int64_t ofs, int64_t end, bufferlist& bl, optional_yield y, const DoutPrefixProvider *dpp) { RGWRados *store = source->get_store(); - CephContext *cct = store->ctx(); rgw_raw_obj read_obj; uint64_t read_ofs = ofs; @@ -6193,7 +6218,7 @@ int RGWRados::Object::Read::read(int64_t ofs, int64_t end, bufferlist& bl, optio uint64_t max_chunk_size; RGWObjState *astate; - int r = source->get_state(&astate, true, y); + int r = source->get_state(dpp, &astate, true, y); if (r < 0) return r; @@ -6210,7 +6235,7 @@ int RGWRados::Object::Read::read(int64_t ofs, int64_t end, bufferlist& bl, optio if (astate->manifest && astate->manifest->has_tail()) { /* now get the relevant object part */ - RGWObjManifest::obj_iterator iter = astate->manifest->obj_find(ofs); + RGWObjManifest::obj_iterator iter = astate->manifest->obj_find(dpp, ofs); uint64_t stripe_ofs = iter.get_stripe_ofs(); read_obj = iter.get_location().get_raw_obj(store->store); @@ -6221,9 +6246,9 @@ int RGWRados::Object::Read::read(int64_t ofs, int64_t end, bufferlist& bl, optio read_obj = state.head_obj; } - r = store->get_max_chunk_size(read_obj.pool, &max_chunk_size); + r = store->get_max_chunk_size(read_obj.pool, &max_chunk_size, dpp); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to get max_chunk_size() for pool " << read_obj.pool << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to get max_chunk_size() for pool " << read_obj.pool << dendl; return r; } @@ -6235,7 +6260,7 @@ int RGWRados::Object::Read::read(int64_t ofs, int64_t end, bufferlist& bl, optio if (reading_from_head) { /* only when reading from the head object do we need to do the atomic test */ - r = store->append_atomic_test(&source->get_ctx(), source->get_bucket_info(), state.obj, op, &astate, y); + r = store->append_atomic_test(dpp, &source->get_ctx(), source->get_bucket_info(), state.obj, op, &astate, y); if (r < 0) return r; @@ -6259,16 +6284,16 @@ int RGWRados::Object::Read::read(int64_t ofs, int64_t end, bufferlist& bl, optio } } - ldout(cct, 20) << "rados->read obj-ofs=" << ofs << " read_ofs=" << read_ofs << " read_len=" << read_len << dendl; + ldpp_dout(dpp, 20) << "rados->read obj-ofs=" << ofs << " read_ofs=" << read_ofs << " read_len=" << read_len << dendl; op.read(read_ofs, read_len, pbl, NULL); if (state.cur_pool != read_obj.pool) { auto iter = state.io_ctxs.find(read_obj.pool); if (iter == state.io_ctxs.end()) { state.cur_ioctx = &state.io_ctxs[read_obj.pool]; - r = store->open_pool_ctx(read_obj.pool, *state.cur_ioctx, false); + r = store->open_pool_ctx(dpp, read_obj.pool, *state.cur_ioctx, false); if (r < 0) { - ldout(cct, 20) << "ERROR: failed to open pool context for pool=" << read_obj.pool << " r=" << r << dendl; + ldpp_dout(dpp, 20) << "ERROR: failed to open pool context for pool=" << read_obj.pool << " r=" << r << dendl; return r; } } else { @@ -6280,7 +6305,7 @@ int RGWRados::Object::Read::read(int64_t ofs, int64_t end, bufferlist& bl, optio state.cur_ioctx->locator_set_key(read_obj.loc); r = 
state.cur_ioctx->operate(read_obj.oid, &op, NULL); - ldout(cct, 20) << "rados->read r=" << r << " bl.length=" << bl.length() << dendl; + ldpp_dout(dpp, 20) << "rados->read r=" << r << " bl.length=" << bl.length() << dendl; if (r < 0) { return r; @@ -6347,17 +6372,19 @@ struct get_obj_data { } }; -static int _get_obj_iterate_cb(const rgw_raw_obj& read_obj, off_t obj_ofs, +static int _get_obj_iterate_cb(const DoutPrefixProvider *dpp, + const rgw_raw_obj& read_obj, off_t obj_ofs, off_t read_ofs, off_t len, bool is_head_obj, RGWObjState *astate, void *arg) { struct get_obj_data *d = (struct get_obj_data *)arg; - return d->store->get_obj_iterate_cb(read_obj, obj_ofs, read_ofs, len, + return d->store->get_obj_iterate_cb(dpp, read_obj, obj_ofs, read_ofs, len, is_head_obj, astate, arg); } -int RGWRados::get_obj_iterate_cb(const rgw_raw_obj& read_obj, off_t obj_ofs, +int RGWRados::get_obj_iterate_cb(const DoutPrefixProvider *dpp, + const rgw_raw_obj& read_obj, off_t obj_ofs, off_t read_ofs, off_t len, bool is_head_obj, RGWObjState *astate, void *arg) { @@ -6367,7 +6394,7 @@ int RGWRados::get_obj_iterate_cb(const rgw_raw_obj& read_obj, off_t obj_ofs, if (is_head_obj) { /* only when reading from the head object do we need to do the atomic test */ - int r = append_atomic_test(astate, op); + int r = append_atomic_test(dpp, astate, op); if (r < 0) return r; @@ -6389,13 +6416,13 @@ int RGWRados::get_obj_iterate_cb(const rgw_raw_obj& read_obj, off_t obj_ofs, } auto obj = d->store->svc.rados->obj(read_obj); - int r = obj.open(); + int r = obj.open(dpp); if (r < 0) { - ldout(cct, 4) << "failed to open rados context for " << read_obj << dendl; + ldpp_dout(dpp, 4) << "failed to open rados context for " << read_obj << dendl; return r; } - ldout(cct, 20) << "rados->get_obj_iterate_cb oid=" << read_obj.oid << " obj-ofs=" << obj_ofs << " read_ofs=" << read_ofs << " len=" << len << dendl; + ldpp_dout(dpp, 20) << "rados->get_obj_iterate_cb oid=" << read_obj.oid << " obj-ofs=" << obj_ofs << " read_ofs=" << read_ofs << " len=" << len << dendl; op.read(read_ofs, len, nullptr, nullptr); const uint64_t cost = len; @@ -6406,7 +6433,7 @@ int RGWRados::get_obj_iterate_cb(const rgw_raw_obj& read_obj, off_t obj_ofs, return d->flush(std::move(completed)); } -int RGWRados::Object::Read::iterate(int64_t ofs, int64_t end, RGWGetDataCB *cb, +int RGWRados::Object::Read::iterate(const DoutPrefixProvider *dpp, int64_t ofs, int64_t end, RGWGetDataCB *cb, optional_yield y) { RGWRados *store = source->get_store(); @@ -6418,10 +6445,10 @@ int RGWRados::Object::Read::iterate(int64_t ofs, int64_t end, RGWGetDataCB *cb, auto aio = rgw::make_throttle(window_size, y); get_obj_data data(store, cb, &*aio, ofs, y); - int r = store->iterate_obj(obj_ctx, source->get_bucket_info(), state.obj, + int r = store->iterate_obj(dpp, obj_ctx, source->get_bucket_info(), state.obj, ofs, end, chunk_size, _get_obj_iterate_cb, &data, y); if (r < 0) { - ldout(cct, 0) << "iterate_obj() failed with " << r << dendl; + ldpp_dout(dpp, 0) << "iterate_obj() failed with " << r << dendl; data.cancel(); // drain completions without writing back to client return r; } @@ -6429,7 +6456,7 @@ int RGWRados::Object::Read::iterate(int64_t ofs, int64_t end, RGWGetDataCB *cb, return data.drain(); } -int RGWRados::iterate_obj(RGWObjectCtx& obj_ctx, +int RGWRados::iterate_obj(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx, const RGWBucketInfo& bucket_info, const rgw_obj& obj, off_t ofs, off_t end, uint64_t max_chunk_size, iterate_obj_cb cb, void *arg, optional_yield y) @@ 
-6443,7 +6470,7 @@ int RGWRados::iterate_obj(RGWObjectCtx& obj_ctx, obj_to_raw(bucket_info.placement_rule, obj, &head_obj); - int r = get_obj_state(&obj_ctx, bucket_info, obj, &astate, false, y); + int r = get_obj_state(dpp, &obj_ctx, bucket_info, obj, &astate, false, y); if (r < 0) { return r; } @@ -6455,9 +6482,9 @@ int RGWRados::iterate_obj(RGWObjectCtx& obj_ctx, if (astate->manifest) { /* now get the relevant object stripe */ - RGWObjManifest::obj_iterator iter = astate->manifest->obj_find(ofs); + RGWObjManifest::obj_iterator iter = astate->manifest->obj_find(dpp, ofs); - RGWObjManifest::obj_iterator obj_end = astate->manifest->obj_end(); + RGWObjManifest::obj_iterator obj_end = astate->manifest->obj_end(dpp); for (; iter != obj_end && ofs <= end; ++iter) { off_t stripe_ofs = iter.get_stripe_ofs(); @@ -6473,7 +6500,7 @@ int RGWRados::iterate_obj(RGWObjectCtx& obj_ctx, } reading_from_head = (read_obj == head_obj); - r = cb(read_obj, ofs, read_ofs, read_len, reading_from_head, astate, arg); + r = cb(dpp, read_obj, ofs, read_ofs, read_len, reading_from_head, astate, arg); if (r < 0) { return r; } @@ -6487,7 +6514,7 @@ int RGWRados::iterate_obj(RGWObjectCtx& obj_ctx, read_obj = head_obj; uint64_t read_len = std::min(len, max_chunk_size); - r = cb(read_obj, ofs, ofs, read_len, reading_from_head, astate, arg); + r = cb(dpp, read_obj, ofs, ofs, read_len, reading_from_head, astate, arg); if (r < 0) { return r; } @@ -6500,31 +6527,31 @@ int RGWRados::iterate_obj(RGWObjectCtx& obj_ctx, return 0; } -int RGWRados::obj_operate(const RGWBucketInfo& bucket_info, const rgw_obj& obj, ObjectWriteOperation *op) +int RGWRados::obj_operate(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, ObjectWriteOperation *op) { rgw_rados_ref ref; - int r = get_obj_head_ref(bucket_info, obj, &ref); + int r = get_obj_head_ref(dpp, bucket_info, obj, &ref); if (r < 0) { return r; } - return rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, op, null_yield); + return rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, op, null_yield); } -int RGWRados::obj_operate(const RGWBucketInfo& bucket_info, const rgw_obj& obj, ObjectReadOperation *op) +int RGWRados::obj_operate(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, ObjectReadOperation *op) { rgw_rados_ref ref; - int r = get_obj_head_ref(bucket_info, obj, &ref); + int r = get_obj_head_ref(dpp, bucket_info, obj, &ref); if (r < 0) { return r; } bufferlist outbl; - return rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, op, &outbl, null_yield); + return rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, op, &outbl, null_yield); } -int RGWRados::olh_init_modification_impl(const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& olh_obj, string *op_tag) +int RGWRados::olh_init_modification_impl(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& olh_obj, string *op_tag) { ObjectWriteOperation op; @@ -6552,7 +6579,7 @@ int RGWRados::olh_init_modification_impl(const RGWBucketInfo& bucket_info, RGWOb */ if (has_tag) { /* guard against racing writes */ - bucket_index_guard_olh_op(state, op); + bucket_index_guard_olh_op(dpp, state, op); } if (!has_tag) { @@ -6602,7 +6629,7 @@ int RGWRados::olh_init_modification_impl(const RGWBucketInfo& bucket_info, RGWOb op.setxattr(attr_name.c_str(), bl); - int ret = obj_operate(bucket_info, olh_obj, &op); + int ret = obj_operate(dpp, bucket_info, olh_obj, &op); if (ret < 0) { return ret; } @@ -6613,11 +6640,11 @@ 
int RGWRados::olh_init_modification_impl(const RGWBucketInfo& bucket_info, RGWOb return 0; } -int RGWRados::olh_init_modification(const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& obj, string *op_tag) +int RGWRados::olh_init_modification(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& obj, string *op_tag) { int ret; - ret = olh_init_modification_impl(bucket_info, state, obj, op_tag); + ret = olh_init_modification_impl(dpp, bucket_info, state, obj, op_tag); if (ret == -EEXIST) { ret = -ECANCELED; } @@ -6625,7 +6652,8 @@ int RGWRados::olh_init_modification(const RGWBucketInfo& bucket_info, RGWObjStat return ret; } -int RGWRados::guard_reshard(BucketShard *bs, +int RGWRados::guard_reshard(const DoutPrefixProvider *dpp, + BucketShard *bs, const rgw_obj& obj_instance, const RGWBucketInfo& bucket_info, std::function call) @@ -6635,25 +6663,25 @@ int RGWRados::guard_reshard(BucketShard *bs, int r; for (int i = 0; i < NUM_RESHARD_RETRIES; ++i) { - r = bs->init(pobj->bucket, *pobj, nullptr /* no RGWBucketInfo */); + r = bs->init(pobj->bucket, *pobj, nullptr /* no RGWBucketInfo */, dpp); if (r < 0) { - ldout(cct, 5) << "bs.init() returned ret=" << r << dendl; + ldpp_dout(dpp, 5) << "bs.init() returned ret=" << r << dendl; return r; } r = call(bs); if (r != -ERR_BUSY_RESHARDING) { break; } - ldout(cct, 0) << "NOTICE: resharding operation on bucket index detected, blocking" << dendl; + ldpp_dout(dpp, 0) << "NOTICE: resharding operation on bucket index detected, blocking" << dendl; string new_bucket_id; - r = block_while_resharding(bs, &new_bucket_id, bucket_info, null_yield); + r = block_while_resharding(bs, &new_bucket_id, bucket_info, null_yield, dpp); if (r == -ERR_BUSY_RESHARDING) { continue; } if (r < 0) { return r; } - ldout(cct, 20) << "reshard completion identified, new_bucket_id=" << new_bucket_id << dendl; + ldpp_dout(dpp, 20) << "reshard completion identified, new_bucket_id=" << new_bucket_id << dendl; i = 0; /* resharding is finished, make sure we can retry */ obj = *pobj; @@ -6671,7 +6699,8 @@ int RGWRados::guard_reshard(BucketShard *bs, int RGWRados::block_while_resharding(RGWRados::BucketShard *bs, string *new_bucket_id, const RGWBucketInfo& bucket_info, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { int ret = 0; cls_rgw_bucket_instance_entry entry; @@ -6682,12 +6711,12 @@ int RGWRados::block_while_resharding(RGWRados::BucketShard *bs, // new_bucket_id and returns 0, otherwise it returns a negative // error code auto fetch_new_bucket_id = - [this, &bucket_info](const std::string& log_tag, + [this, &bucket_info, dpp](const std::string& log_tag, std::string* new_bucket_id) -> int { RGWBucketInfo fresh_bucket_info = bucket_info; - int ret = try_refresh_bucket_info(fresh_bucket_info, nullptr); + int ret = try_refresh_bucket_info(fresh_bucket_info, nullptr, dpp); if (ret < 0) { - ldout(cct, 0) << __func__ << + ldpp_dout(dpp, 0) << __func__ << " ERROR: failed to refresh bucket info after reshard at " << log_tag << ": " << cpp_strerror(-ret) << dendl; return ret; @@ -6703,7 +6732,7 @@ int RGWRados::block_while_resharding(RGWRados::BucketShard *bs, if (ret == -ENOENT) { return fetch_new_bucket_id("get_bucket_resharding_failed", new_bucket_id); } else if (ret < 0) { - ldout(cct, 0) << __func__ << + ldpp_dout(dpp, 0) << __func__ << " ERROR: failed to get bucket resharding : " << cpp_strerror(-ret) << dendl; return ret; @@ -6714,7 +6743,7 @@ int RGWRados::block_while_resharding(RGWRados::BucketShard 
*bs, new_bucket_id); } - ldout(cct, 20) << "NOTICE: reshard still in progress; " << + ldpp_dout(dpp, 20) << "NOTICE: reshard still in progress; " << (i < num_retries ? "retrying" : "too many retries") << dendl; if (i == num_retries) { @@ -6735,22 +6764,22 @@ int RGWRados::block_while_resharding(RGWRados::BucketShard *bs, RGWBucketReshardLock reshard_lock(this->store, bucket_info, true); ret = reshard_lock.lock(); if (ret < 0) { - ldout(cct, 20) << __func__ << + ldpp_dout(dpp, 20) << __func__ << " INFO: failed to take reshard lock for bucket " << bucket_id << "; expected if resharding underway" << dendl; } else { - ldout(cct, 10) << __func__ << + ldpp_dout(dpp, 10) << __func__ << " INFO: was able to take reshard lock for bucket " << bucket_id << dendl; - ret = RGWBucketReshard::clear_resharding(this->store, bucket_info); + ret = RGWBucketReshard::clear_resharding(dpp, this->store, bucket_info); if (ret < 0) { reshard_lock.unlock(); - ldout(cct, 0) << __func__ << + ldpp_dout(dpp, 0) << __func__ << " ERROR: failed to clear resharding flags for bucket " << bucket_id << dendl; } else { reshard_lock.unlock(); - ldout(cct, 5) << __func__ << + ldpp_dout(dpp, 5) << __func__ << " INFO: apparently successfully cleared resharding flags for " "bucket " << bucket_id << dendl; continue; // if we apparently succeed immediately test again @@ -6760,18 +6789,18 @@ int RGWRados::block_while_resharding(RGWRados::BucketShard *bs, ret = reshard_wait->wait(y); if (ret < 0) { - ldout(cct, 0) << __func__ << + ldpp_dout(dpp, 0) << __func__ << " ERROR: bucket is still resharding, please retry" << dendl; return ret; } } // for loop - ldout(cct, 0) << __func__ << + ldpp_dout(dpp, 0) << __func__ << " ERROR: bucket is still resharding, please retry" << dendl; return -ERR_BUSY_RESHARDING; } -int RGWRados::bucket_index_link_olh(const RGWBucketInfo& bucket_info, RGWObjState& olh_state, const rgw_obj& obj_instance, +int RGWRados::bucket_index_link_olh(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, RGWObjState& olh_state, const rgw_obj& obj_instance, bool delete_marker, const string& op_tag, struct rgw_bucket_dir_entry_meta *meta, @@ -6780,7 +6809,7 @@ int RGWRados::bucket_index_link_olh(const RGWBucketInfo& bucket_info, RGWObjStat rgw_zone_set *_zones_trace, bool log_data_change) { rgw_rados_ref ref; - int r = get_obj_head_ref(bucket_info, obj_instance, &ref); + int r = get_obj_head_ref(dpp, bucket_info, obj_instance, &ref); if (r < 0) { return r; } @@ -6793,7 +6822,7 @@ int RGWRados::bucket_index_link_olh(const RGWBucketInfo& bucket_info, RGWObjStat BucketShard bs(this); - r = guard_reshard(&bs, obj_instance, bucket_info, + r = guard_reshard(dpp, &bs, obj_instance, bucket_info, [&](BucketShard *bs) -> int { cls_rgw_obj_key key(obj_instance.key.get_index_key_name(), obj_instance.key.instance); auto& ref = bs->bucket_obj.get_ref(); @@ -6803,32 +6832,32 @@ int RGWRados::bucket_index_link_olh(const RGWBucketInfo& bucket_info, RGWObjStat delete_marker, op_tag, meta, olh_epoch, unmod_since, high_precision_time, svc.zone->get_zone().log_data, zones_trace); - return rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield); + return rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield); }); if (r < 0) { - ldout(cct, 20) << "rgw_rados_operate() after cls_rgw_bucket_link_olh() returned r=" << r << dendl; + ldpp_dout(dpp, 20) << "rgw_rados_operate() after cls_rgw_bucket_link_olh() returned r=" << r << dendl; return r; } - r = svc.datalog_rados->add_entry(bucket_info, bs.shard_id); + r = 
svc.datalog_rados->add_entry(dpp, bucket_info, bs.shard_id); if (r < 0) { - ldout(cct, 0) << "ERROR: failed writing data log" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed writing data log" << dendl; } return 0; } -void RGWRados::bucket_index_guard_olh_op(RGWObjState& olh_state, ObjectOperation& op) +void RGWRados::bucket_index_guard_olh_op(const DoutPrefixProvider *dpp, RGWObjState& olh_state, ObjectOperation& op) { - ldout(cct, 20) << __func__ << "(): olh_state.olh_tag=" << string(olh_state.olh_tag.c_str(), olh_state.olh_tag.length()) << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): olh_state.olh_tag=" << string(olh_state.olh_tag.c_str(), olh_state.olh_tag.length()) << dendl; op.cmpxattr(RGW_ATTR_OLH_ID_TAG, CEPH_OSD_CMPXATTR_OP_EQ, olh_state.olh_tag); } -int RGWRados::bucket_index_unlink_instance(const RGWBucketInfo& bucket_info, const rgw_obj& obj_instance, +int RGWRados::bucket_index_unlink_instance(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj_instance, const string& op_tag, const string& olh_tag, uint64_t olh_epoch, rgw_zone_set *_zones_trace) { rgw_rados_ref ref; - int r = get_obj_head_ref(bucket_info, obj_instance, &ref); + int r = get_obj_head_ref(dpp, bucket_info, obj_instance, &ref); if (r < 0) { return r; } @@ -6842,39 +6871,40 @@ int RGWRados::bucket_index_unlink_instance(const RGWBucketInfo& bucket_info, con BucketShard bs(this); cls_rgw_obj_key key(obj_instance.key.get_index_key_name(), obj_instance.key.instance); - r = guard_reshard(&bs, obj_instance, bucket_info, + r = guard_reshard(dpp, &bs, obj_instance, bucket_info, [&](BucketShard *bs) -> int { auto& ref = bs->bucket_obj.get_ref(); librados::ObjectWriteOperation op; cls_rgw_guard_bucket_resharding(op, -ERR_BUSY_RESHARDING); cls_rgw_bucket_unlink_instance(op, key, op_tag, olh_tag, olh_epoch, svc.zone->get_zone().log_data, zones_trace); - return rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield); + return rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield); }); if (r < 0) { - ldout(cct, 20) << "rgw_rados_operate() after cls_rgw_bucket_link_instance() returned r=" << r << dendl; + ldpp_dout(dpp, 20) << "rgw_rados_operate() after cls_rgw_bucket_link_instance() returned r=" << r << dendl; return r; } return 0; } -int RGWRados::bucket_index_read_olh_log(const RGWBucketInfo& bucket_info, RGWObjState& state, +int RGWRados::bucket_index_read_olh_log(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& obj_instance, uint64_t ver_marker, map > *log, bool *is_truncated) { rgw_rados_ref ref; - int r = get_obj_head_ref(bucket_info, obj_instance, &ref); + int r = get_obj_head_ref(dpp, bucket_info, obj_instance, &ref); if (r < 0) { return r; } BucketShard bs(this); int ret = - bs.init(obj_instance.bucket, obj_instance, nullptr /* no RGWBucketInfo */); + bs.init(obj_instance.bucket, obj_instance, nullptr /* no RGWBucketInfo */, dpp); if (ret < 0) { - ldout(cct, 5) << "bs.init() returned ret=" << ret << dendl; + ldpp_dout(dpp, 5) << "bs.init() returned ret=" << ret << dendl; return ret; } @@ -6882,7 +6912,7 @@ int RGWRados::bucket_index_read_olh_log(const RGWBucketInfo& bucket_info, RGWObj cls_rgw_obj_key key(obj_instance.key.get_index_key_name(), string()); - ret = guard_reshard(&bs, obj_instance, bucket_info, + ret = guard_reshard(dpp, &bs, obj_instance, bucket_info, [&](BucketShard *bs) -> int { auto& ref = bs->bucket_obj.get_ref(); ObjectReadOperation op; @@ -6892,7 +6922,7 @@ int 
RGWRados::bucket_index_read_olh_log(const RGWBucketInfo& bucket_info, RGWObj int op_ret = 0; cls_rgw_get_olh_log(op, key, ver_marker, olh_tag, log_ret, op_ret); bufferlist outbl; - int r = rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, &outbl, null_yield); + int r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, &outbl, null_yield); if (r < 0) { return r; } @@ -6905,7 +6935,7 @@ int RGWRados::bucket_index_read_olh_log(const RGWBucketInfo& bucket_info, RGWObj return r; }); if (ret < 0) { - ldout(cct, 20) << "cls_rgw_get_olh_log() returned r=" << r << dendl; + ldpp_dout(dpp, 20) << "cls_rgw_get_olh_log() returned r=" << r << dendl; return ret; } @@ -6916,27 +6946,27 @@ int RGWRados::bucket_index_read_olh_log(const RGWBucketInfo& bucket_info, RGWObj // the attributes from another zone, causing link_olh() to fail endlessly due to // olh_tag mismatch. this attempts to detect this case and reconstruct the OLH // attributes from the bucket index. see http://tracker.ceph.com/issues/37792 -int RGWRados::repair_olh(RGWObjState* state, const RGWBucketInfo& bucket_info, +int RGWRados::repair_olh(const DoutPrefixProvider *dpp, RGWObjState* state, const RGWBucketInfo& bucket_info, const rgw_obj& obj) { // fetch the current olh entry from the bucket index rgw_bucket_olh_entry olh; - int r = bi_get_olh(bucket_info, obj, &olh); + int r = bi_get_olh(dpp, bucket_info, obj, &olh); if (r < 0) { - ldout(cct, 0) << "repair_olh failed to read olh entry for " << obj << dendl; + ldpp_dout(dpp, 0) << "repair_olh failed to read olh entry for " << obj << dendl; return r; } if (olh.tag == rgw_bl_str(state->olh_tag)) { // mismatch already resolved? return 0; } - ldout(cct, 4) << "repair_olh setting olh_tag=" << olh.tag + ldpp_dout(dpp, 4) << "repair_olh setting olh_tag=" << olh.tag << " key=" << olh.key << " delete_marker=" << olh.delete_marker << dendl; // rewrite OLH_ID_TAG and OLH_INFO from current olh ObjectWriteOperation op; // assert this is the same olh tag we think we're fixing - bucket_index_guard_olh_op(*state, op); + bucket_index_guard_olh_op(dpp, *state, op); // preserve existing mtime struct timespec mtime_ts = ceph::real_clock::to_timespec(state->mtime); op.mtime2(&mtime_ts); @@ -6954,32 +6984,32 @@ int RGWRados::repair_olh(RGWObjState* state, const RGWBucketInfo& bucket_info, op.setxattr(RGW_ATTR_OLH_INFO, bl); } rgw_rados_ref ref; - r = get_obj_head_ref(bucket_info, obj, &ref); + r = get_obj_head_ref(dpp, bucket_info, obj, &ref); if (r < 0) { return r; } - r = rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield); + r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield); if (r < 0) { - ldout(cct, 0) << "repair_olh failed to write olh attributes with " + ldpp_dout(dpp, 0) << "repair_olh failed to write olh attributes with " << cpp_strerror(r) << dendl; return r; } return 0; } -int RGWRados::bucket_index_trim_olh_log(const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& obj_instance, uint64_t ver) +int RGWRados::bucket_index_trim_olh_log(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& obj_instance, uint64_t ver) { rgw_rados_ref ref; - int r = get_obj_head_ref(bucket_info, obj_instance, &ref); + int r = get_obj_head_ref(dpp, bucket_info, obj_instance, &ref); if (r < 0) { return r; } BucketShard bs(this); int ret = - bs.init(obj_instance.bucket, obj_instance, nullptr /* no RGWBucketInfo */); + bs.init(obj_instance.bucket, obj_instance, nullptr /* no RGWBucketInfo */, dpp); if (ret < 0) { - 
ldout(cct, 5) << "bs.init() returned ret=" << ret << dendl; + ldpp_dout(dpp, 5) << "bs.init() returned ret=" << ret << dendl; return ret; } @@ -6987,25 +7017,25 @@ int RGWRados::bucket_index_trim_olh_log(const RGWBucketInfo& bucket_info, RGWObj cls_rgw_obj_key key(obj_instance.key.get_index_key_name(), string()); - ret = guard_reshard(&bs, obj_instance, bucket_info, + ret = guard_reshard(dpp, &bs, obj_instance, bucket_info, [&](BucketShard *pbs) -> int { ObjectWriteOperation op; cls_rgw_guard_bucket_resharding(op, -ERR_BUSY_RESHARDING); cls_rgw_trim_olh_log(op, key, ver, olh_tag); - return pbs->bucket_obj.operate(&op, null_yield); + return pbs->bucket_obj.operate(dpp, &op, null_yield); }); if (ret < 0) { - ldout(cct, 20) << "cls_rgw_trim_olh_log() returned r=" << ret << dendl; + ldpp_dout(dpp, 20) << "cls_rgw_trim_olh_log() returned r=" << ret << dendl; return ret; } return 0; } -int RGWRados::bucket_index_clear_olh(const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& obj_instance) +int RGWRados::bucket_index_clear_olh(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& obj_instance) { rgw_rados_ref ref; - int r = get_obj_head_ref(bucket_info, obj_instance, &ref); + int r = get_obj_head_ref(dpp, bucket_info, obj_instance, &ref); if (r < 0) { return r; } @@ -7016,16 +7046,16 @@ int RGWRados::bucket_index_clear_olh(const RGWBucketInfo& bucket_info, RGWObjSta cls_rgw_obj_key key(obj_instance.key.get_index_key_name(), string()); - int ret = guard_reshard(&bs, obj_instance, bucket_info, + int ret = guard_reshard(dpp, &bs, obj_instance, bucket_info, [&](BucketShard *pbs) -> int { ObjectWriteOperation op; auto& ref = pbs->bucket_obj.get_ref(); cls_rgw_guard_bucket_resharding(op, -ERR_BUSY_RESHARDING); cls_rgw_clear_olh(op, key, olh_tag); - return rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield); + return rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield); }); if (ret < 0) { - ldout(cct, 5) << "rgw_rados_operate() after cls_rgw_clear_olh() returned ret=" << ret << dendl; + ldpp_dout(dpp, 5) << "rgw_rados_operate() after cls_rgw_clear_olh() returned ret=" << ret << dendl; return ret; } @@ -7044,7 +7074,7 @@ static int decode_olh_info(CephContext* cct, const bufferlist& bl, RGWOLHInfo *o } } -int RGWRados::apply_olh_log(RGWObjectCtx& obj_ctx, RGWObjState& state, const RGWBucketInfo& bucket_info, const rgw_obj& obj, +int RGWRados::apply_olh_log(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx, RGWObjState& state, const RGWBucketInfo& bucket_info, const rgw_obj& obj, bufferlist& olh_tag, map >& log, uint64_t *plast_ver, rgw_zone_set* zones_trace) { @@ -7100,7 +7130,7 @@ int RGWRados::apply_olh_log(RGWObjectCtx& obj_ctx, RGWObjState& state, const RGW for (; viter != iter->second.end(); ++viter) { rgw_bucket_olh_log_entry& entry = *viter; - ldout(cct, 20) << "olh_log_entry: epoch=" << iter->first << " op=" << (int)entry.op + ldpp_dout(dpp, 20) << "olh_log_entry: epoch=" << iter->first << " op=" << (int)entry.op << " key=" << entry.key.name << "[" << entry.key.instance << "] " << (entry.delete_marker ? 
"(delete)" : "") << dendl; switch (entry.op) { @@ -7111,14 +7141,14 @@ int RGWRados::apply_olh_log(RGWObjectCtx& obj_ctx, RGWObjState& state, const RGW // only overwrite a link of the same epoch if its key sorts before if (link_epoch < iter->first || key.instance.empty() || key.instance > entry.key.instance) { - ldout(cct, 20) << "apply_olh_log applying key=" << entry.key << " epoch=" << iter->first << " delete_marker=" << entry.delete_marker + ldpp_dout(dpp, 20) << "apply_olh_log applying key=" << entry.key << " epoch=" << iter->first << " delete_marker=" << entry.delete_marker << " over current=" << key << " epoch=" << link_epoch << " delete_marker=" << delete_marker << dendl; need_to_link = true; need_to_remove = false; key = entry.key; delete_marker = entry.delete_marker; } else { - ldout(cct, 20) << "apply_olh skipping key=" << entry.key<< " epoch=" << iter->first << " delete_marker=" << entry.delete_marker + ldpp_dout(dpp, 20) << "apply_olh skipping key=" << entry.key<< " epoch=" << iter->first << " delete_marker=" << entry.delete_marker << " before current=" << key << " epoch=" << link_epoch << " delete_marker=" << delete_marker << dendl; } break; @@ -7127,7 +7157,7 @@ int RGWRados::apply_olh_log(RGWObjectCtx& obj_ctx, RGWObjState& state, const RGW need_to_link = false; break; default: - ldout(cct, 0) << "ERROR: apply_olh_log: invalid op: " << (int)entry.op << dendl; + ldpp_dout(dpp, 0) << "ERROR: apply_olh_log: invalid op: " << (int)entry.op << dendl; return -EIO; } string attr_name = RGW_ATTR_OLH_PENDING_PREFIX; @@ -7137,7 +7167,7 @@ int RGWRados::apply_olh_log(RGWObjectCtx& obj_ctx, RGWObjState& state, const RGW } rgw_rados_ref ref; - int r = get_obj_head_ref(bucket_info, obj, &ref); + int r = get_obj_head_ref(dpp, bucket_info, obj, &ref); if (r < 0) { return r; } @@ -7159,26 +7189,26 @@ int RGWRados::apply_olh_log(RGWObjectCtx& obj_ctx, RGWObjState& state, const RGW liter != remove_instances.end(); ++liter) { cls_rgw_obj_key& key = *liter; rgw_obj obj_instance(bucket, key); - int ret = delete_obj(obj_ctx, bucket_info, obj_instance, 0, RGW_BILOG_FLAG_VERSIONED_OP, ceph::real_time(), zones_trace); + int ret = delete_obj(dpp, obj_ctx, bucket_info, obj_instance, 0, RGW_BILOG_FLAG_VERSIONED_OP, ceph::real_time(), zones_trace); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << "ERROR: delete_obj() returned " << ret << " obj_instance=" << obj_instance << dendl; + ldpp_dout(dpp, 0) << "ERROR: delete_obj() returned " << ret << " obj_instance=" << obj_instance << dendl; return ret; } } /* update olh object */ - r = rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield); + r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield); if (r == -ECANCELED) { r = 0; } if (r < 0) { - ldout(cct, 0) << "ERROR: could not apply olh update, r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: could not apply olh update, r=" << r << dendl; return r; } - r = bucket_index_trim_olh_log(bucket_info, state, obj, last_ver); + r = bucket_index_trim_olh_log(dpp, bucket_info, state, obj, last_ver); if (r < 0) { - ldout(cct, 0) << "ERROR: could not trim olh log, r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: could not trim olh log, r=" << r << dendl; return r; } @@ -7190,16 +7220,16 @@ int RGWRados::apply_olh_log(RGWObjectCtx& obj_ctx, RGWObjState& state, const RGW cls_obj_check_prefix_exist(rm_op, RGW_ATTR_OLH_PENDING_PREFIX, true); /* fail if found one of these, pending modification */ rm_op.remove(); - r = rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &rm_op, null_yield); + r 
= rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &rm_op, null_yield); if (r == -ECANCELED) { return 0; /* someone else won this race */ } else { /* * only clear if was successful, otherwise we might clobber pending operations on this object */ - r = bucket_index_clear_olh(bucket_info, state, obj); + r = bucket_index_clear_olh(dpp, bucket_info, state, obj); if (r < 0) { - ldout(cct, 0) << "ERROR: could not clear bucket index olh entries r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: could not clear bucket index olh entries r=" << r << dendl; return r; } } @@ -7211,18 +7241,18 @@ int RGWRados::apply_olh_log(RGWObjectCtx& obj_ctx, RGWObjState& state, const RGW /* * read olh log and apply it */ -int RGWRados::update_olh(RGWObjectCtx& obj_ctx, RGWObjState *state, const RGWBucketInfo& bucket_info, const rgw_obj& obj, rgw_zone_set *zones_trace) +int RGWRados::update_olh(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx, RGWObjState *state, const RGWBucketInfo& bucket_info, const rgw_obj& obj, rgw_zone_set *zones_trace) { map > log; bool is_truncated; uint64_t ver_marker = 0; do { - int ret = bucket_index_read_olh_log(bucket_info, *state, obj, ver_marker, &log, &is_truncated); + int ret = bucket_index_read_olh_log(dpp, bucket_info, *state, obj, ver_marker, &log, &is_truncated); if (ret < 0) { return ret; } - ret = apply_olh_log(obj_ctx, *state, bucket_info, obj, state->olh_tag, log, &ver_marker, zones_trace); + ret = apply_olh_log(dpp, obj_ctx, *state, bucket_info, obj, state->olh_tag, log, &ver_marker, zones_trace); if (ret < 0) { return ret; } @@ -7231,7 +7261,7 @@ int RGWRados::update_olh(RGWObjectCtx& obj_ctx, RGWObjState *state, const RGWBuc return 0; } -int RGWRados::set_olh(RGWObjectCtx& obj_ctx, const RGWBucketInfo& bucket_info, const rgw_obj& target_obj, bool delete_marker, rgw_bucket_dir_entry_meta *meta, +int RGWRados::set_olh(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx, const RGWBucketInfo& bucket_info, const rgw_obj& target_obj, bool delete_marker, rgw_bucket_dir_entry_meta *meta, uint64_t olh_epoch, real_time unmod_since, bool high_precision_time, optional_yield y, rgw_zone_set *zones_trace, bool log_data_change) { @@ -7251,28 +7281,28 @@ int RGWRados::set_olh(RGWObjectCtx& obj_ctx, const RGWBucketInfo& bucket_info, c obj_ctx.invalidate(olh_obj); } - ret = get_obj_state(&obj_ctx, bucket_info, olh_obj, &state, false, y); /* don't follow olh */ + ret = get_obj_state(dpp, &obj_ctx, bucket_info, olh_obj, &state, false, y); /* don't follow olh */ if (ret < 0) { return ret; } - ret = olh_init_modification(bucket_info, *state, olh_obj, &op_tag); + ret = olh_init_modification(dpp, bucket_info, *state, olh_obj, &op_tag); if (ret < 0) { - ldout(cct, 20) << "olh_init_modification() target_obj=" << target_obj << " delete_marker=" << (int)delete_marker << " returned " << ret << dendl; + ldpp_dout(dpp, 20) << "olh_init_modification() target_obj=" << target_obj << " delete_marker=" << (int)delete_marker << " returned " << ret << dendl; if (ret == -ECANCELED) { continue; } return ret; } - ret = bucket_index_link_olh(bucket_info, *state, target_obj, delete_marker, + ret = bucket_index_link_olh(dpp, bucket_info, *state, target_obj, delete_marker, op_tag, meta, olh_epoch, unmod_since, high_precision_time, zones_trace, log_data_change); if (ret < 0) { - ldout(cct, 20) << "bucket_index_link_olh() target_obj=" << target_obj << " delete_marker=" << (int)delete_marker << " returned " << ret << dendl; + ldpp_dout(dpp, 20) << "bucket_index_link_olh() target_obj=" << target_obj << " 
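
The apply_olh_log() hunks above replay CLS_RGW_OLH_OP_LINK entries against the current link. The rule the loop applies is that a newer epoch always wins, and within the same epoch an entry only replaces the current link when the current instance is empty or sorts after the entry's instance. Restated as a standalone predicate with a few checks (function and variable names are illustrative):

    // Standalone restatement of the tie-break rule in the LINK branch:
    // newer epoch wins; same epoch only wins if the current instance is
    // empty or sorts after the entry's instance.
    #include <cassert>
    #include <cstdint>
    #include <iostream>
    #include <string>

    bool entry_wins(uint64_t cur_epoch, const std::string& cur_instance,
                    uint64_t entry_epoch, const std::string& entry_instance)
    {
      return cur_epoch < entry_epoch ||
             cur_instance.empty() ||
             cur_instance > entry_instance;
    }

    int main() {
      assert(entry_wins(3, "bbb", 4, "aaa"));    // newer epoch always wins
      assert(entry_wins(4, "",    4, "aaa"));    // current link has no instance
      assert(entry_wins(4, "bbb", 4, "aaa"));    // same epoch, entry sorts first
      assert(!entry_wins(4, "aaa", 4, "bbb"));   // same epoch, current sorts first
      std::cout << "all link tie-break checks passed\n";
    }
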
delete_marker=" << (int)delete_marker << " returned " << ret << dendl; if (ret == -ECANCELED) { // the bucket index rejected the link_olh() due to olh tag mismatch; // attempt to reconstruct olh head attributes based on the bucket index - int r2 = repair_olh(state, bucket_info, olh_obj); + int r2 = repair_olh(dpp, state, bucket_info, olh_obj); if (r2 < 0 && r2 != -ECANCELED) { return r2; } @@ -7284,23 +7314,23 @@ int RGWRados::set_olh(RGWObjectCtx& obj_ctx, const RGWBucketInfo& bucket_info, c } if (i == MAX_ECANCELED_RETRY) { - ldout(cct, 0) << "ERROR: exceeded max ECANCELED retries, aborting (EIO)" << dendl; + ldpp_dout(dpp, 0) << "ERROR: exceeded max ECANCELED retries, aborting (EIO)" << dendl; return -EIO; } - ret = update_olh(obj_ctx, state, bucket_info, olh_obj); + ret = update_olh(dpp, obj_ctx, state, bucket_info, olh_obj); if (ret == -ECANCELED) { /* already did what we needed, no need to retry, raced with another user */ ret = 0; } if (ret < 0) { - ldout(cct, 20) << "update_olh() target_obj=" << target_obj << " returned " << ret << dendl; + ldpp_dout(dpp, 20) << "update_olh() target_obj=" << target_obj << " returned " << ret << dendl; return ret; } return 0; } -int RGWRados::unlink_obj_instance(RGWObjectCtx& obj_ctx, RGWBucketInfo& bucket_info, const rgw_obj& target_obj, +int RGWRados::unlink_obj_instance(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx, RGWBucketInfo& bucket_info, const rgw_obj& target_obj, uint64_t olh_epoch, optional_yield y, rgw_zone_set *zones_trace) { string op_tag; @@ -7318,13 +7348,13 @@ int RGWRados::unlink_obj_instance(RGWObjectCtx& obj_ctx, RGWBucketInfo& bucket_i obj_ctx.invalidate(olh_obj); } - ret = get_obj_state(&obj_ctx, bucket_info, olh_obj, &state, false, y); /* don't follow olh */ + ret = get_obj_state(dpp, &obj_ctx, bucket_info, olh_obj, &state, false, y); /* don't follow olh */ if (ret < 0) return ret; - ret = olh_init_modification(bucket_info, *state, olh_obj, &op_tag); + ret = olh_init_modification(dpp, bucket_info, *state, olh_obj, &op_tag); if (ret < 0) { - ldout(cct, 20) << "olh_init_modification() target_obj=" << target_obj << " returned " << ret << dendl; + ldpp_dout(dpp, 20) << "olh_init_modification() target_obj=" << target_obj << " returned " << ret << dendl; if (ret == -ECANCELED) { continue; } @@ -7333,9 +7363,9 @@ int RGWRados::unlink_obj_instance(RGWObjectCtx& obj_ctx, RGWBucketInfo& bucket_i string olh_tag(state->olh_tag.c_str(), state->olh_tag.length()); - ret = bucket_index_unlink_instance(bucket_info, target_obj, op_tag, olh_tag, olh_epoch, zones_trace); + ret = bucket_index_unlink_instance(dpp, bucket_info, target_obj, op_tag, olh_tag, olh_epoch, zones_trace); if (ret < 0) { - ldout(cct, 20) << "bucket_index_unlink_instance() target_obj=" << target_obj << " returned " << ret << dendl; + ldpp_dout(dpp, 20) << "bucket_index_unlink_instance() target_obj=" << target_obj << " returned " << ret << dendl; if (ret == -ECANCELED) { continue; } @@ -7345,16 +7375,16 @@ int RGWRados::unlink_obj_instance(RGWObjectCtx& obj_ctx, RGWBucketInfo& bucket_i } if (i == MAX_ECANCELED_RETRY) { - ldout(cct, 0) << "ERROR: exceeded max ECANCELED retries, aborting (EIO)" << dendl; + ldpp_dout(dpp, 0) << "ERROR: exceeded max ECANCELED retries, aborting (EIO)" << dendl; return -EIO; } - ret = update_olh(obj_ctx, state, bucket_info, olh_obj, zones_trace); + ret = update_olh(dpp, obj_ctx, state, bucket_info, olh_obj, zones_trace); if (ret == -ECANCELED) { /* already did what we needed, no need to retry, raced with another user */ return 0; } if (ret < 
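
set_olh() and unlink_obj_instance() both run inside a bounded retry loop: -ECANCELED from any step means another writer raced us, so cached object state is invalidated and the whole sequence restarts, and exhausting MAX_ECANCELED_RETRY is reported as -EIO. A standalone model of that shape; the step lambda stands in for the real olh_init_modification / bucket_index_link_olh sequence.

    // Standalone model of the bounded -ECANCELED retry used around OLH
    // modifications: retry on races, turn exhaustion into -EIO.
    #include <cerrno>
    #include <functional>
    #include <iostream>

    int retry_on_cancel(const std::function<int()>& step, int max_retry = 10)
    {
      for (int i = 0; i < max_retry; ++i) {
        int ret = step();
        if (ret == -ECANCELED) {
          std::cout << "raced with another writer, retrying (" << i << ")\n";
          continue;                  // invalidate cached state and start over
        }
        return ret;                  // success, or a hard error
      }
      std::cout << "ERROR: exceeded max ECANCELED retries, aborting (EIO)\n";
      return -EIO;
    }

    int main() {
      int races = 2;
      int ret = retry_on_cancel([&] { return races-- > 0 ? -ECANCELED : 0; });
      std::cout << "ret=" << ret << '\n';
    }
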
0) { - ldout(cct, 20) << "update_olh() target_obj=" << target_obj << " returned " << ret << dendl; + ldpp_dout(dpp, 20) << "update_olh() target_obj=" << target_obj << " returned " << ret << dendl; return ret; } @@ -7377,14 +7407,14 @@ void RGWRados::gen_rand_obj_instance_name(rgw_obj *target_obj) gen_rand_obj_instance_name(&target_obj->key); } -int RGWRados::get_olh(const RGWBucketInfo& bucket_info, const rgw_obj& obj, RGWOLHInfo *olh) +int RGWRados::get_olh(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, RGWOLHInfo *olh) { map attrset; ObjectReadOperation op; op.getxattrs(&attrset, NULL); - int r = obj_operate(bucket_info, obj, &op); + int r = obj_operate(dpp, bucket_info, obj, &op); if (r < 0) { return r; } @@ -7428,10 +7458,10 @@ void RGWRados::check_pending_olh_entries(map& pending_entrie } } -int RGWRados::remove_olh_pending_entries(const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& olh_obj, map& pending_attrs) +int RGWRados::remove_olh_pending_entries(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& olh_obj, map& pending_attrs) { rgw_rados_ref ref; - int r = get_obj_head_ref(bucket_info, olh_obj, &ref); + int r = get_obj_head_ref(dpp, bucket_info, olh_obj, &ref); if (r < 0) { return r; } @@ -7442,26 +7472,26 @@ int RGWRados::remove_olh_pending_entries(const RGWBucketInfo& bucket_info, RGWOb auto i = pending_attrs.begin(); while (i != pending_attrs.end()) { ObjectWriteOperation op; - bucket_index_guard_olh_op(state, op); + bucket_index_guard_olh_op(dpp, state, op); for (int n = 0; n < max_entries && i != pending_attrs.end(); ++n, ++i) { op.rmxattr(i->first.c_str()); } - r = rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield); + r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield); if (r == -ENOENT || r == -ECANCELED) { /* raced with some other change, shouldn't sweat about it */ return 0; } if (r < 0) { - ldout(cct, 0) << "ERROR: could not apply olh update, r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: could not apply olh update, r=" << r << dendl; return r; } } return 0; } -int RGWRados::follow_olh(const RGWBucketInfo& bucket_info, RGWObjectCtx& obj_ctx, RGWObjState *state, const rgw_obj& olh_obj, rgw_obj *target) +int RGWRados::follow_olh(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, RGWObjectCtx& obj_ctx, RGWObjState *state, const rgw_obj& olh_obj, rgw_obj *target) { map pending_entries; rgw_filter_attrset(state->attrset, RGW_ATTR_OLH_PENDING_PREFIX, &pending_entries); @@ -7470,16 +7500,16 @@ int RGWRados::follow_olh(const RGWBucketInfo& bucket_info, RGWObjectCtx& obj_ctx check_pending_olh_entries(pending_entries, &rm_pending_entries); if (!rm_pending_entries.empty()) { - int ret = remove_olh_pending_entries(bucket_info, *state, olh_obj, rm_pending_entries); + int ret = remove_olh_pending_entries(dpp, bucket_info, *state, olh_obj, rm_pending_entries); if (ret < 0) { - ldout(cct, 20) << "ERROR: rm_pending_entries returned ret=" << ret << dendl; + ldpp_dout(dpp, 20) << "ERROR: rm_pending_entries returned ret=" << ret << dendl; return ret; } } if (!pending_entries.empty()) { - ldout(cct, 20) << __func__ << "(): found pending entries, need to update_olh() on bucket=" << olh_obj.bucket << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): found pending entries, need to update_olh() on bucket=" << olh_obj.bucket << dendl; - int ret = update_olh(obj_ctx, state, bucket_info, olh_obj); + int ret = update_olh(dpp, obj_ctx, state, 
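
The remove_olh_pending_entries() hunk above removes the pending OLH xattrs in batches: one write op per group of at most max_entries keys, with a single iterator walking the whole map. The same batching, modeled standalone (the batch size here is arbitrary and a vector of names stands in for the write op):

    // Standalone model of batched pending-xattr removal.
    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    int main() {
      std::map<std::string, int> pending_attrs{
        {"olh.pending.a", 0}, {"olh.pending.b", 0}, {"olh.pending.c", 0},
        {"olh.pending.d", 0}, {"olh.pending.e", 0}};
      const int max_entries = 2;

      auto i = pending_attrs.begin();
      while (i != pending_attrs.end()) {
        std::vector<std::string> op;               // one "write op" per batch
        for (int n = 0; n < max_entries && i != pending_attrs.end(); ++n, ++i) {
          op.push_back(i->first);                  // stands in for op.rmxattr()
        }
        std::cout << "batched removal of " << op.size() << " attrs\n";
      }
    }
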
bucket_info, olh_obj); if (ret < 0) { return ret; } @@ -7505,12 +7535,13 @@ int RGWRados::follow_olh(const RGWBucketInfo& bucket_info, RGWObjectCtx& obj_ctx return 0; } -int RGWRados::raw_obj_stat(rgw_raw_obj& obj, uint64_t *psize, real_time *pmtime, uint64_t *epoch, +int RGWRados::raw_obj_stat(const DoutPrefixProvider *dpp, + rgw_raw_obj& obj, uint64_t *psize, real_time *pmtime, uint64_t *epoch, map *attrs, bufferlist *first_chunk, RGWObjVersionTracker *objv_tracker, optional_yield y) { rgw_rados_ref ref; - int r = get_raw_obj_ref(obj, &ref); + int r = get_raw_obj_ref(dpp, obj, &ref); if (r < 0) { return r; } @@ -7533,7 +7564,7 @@ int RGWRados::raw_obj_stat(rgw_raw_obj& obj, uint64_t *psize, real_time *pmtime, op.read(0, cct->_conf->rgw_max_chunk_size, first_chunk, NULL); } bufferlist outbl; - r = rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, &outbl, null_yield); + r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, &outbl, null_yield); if (epoch) { *epoch = ref.pool.ioctx().get_last_version(); @@ -7553,12 +7584,12 @@ int RGWRados::raw_obj_stat(rgw_raw_obj& obj, uint64_t *psize, real_time *pmtime, return 0; } -int RGWRados::get_bucket_stats(RGWBucketInfo& bucket_info, int shard_id, string *bucket_ver, string *master_ver, +int RGWRados::get_bucket_stats(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, int shard_id, string *bucket_ver, string *master_ver, map& stats, string *max_marker, bool *syncstopped) { vector headers; map bucket_instance_ids; - int r = cls_bucket_head(bucket_info, shard_id, headers, &bucket_instance_ids); + int r = cls_bucket_head(dpp, bucket_info, shard_id, headers, &bucket_instance_ids); if (r < 0) { return r; } @@ -7632,12 +7663,12 @@ public: } }; -int RGWRados::get_bucket_stats_async(RGWBucketInfo& bucket_info, int shard_id, RGWGetBucketStats_CB *ctx) +int RGWRados::get_bucket_stats_async(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, int shard_id, RGWGetBucketStats_CB *ctx) { int num_aio = 0; RGWGetBucketStatsContext *get_ctx = new RGWGetBucketStatsContext(ctx, bucket_info.layout.current_index.layout.normal.num_shards ? 
: 1); ceph_assert(get_ctx); - int r = cls_bucket_head_async(bucket_info, shard_id, get_ctx, &num_aio); + int r = cls_bucket_head_async(dpp, bucket_info, shard_id, get_ctx, &num_aio); if (r < 0) { ctx->put(); if (num_aio) { @@ -7653,20 +7684,23 @@ int RGWRados::get_bucket_instance_info(RGWSysObjectCtx& obj_ctx, RGWBucketInfo& info, real_time *pmtime, map *pattrs, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { rgw_bucket bucket; rgw_bucket_parse_bucket_key(cct, meta_key, &bucket, nullptr); - return get_bucket_instance_info(obj_ctx, bucket, info, pmtime, pattrs, y); + return get_bucket_instance_info(obj_ctx, bucket, info, pmtime, pattrs, y, dpp); } int RGWRados::get_bucket_instance_info(RGWSysObjectCtx& obj_ctx, const rgw_bucket& bucket, RGWBucketInfo& info, - real_time *pmtime, map *pattrs, optional_yield y) + real_time *pmtime, map *pattrs, optional_yield y, + const DoutPrefixProvider *dpp) { RGWSI_MetaBackend_CtxParams bectx_params = RGWSI_MetaBackend_CtxParams_SObj(&obj_ctx); return ctl.bucket->read_bucket_instance_info(bucket, &info, y, + dpp, RGWBucketCtl::BucketInstance::GetParams() .set_mtime(pmtime) .set_attrs(pattrs) @@ -7677,14 +7711,15 @@ int RGWRados::get_bucket_info(RGWServices *svc, const string& tenant, const string& bucket_name, RGWBucketInfo& info, real_time *pmtime, - optional_yield y, map *pattrs) + optional_yield y, + const DoutPrefixProvider *dpp, map *pattrs) { auto obj_ctx = svc->sysobj->init_obj_ctx(); RGWSI_MetaBackend_CtxParams bectx_params = RGWSI_MetaBackend_CtxParams_SObj(&obj_ctx); rgw_bucket bucket; bucket.tenant = tenant; bucket.name = bucket_name; - return ctl.bucket->read_bucket_info(bucket, &info, y, + return ctl.bucket->read_bucket_info(bucket, &info, y, dpp, RGWBucketCtl::BucketInstance::GetParams() .set_mtime(pmtime) .set_attrs(pattrs) @@ -7693,6 +7728,7 @@ int RGWRados::get_bucket_info(RGWServices *svc, int RGWRados::try_refresh_bucket_info(RGWBucketInfo& info, ceph::real_time *pmtime, + const DoutPrefixProvider *dpp, map *pattrs) { rgw_bucket bucket = info.bucket; @@ -7700,7 +7736,7 @@ int RGWRados::try_refresh_bucket_info(RGWBucketInfo& info, auto rv = info.objv_tracker.read_version; - return ctl.bucket->read_bucket_info(bucket, &info, null_yield, + return ctl.bucket->read_bucket_info(bucket, &info, null_yield, dpp, RGWBucketCtl::BucketInstance::GetParams() .set_mtime(pmtime) .set_attrs(pattrs) @@ -7708,9 +7744,10 @@ int RGWRados::try_refresh_bucket_info(RGWBucketInfo& info, } int RGWRados::put_bucket_instance_info(RGWBucketInfo& info, bool exclusive, - real_time mtime, map *pattrs) + real_time mtime, map *pattrs, + const DoutPrefixProvider *dpp) { - return ctl.bucket->store_bucket_instance_info(info.bucket, info, null_yield, + return ctl.bucket->store_bucket_instance_info(info.bucket, info, null_yield, dpp, RGWBucketCtl::BucketInstance::PutParams() .set_exclusive(exclusive) .set_mtime(mtime) @@ -7718,11 +7755,12 @@ int RGWRados::put_bucket_instance_info(RGWBucketInfo& info, bool exclusive, } int RGWRados::put_linked_bucket_info(RGWBucketInfo& info, bool exclusive, real_time mtime, obj_version *pep_objv, - map *pattrs, bool create_entry_point) + map *pattrs, bool create_entry_point, + const DoutPrefixProvider *dpp) { bool create_head = !info.has_instance_obj || create_entry_point; - int ret = put_bucket_instance_info(info, exclusive, mtime, pattrs); + int ret = put_bucket_instance_info(info, exclusive, mtime, pattrs, dpp); if (ret < 0) { return ret; } @@ -7744,7 +7782,7 @@ int RGWRados::put_linked_bucket_info(RGWBucketInfo& 
info, bool exclusive, real_t *pep_objv = ot.write_version; } } - ret = ctl.bucket->store_bucket_entrypoint_info(info.bucket, entry_point, null_yield, RGWBucketCtl::Bucket::PutParams() + ret = ctl.bucket->store_bucket_entrypoint_info(info.bucket, entry_point, null_yield, dpp, RGWBucketCtl::Bucket::PutParams() .set_exclusive(exclusive) .set_objv_tracker(&ot) .set_mtime(mtime)); @@ -7754,7 +7792,7 @@ int RGWRados::put_linked_bucket_info(RGWBucketInfo& info, bool exclusive, real_t return 0; } -int RGWRados::update_containers_stats(map& m) +int RGWRados::update_containers_stats(map& m, const DoutPrefixProvider *dpp) { auto obj_ctx = svc.sysobj->init_obj_ctx(); @@ -7769,12 +7807,12 @@ int RGWRados::update_containers_stats(map& m) vector headers; RGWBucketInfo bucket_info; - int ret = get_bucket_instance_info(obj_ctx, bucket, bucket_info, NULL, NULL, null_yield); + int ret = get_bucket_instance_info(obj_ctx, bucket, bucket_info, NULL, NULL, null_yield, dpp); if (ret < 0) { return ret; } - int r = cls_bucket_head(bucket_info, RGW_NO_SHARD, headers); + int r = cls_bucket_head(dpp, bucket_info, RGW_NO_SHARD, headers); if (r < 0) return r; @@ -7798,10 +7836,10 @@ int RGWRados::update_containers_stats(map& m) return m.size(); } -int RGWRados::append_async(rgw_raw_obj& obj, size_t size, bufferlist& bl) +int RGWRados::append_async(const DoutPrefixProvider *dpp, rgw_raw_obj& obj, size_t size, bufferlist& bl) { rgw_rados_ref ref; - int r = get_raw_obj_ref(obj, &ref); + int r = get_raw_obj_ref(dpp, obj, &ref); if (r < 0) { return r; } @@ -7813,12 +7851,12 @@ int RGWRados::append_async(rgw_raw_obj& obj, size_t size, bufferlist& bl) return r; } -int RGWRados::pool_iterate_begin(const rgw_pool& pool, RGWPoolIterCtx& ctx) +int RGWRados::pool_iterate_begin(const DoutPrefixProvider *dpp, const rgw_pool& pool, RGWPoolIterCtx& ctx) { librados::IoCtx& io_ctx = ctx.io_ctx; librados::NObjectIterator& iter = ctx.iter; - int r = open_pool_ctx(pool, io_ctx, false); + int r = open_pool_ctx(dpp, pool, io_ctx, false); if (r < 0) return r; @@ -7827,18 +7865,18 @@ int RGWRados::pool_iterate_begin(const rgw_pool& pool, RGWPoolIterCtx& ctx) return 0; } -int RGWRados::pool_iterate_begin(const rgw_pool& pool, const string& cursor, RGWPoolIterCtx& ctx) +int RGWRados::pool_iterate_begin(const DoutPrefixProvider *dpp, const rgw_pool& pool, const string& cursor, RGWPoolIterCtx& ctx) { librados::IoCtx& io_ctx = ctx.io_ctx; librados::NObjectIterator& iter = ctx.iter; - int r = open_pool_ctx(pool, io_ctx, false); + int r = open_pool_ctx(dpp, pool, io_ctx, false); if (r < 0) return r; librados::ObjectCursor oc; if (!oc.from_str(cursor)) { - ldout(cct, 10) << "failed to parse cursor: " << cursor << dendl; + ldpp_dout(dpp, 10) << "failed to parse cursor: " << cursor << dendl; return -EINVAL; } @@ -7847,11 +7885,11 @@ int RGWRados::pool_iterate_begin(const rgw_pool& pool, const string& cursor, RGW return 0; } catch (const std::system_error& e) { r = -e.code().value(); - ldout(cct, 10) << "nobjects_begin threw " << e.what() + ldpp_dout(dpp, 10) << "nobjects_begin threw " << e.what() << ", returning " << r << dendl; return r; } catch (const std::exception& e) { - ldout(cct, 10) << "nobjects_begin threw " << e.what() + ldpp_dout(dpp, 10) << "nobjects_begin threw " << e.what() << ", returning -5" << dendl; return -EIO; } @@ -7912,12 +7950,12 @@ int RGWRados::pool_iterate(RGWPoolIterCtx& ctx, uint32_t num, vectorinitialized) { - int r = pool_iterate_begin(pool, marker, ctx->iter_ctx); + int r = pool_iterate_begin(dpp, pool, marker, 
ctx->iter_ctx); if (r < 0) { - ldout(cct, 10) << "failed to list objects pool_iterate_begin() returned r=" << r << dendl; + ldpp_dout(dpp, 10) << "failed to list objects pool_iterate_begin() returned r=" << r << dendl; return r; } ctx->initialized = true; @@ -7925,7 +7963,7 @@ int RGWRados::list_raw_objects_init(const rgw_pool& pool, const string& marker, return 0; } -int RGWRados::list_raw_objects_next(const string& prefix_filter, int max, +int RGWRados::list_raw_objects_next(const DoutPrefixProvider *dpp, const string& prefix_filter, int max, RGWListRawObjsCtx& ctx, list& oids, bool *is_truncated) { @@ -7937,7 +7975,7 @@ int RGWRados::list_raw_objects_next(const string& prefix_filter, int max, int r = pool_iterate(ctx.iter_ctx, max, objs, is_truncated, &filter); if (r < 0) { if(r != -ENOENT) - ldout(cct, 10) << "failed to list objects pool_iterate returned r=" << r << dendl; + ldpp_dout(dpp, 10) << "failed to list objects pool_iterate returned r=" << r << dendl; return r; } @@ -7949,18 +7987,18 @@ int RGWRados::list_raw_objects_next(const string& prefix_filter, int max, return oids.size(); } -int RGWRados::list_raw_objects(const rgw_pool& pool, const string& prefix_filter, +int RGWRados::list_raw_objects(const DoutPrefixProvider *dpp, const rgw_pool& pool, const string& prefix_filter, int max, RGWListRawObjsCtx& ctx, list& oids, bool *is_truncated) { if (!ctx.initialized) { - int r = list_raw_objects_init(pool, string(), &ctx); + int r = list_raw_objects_init(dpp, pool, string(), &ctx); if (r < 0) { return r; } } - return list_raw_objects_next(prefix_filter, max, ctx, oids, is_truncated); + return list_raw_objects_next(dpp, prefix_filter, max, ctx, oids, is_truncated); } string RGWRados::list_raw_objs_get_cursor(RGWListRawObjsCtx& ctx) @@ -7968,13 +8006,13 @@ string RGWRados::list_raw_objs_get_cursor(RGWListRawObjsCtx& ctx) return pool_iterate_get_cursor(ctx.iter_ctx); } -int RGWRados::bi_get_instance(const RGWBucketInfo& bucket_info, const rgw_obj& obj, +int RGWRados::bi_get_instance(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, rgw_bucket_dir_entry *dirent) { rgw_cls_bi_entry bi_entry; - int r = bi_get(bucket_info, obj, BIIndexType::Instance, &bi_entry); + int r = bi_get(dpp, bucket_info, obj, BIIndexType::Instance, &bi_entry); if (r < 0 && r != -ENOENT) { - ldout(cct, 0) << "ERROR: bi_get() returned r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: bi_get() returned r=" << r << dendl; } if (r < 0) { return r; @@ -7983,20 +8021,20 @@ int RGWRados::bi_get_instance(const RGWBucketInfo& bucket_info, const rgw_obj& o try { decode(*dirent, iter); } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: failed to decode bi_entry()" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to decode bi_entry()" << dendl; return -EIO; } return 0; } -int RGWRados::bi_get_olh(const RGWBucketInfo& bucket_info, const rgw_obj& obj, +int RGWRados::bi_get_olh(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, rgw_bucket_olh_entry *olh) { rgw_cls_bi_entry bi_entry; - int r = bi_get(bucket_info, obj, BIIndexType::OLH, &bi_entry); + int r = bi_get(dpp, bucket_info, obj, BIIndexType::OLH, &bi_entry); if (r < 0 && r != -ENOENT) { - ldout(cct, 0) << "ERROR: bi_get() returned r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: bi_get() returned r=" << r << dendl; } if (r < 0) { return r; @@ -8005,20 +8043,20 @@ int RGWRados::bi_get_olh(const RGWBucketInfo& bucket_info, const rgw_obj& obj, try { decode(*olh, iter); } catch (buffer::error& err) { - 
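
list_raw_objects_init()/list_raw_objects_next() above follow the usual paginated-listing contract: the context remembers where the previous call stopped, each call returns up to max oids plus an is_truncated flag, and the return value on success is the number of oids produced. A standalone model of a caller driving that contract, with a plain vector standing in for the pool:

    // Standalone model of the marker/is_truncated listing contract.
    #include <iostream>
    #include <string>
    #include <vector>

    struct ListCtx { size_t pos = 0; };

    static int list_next(const std::vector<std::string>& pool, ListCtx& ctx,
                         size_t max, std::vector<std::string>& oids,
                         bool* is_truncated)
    {
      oids.clear();
      while (oids.size() < max && ctx.pos < pool.size()) {
        oids.push_back(pool[ctx.pos++]);
      }
      *is_truncated = ctx.pos < pool.size();
      return static_cast<int>(oids.size());
    }

    int main() {
      const std::vector<std::string> pool{"a", "b", "c", "d", "e"};
      ListCtx ctx;
      bool truncated = true;
      while (truncated) {
        std::vector<std::string> oids;
        int n = list_next(pool, ctx, 2, oids, &truncated);
        std::cout << "got " << n << " oids, truncated=" << truncated << '\n';
      }
    }
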
ldout(cct, 0) << "ERROR: failed to decode bi_entry()" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to decode bi_entry()" << dendl; return -EIO; } return 0; } -int RGWRados::bi_get(const RGWBucketInfo& bucket_info, const rgw_obj& obj, +int RGWRados::bi_get(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, BIIndexType index_type, rgw_cls_bi_entry *entry) { BucketShard bs(this); - int ret = bs.init(bucket_info, obj); + int ret = bs.init(dpp, bucket_info, obj); if (ret < 0) { - ldout(cct, 5) << "bs.init() returned ret=" << ret << dendl; + ldpp_dout(dpp, 5) << "bs.init() returned ret=" << ret << dendl; return ret; } @@ -8045,25 +8083,25 @@ int RGWRados::bi_put(BucketShard& bs, rgw_cls_bi_entry& entry) return 0; } -int RGWRados::bi_put(rgw_bucket& bucket, rgw_obj& obj, rgw_cls_bi_entry& entry) +int RGWRados::bi_put(const DoutPrefixProvider *dpp, rgw_bucket& bucket, rgw_obj& obj, rgw_cls_bi_entry& entry) { BucketShard bs(this); - int ret = bs.init(bucket, obj, nullptr /* no RGWBucketInfo */); + int ret = bs.init(bucket, obj, nullptr /* no RGWBucketInfo */, dpp); if (ret < 0) { - ldout(cct, 5) << "bs.init() returned ret=" << ret << dendl; + ldpp_dout(dpp, 5) << "bs.init() returned ret=" << ret << dendl; return ret; } return bi_put(bs, entry); } -int RGWRados::bi_list(rgw_bucket& bucket, const string& obj_name, const string& marker, uint32_t max, list *entries, bool *is_truncated) +int RGWRados::bi_list(const DoutPrefixProvider *dpp, rgw_bucket& bucket, const string& obj_name, const string& marker, uint32_t max, list *entries, bool *is_truncated) { rgw_obj obj(bucket, obj_name); BucketShard bs(this); - int ret = bs.init(bucket, obj, nullptr /* no RGWBucketInfo */); + int ret = bs.init(bucket, obj, nullptr /* no RGWBucketInfo */, dpp); if (ret < 0) { - ldout(cct, 5) << "bs.init() returned ret=" << ret << dendl; + ldpp_dout(dpp, 5) << "bs.init() returned ret=" << ret << dendl; return ret; } @@ -8103,21 +8141,21 @@ int RGWRados::bi_remove(BucketShard& bs) return 0; } -int RGWRados::bi_list(const RGWBucketInfo& bucket_info, int shard_id, const string& filter_obj, const string& marker, uint32_t max, list *entries, bool *is_truncated) +int RGWRados::bi_list(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, int shard_id, const string& filter_obj, const string& marker, uint32_t max, list *entries, bool *is_truncated) { BucketShard bs(this); - int ret = bs.init(bucket_info.bucket, shard_id, bucket_info.layout.current_index, nullptr /* no RGWBucketInfo */); + int ret = bs.init(bucket_info.bucket, shard_id, bucket_info.layout.current_index, nullptr /* no RGWBucketInfo */, dpp); if (ret < 0) { - ldout(cct, 5) << "bs.init() returned ret=" << ret << dendl; + ldpp_dout(dpp, 5) << "bs.init() returned ret=" << ret << dendl; return ret; } return bi_list(bs, filter_obj, marker, max, entries, is_truncated); } -int RGWRados::gc_operate(string& oid, librados::ObjectWriteOperation *op) +int RGWRados::gc_operate(const DoutPrefixProvider *dpp, string& oid, librados::ObjectWriteOperation *op) { - return rgw_rados_operate(gc_pool_ctx, oid, op, null_yield); + return rgw_rados_operate(dpp, gc_pool_ctx, oid, op, null_yield); } int RGWRados::gc_aio_operate(const string& oid, librados::AioCompletion *c, @@ -8126,9 +8164,9 @@ int RGWRados::gc_aio_operate(const string& oid, librados::AioCompletion *c, return gc_pool_ctx.aio_operate(oid, c, op); } -int RGWRados::gc_operate(string& oid, librados::ObjectReadOperation *op, bufferlist *pbl) +int RGWRados::gc_operate(const 
DoutPrefixProvider *dpp, string& oid, librados::ObjectReadOperation *op, bufferlist *pbl) { - return rgw_rados_operate(gc_pool_ctx, oid, op, pbl, null_yield); + return rgw_rados_operate(dpp, gc_pool_ctx, oid, op, pbl, null_yield); } int RGWRados::list_gc_objs(int *index, string& marker, uint32_t max, bool expired_only, std::list& result, bool *truncated, bool& processing_queue) @@ -8158,12 +8196,12 @@ int RGWRados::process_lc() return ret; } -bool RGWRados::process_expire_objects() +bool RGWRados::process_expire_objects(const DoutPrefixProvider *dpp) { - return obj_expirer->inspect_all_shards(utime_t(), ceph_clock_now()); + return obj_expirer->inspect_all_shards(dpp, utime_t(), ceph_clock_now()); } -int RGWRados::cls_obj_prepare_op(BucketShard& bs, RGWModifyOp op, string& tag, +int RGWRados::cls_obj_prepare_op(const DoutPrefixProvider *dpp, BucketShard& bs, RGWModifyOp op, string& tag, rgw_obj& obj, uint16_t bilog_flags, optional_yield y, rgw_zone_set *_zones_trace) { rgw_zone_set zones_trace; @@ -8176,7 +8214,7 @@ int RGWRados::cls_obj_prepare_op(BucketShard& bs, RGWModifyOp op, string& tag, cls_rgw_obj_key key(obj.key.get_index_key_name(), obj.key.instance); cls_rgw_guard_bucket_resharding(o, -ERR_BUSY_RESHARDING); cls_rgw_bucket_prepare_op(o, op, tag, key, obj.key.get_loc(), svc.zone->get_zone().log_data, bilog_flags, zones_trace); - return bs.bucket_obj.operate(&o, y); + return bs.bucket_obj.operate(dpp, &o, y); } int RGWRados::cls_obj_complete_op(BucketShard& bs, const rgw_obj& obj, RGWModifyOp op, string& tag, @@ -8245,11 +8283,11 @@ int RGWRados::cls_obj_complete_cancel(BucketShard& bs, string& tag, rgw_obj& obj zones_trace); } -int RGWRados::cls_obj_set_bucket_tag_timeout(RGWBucketInfo& bucket_info, uint64_t timeout) +int RGWRados::cls_obj_set_bucket_tag_timeout(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, uint64_t timeout) { RGWSI_RADOS::Pool index_pool; map bucket_objs; - int r = svc.bi_rados->open_bucket_index(bucket_info, std::nullopt, &index_pool, &bucket_objs, nullptr); + int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, std::nullopt, &index_pool, &bucket_objs, nullptr); if (r < 0) return r; @@ -8287,7 +8325,8 @@ uint32_t RGWRados::calc_ordered_bucket_list_per_shard(uint32_t num_entries, } -int RGWRados::cls_bucket_list_ordered(RGWBucketInfo& bucket_info, +int RGWRados::cls_bucket_list_ordered(const DoutPrefixProvider *dpp, + RGWBucketInfo& bucket_info, const int shard_id, const rgw_obj_index_key& start_after, const string& prefix, @@ -8307,7 +8346,7 @@ int RGWRados::cls_bucket_list_ordered(RGWBucketInfo& bucket_info, * few results, perhaps due to filtering or to a series of * namespaced entries */ - ldout(cct, 10) << "RGWRados::" << __func__ << ": " << bucket_info.bucket << + ldpp_dout(dpp, 10) << "RGWRados::" << __func__ << ": " << bucket_info.bucket << " start_after=\"" << start_after.name << "[" << start_after.instance << "]\", prefix=\"" << prefix << @@ -8322,7 +8361,7 @@ int RGWRados::cls_bucket_list_ordered(RGWBucketInfo& bucket_info, // value - list result for the corresponding oid (shard), it is filled by // the AIO callback map shard_oids; - int r = svc.bi_rados->open_bucket_index(bucket_info, shard_id, + int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, shard_id, &index_pool, &shard_oids, nullptr); if (r < 0) { @@ -8344,7 +8383,7 @@ int RGWRados::cls_bucket_list_ordered(RGWBucketInfo& bucket_info, num_entries_per_shard = num_entries; } - ldout(cct, 10) << "RGWRados::" << __func__ << + ldpp_dout(dpp, 10) << "RGWRados::" << __func__ << " 
request from each of " << shard_count << " shard(s) for " << num_entries_per_shard << " entries to get " << num_entries << " total entries" << dendl; @@ -8456,7 +8495,7 @@ int RGWRados::cls_bucket_list_ordered(RGWBucketInfo& bucket_info, const string& name = tracker.entry_name(); rgw_bucket_dir_entry& dirent = tracker.dir_entry(); - ldout(cct, 20) << "RGWRados::" << __func__ << " currently processing " << + ldpp_dout(dpp, 20) << "RGWRados::" << __func__ << " currently processing " << dirent.key << " from shard " << tracker.shard_idx << dendl; const bool force_check = @@ -8472,7 +8511,7 @@ int RGWRados::cls_bucket_list_ordered(RGWBucketInfo& bucket_info, * well. */ librados::IoCtx sub_ctx; sub_ctx.dup(ioctx); - r = check_disk_state(sub_ctx, bucket_info, dirent, dirent, + r = check_disk_state(dpp, sub_ctx, bucket_info, dirent, dirent, updates[tracker.oid_name], y); if (r < 0 && r != -ENOENT) { return r; @@ -8482,13 +8521,13 @@ int RGWRados::cls_bucket_list_ordered(RGWBucketInfo& bucket_info, } if (r >= 0) { - ldout(cct, 10) << "RGWRados::" << __func__ << ": got " << + ldpp_dout(dpp, 10) << "RGWRados::" << __func__ << ": got " << dirent.key.name << "[" << dirent.key.instance << "]" << dendl; m[name] = std::move(dirent); last_entry_visited = &(m[name]); ++count; } else { - ldout(cct, 10) << "RGWRados::" << __func__ << ": skipping " << + ldpp_dout(dpp, 10) << "RGWRados::" << __func__ << ": skipping " << dirent.key.name << "[" << dirent.key.instance << "]" << dendl; last_entry_visited = &tracker.dir_entry(); } @@ -8531,22 +8570,22 @@ int RGWRados::cls_bucket_list_ordered(RGWBucketInfo& bucket_info, } } - ldout(cct, 20) << "RGWRados::" << __func__ << + ldpp_dout(dpp, 20) << "RGWRados::" << __func__ << ": returning, count=" << count << ", is_truncated=" << *is_truncated << dendl; if (*is_truncated && count < num_entries) { - ldout(cct, 10) << "RGWRados::" << __func__ << + ldpp_dout(dpp, 10) << "RGWRados::" << __func__ << ": INFO requested " << num_entries << " entries but returning " << count << ", which is truncated" << dendl; } if (last_entry_visited != nullptr && last_entry) { *last_entry = last_entry_visited->key; - ldout(cct, 20) << "RGWRados::" << __func__ << + ldpp_dout(dpp, 20) << "RGWRados::" << __func__ << ": returning, last_entry=" << *last_entry << dendl; } else { - ldout(cct, 20) << "RGWRados::" << __func__ << + ldpp_dout(dpp, 20) << "RGWRados::" << __func__ << ": returning, last_entry NOT SET" << dendl; } @@ -8554,7 +8593,8 @@ int RGWRados::cls_bucket_list_ordered(RGWBucketInfo& bucket_info, } -int RGWRados::cls_bucket_list_unordered(RGWBucketInfo& bucket_info, +int RGWRados::cls_bucket_list_unordered(const DoutPrefixProvider *dpp, + RGWBucketInfo& bucket_info, int shard_id, const rgw_obj_index_key& start_after, const string& prefix, @@ -8565,7 +8605,7 @@ int RGWRados::cls_bucket_list_unordered(RGWBucketInfo& bucket_info, rgw_obj_index_key *last_entry, optional_yield y, check_filter_t force_check_filter) { - ldout(cct, 10) << "cls_bucket_list_unordered " << bucket_info.bucket << + ldpp_dout(dpp, 10) << "cls_bucket_list_unordered " << bucket_info.bucket << " start_after " << start_after.name << "[" << start_after.instance << "] num_entries " << num_entries << dendl; @@ -8576,7 +8616,7 @@ int RGWRados::cls_bucket_list_unordered(RGWBucketInfo& bucket_info, RGWSI_RADOS::Pool index_pool; map oids; - int r = svc.bi_rados->open_bucket_index(bucket_info, shard_id, &index_pool, &oids, nullptr); + int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, shard_id, &index_pool, &oids, nullptr); 
if (r < 0) return r; @@ -8608,7 +8648,7 @@ int RGWRados::cls_bucket_list_unordered(RGWBucketInfo& bucket_info, rgw_obj_key obj_key; bool parsed = rgw_obj_key::parse_raw_oid(key, &obj_key); if (!parsed) { - ldout(cct, 0) << + ldpp_dout(dpp, 0) << "ERROR: RGWRados::cls_bucket_list_unordered received an invalid " "start marker: '" << start_after << "'" << dendl; return -EINVAL; @@ -8638,7 +8678,7 @@ int RGWRados::cls_bucket_list_unordered(RGWBucketInfo& bucket_info, cls_rgw_bucket_list_op(op, marker, prefix, empty_delimiter, num_entries, list_versions, &result); - r = rgw_rados_operate(ioctx, oid, &op, nullptr, null_yield); + r = rgw_rados_operate(dpp, ioctx, oid, &op, nullptr, null_yield); if (r < 0) return r; @@ -8654,7 +8694,7 @@ int RGWRados::cls_bucket_list_unordered(RGWBucketInfo& bucket_info, * and if the tags are old we need to do cleanup as well. */ librados::IoCtx sub_ctx; sub_ctx.dup(ioctx); - r = check_disk_state(sub_ctx, bucket_info, dirent, dirent, updates[oid], y); + r = check_disk_state(dpp, sub_ctx, bucket_info, dirent, dirent, updates[oid], y); if (r < 0 && r != -ENOENT) { return r; } @@ -8664,7 +8704,7 @@ int RGWRados::cls_bucket_list_unordered(RGWBucketInfo& bucket_info, // at this point either r >=0 or r == -ENOENT if (r >= 0) { // i.e., if r != -ENOENT - ldout(cct, 10) << "RGWRados::cls_bucket_list_unordered: got " << + ldpp_dout(dpp, 10) << "RGWRados::cls_bucket_list_unordered: got " << dirent.key.name << "[" << dirent.key.instance << "]" << dendl; if (count < num_entries) { @@ -8712,13 +8752,13 @@ check_updates: } // RGWRados::cls_bucket_list_unordered -int RGWRados::cls_obj_usage_log_add(const string& oid, +int RGWRados::cls_obj_usage_log_add(const DoutPrefixProvider *dpp, const string& oid, rgw_usage_log_info& info) { rgw_raw_obj obj(svc.zone->get_zone_params().usage_log_pool, oid); rgw_rados_ref ref; - int r = get_raw_obj_ref(obj, &ref); + int r = get_raw_obj_ref(dpp, obj, &ref); if (r < 0) { return r; } @@ -8726,11 +8766,11 @@ int RGWRados::cls_obj_usage_log_add(const string& oid, ObjectWriteOperation op; cls_rgw_usage_log_add(op, info); - r = rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield); + r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield); return r; } -int RGWRados::cls_obj_usage_log_read(const string& oid, const string& user, const string& bucket, +int RGWRados::cls_obj_usage_log_read(const DoutPrefixProvider *dpp, const string& oid, const string& user, const string& bucket, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, string& read_iter, map& usage, bool *is_truncated) @@ -8738,7 +8778,7 @@ int RGWRados::cls_obj_usage_log_read(const string& oid, const string& user, cons rgw_raw_obj obj(svc.zone->get_zone_params().usage_log_pool, oid); rgw_rados_ref ref; - int r = get_raw_obj_ref(obj, &ref); + int r = get_raw_obj_ref(dpp, obj, &ref); if (r < 0) { return r; } @@ -8751,13 +8791,13 @@ int RGWRados::cls_obj_usage_log_read(const string& oid, const string& user, cons return r; } -static int cls_rgw_usage_log_trim_repeat(rgw_rados_ref ref, const string& user, const string& bucket, uint64_t start_epoch, uint64_t end_epoch) +static int cls_rgw_usage_log_trim_repeat(const DoutPrefixProvider *dpp, rgw_rados_ref ref, const string& user, const string& bucket, uint64_t start_epoch, uint64_t end_epoch) { bool done = false; do { librados::ObjectWriteOperation op; cls_rgw_usage_log_trim(op, user, bucket, start_epoch, end_epoch); - int r = rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield); + int r = 
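
cls_bucket_list_ordered() above fans the request out to every index shard and then merges the per-shard results, repeatedly taking the smallest front entry across shards until num_entries have been produced; anything left over in any shard makes the result truncated. A standalone model of that merge step, with strings standing in for the real dir-entry trackers:

    // Standalone model of merging already-sorted per-shard listings.
    #include <iostream>
    #include <string>
    #include <vector>

    int main() {
      std::vector<std::vector<std::string>> shards{
        {"apple", "pear"}, {"banana", "zebra"}, {"cherry"}};
      std::vector<size_t> pos(shards.size(), 0);
      const size_t num_entries = 4;

      std::vector<std::string> out;
      while (out.size() < num_entries) {
        int best = -1;
        for (size_t s = 0; s < shards.size(); ++s) {
          if (pos[s] == shards[s].size()) continue;      // shard exhausted
          if (best < 0 || shards[s][pos[s]] < shards[best][pos[best]]) {
            best = static_cast<int>(s);
          }
        }
        if (best < 0) break;                             // nothing left anywhere
        out.push_back(shards[best][pos[best]++]);
      }

      bool truncated = false;
      for (size_t s = 0; s < shards.size(); ++s) {
        truncated = truncated || pos[s] < shards[s].size();
      }
      for (const auto& k : out) std::cout << k << '\n';
      std::cout << "is_truncated=" << truncated << '\n';
    }
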
rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield); if (r == -ENODATA) done = true; else if (r < 0) @@ -8767,45 +8807,45 @@ static int cls_rgw_usage_log_trim_repeat(rgw_rados_ref ref, const string& user, return 0; } -int RGWRados::cls_obj_usage_log_trim(const string& oid, const string& user, const string& bucket, +int RGWRados::cls_obj_usage_log_trim(const DoutPrefixProvider *dpp, const string& oid, const string& user, const string& bucket, uint64_t start_epoch, uint64_t end_epoch) { rgw_raw_obj obj(svc.zone->get_zone_params().usage_log_pool, oid); rgw_rados_ref ref; - int r = get_raw_obj_ref(obj, &ref); + int r = get_raw_obj_ref(dpp, obj, &ref); if (r < 0) { return r; } - r = cls_rgw_usage_log_trim_repeat(ref, user, bucket, start_epoch, end_epoch); + r = cls_rgw_usage_log_trim_repeat(dpp, ref, user, bucket, start_epoch, end_epoch); return r; } -int RGWRados::cls_obj_usage_log_clear(string& oid) +int RGWRados::cls_obj_usage_log_clear(const DoutPrefixProvider *dpp, string& oid) { rgw_raw_obj obj(svc.zone->get_zone_params().usage_log_pool, oid); rgw_rados_ref ref; - int r = get_raw_obj_ref(obj, &ref); + int r = get_raw_obj_ref(dpp, obj, &ref); if (r < 0) { return r; } librados::ObjectWriteOperation op; cls_rgw_usage_log_clear(op); - r = rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield); + r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield); return r; } -int RGWRados::remove_objs_from_index(RGWBucketInfo& bucket_info, list& oid_list) +int RGWRados::remove_objs_from_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, list& oid_list) { RGWSI_RADOS::Pool index_pool; string dir_oid; uint8_t suggest_flag = (svc.zone->get_zone().log_data ? CEPH_RGW_DIR_SUGGEST_LOG_OP : 0); - int r = svc.bi_rados->open_bucket_index(bucket_info, &index_pool, &dir_oid); + int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, &index_pool, &dir_oid); if (r < 0) return r; @@ -8814,7 +8854,7 @@ int RGWRados::remove_objs_from_index(RGWBucketInfo& bucket_info, liststore); - int r = get_obj_state(&rctx, bucket_info, obj, &astate, false, y); + int r = get_obj_state(dpp, &rctx, bucket_info, obj, &astate, false, y); if (r < 0) return r; @@ -8892,23 +8933,23 @@ int RGWRados::check_disk_state(librados::IoCtx io_ctx, if (iter != astate->attrset.end()) { r = decode_policy(iter->second, &owner); if (r < 0) { - dout(0) << "WARNING: could not decode policy for object: " << obj << dendl; + ldpp_dout(dpp, 0) << "WARNING: could not decode policy for object: " << obj << dendl; } } if (astate->manifest) { RGWObjManifest::obj_iterator miter; RGWObjManifest& manifest = *astate->manifest; - for (miter = manifest.obj_begin(); miter != manifest.obj_end(); ++miter) { + for (miter = manifest.obj_begin(dpp); miter != manifest.obj_end(dpp); ++miter) { const rgw_raw_obj& raw_loc = miter.get_location().get_raw_obj(store); rgw_obj loc; RGWSI_Tier_RADOS::raw_obj_to_obj(manifest.get_obj().bucket, raw_loc, &loc); if (loc.key.ns == RGW_OBJ_NS_MULTIPART) { - dout(10) << "check_disk_state(): removing manifest part from index: " << loc << dendl; - r = delete_obj_index(loc, astate->mtime); + ldpp_dout(dpp, 0) << "check_disk_state(): removing manifest part from index: " << loc << dendl; + r = delete_obj_index(loc, astate->mtime, dpp); if (r < 0) { - dout(0) << "WARNING: delete_obj_index() returned r=" << r << dendl; + ldpp_dout(dpp, 0) << "WARNING: delete_obj_index() returned r=" << r << dendl; } } } @@ -8938,21 +8979,21 @@ int RGWRados::check_disk_state(librados::IoCtx io_ctx, return 
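
cls_rgw_usage_log_trim_repeat() above keeps reissuing the trim op because each call removes only a bounded chunk; -ENODATA is the signal that nothing in the requested range remains. The same loop, modeled standalone:

    // Standalone model of "trim until -ENODATA".
    #include <cerrno>
    #include <iostream>

    int main() {
      int remaining_chunks = 3;                 // pretend usage records to trim
      auto trim_once = [&]() -> int {
        if (remaining_chunks == 0) return -ENODATA;
        --remaining_chunks;
        return 0;
      };

      bool done = false;
      do {
        int r = trim_once();
        if (r == -ENODATA) {
          done = true;                          // range fully trimmed
        } else if (r < 0) {
          std::cout << "trim failed: " << r << '\n';
          return 1;
        }
      } while (!done);
      std::cout << "usage log range trimmed\n";
    }
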
0; } -int RGWRados::cls_bucket_head(const RGWBucketInfo& bucket_info, int shard_id, vector& headers, map *bucket_instance_ids) +int RGWRados::cls_bucket_head(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, int shard_id, vector& headers, map *bucket_instance_ids) { RGWSI_RADOS::Pool index_pool; map oids; map list_results; - int r = svc.bi_rados->open_bucket_index(bucket_info, shard_id, &index_pool, &oids, bucket_instance_ids); + int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, shard_id, &index_pool, &oids, bucket_instance_ids); if (r < 0) { - ldout(cct, 20) << "cls_bucket_head: open_bucket_index() returned " + ldpp_dout(dpp, 20) << "cls_bucket_head: open_bucket_index() returned " << r << dendl; return r; } r = CLSRGWIssueGetDirHeader(index_pool.ioctx(), oids, list_results, cct->_conf->rgw_bucket_index_max_aio)(); if (r < 0) { - ldout(cct, 20) << "cls_bucket_head: CLSRGWIssueGetDirHeader() returned " + ldpp_dout(dpp, 20) << "cls_bucket_head: CLSRGWIssueGetDirHeader() returned " << r << dendl; return r; } @@ -8964,11 +9005,11 @@ int RGWRados::cls_bucket_head(const RGWBucketInfo& bucket_info, int shard_id, ve return 0; } -int RGWRados::cls_bucket_head_async(const RGWBucketInfo& bucket_info, int shard_id, RGWGetDirHeader_CB *ctx, int *num_aio) +int RGWRados::cls_bucket_head_async(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, int shard_id, RGWGetDirHeader_CB *ctx, int *num_aio) { RGWSI_RADOS::Pool index_pool; map bucket_objs; - int r = svc.bi_rados->open_bucket_index(bucket_info, shard_id, &index_pool, &bucket_objs, nullptr); + int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, shard_id, &index_pool, &bucket_objs, nullptr); if (r < 0) return r; @@ -8987,7 +9028,8 @@ int RGWRados::cls_bucket_head_async(const RGWBucketInfo& bucket_info, int shard_ int RGWRados::check_bucket_shards(const RGWBucketInfo& bucket_info, const rgw_bucket& bucket, - uint64_t num_objs) + uint64_t num_objs, + const DoutPrefixProvider *dpp) { if (! cct->_conf.get_val("rgw_dynamic_resharding")) { return 0; @@ -9021,23 +9063,23 @@ int RGWRados::check_bucket_shards(const RGWBucketInfo& bucket_info, return 0; } - ldout(cct, 1) << "RGWRados::" << __func__ << " bucket " << bucket.name << + ldpp_dout(dpp, 1) << "RGWRados::" << __func__ << " bucket " << bucket.name << " needs resharding; current num shards " << bucket_info.layout.current_index.layout.normal.num_shards << "; new num shards " << final_num_shards << " (suggested " << suggested_num_shards << ")" << dendl; - return add_bucket_to_reshard(bucket_info, final_num_shards); + return add_bucket_to_reshard(dpp, bucket_info, final_num_shards); } -int RGWRados::add_bucket_to_reshard(const RGWBucketInfo& bucket_info, uint32_t new_num_shards) +int RGWRados::add_bucket_to_reshard(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, uint32_t new_num_shards) { - RGWReshard reshard(this->store); + RGWReshard reshard(this->store, dpp); uint32_t num_source_shards = (bucket_info.layout.current_index.layout.normal.num_shards > 0 ? 
bucket_info.layout.current_index.layout.normal.num_shards : 1); new_num_shards = std::min(new_num_shards, get_max_bucket_shards()); if (new_num_shards <= num_source_shards) { - ldout(cct, 20) << "not resharding bucket name=" << bucket_info.bucket.name << ", orig_num=" << num_source_shards << ", new_num_shards=" << new_num_shards << dendl; + ldpp_dout(dpp, 20) << "not resharding bucket name=" << bucket_info.bucket.name << ", orig_num=" << num_source_shards << ", new_num_shards=" << new_num_shards << dendl; return 0; } @@ -9049,7 +9091,7 @@ int RGWRados::add_bucket_to_reshard(const RGWBucketInfo& bucket_info, uint32_t n entry.old_num_shards = num_source_shards; entry.new_num_shards = new_num_shards; - return reshard.add(entry); + return reshard.add(dpp, entry); } int RGWRados::check_quota(const rgw_user& bucket_owner, rgw_bucket& bucket, @@ -9103,12 +9145,12 @@ librados::Rados* RGWRados::get_rados_handle() return &rados; } -int RGWRados::delete_raw_obj_aio(const rgw_raw_obj& obj, list& handles) +int RGWRados::delete_raw_obj_aio(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, list& handles) { rgw_rados_ref ref; - int ret = get_raw_obj_ref(obj, &ref); + int ret = get_raw_obj_ref(dpp, obj, &ref); if (ret < 0) { - lderr(cct) << "ERROR: failed to get obj ref with ret=" << ret << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to get obj ref with ret=" << ret << dendl; return ret; } @@ -9119,7 +9161,7 @@ int RGWRados::delete_raw_obj_aio(const rgw_raw_obj& obj, listpause(); - ldout(cct, 1) << "Frontends paused" << dendl; + ldpp_dout(&dp, 1) << "Frontends paused" << dendl; // TODO: make RGWRados responsible for rgw_log_usage lifetime rgw_log_usage_finalize(); @@ -93,7 +94,7 @@ void RGWRealmReloader::reload() RGWStoreManager::close_storage(store); store = nullptr; - ldout(cct, 1) << "Store closed" << dendl; + ldpp_dout(&dp, 1) << "Store closed" << dendl; { // allow a new notify to reschedule us. it's important that we do this // before we start loading the new realm, or we could miss some updates @@ -101,10 +102,11 @@ void RGWRealmReloader::reload() reload_scheduled = nullptr; } + while (!store) { // recreate and initialize a new store store = - RGWStoreManager::get_storage(cct, + RGWStoreManager::get_storage(&dp, cct, cct->_conf->rgw_enable_gc_threads, cct->_conf->rgw_enable_lc_threads, cct->_conf->rgw_enable_quota_threads, @@ -112,7 +114,7 @@ void RGWRealmReloader::reload() cct->_conf.get_val("rgw_dynamic_resharding"), cct->_conf->rgw_cache_enabled); - ldout(cct, 1) << "Creating new store" << dendl; + ldpp_dout(&dp, 1) << "Creating new store" << dendl; rgw::sal::RGWRadosStore* store_cleanup = nullptr; { @@ -123,7 +125,7 @@ void RGWRealmReloader::reload() // sleep until we get another notification, and retry until we get // a working configuration if (store == nullptr) { - lderr(cct) << "Failed to reinitialize RGWRados after a realm " + ldpp_dout(&dp, -1) << "Failed to reinitialize RGWRados after a realm " "configuration update. Waiting for a new update." << dendl; // sleep until another event is scheduled @@ -144,7 +146,7 @@ void RGWRealmReloader::reload() } if (store_cleanup) { - ldout(cct, 4) << "Got another notification, restarting RGWRados " + ldpp_dout(&dp, 4) << "Got another notification, restarting RGWRados " "initialization." 
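
check_bucket_shards() and add_bucket_to_reshard() above make the dynamic-resharding decision: derive a suggested shard count from the object count, clamp it to the maximum, and only queue a reshard when that would actually grow the index. A standalone sketch of that decision; the objects-per-shard target and the ceiling below are assumptions for illustration, not values taken from this patch.

    // Standalone sketch of the "should we reshard, and to how many shards"
    // decision. Constants are illustrative placeholders.
    #include <algorithm>
    #include <cstdint>
    #include <iostream>

    uint32_t proposed_shards(uint64_t num_objs, uint64_t objs_per_shard) {
      return static_cast<uint32_t>((num_objs + objs_per_shard - 1) / objs_per_shard);
    }

    int main() {
      const uint32_t current_shards = 16;
      const uint32_t max_shards = 65521;        // placeholder ceiling
      const uint64_t objs_per_shard = 100000;   // assumed target
      const uint64_t num_objs = 2500000;

      uint32_t new_shards = std::min(proposed_shards(num_objs, objs_per_shard),
                                     max_shards);
      if (new_shards <= current_shards) {
        std::cout << "not resharding, current shards are enough\n";
      } else {
        std::cout << "queueing reshard " << current_shards
                  << " -> " << new_shards << " shards\n";
      }
    }
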
<< dendl; RGWStoreManager::close_storage(store_cleanup); @@ -153,19 +155,19 @@ void RGWRealmReloader::reload() int r = store->getRados()->register_to_service_map("rgw", service_map_meta); if (r < 0) { - lderr(cct) << "ERROR: failed to register to service map: " << cpp_strerror(-r) << dendl; + ldpp_dout(&dp, -1) << "ERROR: failed to register to service map: " << cpp_strerror(-r) << dendl; /* ignore error */ } - ldout(cct, 1) << "Finishing initialization of new store" << dendl; + ldpp_dout(&dp, 1) << "Finishing initialization of new store" << dendl; // finish initializing the new store - ldout(cct, 1) << " - REST subsystem init" << dendl; + ldpp_dout(&dp, 1) << " - REST subsystem init" << dendl; rgw_rest_init(cct, store->svc()->zone->get_zonegroup()); - ldout(cct, 1) << " - usage subsystem init" << dendl; + ldpp_dout(&dp, 1) << " - usage subsystem init" << dendl; rgw_log_usage_init(cct, store->getRados()); - ldout(cct, 1) << "Resuming frontends with new realm configuration." << dendl; + ldpp_dout(&dp, 1) << "Resuming frontends with new realm configuration." << dendl; frontends->resume(store); } diff --git a/src/rgw/rgw_realm_watcher.cc b/src/rgw/rgw_realm_watcher.cc index aec48e76e0287..f6cd3475985b8 100644 --- a/src/rgw/rgw_realm_watcher.cc +++ b/src/rgw/rgw_realm_watcher.cc @@ -13,19 +13,19 @@ #define dout_prefix (*_dout << "rgw realm watcher: ") -RGWRealmWatcher::RGWRealmWatcher(CephContext* cct, const RGWRealm& realm) +RGWRealmWatcher::RGWRealmWatcher(const DoutPrefixProvider *dpp, CephContext* cct, const RGWRealm& realm) : cct(cct) { // no default realm, nothing to watch if (realm.get_id().empty()) { - ldout(cct, 4) << "No realm, disabling dynamic reconfiguration." << dendl; + ldpp_dout(dpp, 4) << "No realm, disabling dynamic reconfiguration." << dendl; return; } // establish the watch on RGWRealm - int r = watch_start(realm); + int r = watch_start(dpp, realm); if (r < 0) { - lderr(cct) << "Failed to establish a watch on RGWRealm, " + ldpp_dout(dpp, -1) << "Failed to establish a watch on RGWRealm, " "disabling dynamic reconfiguration." 
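
The RGWRealmReloader::reload() hunks above also show the retry shape around store construction: after the old store is closed, the loop keeps calling get_storage() and, on failure, parks until the next realm notification before trying again with the updated configuration. A standalone model of that loop, with a counter standing in for arriving notifications:

    // Standalone model of "rebuild the store until it succeeds".
    #include <iostream>

    struct Store { int id; };

    int main() {
      int config_version = 1;
      auto get_storage = [&]() -> Store* {
        // pretend only config versions >= 3 produce a working store
        return config_version >= 3 ? new Store{config_version} : nullptr;
      };
      auto wait_for_notification = [&] { ++config_version; };

      Store* store = nullptr;
      while (!store) {
        store = get_storage();
        if (!store) {
          std::cout << "failed to reinitialize store, waiting for a new update\n";
          wait_for_notification();
        }
      }
      std::cout << "store reinitialized with config v" << store->id << '\n';
      delete store;
    }
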
<< dendl; return; } @@ -78,27 +78,27 @@ void RGWRealmWatcher::handle_error(uint64_t cookie, int err) watch_restart(); } -int RGWRealmWatcher::watch_start(const RGWRealm& realm) +int RGWRealmWatcher::watch_start(const DoutPrefixProvider *dpp, const RGWRealm& realm) { // initialize a Rados client int r = rados.init_with_context(cct); if (r < 0) { - lderr(cct) << "Rados client initialization failed with " + ldpp_dout(dpp, -1) << "Rados client initialization failed with " << cpp_strerror(-r) << dendl; return r; } r = rados.connect(); if (r < 0) { - lderr(cct) << "Rados client connection failed with " + ldpp_dout(dpp, -1) << "Rados client connection failed with " << cpp_strerror(-r) << dendl; return r; } // open an IoCtx for the realm's pool rgw_pool pool(realm.get_pool(cct)); - r = rgw_init_ioctx(&rados, pool, pool_ctx); + r = rgw_init_ioctx(dpp, &rados, pool, pool_ctx); if (r < 0) { - lderr(cct) << "Failed to open pool " << pool + ldpp_dout(dpp, -1) << "Failed to open pool " << pool << " with " << cpp_strerror(-r) << dendl; rados.shutdown(); return r; @@ -108,14 +108,14 @@ int RGWRealmWatcher::watch_start(const RGWRealm& realm) auto oid = realm.get_control_oid(); r = pool_ctx.watch2(oid, &watch_handle, this); if (r < 0) { - lderr(cct) << "Failed to watch " << oid + ldpp_dout(dpp, -1) << "Failed to watch " << oid << " with " << cpp_strerror(-r) << dendl; pool_ctx.close(); rados.shutdown(); return r; } - ldout(cct, 10) << "Watching " << oid << dendl; + ldpp_dout(dpp, 10) << "Watching " << oid << dendl; std::swap(watch_oid, oid); return 0; } diff --git a/src/rgw/rgw_realm_watcher.h b/src/rgw/rgw_realm_watcher.h index c6741ea96dad3..b2e3ac6b9d649 100644 --- a/src/rgw/rgw_realm_watcher.h +++ b/src/rgw/rgw_realm_watcher.h @@ -36,7 +36,7 @@ class RGWRealmWatcher : public librados::WatchCtx2 { bufferlist::const_iterator& p) = 0; }; - RGWRealmWatcher(CephContext* cct, const RGWRealm& realm); + RGWRealmWatcher(const DoutPrefixProvider *dpp, CephContext* cct, const RGWRealm& realm); ~RGWRealmWatcher() override; /// register a watcher for the given notification type @@ -59,7 +59,7 @@ class RGWRealmWatcher : public librados::WatchCtx2 { uint64_t watch_handle = 0; std::string watch_oid; - int watch_start(const RGWRealm& realm); + int watch_start(const DoutPrefixProvider *dpp, const RGWRealm& realm); int watch_restart(); void watch_stop(); diff --git a/src/rgw/rgw_reshard.cc b/src/rgw/rgw_reshard.cc index 68352f3e56ad4..cc78ac108fcb8 100644 --- a/src/rgw/rgw_reshard.cc +++ b/src/rgw/rgw_reshard.cc @@ -102,7 +102,8 @@ class BucketReshardShard { } public: - BucketReshardShard(rgw::sal::RGWRadosStore *_store, const RGWBucketInfo& _bucket_info, + BucketReshardShard(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *_store, const RGWBucketInfo& _bucket_info, int _num_shard, const rgw::bucket_index_layout_generation& _idx_layout, deque& _completions) : store(_store), bucket_info(_bucket_info), idx_layout(_idx_layout), bs(store->getRados()), @@ -110,7 +111,7 @@ public: { num_shard = (idx_layout.layout.normal.num_shards > 0 ? 
_num_shard : -1); - bs.init(bucket_info.bucket, num_shard, idx_layout, nullptr /* no RGWBucketInfo */); + bs.init(bucket_info.bucket, num_shard, idx_layout, nullptr /* no RGWBucketInfo */, dpp); max_aio_completions = store->ctx()->_conf.get_val("rgw_reshard_max_aio"); @@ -189,7 +190,8 @@ class BucketReshardManager { vector target_shards; public: - BucketReshardManager(rgw::sal::RGWRadosStore *_store, + BucketReshardManager(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *_store, const RGWBucketInfo& _target_bucket_info, int _num_target_shards) : store(_store), target_bucket_info(_target_bucket_info), @@ -198,7 +200,7 @@ public: const auto& idx_layout = target_bucket_info.layout.current_index; target_shards.resize(num_target_shards); for (int i = 0; i < num_target_shards; ++i) { - target_shards[i] = new BucketReshardShard(store, target_bucket_info, i, idx_layout, completions); + target_shards[i] = new BucketReshardShard(dpp, store, target_bucket_info, i, idx_layout, completions); } } @@ -257,23 +259,24 @@ RGWBucketReshard::RGWBucketReshard(rgw::sal::RGWRadosStore *_store, outer_reshard_lock(_outer_reshard_lock) { } -int RGWBucketReshard::set_resharding_status(rgw::sal::RGWRadosStore* store, +int RGWBucketReshard::set_resharding_status(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore* store, const RGWBucketInfo& bucket_info, const string& new_instance_id, int32_t num_shards, cls_rgw_reshard_status status) { if (new_instance_id.empty()) { - ldout(store->ctx(), 0) << __func__ << " missing new bucket instance id" << dendl; + ldpp_dout(dpp, 0) << __func__ << " missing new bucket instance id" << dendl; return -EINVAL; } cls_rgw_bucket_instance_entry instance_entry; instance_entry.set_status(new_instance_id, num_shards, status); - int ret = store->getRados()->bucket_set_reshard(bucket_info, instance_entry); + int ret = store->getRados()->bucket_set_reshard(dpp, bucket_info, instance_entry); if (ret < 0) { - ldout(store->ctx(), 0) << "RGWReshard::" << __func__ << " ERROR: error setting bucket resharding flag on bucket index: " + ldpp_dout(dpp, 0) << "RGWReshard::" << __func__ << " ERROR: error setting bucket resharding flag on bucket index: " << cpp_strerror(-ret) << dendl; return ret; } @@ -281,21 +284,22 @@ int RGWBucketReshard::set_resharding_status(rgw::sal::RGWRadosStore* store, } // reshard lock assumes lock is held -int RGWBucketReshard::clear_resharding(rgw::sal::RGWRadosStore* store, +int RGWBucketReshard::clear_resharding(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore* store, const RGWBucketInfo& bucket_info) { - int ret = clear_index_shard_reshard_status(store, bucket_info); + int ret = clear_index_shard_reshard_status(dpp, store, bucket_info); if (ret < 0) { - ldout(store->ctx(), 0) << "RGWBucketReshard::" << __func__ << + ldpp_dout(dpp, 0) << "RGWBucketReshard::" << __func__ << " ERROR: error clearing reshard status from index shard " << cpp_strerror(-ret) << dendl; return ret; } cls_rgw_bucket_instance_entry instance_entry; - ret = store->getRados()->bucket_set_reshard(bucket_info, instance_entry); + ret = store->getRados()->bucket_set_reshard(dpp, bucket_info, instance_entry); if (ret < 0) { - ldout(store->ctx(), 0) << "RGWReshard::" << __func__ << + ldpp_dout(dpp, 0) << "RGWReshard::" << __func__ << " ERROR: error setting bucket resharding flag on bucket index: " << cpp_strerror(-ret) << dendl; return ret; @@ -304,18 +308,19 @@ int RGWBucketReshard::clear_resharding(rgw::sal::RGWRadosStore* store, return 0; } -int 
RGWBucketReshard::clear_index_shard_reshard_status(rgw::sal::RGWRadosStore* store, +int RGWBucketReshard::clear_index_shard_reshard_status(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore* store, const RGWBucketInfo& bucket_info) { uint32_t num_shards = bucket_info.layout.current_index.layout.normal.num_shards; if (num_shards < std::numeric_limits::max()) { - int ret = set_resharding_status(store, bucket_info, + int ret = set_resharding_status(dpp, store, bucket_info, bucket_info.bucket.bucket_id, (num_shards < 1 ? 1 : num_shards), cls_rgw_reshard_status::NOT_RESHARDING); if (ret < 0) { - ldout(store->ctx(), 0) << "RGWBucketReshard::" << __func__ << + ldpp_dout(dpp, 0) << "RGWBucketReshard::" << __func__ << " ERROR: error clearing reshard status from index shard " << cpp_strerror(-ret) << dendl; return ret; @@ -329,7 +334,8 @@ static int create_new_bucket_instance(rgw::sal::RGWRadosStore *store, int new_num_shards, const RGWBucketInfo& bucket_info, map& attrs, - RGWBucketInfo& new_bucket_info) + RGWBucketInfo& new_bucket_info, + const DoutPrefixProvider *dpp) { new_bucket_info = bucket_info; @@ -341,13 +347,13 @@ static int create_new_bucket_instance(rgw::sal::RGWRadosStore *store, new_bucket_info.new_bucket_instance_id.clear(); new_bucket_info.reshard_status = cls_rgw_reshard_status::NOT_RESHARDING; - int ret = store->svc()->bi->init_index(new_bucket_info); + int ret = store->svc()->bi->init_index(dpp, new_bucket_info); if (ret < 0) { cerr << "ERROR: failed to init new bucket indexes: " << cpp_strerror(-ret) << std::endl; return ret; } - ret = store->getRados()->put_bucket_instance_info(new_bucket_info, true, real_time(), &attrs); + ret = store->getRados()->put_bucket_instance_info(new_bucket_info, true, real_time(), &attrs, dpp); if (ret < 0) { cerr << "ERROR: failed to store new bucket instance info: " << cpp_strerror(-ret) << std::endl; return ret; @@ -357,20 +363,21 @@ static int create_new_bucket_instance(rgw::sal::RGWRadosStore *store, } int RGWBucketReshard::create_new_bucket_instance(int new_num_shards, - RGWBucketInfo& new_bucket_info) + RGWBucketInfo& new_bucket_info, + const DoutPrefixProvider *dpp) { return ::create_new_bucket_instance(store, new_num_shards, - bucket_info, bucket_attrs, new_bucket_info); + bucket_info, bucket_attrs, new_bucket_info, dpp); } -int RGWBucketReshard::cancel() +int RGWBucketReshard::cancel(const DoutPrefixProvider *dpp) { int ret = reshard_lock.lock(); if (ret < 0) { return ret; } - ret = clear_resharding(); + ret = clear_resharding(dpp); reshard_lock.unlock(); return ret; @@ -378,27 +385,30 @@ int RGWBucketReshard::cancel() class BucketInfoReshardUpdate { + const DoutPrefixProvider *dpp; rgw::sal::RGWRadosStore *store; RGWBucketInfo& bucket_info; std::map bucket_attrs; bool in_progress{false}; - int set_status(cls_rgw_reshard_status s) { + int set_status(cls_rgw_reshard_status s, const DoutPrefixProvider *dpp) { bucket_info.reshard_status = s; - int ret = store->getRados()->put_bucket_instance_info(bucket_info, false, real_time(), &bucket_attrs); + int ret = store->getRados()->put_bucket_instance_info(bucket_info, false, real_time(), &bucket_attrs, dpp); if (ret < 0) { - ldout(store->ctx(), 0) << "ERROR: failed to write bucket info, ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to write bucket info, ret=" << ret << dendl; return ret; } return 0; } public: - BucketInfoReshardUpdate(rgw::sal::RGWRadosStore *_store, + BucketInfoReshardUpdate(const DoutPrefixProvider *_dpp, + rgw::sal::RGWRadosStore *_store, RGWBucketInfo& 
_bucket_info, map& _bucket_attrs, const string& new_bucket_id) : + dpp(_dpp), store(_store), bucket_info(_bucket_info), bucket_attrs(_bucket_attrs) @@ -410,20 +420,20 @@ public: if (in_progress) { // resharding must not have ended correctly, clean up int ret = - RGWBucketReshard::clear_index_shard_reshard_status(store, bucket_info); + RGWBucketReshard::clear_index_shard_reshard_status(dpp, store, bucket_info); if (ret < 0) { - lderr(store->ctx()) << "Error: " << __func__ << + ldpp_dout(dpp, -1) << "Error: " << __func__ << " clear_index_shard_status returned " << ret << dendl; } bucket_info.new_bucket_instance_id.clear(); // clears new_bucket_instance as well - set_status(cls_rgw_reshard_status::NOT_RESHARDING); + set_status(cls_rgw_reshard_status::NOT_RESHARDING, dpp); } } int start() { - int ret = set_status(cls_rgw_reshard_status::IN_PROGRESS); + int ret = set_status(cls_rgw_reshard_status::IN_PROGRESS, dpp); if (ret < 0) { return ret; } @@ -432,7 +442,7 @@ public: } int complete() { - int ret = set_status(cls_rgw_reshard_status::DONE); + int ret = set_status(cls_rgw_reshard_status::DONE, dpp); if (ret < 0) { return ret; } @@ -525,7 +535,8 @@ int RGWBucketReshard::do_reshard(int num_shards, int max_entries, bool verbose, ostream *out, - Formatter *formatter) + Formatter *formatter, + const DoutPrefixProvider *dpp) { if (out) { const rgw_bucket& bucket = bucket_info.bucket; @@ -541,24 +552,24 @@ int RGWBucketReshard::do_reshard(int num_shards, list entries; if (max_entries < 0) { - ldout(store->ctx(), 0) << __func__ << + ldpp_dout(dpp, 0) << __func__ << ": can't reshard, negative max_entries" << dendl; return -EINVAL; } // NB: destructor cleans up sharding state if reshard does not // complete successfully - BucketInfoReshardUpdate bucket_info_updater(store, bucket_info, bucket_attrs, new_bucket_info.bucket.bucket_id); + BucketInfoReshardUpdate bucket_info_updater(dpp, store, bucket_info, bucket_attrs, new_bucket_info.bucket.bucket_id); int ret = bucket_info_updater.start(); if (ret < 0) { - ldout(store->ctx(), 0) << __func__ << ": failed to update bucket info ret=" << ret << dendl; + ldpp_dout(dpp, 0) << __func__ << ": failed to update bucket info ret=" << ret << dendl; return ret; } int num_target_shards = (new_bucket_info.layout.current_index.layout.normal.num_shards > 0 ? 
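BucketInfoReshardUpdate is an RAII helper: start() flags the bucket as IN_PROGRESS, complete() marks it DONE, and the destructor resets the status if the reshard never completed, now logging through the stored dpp. A generic standalone sketch of that guard shape (names invented for the sketch):

#include <functional>
#include <iostream>

// RAII guard mirroring the BucketInfoReshardUpdate idea: mark an operation
// "in progress" up front and guarantee the flag is cleared if the scope
// exits before complete() is called.
class ProgressGuard {
  std::function<void(const char*)> set_status;  // e.g. writes bucket instance info
  bool in_progress = false;
public:
  explicit ProgressGuard(std::function<void(const char*)> setter)
    : set_status(std::move(setter)) {}
  int start()    { set_status("IN_PROGRESS"); in_progress = true;  return 0; }
  int complete() { set_status("DONE");        in_progress = false; return 0; }
  ~ProgressGuard() {
    if (in_progress) {
      // resharding must not have ended correctly, clean up
      set_status("NOT_RESHARDING");
    }
  }
};

int main() {
  ProgressGuard g([](const char* s) { std::cout << "status -> " << s << "\n"; });
  g.start();
  // an early return or error here would still reset the status via ~ProgressGuard
  g.complete();
}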
new_bucket_info.layout.current_index.layout.normal.num_shards : 1); - BucketReshardManager target_shards_mgr(store, new_bucket_info, num_target_shards); + BucketReshardManager target_shards_mgr(dpp, store, new_bucket_info, num_target_shards); bool verbose_json_out = verbose && (formatter != nullptr) && (out != nullptr); @@ -580,7 +591,7 @@ int RGWBucketReshard::do_reshard(int num_shards, marker.clear(); while (is_truncated) { entries.clear(); - ret = store->getRados()->bi_list(bucket_info, i, string(), marker, max_entries, &entries, &is_truncated); + ret = store->getRados()->bi_list(dpp, bucket_info, i, string(), marker, max_entries, &entries, &is_truncated); if (ret < 0 && ret != -ENOENT) { derr << "ERROR: bi_list(): " << cpp_strerror(-ret) << dendl; return ret; @@ -613,7 +624,7 @@ int RGWBucketReshard::do_reshard(int num_shards, } int ret = store->getRados()->get_target_shard_id(new_bucket_info.layout.current_index.layout.normal, obj.get_hash_object(), &target_shard_id); if (ret < 0) { - lderr(store->ctx()) << "ERROR: get_target_shard_id() returned ret=" << ret << dendl; + ldpp_dout(dpp, -1) << "ERROR: get_target_shard_id() returned ret=" << ret << dendl; return ret; } @@ -637,7 +648,7 @@ int RGWBucketReshard::do_reshard(int num_shards, } ret = reshard_lock.renew(now); if (ret < 0) { - lderr(store->ctx()) << "Error renewing bucket lock: " << ret << dendl; + ldpp_dout(dpp, -1) << "Error renewing bucket lock: " << ret << dendl; return ret; } } @@ -660,19 +671,19 @@ int RGWBucketReshard::do_reshard(int num_shards, ret = target_shards_mgr.finish(); if (ret < 0) { - lderr(store->ctx()) << "ERROR: failed to reshard" << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to reshard" << dendl; return -EIO; } - ret = store->ctl()->bucket->link_bucket(new_bucket_info.owner, new_bucket_info.bucket, bucket_info.creation_time, null_yield); + ret = store->ctl()->bucket->link_bucket(new_bucket_info.owner, new_bucket_info.bucket, bucket_info.creation_time, null_yield, dpp); if (ret < 0) { - lderr(store->ctx()) << "failed to link new bucket instance (bucket_id=" << new_bucket_info.bucket.bucket_id << ": " << cpp_strerror(-ret) << ")" << dendl; + ldpp_dout(dpp, -1) << "failed to link new bucket instance (bucket_id=" << new_bucket_info.bucket.bucket_id << ": " << cpp_strerror(-ret) << ")" << dendl; return ret; } ret = bucket_info_updater.complete(); if (ret < 0) { - ldout(store->ctx(), 0) << __func__ << ": failed to update bucket info ret=" << ret << dendl; + ldpp_dout(dpp, 0) << __func__ << ": failed to update bucket info ret=" << ret << dendl; /* don't error out, reshard process succeeded */ } @@ -680,13 +691,14 @@ int RGWBucketReshard::do_reshard(int num_shards, // NB: some error clean-up is done by ~BucketInfoReshardUpdate } // RGWBucketReshard::do_reshard -int RGWBucketReshard::get_status(list *status) +int RGWBucketReshard::get_status(const DoutPrefixProvider *dpp, list *status) { - return store->svc()->bi_rados->get_reshard_status(bucket_info, status); + return store->svc()->bi_rados->get_reshard_status(dpp, bucket_info, status); } int RGWBucketReshard::execute(int num_shards, int max_op_entries, + const DoutPrefixProvider *dpp, bool verbose, ostream *out, Formatter *formatter, RGWReshard* reshard_log) { @@ -696,14 +708,14 @@ int RGWBucketReshard::execute(int num_shards, int max_op_entries, } RGWBucketInfo new_bucket_info; - ret = create_new_bucket_instance(num_shards, new_bucket_info); + ret = create_new_bucket_instance(num_shards, new_bucket_info, dpp); if (ret < 0) { // shard state is uncertain, but 
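The per-shard copy loop in do_reshard() drains each source index shard with the usual marker/is_truncated pagination over bi_list(). A self-contained sketch of that loop shape, with list_page() standing in for RGWRados::bi_list():

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

// Pretend index shard contents, sorted by key.
static const std::vector<std::string> kIndex = {"a", "b", "c", "d", "e"};

// Returns up to max_entries entries after `marker`, setting *truncated if more remain.
static int list_page(const std::string& marker, size_t max_entries,
                     std::vector<std::string>* entries, bool* truncated) {
  entries->clear();
  auto it = marker.empty() ? kIndex.begin()
                           : std::upper_bound(kIndex.begin(), kIndex.end(), marker);
  while (it != kIndex.end() && entries->size() < max_entries)
    entries->push_back(*it++);
  *truncated = (it != kIndex.end());
  return 0;
}

int main() {
  std::string marker;            // empty marker == start of shard
  bool truncated = true;
  while (truncated) {
    std::vector<std::string> entries;
    if (list_page(marker, 2, &entries, &truncated) < 0) return 1;
    for (const auto& e : entries) {
      std::cout << "copy " << e << " to its target shard\n";
      marker = e;                // advance the marker past processed entries
    }
  }
}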
this will attempt to remove them anyway goto error_out; } if (reshard_log) { - ret = reshard_log->update(bucket_info, new_bucket_info); + ret = reshard_log->update(dpp, bucket_info, new_bucket_info); if (ret < 0) { goto error_out; } @@ -711,7 +723,7 @@ int RGWBucketReshard::execute(int num_shards, int max_op_entries, // set resharding status of current bucket_info & shards with // information about planned resharding - ret = set_resharding_status(new_bucket_info.bucket.bucket_id, + ret = set_resharding_status(dpp, new_bucket_info.bucket.bucket_id, num_shards, cls_rgw_reshard_status::IN_PROGRESS); if (ret < 0) { goto error_out; @@ -720,7 +732,7 @@ int RGWBucketReshard::execute(int num_shards, int max_op_entries, ret = do_reshard(num_shards, new_bucket_info, max_op_entries, - verbose, out, formatter); + verbose, out, formatter, dpp); if (ret < 0) { goto error_out; } @@ -734,23 +746,23 @@ int RGWBucketReshard::execute(int num_shards, int max_op_entries, // best effort and don't report out an error; the lock isn't needed // at this point since all we're using a best effor to to remove old // shard objects - ret = store->svc()->bi->clean_index(bucket_info); + ret = store->svc()->bi->clean_index(dpp, bucket_info); if (ret < 0) { - lderr(store->ctx()) << "Error: " << __func__ << + ldpp_dout(dpp, -1) << "Error: " << __func__ << " failed to clean up old shards; " << "RGWRados::clean_bucket_index returned " << ret << dendl; } ret = store->ctl()->bucket->remove_bucket_instance_info(bucket_info.bucket, - bucket_info, null_yield); + bucket_info, null_yield, dpp); if (ret < 0) { - lderr(store->ctx()) << "Error: " << __func__ << + ldpp_dout(dpp, -1) << "Error: " << __func__ << " failed to clean old bucket info object \"" << bucket_info.bucket.get_key() << "\"created after successful resharding with error " << ret << dendl; } - ldout(store->ctx(), 1) << __func__ << + ldpp_dout(dpp, 1) << __func__ << " INFO: reshard of bucket \"" << bucket_info.bucket.name << "\" from \"" << bucket_info.bucket.get_key() << "\" to \"" << new_bucket_info.bucket.get_key() << "\" completed successfully" << dendl; @@ -764,18 +776,18 @@ error_out: // since the real problem is the issue that led to this error code // path, we won't touch ret and instead use another variable to // temporarily error codes - int ret2 = store->svc()->bi->clean_index(new_bucket_info); + int ret2 = store->svc()->bi->clean_index(dpp, new_bucket_info); if (ret2 < 0) { - lderr(store->ctx()) << "Error: " << __func__ << + ldpp_dout(dpp, -1) << "Error: " << __func__ << " failed to clean up shards from failed incomplete resharding; " << "RGWRados::clean_bucket_index returned " << ret2 << dendl; } ret2 = store->ctl()->bucket->remove_bucket_instance_info(new_bucket_info.bucket, new_bucket_info, - null_yield); + null_yield, dpp); if (ret2 < 0) { - lderr(store->ctx()) << "Error: " << __func__ << + ldpp_dout(dpp, -1) << "Error: " << __func__ << " failed to clean bucket info object \"" << new_bucket_info.bucket.get_key() << "\"created during incomplete resharding with error " << ret2 << dendl; @@ -812,10 +824,10 @@ void RGWReshard::get_bucket_logshard_oid(const string& tenant, const string& buc get_logshard_oid(int(sid), oid); } -int RGWReshard::add(cls_rgw_reshard_entry& entry) +int RGWReshard::add(const DoutPrefixProvider *dpp, cls_rgw_reshard_entry& entry) { if (!store->svc()->zone->can_reshard()) { - ldout(store->ctx(), 20) << __func__ << " Resharding is disabled" << dendl; + ldpp_dout(dpp, 20) << __func__ << " Resharding is disabled" << dendl; return 0; } @@ 
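The error_out path deliberately keeps the original failure in ret and collects cleanup failures in a second variable, so best-effort cleanup cannot mask the real error. Reduced to its shape (clean_index()/remove_instance_info() are stand-ins for the real calls):

#include <iostream>

static int clean_index()          { return -2; }  // pretend cleanup also fails
static int remove_instance_info() { return 0; }

static int execute_like() {
  int ret = -22;                                   // the real failure (e.g. -EINVAL)
  int ret2 = clean_index();
  if (ret2 < 0)
    std::cerr << "failed to clean up shards from failed incomplete resharding: "
              << ret2 << "\n";
  ret2 = remove_instance_info();
  if (ret2 < 0)
    std::cerr << "failed to clean bucket info object: " << ret2 << "\n";
  return ret;                                      // report the original error, not ret2
}

int main() { return execute_like() == -22 ? 0 : 1; }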
-826,15 +838,15 @@ int RGWReshard::add(cls_rgw_reshard_entry& entry) librados::ObjectWriteOperation op; cls_rgw_reshard_add(op, entry); - int ret = rgw_rados_operate(store->getRados()->reshard_pool_ctx, logshard_oid, &op, null_yield); + int ret = rgw_rados_operate(dpp, store->getRados()->reshard_pool_ctx, logshard_oid, &op, null_yield); if (ret < 0) { - lderr(store->ctx()) << "ERROR: failed to add entry to reshard log, oid=" << logshard_oid << " tenant=" << entry.tenant << " bucket=" << entry.bucket_name << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to add entry to reshard log, oid=" << logshard_oid << " tenant=" << entry.tenant << " bucket=" << entry.bucket_name << dendl; return ret; } return 0; } -int RGWReshard::update(const RGWBucketInfo& bucket_info, const RGWBucketInfo& new_bucket_info) +int RGWReshard::update(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const RGWBucketInfo& new_bucket_info) { cls_rgw_reshard_entry entry; entry.bucket_name = bucket_info.bucket.name; @@ -848,9 +860,9 @@ int RGWReshard::update(const RGWBucketInfo& bucket_info, const RGWBucketInfo& ne entry.new_instance_id = new_bucket_info.bucket.name + ":" + new_bucket_info.bucket.bucket_id; - ret = add(entry); + ret = add(dpp, entry); if (ret < 0) { - ldout(store->ctx(), 0) << __func__ << ":Error in updating entry bucket " << entry.bucket_name << ": " << + ldpp_dout(dpp, 0) << __func__ << ":Error in updating entry bucket " << entry.bucket_name << ": " << cpp_strerror(-ret) << dendl; } @@ -901,7 +913,7 @@ int RGWReshard::get(cls_rgw_reshard_entry& entry) return 0; } -int RGWReshard::remove(cls_rgw_reshard_entry& entry) +int RGWReshard::remove(const DoutPrefixProvider *dpp, cls_rgw_reshard_entry& entry) { string logshard_oid; @@ -910,9 +922,9 @@ int RGWReshard::remove(cls_rgw_reshard_entry& entry) librados::ObjectWriteOperation op; cls_rgw_reshard_remove(op, entry); - int ret = rgw_rados_operate(store->getRados()->reshard_pool_ctx, logshard_oid, &op, null_yield); + int ret = rgw_rados_operate(dpp, store->getRados()->reshard_pool_ctx, logshard_oid, &op, null_yield); if (ret < 0) { - lderr(store->ctx()) << "ERROR: failed to remove entry from reshard log, oid=" << logshard_oid << " tenant=" << entry.tenant << " bucket=" << entry.bucket_name << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to remove entry from reshard log, oid=" << logshard_oid << " tenant=" << entry.tenant << " bucket=" << entry.bucket_name << dendl; return ret; } @@ -976,12 +988,11 @@ void RGWReshardWait::stop() } } -int RGWReshard::process_single_logshard(int logshard_num) +int RGWReshard::process_single_logshard(int logshard_num, const DoutPrefixProvider *dpp) { string marker; bool truncated = true; - CephContext *cct = store->ctx(); constexpr uint32_t max_entries = 1000; string logshard_oid; @@ -991,7 +1002,7 @@ int RGWReshard::process_single_logshard(int logshard_num) int ret = logshard_lock.lock(); if (ret < 0) { - ldout(store->ctx(), 5) << __func__ << "(): failed to acquire lock on " << + ldpp_dout(dpp, 5) << __func__ << "(): failed to acquire lock on " << logshard_oid << ", ret = " << ret < entries; ret = list(logshard_num, marker, max_entries, entries, &truncated); if (ret < 0) { - ldout(cct, 10) << "cannot list all reshards in logshard oid=" << + ldpp_dout(dpp, 10) << "cannot list all reshards in logshard oid=" << logshard_oid << dendl; continue; } @@ -1008,7 +1019,7 @@ int RGWReshard::process_single_logshard(int logshard_num) for(auto& entry: entries) { // logshard entries if(entry.new_instance_id.empty()) { - 
ldout(store->ctx(), 20) << __func__ << " resharding " << + ldpp_dout(dpp, 20) << __func__ << " resharding " << entry.bucket_name << dendl; rgw_bucket bucket; @@ -1018,10 +1029,10 @@ int RGWReshard::process_single_logshard(int logshard_num) ret = store->getRados()->get_bucket_info(store->svc(), entry.tenant, entry.bucket_name, bucket_info, nullptr, - null_yield, &attrs); + null_yield, dpp, &attrs); if (ret < 0 || bucket_info.bucket.bucket_id != entry.bucket_id) { if (ret < 0) { - ldout(cct, 0) << __func__ << + ldpp_dout(dpp, 0) << __func__ << ": Error in get_bucket_info for bucket " << entry.bucket_name << ": " << cpp_strerror(-ret) << dendl; if (ret != -ENOENT) { @@ -1029,20 +1040,20 @@ int RGWReshard::process_single_logshard(int logshard_num) return ret; } } else { - ldout(cct,0) << __func__ << + ldpp_dout(dpp, 0) << __func__ << ": Bucket: " << entry.bucket_name << " already resharded by someone, skipping " << dendl; } // we've encountered a reshard queue entry for an apparently // non-existent bucket; let's try to recover by cleaning up - ldout(cct, 0) << __func__ << + ldpp_dout(dpp, 0) << __func__ << ": removing reshard queue entry for a resharded or non-existent bucket" << entry.bucket_name << dendl; - ret = remove(entry); + ret = remove(dpp, entry); if (ret < 0) { - ldout(cct, 0) << __func__ << + ldpp_dout(dpp, 0) << __func__ << ": Error removing non-existent bucket " << entry.bucket_name << " from resharding queue: " << cpp_strerror(-ret) << dendl; @@ -1054,22 +1065,22 @@ int RGWReshard::process_single_logshard(int logshard_num) } RGWBucketReshard br(store, bucket_info, attrs, nullptr); - ret = br.execute(entry.new_num_shards, max_entries, false, nullptr, + ret = br.execute(entry.new_num_shards, max_entries, dpp, false, nullptr, nullptr, this); if (ret < 0) { - ldout(store->ctx(), 0) << __func__ << + ldpp_dout(dpp, 0) << __func__ << ": Error during resharding bucket " << entry.bucket_name << ":" << cpp_strerror(-ret)<< dendl; return ret; } - ldout(store->ctx(), 20) << __func__ << + ldpp_dout(dpp, 20) << __func__ << " removing reshard queue entry for bucket " << entry.bucket_name << dendl; - ret = remove(entry); + ret = remove(dpp, entry); if (ret < 0) { - ldout(cct, 0) << __func__ << ": Error removing bucket " << + ldpp_dout(dpp, 0) << __func__ << ": Error removing bucket " << entry.bucket_name << " from resharding queue: " << cpp_strerror(-ret) << dendl; return ret; @@ -1104,10 +1115,10 @@ void RGWReshard::get_logshard_oid(int shard_num, string *logshard) *logshard = objname + buf; } -int RGWReshard::process_all_logshards() +int RGWReshard::process_all_logshards(const DoutPrefixProvider *dpp) { if (!store->svc()->zone->can_reshard()) { - ldout(store->ctx(), 20) << __func__ << " Resharding is disabled" << dendl; + ldpp_dout(dpp, 20) << __func__ << " Resharding is disabled" << dendl; return 0; } int ret = 0; @@ -1116,11 +1127,11 @@ int RGWReshard::process_all_logshards() string logshard; get_logshard_oid(i, &logshard); - ldout(store->ctx(), 20) << "processing logshard = " << logshard << dendl; + ldpp_dout(dpp, 20) << "processing logshard = " << logshard << dendl; - ret = process_single_logshard(i); + ret = process_single_logshard(i, dpp); - ldout(store->ctx(), 20) << "finish processing logshard = " << logshard << " , ret = " << ret << dendl; + ldpp_dout(dpp, 20) << "finish processing logshard = " << logshard << " , ret = " << ret << dendl; } return 0; @@ -1151,7 +1162,7 @@ void RGWReshard::stop_processor() void *RGWReshard::ReshardWorker::entry() { do { utime_t start = 
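process_single_logshard() treats a queue entry whose bucket is gone, or whose stored bucket_id no longer matches, as already handled: it removes the entry and moves on, and only otherwise runs the reshard before removing the entry. A simplified standalone sketch (the real code returns on lookup errors other than -ENOENT):

#include <string>
#include <vector>

struct QueueEntry { std::string bucket_name, bucket_id; };

// Stand-ins for get_bucket_info(), RGWBucketReshard::execute(), RGWReshard::remove().
static int lookup_bucket_id(const std::string& name, std::string* id) {
  if (name == "gone") return -2;   // -ENOENT: bucket no longer exists
  *id = "current-id";
  return 0;
}
static int reshard(const QueueEntry&)      { return 0; }
static int remove_entry(const QueueEntry&) { return 0; }

static int process_entries(const std::vector<QueueEntry>& entries) {
  for (const auto& entry : entries) {
    std::string current_id;
    int ret = lookup_bucket_id(entry.bucket_name, &current_id);
    if (ret < 0 || current_id != entry.bucket_id) {
      // resharded by someone else or non-existent: just clean up the queue entry
      if ((ret = remove_entry(entry)) < 0) return ret;
      continue;
    }
    if ((ret = reshard(entry)) < 0) return ret;
    if ((ret = remove_entry(entry)) < 0) return ret;
  }
  return 0;
}

int main() {
  return process_entries({{"gone", "old-id"}, {"live", "current-id"}});
}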
ceph_clock_now(); - reshard->process_all_logshards(); + reshard->process_all_logshards(this); if (reshard->going_down()) break; @@ -1177,3 +1188,18 @@ void RGWReshard::ReshardWorker::stop() std::lock_guard l{lock}; cond.notify_all(); } + +CephContext *RGWReshard::ReshardWorker::get_cct() const +{ + return cct; +} + +unsigned RGWReshard::ReshardWorker::get_subsys() const +{ + return dout_subsys; +} + +std::ostream& RGWReshard::ReshardWorker::gen_prefix(std::ostream& out) const +{ + return out << "rgw reshard worker thread: "; +} diff --git a/src/rgw/rgw_reshard.h b/src/rgw/rgw_reshard.h index a5c190f59143f..ecb18690fa3fd 100644 --- a/src/rgw/rgw_reshard.h +++ b/src/rgw/rgw_reshard.h @@ -85,13 +85,15 @@ private: static const std::initializer_list reshard_primes; int create_new_bucket_instance(int new_num_shards, - RGWBucketInfo& new_bucket_info); + RGWBucketInfo& new_bucket_info, + const DoutPrefixProvider *dpp); int do_reshard(int num_shards, RGWBucketInfo& new_bucket_info, int max_entries, bool verbose, ostream *os, - Formatter *formatter); + Formatter *formatter, + const DoutPrefixProvider *dpp); public: // pass nullptr for the final parameter if no outer reshard lock to @@ -101,30 +103,34 @@ public: const std::map& _bucket_attrs, RGWBucketReshardLock* _outer_reshard_lock); int execute(int num_shards, int max_op_entries, + const DoutPrefixProvider *dpp, bool verbose = false, ostream *out = nullptr, Formatter *formatter = nullptr, RGWReshard *reshard_log = nullptr); - int get_status(std::list *status); - int cancel(); - static int clear_resharding(rgw::sal::RGWRadosStore* store, + int get_status(const DoutPrefixProvider *dpp, std::list *status); + int cancel(const DoutPrefixProvider *dpp); + static int clear_resharding(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore* store, const RGWBucketInfo& bucket_info); - int clear_resharding() { - return clear_resharding(store, bucket_info); + int clear_resharding(const DoutPrefixProvider *dpp) { + return clear_resharding(dpp, store, bucket_info); } - static int clear_index_shard_reshard_status(rgw::sal::RGWRadosStore* store, + static int clear_index_shard_reshard_status(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore* store, const RGWBucketInfo& bucket_info); - int clear_index_shard_reshard_status() { - return clear_index_shard_reshard_status(store, bucket_info); + int clear_index_shard_reshard_status(const DoutPrefixProvider *dpp) { + return clear_index_shard_reshard_status(dpp, store, bucket_info); } - static int set_resharding_status(rgw::sal::RGWRadosStore* store, + static int set_resharding_status(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore* store, const RGWBucketInfo& bucket_info, const string& new_instance_id, int32_t num_shards, cls_rgw_reshard_status status); - int set_resharding_status(const string& new_instance_id, + int set_resharding_status(const DoutPrefixProvider *dpp, const string& new_instance_id, int32_t num_shards, cls_rgw_reshard_status status) { - return set_resharding_status(store, bucket_info, + return set_resharding_status(dpp, store, bucket_info, new_instance_id, num_shards, status); } @@ -202,7 +208,7 @@ private: void get_logshard_oid(int shard_num, string *shard); protected: - class ReshardWorker : public Thread { + class ReshardWorker : public Thread, public DoutPrefixProvider { CephContext *cct; RGWReshard *reshard; ceph::mutex lock = ceph::make_mutex("ReshardWorker"); @@ -212,11 +218,14 @@ protected: ReshardWorker(CephContext * const _cct, RGWReshard * const _reshard) : cct(_cct), - 
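ReshardWorker becomes a DoutPrefixProvider itself by supplying get_cct(), get_subsys() and gen_prefix(), which is what lets entry() pass `this` into process_all_logshards(). A minimal standalone analogue (the Mini* types are invented for the sketch, not Ceph's):

#include <iostream>
#include <ostream>

struct MiniCct {};

struct MiniPrefixProvider {
  virtual ~MiniPrefixProvider() = default;
  virtual MiniCct* get_cct() const = 0;
  virtual unsigned get_subsys() const = 0;
  virtual std::ostream& gen_prefix(std::ostream& out) const = 0;
};

static void process_all_logshards(const MiniPrefixProvider* dpp) {
  dpp->gen_prefix(std::cout) << "processing logshard = 0\n";
}

class ReshardWorkerLike : public MiniPrefixProvider {
  MiniCct* cct;
public:
  explicit ReshardWorkerLike(MiniCct* c) : cct(c) {}
  void entry() { process_all_logshards(this); }   // the worker itself is the dpp

  MiniCct* get_cct() const override { return cct; }
  unsigned get_subsys() const override { return 1; /* stand-in for a dout subsys id */ }
  std::ostream& gen_prefix(std::ostream& out) const override {
    return out << "rgw reshard worker thread: ";
  }
};

int main() {
  MiniCct cct;
  ReshardWorkerLike w(&cct);
  w.entry();   // prints "rgw reshard worker thread: processing logshard = 0"
}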
reshard(_reshard) { - } + reshard(_reshard) {} void *entry() override; void stop(); + + CephContext *get_cct() const override; + unsigned get_subsys() const; + std::ostream& gen_prefix(std::ostream& out) const; }; ReshardWorker *worker = nullptr; @@ -227,16 +236,16 @@ protected: public: RGWReshard(rgw::sal::RGWRadosStore* _store, bool _verbose = false, ostream *_out = nullptr, Formatter *_formatter = nullptr); - int add(cls_rgw_reshard_entry& entry); - int update(const RGWBucketInfo& bucket_info, const RGWBucketInfo& new_bucket_info); + int add(const DoutPrefixProvider *dpp, cls_rgw_reshard_entry& entry); + int update(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const RGWBucketInfo& new_bucket_info); int get(cls_rgw_reshard_entry& entry); - int remove(cls_rgw_reshard_entry& entry); + int remove(const DoutPrefixProvider *dpp, cls_rgw_reshard_entry& entry); int list(int logshard_num, string& marker, uint32_t max, std::list& entries, bool *is_truncated); int clear_bucket_resharding(const string& bucket_instance_oid, cls_rgw_reshard_entry& entry); /* reshard thread */ - int process_single_logshard(int logshard_num); - int process_all_logshards(); + int process_single_logshard(int logshard_num, const DoutPrefixProvider *dpp); + int process_all_logshards(const DoutPrefixProvider *dpp); bool going_down(); void start_processor(); void stop_processor(); diff --git a/src/rgw/rgw_rest.cc b/src/rgw/rgw_rest.cc index cd7e730d89554..40af359718585 100644 --- a/src/rgw/rgw_rest.cc +++ b/src/rgw/rgw_rest.cc @@ -281,7 +281,7 @@ static void dump_status(struct req_state *s, int status, try { RESTFUL_IO(s)->send_status(status, status_name); } catch (rgw::io::Exception& e) { - ldout(s->cct, 0) << "ERROR: s->cio->send_status() returned err=" + ldpp_dout(s, 0) << "ERROR: s->cio->send_status() returned err=" << e.what() << dendl; } } @@ -337,7 +337,7 @@ void dump_header(struct req_state* const s, try { RESTFUL_IO(s)->send_header(name, val); } catch (rgw::io::Exception& e) { - ldout(s->cct, 0) << "ERROR: s->cio->send_header() returned err=" + ldpp_dout(s, 0) << "ERROR: s->cio->send_header() returned err=" << e.what() << dendl; } } @@ -376,7 +376,7 @@ void dump_content_length(struct req_state* const s, const uint64_t len) try { RESTFUL_IO(s)->send_content_length(len); } catch (rgw::io::Exception& e) { - ldout(s->cct, 0) << "ERROR: s->cio->send_content_length() returned err=" + ldpp_dout(s, 0) << "ERROR: s->cio->send_content_length() returned err=" << e.what() << dendl; } dump_header(s, "Accept-Ranges", "bytes"); @@ -387,7 +387,7 @@ static void dump_chunked_encoding(struct req_state* const s) try { RESTFUL_IO(s)->send_chunked_transfer_encoding(); } catch (rgw::io::Exception& e) { - ldout(s->cct, 0) << "ERROR: RESTFUL_IO(s)->send_chunked_transfer_encoding()" + ldpp_dout(s, 0) << "ERROR: RESTFUL_IO(s)->send_chunked_transfer_encoding()" << " returned err=" << e.what() << dendl; } } @@ -623,7 +623,7 @@ void end_header(struct req_state* s, RGWOp* op, const char *content_type, try { RESTFUL_IO(s)->complete_header(); } catch (rgw::io::Exception& e) { - ldout(s->cct, 0) << "ERROR: RESTFUL_IO(s)->complete_header() returned err=" + ldpp_dout(s, 0) << "ERROR: RESTFUL_IO(s)->complete_header() returned err=" << e.what() << dendl; } @@ -661,13 +661,13 @@ void abort_early(struct req_state *s, RGWOp* op, int err_no, if (op != NULL) { int new_err_no; new_err_no = op->error_handler(err_no, &error_content, y); - ldout(s->cct, 1) << "op->ERRORHANDLER: err_no=" << err_no + ldpp_dout(s, 1) << "op->ERRORHANDLER: 
err_no=" << err_no << " new_err_no=" << new_err_no << dendl; err_no = new_err_no; } else if (handler != NULL) { int new_err_no; new_err_no = handler->error_handler(err_no, &error_content, y); - ldout(s->cct, 1) << "handler->ERRORHANDLER: err_no=" << err_no + ldpp_dout(s, 1) << "handler->ERRORHANDLER: err_no=" << err_no << " new_err_no=" << new_err_no << dendl; err_no = new_err_no; } @@ -724,7 +724,7 @@ void dump_continue(struct req_state * const s) try { RESTFUL_IO(s)->send_100_continue(); } catch (rgw::io::Exception& e) { - ldout(s->cct, 0) << "ERROR: RESTFUL_IO(s)->send_100_continue() returned err=" + ldpp_dout(s, 0) << "ERROR: RESTFUL_IO(s)->send_100_continue() returned err=" << e.what() << dendl; } } @@ -1018,7 +1018,7 @@ int RGWPutObj_ObjStore::get_params(optional_yield y) { int ret = 0; ret = torrent.get_params(); - ldout(s->cct, 5) << "NOTICE: open produce torrent file " << dendl; + ldpp_dout(s, 5) << "NOTICE: open produce torrent file " << dendl; if (ret < 0) { return ret; @@ -1410,12 +1410,12 @@ int RGWPostObj_ObjStore::get_params(optional_yield y) } if (s->cct->_conf->subsys.should_gather()) { - ldout(s->cct, 20) << "request content_type_str=" + ldpp_dout(s, 20) << "request content_type_str=" << req_content_type_str << dendl; - ldout(s->cct, 20) << "request content_type params:" << dendl; + ldpp_dout(s, 20) << "request content_type params:" << dendl; for (const auto& pair : params) { - ldout(s->cct, 20) << " " << pair.first << " -> " << pair.second + ldpp_dout(s, 20) << " " << pair.first << " -> " << pair.second << dendl; } } @@ -1438,7 +1438,7 @@ int RGWPutACLs_ObjStore::get_params(optional_yield y) { const auto max_size = s->cct->_conf->rgw_max_put_param_size; std::tie(op_ret, data) = rgw_rest_read_all_input(s, max_size, false); - ldout(s->cct, 0) << "RGWPutACLs_ObjStore::get_params read data is: " << data.c_str() << dendl; + ldpp_dout(s, 0) << "RGWPutACLs_ObjStore::get_params read data is: " << data.c_str() << dendl; return op_ret; } @@ -1574,7 +1574,7 @@ int RGWListMultipart_ObjStore::get_params(optional_yield y) string err; marker = strict_strtol(marker_str.c_str(), 10, &err); if (!err.empty()) { - ldout(s->cct, 20) << "bad marker: " << marker << dendl; + ldpp_dout(s, 20) << "bad marker: " << marker << dendl; op_ret = -EINVAL; return op_ret; } @@ -1850,7 +1850,7 @@ int RGWHandler_REST::init_permissions(RGWOp* op, optional_yield y) if (! s->user->get_id().empty() && s->auth.identity->get_identity_type() != TYPE_ROLE) { try { map uattrs; - if (auto ret = store->ctl()->user->get_attrs_by_uid(s->user->get_id(), &uattrs, y); ! ret) { + if (auto ret = store->ctl()->user->get_attrs_by_uid(s, s->user->get_id(), &uattrs, y); ! 
ret) { auto user_policies = get_iam_user_policy_from_attr(s->cct, store, uattrs, s->user->get_tenant()); s->iam_user_policies.insert(s->iam_user_policies.end(), std::make_move_iterator(user_policies.begin()), @@ -1858,14 +1858,14 @@ int RGWHandler_REST::init_permissions(RGWOp* op, optional_yield y) } } catch (const std::exception& e) { - lderr(s->cct) << "Error reading IAM User Policy: " << e.what() << dendl; + ldpp_dout(op, -1) << "Error reading IAM User Policy: " << e.what() << dendl; } } rgw_build_iam_environment(store, s); return 0; } - return do_init_permissions(y); + return do_init_permissions(op, y); } int RGWHandler_REST::read_permissions(RGWOp* op_obj, optional_yield y) @@ -2037,7 +2037,7 @@ int RGWREST::preprocess(struct req_state *s, rgw::io::BasicClient* cio) if (api_s3website_priority_rawpos != apis.end()) { api_priority_s3website = apis.size() - std::distance(apis.begin(), api_s3website_priority_rawpos); } - ldout(s->cct, 10) << "rgw api priority: s3=" << api_priority_s3 << " s3website=" << api_priority_s3website << dendl; + ldpp_dout(s, 10) << "rgw api priority: s3=" << api_priority_s3 << " s3website=" << api_priority_s3website << dendl; bool s3website_enabled = api_priority_s3website >= 0; if (info.host.size()) { @@ -2053,7 +2053,7 @@ int RGWREST::preprocess(struct req_state *s, rgw::io::BasicClient* cio) info.host = info.host.substr(0, pos); } } - ldout(s->cct, 10) << "host=" << info.host << dendl; + ldpp_dout(s, 10) << "host=" << info.host << dendl; string domain; string subdomain; bool in_hosted_domain_s3website = false; @@ -2071,7 +2071,7 @@ int RGWREST::preprocess(struct req_state *s, rgw::io::BasicClient* cio) } } - ldout(s->cct, 20) + ldpp_dout(s, 20) << "subdomain=" << subdomain << " domain=" << domain << " in_hosted_domain=" << in_hosted_domain @@ -2085,13 +2085,13 @@ int RGWREST::preprocess(struct req_state *s, rgw::io::BasicClient* cio) bool found; int r = rgw_resolver->resolve_cname(info.host, cname, &found); if (r < 0) { - ldout(s->cct, 0) + ldpp_dout(s, 0) << "WARNING: rgw_resolver->resolve_cname() returned r=" << r << dendl; } if (found) { - ldout(s->cct, 5) << "resolved host cname " << info.host << " -> " + ldpp_dout(s, 5) << "resolved host cname " << info.host << " -> " << cname << dendl; in_hosted_domain = rgw_find_host_in_domains(cname, &domain, &subdomain, hostnames_set); @@ -2110,7 +2110,7 @@ int RGWREST::preprocess(struct req_state *s, rgw::io::BasicClient* cio) } } - ldout(s->cct, 20) + ldpp_dout(s, 20) << "subdomain=" << subdomain << " domain=" << domain << " in_hosted_domain=" << in_hosted_domain @@ -2159,7 +2159,7 @@ int RGWREST::preprocess(struct req_state *s, rgw::io::BasicClient* cio) s->info.domain = domain; } - ldout(s->cct, 20) + ldpp_dout(s, 20) << "final domain/bucket" << " subdomain=" << subdomain << " domain=" << domain @@ -2239,14 +2239,14 @@ int RGWREST::preprocess(struct req_state *s, rgw::io::BasicClient* cio) string err; s->content_length = strict_strtoll(s->length, 10, &err); if (!err.empty()) { - ldout(s->cct, 10) << "bad content length, aborting" << dendl; + ldpp_dout(s, 10) << "bad content length, aborting" << dendl; return -EINVAL; } } } if (s->content_length < 0) { - ldout(s->cct, 10) << "negative content length, aborting" << dendl; + ldpp_dout(s, 10) << "negative content length, aborting" << dendl; return -EINVAL; } @@ -2265,7 +2265,7 @@ int RGWREST::preprocess(struct req_state *s, rgw::io::BasicClient* cio) } s->op = op_from_method(info.method); - info.init_meta_info(&s->has_bad_meta); + info.init_meta_info(s, 
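The subdomain/domain/in_hosted_domain values logged by RGWREST::preprocess() come from matching the Host header against the configured hostnames. A much-simplified sketch of that match (the real code also strips ports, consults CNAMEs and the s3website domain set):

#include <iostream>
#include <string>
#include <vector>

static bool find_host_in_domains(const std::string& host,
                                 const std::vector<std::string>& domains,
                                 std::string* domain, std::string* subdomain) {
  for (const auto& d : domains) {
    if (host == d) { *domain = d; subdomain->clear(); return true; }
    // "bucket.s3.example.com" ends with ".s3.example.com"
    if (host.size() > d.size() + 1 &&
        host.compare(host.size() - d.size(), d.size(), d) == 0 &&
        host[host.size() - d.size() - 1] == '.') {
      *domain = d;
      *subdomain = host.substr(0, host.size() - d.size() - 1);
      return true;
    }
  }
  return false;
}

int main() {
  std::string domain, subdomain;
  bool in_hosted_domain =
    find_host_in_domains("mybucket.s3.example.com", {"s3.example.com"},
                         &domain, &subdomain);
  std::cout << "subdomain=" << subdomain << " domain=" << domain
            << " in_hosted_domain=" << in_hosted_domain << "\n";
}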
&s->has_bad_meta); return 0; } diff --git a/src/rgw/rgw_rest_bucket.cc b/src/rgw/rgw_rest_bucket.cc index 3a07cebf0a68f..7726da6ada8e3 100644 --- a/src/rgw/rgw_rest_bucket.cc +++ b/src/rgw/rgw_rest_bucket.cc @@ -48,7 +48,7 @@ void RGWOp_Bucket_Info::execute(optional_yield y) op_state.set_bucket_name(bucket); op_state.set_fetch_stats(fetch_stats); - op_ret = RGWBucketAdminOp::info(store, op_state, flusher, y); + op_ret = RGWBucketAdminOp::info(store, op_state, flusher, y, this); } class RGWOp_Get_Policy : public RGWRESTOp { @@ -78,7 +78,7 @@ void RGWOp_Get_Policy::execute(optional_yield y) op_state.set_bucket_name(bucket); op_state.set_object(object); - op_ret = RGWBucketAdminOp::get_policy(store, op_state, flusher); + op_ret = RGWBucketAdminOp::get_policy(store, op_state, flusher, this); } class RGWOp_Check_Bucket_Index : public RGWRESTOp { @@ -112,7 +112,7 @@ void RGWOp_Check_Bucket_Index::execute(optional_yield y) op_state.set_fix_index(fix_index); op_state.set_check_objects(check_objects); - op_ret = RGWBucketAdminOp::check_index(store, op_state, flusher, s->yield); + op_ret = RGWBucketAdminOp::check_index(store, op_state, flusher, s->yield, s); } class RGWOp_Bucket_Link : public RGWRESTOp { @@ -150,12 +150,12 @@ void RGWOp_Bucket_Link::execute(optional_yield y) op_state.set_new_bucket_name(new_bucket_name); bufferlist data; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; } - op_ret = RGWBucketAdminOp::link(store, op_state); + op_ret = RGWBucketAdminOp::link(store, op_state, s); } class RGWOp_Bucket_Unlink : public RGWRESTOp { @@ -188,12 +188,12 @@ void RGWOp_Bucket_Unlink::execute(optional_yield y) op_state.set_bucket_name(bucket); bufferlist data; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; } - op_ret = RGWBucketAdminOp::unlink(store, op_state); + op_ret = RGWBucketAdminOp::unlink(store, op_state, s); } class RGWOp_Bucket_Remove : public RGWRESTOp { @@ -219,13 +219,13 @@ void RGWOp_Bucket_Remove::execute(optional_yield y) RESTArgs::get_string(s, "bucket", bucket_name, &bucket_name); RESTArgs::get_bool(s, "purge-objects", false, &delete_children); - op_ret = store->get_bucket(nullptr, string(), bucket_name, &bucket, y); + op_ret = store->get_bucket(s, nullptr, string(), bucket_name, &bucket, y); if (op_ret < 0) { ldpp_dout(this, 0) << "get_bucket returned ret=" << op_ret << dendl; return; } - op_ret = bucket->remove_bucket(delete_children, string(), string(), true, &s->info, s->yield); + op_ret = bucket->remove_bucket(s, delete_children, string(), string(), true, &s->info, s->yield); } class RGWOp_Set_Bucket_Quota : public RGWRESTOp { @@ -284,7 +284,7 @@ void RGWOp_Set_Bucket_Quota::execute(optional_yield y) if (use_http_params) { RGWBucketInfo bucket_info; map attrs; - op_ret = store->getRados()->get_bucket_info(store->svc(), uid.tenant, bucket, bucket_info, NULL, s->yield, &attrs); + op_ret = store->getRados()->get_bucket_info(store->svc(), uid.tenant, bucket, bucket_info, NULL, s->yield, s, &attrs); if (op_ret < 0) { return; } @@ -302,7 +302,7 @@ void 
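The admin bucket ops follow a forward-then-apply pattern: the request is first forwarded to the metadata master, and the local change is only made if that succeeds. Reduced to its shape (forward_to_master()/apply_locally() are stand-ins for forward_request_to_master() and the RGWBucketAdminOp call):

#include <iostream>

static int forward_to_master() { return 0; }
static int apply_locally()     { return 0; }

static int execute_like() {
  int ret = forward_to_master();
  if (ret < 0) {
    std::cerr << "forward_request_to_master returned ret=" << ret << "\n";
    return ret;              // do not touch local state if the master rejected it
  }
  return apply_locally();
}

int main() { return execute_like(); }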
RGWOp_Set_Bucket_Quota::execute(optional_yield y) op_state.set_bucket_name(bucket); op_state.set_quota(quota); - op_ret = RGWBucketAdminOp::set_quota(store, op_state); + op_ret = RGWBucketAdminOp::set_quota(store, op_state, s); } class RGWOp_Sync_Bucket : public RGWRESTOp { @@ -334,7 +334,7 @@ void RGWOp_Sync_Bucket::execute(optional_yield y) op_state.set_tenant(tenant); op_state.set_sync_bucket(sync_bucket); - op_ret = RGWBucketAdminOp::sync_bucket(store, op_state); + op_ret = RGWBucketAdminOp::sync_bucket(store, op_state, s); } class RGWOp_Object_Remove: public RGWRESTOp { @@ -364,7 +364,7 @@ void RGWOp_Object_Remove::execute(optional_yield y) op_state.set_bucket_name(bucket); op_state.set_object(object); - op_ret = RGWBucketAdminOp::remove_object(store, op_state); + op_ret = RGWBucketAdminOp::remove_object(store, op_state, s); } diff --git a/src/rgw/rgw_rest_client.cc b/src/rgw/rgw_rest_client.cc index b0d6e31ca40ad..dc78e418bcc1d 100644 --- a/src/rgw/rgw_rest_client.cc +++ b/src/rgw/rgw_rest_client.cc @@ -116,7 +116,7 @@ static void get_gmt_date_str(string& date_str) date_str = buffer; } -int RGWRESTSimpleRequest::execute(RGWAccessKey& key, const char *_method, const char *resource, optional_yield y) +int RGWRESTSimpleRequest::execute(const DoutPrefixProvider *dpp, RGWAccessKey& key, const char *_method, const char *resource, optional_yield y) { method = _method; string new_url = url; @@ -139,7 +139,7 @@ int RGWRESTSimpleRequest::execute(RGWAccessKey& key, const char *_method, const meta_map_t meta_map; map sub_resources; - rgw_create_s3_canonical_header(method.c_str(), NULL, NULL, date_str.c_str(), + rgw_create_s3_canonical_header(dpp, method.c_str(), NULL, NULL, date_str.c_str(), meta_map, meta_map, url.c_str(), sub_resources, canonical_header); @@ -152,7 +152,7 @@ int RGWRESTSimpleRequest::execute(RGWAccessKey& key, const char *_method, const string auth_hdr = "AWS " + key.id + ":" + digest; - ldout(cct, 15) << "generated auth header: " << auth_hdr << dendl; + ldpp_dout(dpp, 15) << "generated auth header: " << auth_hdr << dendl; headers.push_back(pair("AUTHORIZATION", auth_hdr)); int r = process(y); @@ -233,7 +233,7 @@ void RGWHTTPSimpleRequest::get_out_headers(map *pheaders) out_headers.clear(); } -static int sign_request(CephContext *cct, RGWAccessKey& key, RGWEnv& env, req_info& info) +static int sign_request(const DoutPrefixProvider *dpp, CephContext *cct, RGWAccessKey& key, RGWEnv& env, req_info& info) { /* don't sign if no key is provided */ if (key.key.empty()) { @@ -247,12 +247,12 @@ static int sign_request(CephContext *cct, RGWAccessKey& key, RGWEnv& env, req_in } string canonical_header; - if (!rgw_create_s3_canonical_header(info, NULL, canonical_header, false)) { - ldout(cct, 0) << "failed to create canonical s3 header" << dendl; + if (!rgw_create_s3_canonical_header(dpp, info, NULL, canonical_header, false)) { + ldpp_dout(dpp, 0) << "failed to create canonical s3 header" << dendl; return -EINVAL; } - ldout(cct, 10) << "generated canonical header: " << canonical_header << dendl; + ldpp_dout(dpp, 10) << "generated canonical header: " << canonical_header << dendl; string digest; try { @@ -262,14 +262,14 @@ static int sign_request(CephContext *cct, RGWAccessKey& key, RGWEnv& env, req_in } string auth_hdr = "AWS " + key.id + ":" + digest; - ldout(cct, 15) << "generated auth header: " << auth_hdr << dendl; + ldpp_dout(dpp, 15) << "generated auth header: " << auth_hdr << dendl; env.set("AUTHORIZATION", auth_hdr); return 0; } -int 
RGWRESTSimpleRequest::forward_request(RGWAccessKey& key, req_info& info, size_t max_response, bufferlist *inbl, bufferlist *outbl, optional_yield y) +int RGWRESTSimpleRequest::forward_request(const DoutPrefixProvider *dpp, RGWAccessKey& key, req_info& info, size_t max_response, bufferlist *inbl, bufferlist *outbl, optional_yield y) { string date_str; @@ -293,9 +293,9 @@ int RGWRESTSimpleRequest::forward_request(RGWAccessKey& key, req_info& info, siz if (content_md5) { new_env.set("HTTP_CONTENT_MD5", content_md5); } - int ret = sign_request(cct, key, new_env, new_info); + int ret = sign_request(dpp, cct, key, new_env, new_info); if (ret < 0) { - ldout(cct, 0) << "ERROR: failed to sign request" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to sign request" << dendl; return ret; } @@ -491,7 +491,7 @@ void RGWRESTGenerateHTTPHeaders::set_extra_headers(const map& ex } } -int RGWRESTGenerateHTTPHeaders::set_obj_attrs(map& rgw_attrs) +int RGWRESTGenerateHTTPHeaders::set_obj_attrs(const DoutPrefixProvider *dpp, map& rgw_attrs) { map new_attrs; @@ -508,9 +508,9 @@ int RGWRESTGenerateHTTPHeaders::set_obj_attrs(map& rgw_attrs } RGWAccessControlPolicy policy; - int ret = rgw_policy_from_attrset(cct, rgw_attrs, &policy); + int ret = rgw_policy_from_attrset(dpp, cct, rgw_attrs, &policy); if (ret < 0) { - ldout(cct, 0) << "ERROR: couldn't get policy ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: couldn't get policy ret=" << ret << dendl; return ret; } @@ -557,11 +557,11 @@ void RGWRESTGenerateHTTPHeaders::set_policy(RGWAccessControlPolicy& policy) add_grants_headers(grants_by_type, *new_env, new_info->x_meta_map); } -int RGWRESTGenerateHTTPHeaders::sign(RGWAccessKey& key) +int RGWRESTGenerateHTTPHeaders::sign(const DoutPrefixProvider *dpp, RGWAccessKey& key) { - int ret = sign_request(cct, key, *new_env, *new_info); + int ret = sign_request(dpp, cct, key, *new_env, *new_info); if (ret < 0) { - ldout(cct, 0) << "ERROR: failed to sign request" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to sign request" << dendl; return ret; } @@ -593,25 +593,25 @@ void RGWRESTStreamS3PutObj::send_init(rgw::sal::RGWObject* obj) url = headers_gen.get_url(); } -int RGWRESTStreamS3PutObj::send_ready(RGWAccessKey& key, map& rgw_attrs, bool send) +int RGWRESTStreamS3PutObj::send_ready(const DoutPrefixProvider *dpp, RGWAccessKey& key, map& rgw_attrs, bool send) { - headers_gen.set_obj_attrs(rgw_attrs); + headers_gen.set_obj_attrs(dpp, rgw_attrs); - return send_ready(key, send); + return send_ready(dpp, key, send); } -int RGWRESTStreamS3PutObj::send_ready(RGWAccessKey& key, const map& http_attrs, +int RGWRESTStreamS3PutObj::send_ready(const DoutPrefixProvider *dpp, RGWAccessKey& key, const map& http_attrs, RGWAccessControlPolicy& policy, bool send) { headers_gen.set_http_attrs(http_attrs); headers_gen.set_policy(policy); - return send_ready(key, send); + return send_ready(dpp, key, send); } -int RGWRESTStreamS3PutObj::send_ready(RGWAccessKey& key, bool send) +int RGWRESTStreamS3PutObj::send_ready(const DoutPrefixProvider *dpp, RGWAccessKey& key, bool send) { - headers_gen.sign(key); + headers_gen.sign(dpp, key); for (const auto& kv: new_env.get_map()) { headers.emplace_back(kv); @@ -628,10 +628,10 @@ int RGWRESTStreamS3PutObj::send_ready(RGWAccessKey& key, bool send) return 0; } -int RGWRESTStreamS3PutObj::put_obj_init(RGWAccessKey& key, rgw::sal::RGWObject* obj, uint64_t obj_size, map& attrs, bool send) +int RGWRESTStreamS3PutObj::put_obj_init(const DoutPrefixProvider *dpp, RGWAccessKey& key, rgw::sal::RGWObject* 
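sign_request() builds the S3 canonical string, HMAC-SHA1s it with the secret key and installs "AWS <access_key>:<signature>" as the AUTHORIZATION header. A rough standalone sketch of that v2-style signing using OpenSSL, simplified to hard-coded inputs with no x-amz meta headers or sub-resources:

#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <iostream>
#include <string>

// HMAC-SHA1 the canonical string with the secret key and base64 the digest.
static std::string sign_v2(const std::string& secret, const std::string& canonical) {
  unsigned char mac[EVP_MAX_MD_SIZE];
  unsigned int mac_len = 0;
  HMAC(EVP_sha1(), secret.data(), static_cast<int>(secret.size()),
       reinterpret_cast<const unsigned char*>(canonical.data()), canonical.size(),
       mac, &mac_len);
  unsigned char b64[2 * EVP_MAX_MD_SIZE];
  int n = EVP_EncodeBlock(b64, mac, static_cast<int>(mac_len));
  return std::string(reinterpret_cast<char*>(b64), n);
}

int main() {
  // method, content-md5, content-type, date, canonicalized resource
  std::string canonical = "PUT\n\n\nThu, 17 Nov 2005 18:49:58 GMT\n/bucket/object";
  std::string auth_hdr = "AWS ACCESSKEY:" + sign_v2("SECRET", canonical);
  std::cout << "AUTHORIZATION: " << auth_hdr << "\n";
}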
obj, uint64_t obj_size, map& attrs, bool send) { send_init(obj); - return send_ready(key, attrs, send); + return send_ready(dpp, key, attrs, send); } void set_str_from_headers(map& out_headers, const string& header_name, string& str) @@ -683,33 +683,33 @@ static void send_prepare_convert(const rgw_obj& obj, string *resource) *resource = urlsafe_bucket + "/" + urlsafe_object; } -int RGWRESTStreamRWRequest::send_request(RGWAccessKey& key, map& extra_headers, const rgw_obj& obj, RGWHTTPManager *mgr) +int RGWRESTStreamRWRequest::send_request(const DoutPrefixProvider *dpp, RGWAccessKey& key, map& extra_headers, const rgw_obj& obj, RGWHTTPManager *mgr) { string resource; send_prepare_convert(obj, &resource); - return send_request(&key, extra_headers, resource, mgr); + return send_request(dpp, &key, extra_headers, resource, mgr); } -int RGWRESTStreamRWRequest::send_prepare(RGWAccessKey& key, map& extra_headers, const rgw_obj& obj) +int RGWRESTStreamRWRequest::send_prepare(const DoutPrefixProvider *dpp, RGWAccessKey& key, map& extra_headers, const rgw_obj& obj) { string resource; send_prepare_convert(obj, &resource); - return do_send_prepare(&key, extra_headers, resource); + return do_send_prepare(dpp, &key, extra_headers, resource); } -int RGWRESTStreamRWRequest::send_prepare(RGWAccessKey *key, map& extra_headers, const string& resource, +int RGWRESTStreamRWRequest::send_prepare(const DoutPrefixProvider *dpp, RGWAccessKey *key, map& extra_headers, const string& resource, bufferlist *send_data) { string new_resource; //do not encode slash url_encode(resource, new_resource, false); - return do_send_prepare(key, extra_headers, new_resource, send_data); + return do_send_prepare(dpp, key, extra_headers, new_resource, send_data); } -int RGWRESTStreamRWRequest::do_send_prepare(RGWAccessKey *key, map& extra_headers, const string& resource, +int RGWRESTStreamRWRequest::do_send_prepare(const DoutPrefixProvider *dpp, RGWAccessKey *key, map& extra_headers, const string& resource, bufferlist *send_data) { string new_url = url; @@ -757,9 +757,9 @@ int RGWRESTStreamRWRequest::do_send_prepare(RGWAccessKey *key, map& extra_headers, const string& resource, +int RGWRESTStreamRWRequest::send_request(const DoutPrefixProvider *dpp, RGWAccessKey *key, map& extra_headers, const string& resource, RGWHTTPManager *mgr, bufferlist *send_data) { - int ret = send_prepare(key, extra_headers, resource, send_data); + int ret = send_prepare(dpp, key, extra_headers, resource, send_data); if (ret < 0) { return ret; } diff --git a/src/rgw/rgw_rest_client.h b/src/rgw/rgw_rest_client.h index c065dc0248a23..577f0ff989360 100644 --- a/src/rgw/rgw_rest_client.h +++ b/src/rgw/rgw_rest_client.h @@ -63,8 +63,8 @@ public: RGWRESTSimpleRequest(CephContext *_cct, const string& _method, const string& _url, param_vec_t *_headers, param_vec_t *_params) : RGWHTTPSimpleRequest(_cct, _method, _url, _headers, _params) {} - int execute(RGWAccessKey& key, const char *method, const char *resource, optional_yield y); - int forward_request(RGWAccessKey& key, req_info& info, size_t max_response, bufferlist *inbl, bufferlist *outbl, optional_yield y); + int execute(const DoutPrefixProvider *dpp, RGWAccessKey& key, const char *method, const char *resource, optional_yield y); + int forward_request(const DoutPrefixProvider *dpp, RGWAccessKey& key, req_info& info, size_t max_response, bufferlist *inbl, bufferlist *outbl, optional_yield y); }; class RGWWriteDrainCB { @@ -86,10 +86,10 @@ public: RGWRESTGenerateHTTPHeaders(CephContext *_cct, RGWEnv *_env, req_info 
*_info) : cct(_cct), new_env(_env), new_info(_info) {} void init(const string& method, const string& url, const string& resource, const param_vec_t& params); void set_extra_headers(const map& extra_headers); - int set_obj_attrs(map& rgw_attrs); + int set_obj_attrs(const DoutPrefixProvider *dpp, map& rgw_attrs); void set_http_attrs(const map& http_attrs); void set_policy(RGWAccessControlPolicy& policy); - int sign(RGWAccessKey& key); + int sign(const DoutPrefixProvider *dpp, RGWAccessKey& key); const string& get_url() { return url; } }; @@ -169,12 +169,12 @@ public: } virtual ~RGWRESTStreamRWRequest() override {} - int send_prepare(RGWAccessKey *key, map& extra_headers, const string& resource, bufferlist *send_data = nullptr /* optional input data */); - int send_prepare(RGWAccessKey& key, map& extra_headers, const rgw_obj& obj); + int send_prepare(const DoutPrefixProvider *dpp, RGWAccessKey *key, map& extra_headers, const string& resource, bufferlist *send_data = nullptr /* optional input data */); + int send_prepare(const DoutPrefixProvider *dpp, RGWAccessKey& key, map& extra_headers, const rgw_obj& obj); int send(RGWHTTPManager *mgr); - int send_request(RGWAccessKey& key, map& extra_headers, const rgw_obj& obj, RGWHTTPManager *mgr); - int send_request(RGWAccessKey *key, map& extra_headers, const string& resource, RGWHTTPManager *mgr, bufferlist *send_data = nullptr /* optional input data */); + int send_request(const DoutPrefixProvider *dpp, RGWAccessKey& key, map& extra_headers, const rgw_obj& obj, RGWHTTPManager *mgr); + int send_request(const DoutPrefixProvider *dpp, RGWAccessKey *key, map& extra_headers, const string& resource, RGWHTTPManager *mgr, bufferlist *send_data = nullptr /* optional input data */); int complete_request(optional_yield y, string *etag = nullptr, @@ -186,7 +186,7 @@ public: void add_params(param_vec_t *params); private: - int do_send_prepare(RGWAccessKey *key, map& extra_headers, const string& resource, bufferlist *send_data = nullptr /* optional input data */); + int do_send_prepare(const DoutPrefixProvider *dpp, RGWAccessKey *key, map& extra_headers, const string& resource, bufferlist *send_data = nullptr /* optional input data */); }; class RGWRESTStreamReadRequest : public RGWRESTStreamRWRequest { @@ -213,12 +213,12 @@ public: ~RGWRESTStreamS3PutObj() override; void send_init(rgw::sal::RGWObject* obj); - int send_ready(RGWAccessKey& key, map& rgw_attrs, bool send); - int send_ready(RGWAccessKey& key, const map& http_attrs, + int send_ready(const DoutPrefixProvider *dpp, RGWAccessKey& key, map& rgw_attrs, bool send); + int send_ready(const DoutPrefixProvider *dpp, RGWAccessKey& key, const map& http_attrs, RGWAccessControlPolicy& policy, bool send); - int send_ready(RGWAccessKey& key, bool send); + int send_ready(const DoutPrefixProvider *dpp, RGWAccessKey& key, bool send); - int put_obj_init(RGWAccessKey& key, rgw::sal::RGWObject* obj, uint64_t obj_size, map& attrs, bool send); + int put_obj_init(const DoutPrefixProvider *dpp, RGWAccessKey& key, rgw::sal::RGWObject* obj, uint64_t obj_size, map& attrs, bool send); RGWGetDataCB *get_out_cb() { return out_cb; } }; diff --git a/src/rgw/rgw_rest_config.cc b/src/rgw/rgw_rest_config.cc index 9d906ec4c7bbc..07b56919d5233 100644 --- a/src/rgw/rgw_rest_config.cc +++ b/src/rgw/rgw_rest_config.cc @@ -31,9 +31,9 @@ #define dout_subsys ceph_subsys_rgw void RGWOp_ZoneGroupMap_Get::execute(optional_yield y) { - op_ret = zonegroup_map.read(g_ceph_context, store->svc()->sysobj, y); + op_ret = zonegroup_map.read(this, 
g_ceph_context, store->svc()->sysobj, y); if (op_ret < 0) { - dout(5) << "failed to read zone_group map" << dendl; + ldpp_dout(this, 5) << "failed to read zone_group map" << dendl; } } diff --git a/src/rgw/rgw_rest_conn.cc b/src/rgw/rgw_rest_conn.cc index 3ee9183944c8f..d4c377de1587e 100644 --- a/src/rgw/rgw_rest_conn.cc +++ b/src/rgw/rgw_rest_conn.cc @@ -86,7 +86,7 @@ void RGWRESTConn::populate_params(param_vec_t& params, const rgw_user *uid, cons populate_zonegroup(params, zonegroup); } -int RGWRESTConn::forward(const rgw_user& uid, req_info& info, obj_version *objv, size_t max_response, bufferlist *inbl, bufferlist *outbl, optional_yield y) +int RGWRESTConn::forward(const DoutPrefixProvider *dpp, const rgw_user& uid, req_info& info, obj_version *objv, size_t max_response, bufferlist *inbl, bufferlist *outbl, optional_yield y) { string url; int ret = get_url(url); @@ -101,7 +101,7 @@ int RGWRESTConn::forward(const rgw_user& uid, req_info& info, obj_version *objv, params.push_back(param_pair_t(RGW_SYS_PARAM_PREFIX "ver", buf)); } RGWRESTSimpleRequest req(cct, info.method, url, NULL, ¶ms); - return req.forward_request(key, info, max_response, inbl, outbl, y); + return req.forward_request(dpp, key, info, max_response, inbl, outbl, y); } class StreamObjData : public RGWGetDataCB { @@ -131,7 +131,8 @@ int RGWRESTConn::put_obj_send_init(rgw::sal::RGWObject* obj, const rgw_http_para return 0; } -int RGWRESTConn::put_obj_async(const rgw_user& uid, rgw::sal::RGWObject* obj, uint64_t obj_size, +int RGWRESTConn::put_obj_async(const DoutPrefixProvider *dpp, + const rgw_user& uid, rgw::sal::RGWObject* obj, uint64_t obj_size, map& attrs, bool send, RGWRESTStreamS3PutObj **req) { @@ -143,7 +144,7 @@ int RGWRESTConn::put_obj_async(const rgw_user& uid, rgw::sal::RGWObject* obj, ui param_vec_t params; populate_params(params, &uid, self_zone_group); RGWRESTStreamS3PutObj *wr = new RGWRESTStreamS3PutObj(cct, "PUT", url, NULL, ¶ms, host_style); - ret = wr->put_obj_init(key, obj, obj_size, attrs, send); + ret = wr->put_obj_init(dpp, key, obj, obj_size, attrs, send); if (ret < 0) { delete wr; return ret; @@ -185,7 +186,7 @@ static void set_header(T val, map& headers, const string& header } -int RGWRESTConn::get_obj(const rgw_user& uid, req_info *info /* optional */, const rgw::sal::RGWObject* obj, +int RGWRESTConn::get_obj(const DoutPrefixProvider *dpp, const rgw_user& uid, req_info *info /* optional */, const rgw::sal::RGWObject* obj, const real_time *mod_ptr, const real_time *unmod_ptr, uint32_t mod_zone_id, uint64_t mod_pg_ver, bool prepend_metadata, bool get_op, bool rgwx_stat, @@ -203,10 +204,10 @@ int RGWRESTConn::get_obj(const rgw_user& uid, req_info *info /* optional */, con params.sync_manifest = sync_manifest; params.skip_decrypt = skip_decrypt; params.cb = cb; - return get_obj(obj, params, send, req); + return get_obj(dpp, obj, params, send, req); } -int RGWRESTConn::get_obj(const rgw::sal::RGWObject* obj, const get_obj_params& in_params, bool send, RGWRESTStreamRWRequest **req) +int RGWRESTConn::get_obj(const DoutPrefixProvider *dpp, const rgw::sal::RGWObject* obj, const get_obj_params& in_params, bool send, RGWRESTStreamRWRequest **req) { string url; int ret = get_url(url); @@ -269,7 +270,7 @@ int RGWRESTConn::get_obj(const rgw::sal::RGWObject* obj, const get_obj_params& i set_header(buf, extra_headers, "RANGE"); } - int r = (*req)->send_prepare(key, extra_headers, obj->get_obj()); + int r = (*req)->send_prepare(dpp, key, extra_headers, obj->get_obj()); if (r < 0) { goto done_err; } @@ -303,7 
+304,8 @@ int RGWRESTConn::complete_request(RGWRESTStreamRWRequest *req, return ret; } -int RGWRESTConn::get_resource(const string& resource, +int RGWRESTConn::get_resource(const DoutPrefixProvider *dpp, + const string& resource, param_vec_t *extra_params, map *extra_headers, bufferlist& bl, @@ -333,9 +335,9 @@ int RGWRESTConn::get_resource(const string& resource, headers.insert(extra_headers->begin(), extra_headers->end()); } - ret = req.send_request(&key, headers, resource, mgr, send_data); + ret = req.send_request(dpp, &key, headers, resource, mgr, send_data); if (ret < 0) { - ldout(cct, 5) << __func__ << ": send_request() resource=" << resource << " returned ret=" << ret << dendl; + ldpp_dout(dpp, 5) << __func__ << ": send_request() resource=" << resource << " returned ret=" << ret << dendl; return ret; } @@ -376,22 +378,22 @@ void RGWRESTReadResource::init_common(param_vec_t *extra_headers) req.set_params(¶ms); } -int RGWRESTReadResource::read(optional_yield y) +int RGWRESTReadResource::read(const DoutPrefixProvider *dpp, optional_yield y) { - int ret = req.send_request(&conn->get_key(), headers, resource, mgr); + int ret = req.send_request(dpp, &conn->get_key(), headers, resource, mgr); if (ret < 0) { - ldout(cct, 5) << __func__ << ": send_request() resource=" << resource << " returned ret=" << ret << dendl; + ldpp_dout(dpp, 5) << __func__ << ": send_request() resource=" << resource << " returned ret=" << ret << dendl; return ret; } return req.complete_request(y); } -int RGWRESTReadResource::aio_read() +int RGWRESTReadResource::aio_read(const DoutPrefixProvider *dpp) { - int ret = req.send_request(&conn->get_key(), headers, resource, mgr); + int ret = req.send_request(dpp, &conn->get_key(), headers, resource, mgr); if (ret < 0) { - ldout(cct, 5) << __func__ << ": send_request() resource=" << resource << " returned ret=" << ret << dendl; + ldpp_dout(dpp, 5) << __func__ << ": send_request() resource=" << resource << " returned ret=" << ret << dendl; return ret; } @@ -434,28 +436,28 @@ void RGWRESTSendResource::init_common(param_vec_t *extra_headers) req.set_params(¶ms); } -int RGWRESTSendResource::send(bufferlist& outbl, optional_yield y) +int RGWRESTSendResource::send(const DoutPrefixProvider *dpp, bufferlist& outbl, optional_yield y) { req.set_send_length(outbl.length()); req.set_outbl(outbl); - int ret = req.send_request(&conn->get_key(), headers, resource, mgr); + int ret = req.send_request(dpp, &conn->get_key(), headers, resource, mgr); if (ret < 0) { - ldout(cct, 5) << __func__ << ": send_request() resource=" << resource << " returned ret=" << ret << dendl; + ldpp_dout(dpp, 5) << __func__ << ": send_request() resource=" << resource << " returned ret=" << ret << dendl; return ret; } return req.complete_request(y); } -int RGWRESTSendResource::aio_send(bufferlist& outbl) +int RGWRESTSendResource::aio_send(const DoutPrefixProvider *dpp, bufferlist& outbl) { req.set_send_length(outbl.length()); req.set_outbl(outbl); - int ret = req.send_request(&conn->get_key(), headers, resource, mgr); + int ret = req.send_request(dpp, &conn->get_key(), headers, resource, mgr); if (ret < 0) { - ldout(cct, 5) << __func__ << ": send_request() resource=" << resource << " returned ret=" << ret << dendl; + ldpp_dout(dpp, 5) << __func__ << ": send_request() resource=" << resource << " returned ret=" << ret << dendl; return ret; } diff --git a/src/rgw/rgw_rest_conn.h b/src/rgw/rgw_rest_conn.h index b4fac21391b74..97578e2d6a612 100644 --- a/src/rgw/rgw_rest_conn.h +++ b/src/rgw/rgw_rest_conn.h @@ -108,12 
+108,12 @@ public: virtual void populate_params(param_vec_t& params, const rgw_user *uid, const string& zonegroup); /* sync request */ - int forward(const rgw_user& uid, req_info& info, obj_version *objv, size_t max_response, bufferlist *inbl, bufferlist *outbl, optional_yield y); + int forward(const DoutPrefixProvider *dpp, const rgw_user& uid, req_info& info, obj_version *objv, size_t max_response, bufferlist *inbl, bufferlist *outbl, optional_yield y); /* async requests */ int put_obj_send_init(rgw::sal::RGWObject* obj, const rgw_http_param_pair *extra_params, RGWRESTStreamS3PutObj **req); - int put_obj_async(const rgw_user& uid, rgw::sal::RGWObject* obj, uint64_t obj_size, + int put_obj_async(const DoutPrefixProvider *dpp, const rgw_user& uid, rgw::sal::RGWObject* obj, uint64_t obj_size, map& attrs, bool send, RGWRESTStreamS3PutObj **req); int complete_request(RGWRESTStreamS3PutObj *req, string& etag, ceph::real_time *mtime, optional_yield y); @@ -143,9 +143,9 @@ public: uint64_t range_end{0}; }; - int get_obj(const rgw::sal::RGWObject* obj, const get_obj_params& params, bool send, RGWRESTStreamRWRequest **req); + int get_obj(const DoutPrefixProvider *dpp, const rgw::sal::RGWObject* obj, const get_obj_params& params, bool send, RGWRESTStreamRWRequest **req); - int get_obj(const rgw_user& uid, req_info *info /* optional */, const rgw::sal::RGWObject* obj, + int get_obj(const DoutPrefixProvider *dpp, const rgw_user& uid, req_info *info /* optional */, const rgw::sal::RGWObject* obj, const ceph::real_time *mod_ptr, const ceph::real_time *unmod_ptr, uint32_t mod_zone_id, uint64_t mod_pg_ver, bool prepend_metadata, bool get_op, bool rgwx_stat, bool sync_manifest, @@ -158,7 +158,8 @@ public: map *pheaders, optional_yield y); - int get_resource(const string& resource, + int get_resource(const DoutPrefixProvider *dpp, + const string& resource, param_vec_t *extra_params, map* extra_headers, bufferlist& bl, @@ -167,13 +168,13 @@ public: optional_yield y); template - int get_json_resource(const string& resource, param_vec_t *params, + int get_json_resource(const DoutPrefixProvider *dpp, const string& resource, param_vec_t *params, bufferlist *in_data, optional_yield y, T& t); template - int get_json_resource(const string& resource, param_vec_t *params, + int get_json_resource(const DoutPrefixProvider *dpp, const string& resource, param_vec_t *params, optional_yield y, T& t); template - int get_json_resource(const string& resource, const rgw_http_param_pair *pp, + int get_json_resource(const DoutPrefixProvider *dpp, const string& resource, const rgw_http_param_pair *pp, optional_yield y, T& t); private: @@ -211,11 +212,11 @@ public: template -int RGWRESTConn::get_json_resource(const string& resource, param_vec_t *params, +int RGWRESTConn::get_json_resource(const DoutPrefixProvider *dpp, const string& resource, param_vec_t *params, bufferlist *in_data, optional_yield y, T& t) { bufferlist bl; - int ret = get_resource(resource, params, nullptr, bl, in_data, nullptr, y); + int ret = get_resource(dpp, resource, params, nullptr, bl, in_data, nullptr, y); if (ret < 0) { return ret; } @@ -229,18 +230,18 @@ int RGWRESTConn::get_json_resource(const string& resource, param_vec_t *params, } template -int RGWRESTConn::get_json_resource(const string& resource, param_vec_t *params, +int RGWRESTConn::get_json_resource(const DoutPrefixProvider *dpp, const string& resource, param_vec_t *params, optional_yield y, T& t) { - return get_json_resource(resource, params, nullptr, y, t); + return get_json_resource(dpp, 
resource, params, nullptr, y, t); } template -int RGWRESTConn::get_json_resource(const string& resource, const rgw_http_param_pair *pp, +int RGWRESTConn::get_json_resource(const DoutPrefixProvider *dpp, const string& resource, const rgw_http_param_pair *pp, optional_yield y, T& t) { param_vec_t params = make_param_list(pp); - return get_json_resource(resource, ¶ms, y, t); + return get_json_resource(dpp, resource, ¶ms, y, t); } class RGWStreamIntoBufferlist : public RGWHTTPStreamRWRequest::ReceiveCB { @@ -296,9 +297,9 @@ public: template int decode_resource(T *dest); - int read(optional_yield y); + int read(const DoutPrefixProvider *dpp, optional_yield y); - int aio_read(); + int aio_read(const DoutPrefixProvider *dpp); string to_str() { return req.to_str(); @@ -325,7 +326,7 @@ public: int wait(T *dest, optional_yield y); template - int fetch(T *dest, optional_yield y); + int fetch(const DoutPrefixProvider *dpp, T *dest, optional_yield y); }; @@ -344,9 +345,9 @@ int RGWRESTReadResource::decode_resource(T *dest) } template -int RGWRESTReadResource::fetch(T *dest, optional_yield y) +int RGWRESTReadResource::fetch(const DoutPrefixProvider *dpp, T *dest, optional_yield y) { - int ret = read(y); + int ret = read(dpp, y); if (ret < 0) { return ret; } @@ -417,9 +418,9 @@ public: return req.get_io_user_info(); } - int send(bufferlist& bl, optional_yield y); + int send(const DoutPrefixProvider *dpp, bufferlist& bl, optional_yield y); - int aio_send(bufferlist& bl); + int aio_send(const DoutPrefixProvider *dpp, bufferlist& bl); string to_str() { return req.to_str(); diff --git a/src/rgw/rgw_rest_iam.cc b/src/rgw/rgw_rest_iam.cc index c7250129b84b2..fc050ae8bccfe 100644 --- a/src/rgw/rgw_rest_iam.cc +++ b/src/rgw/rgw_rest_iam.cc @@ -19,7 +19,7 @@ void RGWHandler_REST_IAM::rgw_iam_parse_input() { if (post_body.size() > 0) { - ldout(s->cct, 10) << "Content of POST: " << post_body << dendl; + ldpp_dout(s, 10) << "Content of POST: " << post_body << dendl; if (post_body.find("Action") != string::npos) { boost::char_separator sep("&"); @@ -89,7 +89,7 @@ int RGWHandler_REST_IAM::init(rgw::sal::RGWRadosStore *store, s->dialect = "iam"; if (int ret = RGWHandler_REST_IAM::init_from_header(s, RGW_FORMAT_XML, true); ret < 0) { - ldout(s->cct, 10) << "init_from_header returned err=" << ret << dendl; + ldpp_dout(s, 10) << "init_from_header returned err=" << ret << dendl; return ret; } @@ -118,7 +118,7 @@ int RGWHandler_REST_IAM::init_from_header(struct req_state* s, } s->info.args.set(p); - s->info.args.parse(); + s->info.args.parse(s); /* must be called after the args parsing */ if (int ret = allocate_formatter(s, default_formatter, configurable_format); ret < 0) diff --git a/src/rgw/rgw_rest_log.cc b/src/rgw/rgw_rest_log.cc index 4003e157164f5..2acefaf1fdbb8 100644 --- a/src/rgw/rgw_rest_log.cc +++ b/src/rgw/rgw_rest_log.cc @@ -48,14 +48,14 @@ void RGWOp_MDLog_List::execute(optional_yield y) { if (s->info.args.exists("start-time") || s->info.args.exists("end-time")) { - dout(5) << "start-time and end-time are no longer accepted" << dendl; + ldpp_dout(this, 5) << "start-time and end-time are no longer accepted" << dendl; op_ret = -EINVAL; return; } shard_id = (unsigned)strict_strtol(shard.c_str(), 10, &err); if (!err.empty()) { - dout(5) << "Error parsing shard_id " << shard << dendl; + ldpp_dout(this, 5) << "Error parsing shard_id " << shard << dendl; op_ret = -EINVAL; return; } @@ -63,7 +63,7 @@ void RGWOp_MDLog_List::execute(optional_yield y) { if (!max_entries_str.empty()) { max_entries = 
(unsigned)strict_strtol(max_entries_str.c_str(), 10, &err); if (!err.empty()) { - dout(5) << "Error parsing max-entries " << max_entries_str << dendl; + ldpp_dout(this, 5) << "Error parsing max-entries " << max_entries_str << dendl; op_ret = -EINVAL; return; } @@ -73,10 +73,10 @@ void RGWOp_MDLog_List::execute(optional_yield y) { } if (period.empty()) { - ldout(s->cct, 5) << "Missing period id trying to use current" << dendl; + ldpp_dout(this, 5) << "Missing period id trying to use current" << dendl; period = store->svc()->zone->get_current_period_id(); if (period.empty()) { - ldout(s->cct, 5) << "Missing period id" << dendl; + ldpp_dout(this, 5) << "Missing period id" << dendl; op_ret = -EINVAL; return; } @@ -86,7 +86,7 @@ void RGWOp_MDLog_List::execute(optional_yield y) { meta_log.init_list_entries(shard_id, {}, {}, marker, &handle); - op_ret = meta_log.list_entries(handle, max_entries, entries, + op_ret = meta_log.list_entries(this, handle, max_entries, entries, &last_marker, &truncated); meta_log.complete_list_entries(handle); @@ -119,7 +119,7 @@ void RGWOp_MDLog_List::send_response() { void RGWOp_MDLog_Info::execute(optional_yield y) { num_objects = s->cct->_conf->rgw_md_log_max_shards; - period = store->svc()->mdlog->read_oldest_log_period(y); + period = store->svc()->mdlog->read_oldest_log_period(y, s); op_ret = period.get_error(); } @@ -145,24 +145,24 @@ void RGWOp_MDLog_ShardInfo::execute(optional_yield y) { unsigned shard_id = (unsigned)strict_strtol(shard.c_str(), 10, &err); if (!err.empty()) { - dout(5) << "Error parsing shard_id " << shard << dendl; + ldpp_dout(this, 5) << "Error parsing shard_id " << shard << dendl; op_ret = -EINVAL; return; } if (period.empty()) { - ldout(s->cct, 5) << "Missing period id trying to use current" << dendl; + ldpp_dout(this, 5) << "Missing period id trying to use current" << dendl; period = store->svc()->zone->get_current_period_id(); if (period.empty()) { - ldout(s->cct, 5) << "Missing period id" << dendl; + ldpp_dout(this, 5) << "Missing period id" << dendl; op_ret = -EINVAL; return; } } RGWMetadataLog meta_log{s->cct, store->svc()->zone, store->svc()->cls, period}; - op_ret = meta_log.get_info(shard_id, &info); + op_ret = meta_log.get_info(this, shard_id, &info); } void RGWOp_MDLog_ShardInfo::send_response() { @@ -184,12 +184,12 @@ void RGWOp_MDLog_Delete::execute(optional_yield y) { if (s->info.args.exists("start-time") || s->info.args.exists("end-time")) { - dout(5) << "start-time and end-time are no longer accepted" << dendl; + ldpp_dout(this, 5) << "start-time and end-time are no longer accepted" << dendl; op_ret = -EINVAL; } if (s->info.args.exists("start-marker")) { - dout(5) << "start-marker is no longer accepted" << dendl; + ldpp_dout(this, 5) << "start-marker is no longer accepted" << dendl; op_ret = -EINVAL; } @@ -197,7 +197,7 @@ void RGWOp_MDLog_Delete::execute(optional_yield y) { if (!s->info.args.exists("marker")) { marker = s->info.args.get("end-marker"); } else { - dout(5) << "end-marker and marker cannot both be provided" << dendl; + ldpp_dout(this, 5) << "end-marker and marker cannot both be provided" << dendl; op_ret = -EINVAL; } } @@ -206,7 +206,7 @@ void RGWOp_MDLog_Delete::execute(optional_yield y) { shard_id = (unsigned)strict_strtol(shard.c_str(), 10, &err); if (!err.empty()) { - dout(5) << "Error parsing shard_id " << shard << dendl; + ldpp_dout(this, 5) << "Error parsing shard_id " << shard << dendl; op_ret = -EINVAL; return; } @@ -217,18 +217,18 @@ void RGWOp_MDLog_Delete::execute(optional_yield y) { } if 
(period.empty()) { - ldout(s->cct, 5) << "Missing period id trying to use current" << dendl; + ldpp_dout(this, 5) << "Missing period id trying to use current" << dendl; period = store->svc()->zone->get_current_period_id(); if (period.empty()) { - ldout(s->cct, 5) << "Missing period id" << dendl; + ldpp_dout(this, 5) << "Missing period id" << dendl; op_ret = -EINVAL; return; } } RGWMetadataLog meta_log{s->cct, store->svc()->zone, store->svc()->cls, period}; - op_ret = meta_log.trim(shard_id, {}, {}, {}, marker); + op_ret = meta_log.trim(this, shard_id, {}, {}, {}, marker); } void RGWOp_MDLog_Lock::execute(optional_yield y) { @@ -244,7 +244,7 @@ void RGWOp_MDLog_Lock::execute(optional_yield y) { zone_id = s->info.args.get("zone-id"); if (period.empty()) { - ldout(s->cct, 5) << "Missing period id trying to use current" << dendl; + ldpp_dout(this, 5) << "Missing period id trying to use current" << dendl; period = store->svc()->zone->get_current_period_id(); } @@ -253,7 +253,7 @@ void RGWOp_MDLog_Lock::execute(optional_yield y) { (duration_str.empty()) || locker_id.empty() || zone_id.empty()) { - dout(5) << "Error invalid parameter list" << dendl; + ldpp_dout(this, 5) << "Error invalid parameter list" << dendl; op_ret = -EINVAL; return; } @@ -261,7 +261,7 @@ void RGWOp_MDLog_Lock::execute(optional_yield y) { string err; shard_id = (unsigned)strict_strtol(shard_id_str.c_str(), 10, &err); if (!err.empty()) { - dout(5) << "Error parsing shard_id param " << shard_id_str << dendl; + ldpp_dout(this, 5) << "Error parsing shard_id param " << shard_id_str << dendl; op_ret = -EINVAL; return; } @@ -270,11 +270,11 @@ void RGWOp_MDLog_Lock::execute(optional_yield y) { unsigned dur; dur = (unsigned)strict_strtol(duration_str.c_str(), 10, &err); if (!err.empty() || dur <= 0) { - dout(5) << "invalid length param " << duration_str << dendl; + ldpp_dout(this, 5) << "invalid length param " << duration_str << dendl; op_ret = -EINVAL; return; } - op_ret = meta_log.lock_exclusive(shard_id, make_timespan(dur), zone_id, + op_ret = meta_log.lock_exclusive(s, shard_id, make_timespan(dur), zone_id, locker_id); if (op_ret == -EBUSY) op_ret = -ERR_LOCKED; @@ -292,7 +292,7 @@ void RGWOp_MDLog_Unlock::execute(optional_yield y) { zone_id = s->info.args.get("zone-id"); if (period.empty()) { - ldout(s->cct, 5) << "Missing period id trying to use current" << dendl; + ldpp_dout(this, 5) << "Missing period id trying to use current" << dendl; period = store->svc()->zone->get_current_period_id(); } @@ -300,7 +300,7 @@ void RGWOp_MDLog_Unlock::execute(optional_yield y) { shard_id_str.empty() || locker_id.empty() || zone_id.empty()) { - dout(5) << "Error invalid parameter list" << dendl; + ldpp_dout(this, 5) << "Error invalid parameter list" << dendl; op_ret = -EINVAL; return; } @@ -308,13 +308,13 @@ void RGWOp_MDLog_Unlock::execute(optional_yield y) { string err; shard_id = (unsigned)strict_strtol(shard_id_str.c_str(), 10, &err); if (!err.empty()) { - dout(5) << "Error parsing shard_id param " << shard_id_str << dendl; + ldpp_dout(this, 5) << "Error parsing shard_id param " << shard_id_str << dendl; op_ret = -EINVAL; return; } RGWMetadataLog meta_log{s->cct, store->svc()->zone, store->svc()->cls, period}; - op_ret = meta_log.unlock(shard_id, zone_id, locker_id); + op_ret = meta_log.unlock(s, shard_id, zone_id, locker_id); } void RGWOp_MDLog_Notify::execute(optional_yield y) { @@ -329,12 +329,12 @@ void RGWOp_MDLog_Notify::execute(optional_yield y) { } char* buf = data.c_str(); - ldout(s->cct, 20) << __func__ << "(): read data: " << 
buf << dendl; + ldpp_dout(this, 20) << __func__ << "(): read data: " << buf << dendl; JSONParser p; r = p.parse(buf, data.length()); if (r < 0) { - ldout(s->cct, 0) << "ERROR: failed to parse JSON" << dendl; + ldpp_dout(this, 0) << "ERROR: failed to parse JSON" << dendl; op_ret = r; return; } @@ -343,14 +343,14 @@ void RGWOp_MDLog_Notify::execute(optional_yield y) { try { decode_json_obj(updated_shards, &p); } catch (JSONDecoder::err& err) { - ldout(s->cct, 0) << "ERROR: failed to decode JSON" << dendl; + ldpp_dout(this, 0) << "ERROR: failed to decode JSON" << dendl; op_ret = -EINVAL; return; } if (store->ctx()->_conf->subsys.should_gather()) { for (set::iterator iter = updated_shards.begin(); iter != updated_shards.end(); ++iter) { - ldout(s->cct, 20) << __func__ << "(): updated shard=" << *iter << dendl; + ldpp_dout(this, 20) << __func__ << "(): updated shard=" << *iter << dendl; } } @@ -369,7 +369,7 @@ void RGWOp_BILog_List::execute(optional_yield y) { unsigned max_entries; if (bucket_name.empty() && bucket_instance.empty()) { - dout(5) << "ERROR: neither bucket nor bucket instance specified" << dendl; + ldpp_dout(this, 5) << "ERROR: neither bucket nor bucket instance specified" << dendl; op_ret = -EINVAL; return; } @@ -383,15 +383,15 @@ void RGWOp_BILog_List::execute(optional_yield y) { if (!bucket_instance.empty()) { rgw_bucket b(rgw_bucket_key(tenant_name, bn, bucket_instance)); - op_ret = store->getRados()->get_bucket_instance_info(*s->sysobj_ctx, b, bucket_info, NULL, NULL, s->yield); + op_ret = store->getRados()->get_bucket_instance_info(*s->sysobj_ctx, b, bucket_info, NULL, NULL, s->yield, this); if (op_ret < 0) { - ldpp_dout(s, 5) << "could not get bucket instance info for bucket instance id=" << bucket_instance << dendl; + ldpp_dout(this, 5) << "could not get bucket instance info for bucket instance id=" << bucket_instance << dendl; return; } } else { /* !bucket_name.empty() */ op_ret = store->getRados()->get_bucket_info(store->svc(), tenant_name, bucket_name, bucket_info, NULL, s->yield, NULL); if (op_ret < 0) { - ldpp_dout(s, 5) << "could not get bucket info for bucket=" << bucket_name << dendl; + ldpp_dout(this, 5) << "could not get bucket info for bucket=" << bucket_name << dendl; return; } } @@ -407,11 +407,11 @@ void RGWOp_BILog_List::execute(optional_yield y) { send_response(); do { list entries; - int ret = store->svc()->bilog_rados->log_list(bucket_info, shard_id, + int ret = store->svc()->bilog_rados->log_list(s, bucket_info, shard_id, marker, max_entries - count, entries, &truncated); if (ret < 0) { - ldpp_dout(s, 5) << "ERROR: list_bi_log_entries()" << dendl; + ldpp_dout(this, 5) << "ERROR: list_bi_log_entries()" << dendl; return; } @@ -462,7 +462,7 @@ void RGWOp_BILog_Info::execute(optional_yield y) { RGWBucketInfo bucket_info; if (bucket_name.empty() && bucket_instance.empty()) { - ldpp_dout(s, 5) << "ERROR: neither bucket nor bucket instance specified" << dendl; + ldpp_dout(this, 5) << "ERROR: neither bucket nor bucket instance specified" << dendl; op_ret = -EINVAL; return; } @@ -476,20 +476,20 @@ void RGWOp_BILog_Info::execute(optional_yield y) { if (!bucket_instance.empty()) { rgw_bucket b(rgw_bucket_key(tenant_name, bn, bucket_instance)); - op_ret = store->getRados()->get_bucket_instance_info(*s->sysobj_ctx, b, bucket_info, NULL, NULL, s->yield); + op_ret = store->getRados()->get_bucket_instance_info(*s->sysobj_ctx, b, bucket_info, NULL, NULL, s->yield, this); if (op_ret < 0) { - ldpp_dout(s, 5) << "could not get bucket instance info for bucket instance id=" 
<< bucket_instance << dendl; + ldpp_dout(this, 5) << "could not get bucket instance info for bucket instance id=" << bucket_instance << dendl; return; } } else { /* !bucket_name.empty() */ op_ret = store->getRados()->get_bucket_info(store->svc(), tenant_name, bucket_name, bucket_info, NULL, s->yield, NULL); if (op_ret < 0) { - ldpp_dout(s, 5) << "could not get bucket info for bucket=" << bucket_name << dendl; + ldpp_dout(this, 5) << "could not get bucket info for bucket=" << bucket_name << dendl; return; } } map stats; - int ret = store->getRados()->get_bucket_stats(bucket_info, shard_id, &bucket_ver, &master_ver, stats, &max_marker, &syncstopped); + int ret = store->getRados()->get_bucket_stats(s, bucket_info, shard_id, &bucket_ver, &master_ver, stats, &max_marker, &syncstopped); if (ret < 0 && ret != -ENOENT) { op_ret = ret; return; @@ -526,7 +526,7 @@ void RGWOp_BILog_Delete::execute(optional_yield y) { op_ret = 0; if ((bucket_name.empty() && bucket_instance.empty()) || end_marker.empty()) { - ldpp_dout(s, 5) << "ERROR: one of bucket and bucket instance, and also end-marker is mandatory" << dendl; + ldpp_dout(this, 5) << "ERROR: one of bucket and bucket instance, and also end-marker is mandatory" << dendl; op_ret = -EINVAL; return; } @@ -540,21 +540,21 @@ void RGWOp_BILog_Delete::execute(optional_yield y) { if (!bucket_instance.empty()) { rgw_bucket b(rgw_bucket_key(tenant_name, bn, bucket_instance)); - op_ret = store->getRados()->get_bucket_instance_info(*s->sysobj_ctx, b, bucket_info, NULL, NULL, s->yield); + op_ret = store->getRados()->get_bucket_instance_info(*s->sysobj_ctx, b, bucket_info, NULL, NULL, s->yield, this); if (op_ret < 0) { - ldpp_dout(s, 5) << "could not get bucket instance info for bucket instance id=" << bucket_instance << dendl; + ldpp_dout(this, 5) << "could not get bucket instance info for bucket instance id=" << bucket_instance << dendl; return; } } else { /* !bucket_name.empty() */ op_ret = store->getRados()->get_bucket_info(store->svc(), tenant_name, bucket_name, bucket_info, NULL, s->yield, NULL); if (op_ret < 0) { - ldpp_dout(s, 5) << "could not get bucket info for bucket=" << bucket_name << dendl; + ldpp_dout(this, 5) << "could not get bucket info for bucket=" << bucket_name << dendl; return; } } - op_ret = store->svc()->bilog_rados->log_trim(bucket_info, shard_id, start_marker, end_marker); + op_ret = store->svc()->bilog_rados->log_trim(s, bucket_info, shard_id, start_marker, end_marker); if (op_ret < 0) { - ldpp_dout(s, 5) << "ERROR: trim_bi_log_entries() " << dendl; + ldpp_dout(this, 5) << "ERROR: trim_bi_log_entries() " << dendl; } return; } @@ -569,7 +569,7 @@ void RGWOp_DATALog_List::execute(optional_yield y) { if (s->info.args.exists("start-time") || s->info.args.exists("end-time")) { - dout(5) << "start-time and end-time are no longer accepted" << dendl; + ldpp_dout(this, 5) << "start-time and end-time are no longer accepted" << dendl; op_ret = -EINVAL; } @@ -577,7 +577,7 @@ void RGWOp_DATALog_List::execute(optional_yield y) { shard_id = (unsigned)strict_strtol(shard.c_str(), 10, &err); if (!err.empty()) { - dout(5) << "Error parsing shard_id " << shard << dendl; + ldpp_dout(this, 5) << "Error parsing shard_id " << shard << dendl; op_ret = -EINVAL; return; } @@ -585,7 +585,7 @@ void RGWOp_DATALog_List::execute(optional_yield y) { if (!max_entries_str.empty()) { max_entries = (unsigned)strict_strtol(max_entries_str.c_str(), 10, &err); if (!err.empty()) { - dout(5) << "Error parsing max-entries " << max_entries_str << dendl; + ldpp_dout(this, 5) << 
"Error parsing max-entries " << max_entries_str << dendl; op_ret = -EINVAL; return; } @@ -596,7 +596,7 @@ void RGWOp_DATALog_List::execute(optional_yield y) { // Note that last_marker is updated to be the marker of the last // entry listed - op_ret = store->svc()->datalog_rados->list_entries(shard_id, + op_ret = store->svc()->datalog_rados->list_entries(this, shard_id, max_entries, entries, marker, &last_marker, &truncated); @@ -652,12 +652,12 @@ void RGWOp_DATALog_ShardInfo::execute(optional_yield y) { unsigned shard_id = (unsigned)strict_strtol(shard.c_str(), 10, &err); if (!err.empty()) { - dout(5) << "Error parsing shard_id " << shard << dendl; + ldpp_dout(this, 5) << "Error parsing shard_id " << shard << dendl; op_ret = -EINVAL; return; } - op_ret = store->svc()->datalog_rados->get_info(shard_id, &info); + op_ret = store->svc()->datalog_rados->get_info(this, shard_id, &info); } void RGWOp_DATALog_ShardInfo::send_response() { @@ -682,12 +682,12 @@ void RGWOp_DATALog_Notify::execute(optional_yield y) { } char* buf = data.c_str(); - ldout(s->cct, 20) << __func__ << "(): read data: " << buf << dendl; + ldpp_dout(this, 20) << __func__ << "(): read data: " << buf << dendl; JSONParser p; r = p.parse(buf, data.length()); if (r < 0) { - ldout(s->cct, 0) << "ERROR: failed to parse JSON" << dendl; + ldpp_dout(this, 0) << "ERROR: failed to parse JSON" << dendl; op_ret = r; return; } @@ -696,17 +696,17 @@ void RGWOp_DATALog_Notify::execute(optional_yield y) { try { decode_json_obj(updated_shards, &p); } catch (JSONDecoder::err& err) { - ldout(s->cct, 0) << "ERROR: failed to decode JSON" << dendl; + ldpp_dout(this, 0) << "ERROR: failed to decode JSON" << dendl; op_ret = -EINVAL; return; } if (store->ctx()->_conf->subsys.should_gather()) { for (map >::iterator iter = updated_shards.begin(); iter != updated_shards.end(); ++iter) { - ldout(s->cct, 20) << __func__ << "(): updated shard=" << iter->first << dendl; + ldpp_dout(this, 20) << __func__ << "(): updated shard=" << iter->first << dendl; set& keys = iter->second; for (set::iterator kiter = keys.begin(); kiter != keys.end(); ++kiter) { - ldout(s->cct, 20) << __func__ << "(): modified key=" << *kiter << dendl; + ldpp_dout(this, 20) << __func__ << "(): modified key=" << *kiter << dendl; } } } @@ -726,12 +726,12 @@ void RGWOp_DATALog_Delete::execute(optional_yield y) { if (s->info.args.exists("start-time") || s->info.args.exists("end-time")) { - dout(5) << "start-time and end-time are no longer accepted" << dendl; + ldpp_dout(this, 5) << "start-time and end-time are no longer accepted" << dendl; op_ret = -EINVAL; } if (s->info.args.exists("start-marker")) { - dout(5) << "start-marker is no longer accepted" << dendl; + ldpp_dout(this, 5) << "start-marker is no longer accepted" << dendl; op_ret = -EINVAL; } @@ -739,14 +739,14 @@ void RGWOp_DATALog_Delete::execute(optional_yield y) { if (!s->info.args.exists("marker")) { marker = s->info.args.get("end-marker"); } else { - dout(5) << "end-marker and marker cannot both be provided" << dendl; + ldpp_dout(this, 5) << "end-marker and marker cannot both be provided" << dendl; op_ret = -EINVAL; } } shard_id = (unsigned)strict_strtol(shard.c_str(), 10, &err); if (!err.empty()) { - dout(5) << "Error parsing shard_id " << shard << dendl; + ldpp_dout(this, 5) << "Error parsing shard_id " << shard << dendl; op_ret = -EINVAL; return; } @@ -755,7 +755,7 @@ void RGWOp_DATALog_Delete::execute(optional_yield y) { return; } - op_ret = store->svc()->datalog_rados->trim_entries(shard_id, marker); + op_ret = 
store->svc()->datalog_rados->trim_entries(this, shard_id, marker); } // not in header to avoid pulling in rgw_sync.h @@ -777,11 +777,11 @@ void RGWOp_MDLog_Status::execute(optional_yield y) { auto sync = store->getRados()->get_meta_sync_manager(); if (sync == nullptr) { - ldout(s->cct, 1) << "no sync manager" << dendl; + ldpp_dout(this, 1) << "no sync manager" << dendl; op_ret = -ENOENT; return; } - op_ret = sync->read_sync_status(&status); + op_ret = sync->read_sync_status(this, &status); } void RGWOp_MDLog_Status::send_response() @@ -822,7 +822,7 @@ void RGWOp_BILog_Status::execute(optional_yield y) key = source_key; } if (key.empty()) { - ldpp_dout(s, 4) << "no 'bucket' provided" << dendl; + ldpp_dout(this, 4) << "no 'bucket' provided" << dendl; op_ret = -EINVAL; return; } @@ -831,7 +831,7 @@ void RGWOp_BILog_Status::execute(optional_yield y) int shard_id{-1}; // unused op_ret = rgw_bucket_parse_bucket_key(s->cct, key, &bucket, &shard_id); if (op_ret < 0) { - ldpp_dout(s, 4) << "invalid 'bucket' provided" << dendl; + ldpp_dout(this, 4) << "invalid 'bucket' provided" << dendl; op_ret = -EINVAL; return; } @@ -839,9 +839,9 @@ void RGWOp_BILog_Status::execute(optional_yield y) // read the bucket instance info for num_shards auto ctx = store->svc()->sysobj->init_obj_ctx(); RGWBucketInfo info; - op_ret = store->getRados()->get_bucket_instance_info(ctx, bucket, info, nullptr, nullptr, s->yield); + op_ret = store->getRados()->get_bucket_instance_info(ctx, bucket, info, nullptr, nullptr, s->yield, this); if (op_ret < 0) { - ldpp_dout(s, 4) << "failed to read bucket info: " << cpp_strerror(op_ret) << dendl; + ldpp_dout(this, 4) << "failed to read bucket info: " << cpp_strerror(op_ret) << dendl; return; } @@ -853,7 +853,7 @@ void RGWOp_BILog_Status::execute(optional_yield y) } else { op_ret = rgw_bucket_parse_bucket_key(s->cct, source_key, &source_bucket, nullptr); if (op_ret < 0) { - ldpp_dout(s, 4) << "invalid 'source-bucket' provided (key=" << source_key << ")" << dendl; + ldpp_dout(this, 4) << "invalid 'source-bucket' provided (key=" << source_key << ")" << dendl; return; } } @@ -867,12 +867,12 @@ void RGWOp_BILog_Status::execute(optional_yield y) pipe.dest.zone = local_zone_id; pipe.dest.bucket = info.bucket; - ldout(s->cct, 20) << "RGWOp_BILog_Status::execute(optional_yield y): getting sync status for pipe=" << pipe << dendl; + ldpp_dout(this, 20) << "RGWOp_BILog_Status::execute(optional_yield y): getting sync status for pipe=" << pipe << dendl; op_ret = rgw_bucket_sync_status(this, store, pipe, info, nullptr, &status); if (op_ret < 0) { - lderr(s->cct) << "ERROR: rgw_bucket_sync_status() on pipe=" << pipe << " returned ret=" << op_ret << dendl; + ldpp_dout(this, -1) << "ERROR: rgw_bucket_sync_status() on pipe=" << pipe << " returned ret=" << op_ret << dendl; } return; } @@ -880,9 +880,9 @@ void RGWOp_BILog_Status::execute(optional_yield y) rgw_zone_id source_zone_id(source_zone); RGWBucketSyncPolicyHandlerRef source_handler; - op_ret = store->ctl()->bucket->get_sync_policy_handler(source_zone_id, source_bucket, &source_handler, y); + op_ret = store->ctl()->bucket->get_sync_policy_handler(source_zone_id, source_bucket, &source_handler, y, s); if (op_ret < 0) { - lderr(s->cct) << "could not get bucket sync policy handler (r=" << op_ret << ")" << dendl; + ldpp_dout(this, -1) << "could not get bucket sync policy handler (r=" << op_ret << ")" << dendl; return; } @@ -892,14 +892,14 @@ void RGWOp_BILog_Status::execute(optional_yield y) for (auto& entry : local_dests) { auto pipe = entry.second; - 
ldout(s->cct, 20) << "RGWOp_BILog_Status::execute(optional_yield y): getting sync status for pipe=" << pipe << dendl; + ldpp_dout(this, 20) << "RGWOp_BILog_Status::execute(optional_yield y): getting sync status for pipe=" << pipe << dendl; RGWBucketInfo *pinfo = &info; std::optional opt_dest_info; if (!pipe.dest.bucket) { /* Uh oh, something went wrong */ - ldout(s->cct, 20) << "ERROR: RGWOp_BILog_Status::execute(optional_yield y): BUG: pipe.dest.bucket was not initialized" << pipe << dendl; + ldpp_dout(this, 20) << "ERROR: RGWOp_BILog_Status::execute(optional_yield y): BUG: pipe.dest.bucket was not initialized" << pipe << dendl; op_ret = -EIO; return; } @@ -912,10 +912,11 @@ void RGWOp_BILog_Status::execute(optional_yield y) op_ret = store->ctl()->bucket->read_bucket_info(*pipe.dest.bucket, pinfo, s->yield, + s, RGWBucketCtl::BucketInstance::GetParams(), nullptr); if (op_ret < 0) { - ldpp_dout(s, 4) << "failed to read target bucket info (bucket=: " << cpp_strerror(op_ret) << dendl; + ldpp_dout(this, 4) << "failed to read target bucket info (bucket=: " << cpp_strerror(op_ret) << dendl; return; } @@ -924,7 +925,7 @@ void RGWOp_BILog_Status::execute(optional_yield y) int r = rgw_bucket_sync_status(this, store, pipe, *pinfo, &info, ¤t_status); if (r < 0) { - lderr(s->cct) << "ERROR: rgw_bucket_sync_status() on pipe=" << pipe << " returned ret=" << r << dendl; + ldpp_dout(this, -1) << "ERROR: rgw_bucket_sync_status() on pipe=" << pipe << " returned ret=" << r << dendl; op_ret = r; return; } @@ -935,7 +936,7 @@ void RGWOp_BILog_Status::execute(optional_yield y) if (current_status.size() != status.size()) { op_ret = -EINVAL; - lderr(s->cct) << "ERROR: different number of shards for sync status of buckets syncing from the same source: status.size()= " << status.size() << " current_status.size()=" << current_status.size() << dendl; + ldpp_dout(this, -1) << "ERROR: different number of shards for sync status of buckets syncing from the same source: status.size()= " << status.size() << " current_status.size()=" << current_status.size() << dendl; return; } auto m = status.begin(); @@ -982,11 +983,11 @@ void RGWOp_DATALog_Status::execute(optional_yield y) const auto source_zone = s->info.args.get("source-zone"); auto sync = store->getRados()->get_data_sync_manager(source_zone); if (sync == nullptr) { - ldout(s->cct, 1) << "no sync manager for source-zone " << source_zone << dendl; + ldpp_dout(this, 1) << "no sync manager for source-zone " << source_zone << dendl; op_ret = -ENOENT; return; } - op_ret = sync->read_sync_status(&status); + op_ret = sync->read_sync_status(this, &status); } void RGWOp_DATALog_Status::send_response() diff --git a/src/rgw/rgw_rest_metadata.cc b/src/rgw/rgw_rest_metadata.cc index 795382dbc91d8..5374729314932 100644 --- a/src/rgw/rgw_rest_metadata.cc +++ b/src/rgw/rgw_rest_metadata.cc @@ -57,9 +57,9 @@ void RGWOp_Metadata_Get::execute(optional_yield y) { auto meta_mgr = store->ctl()->meta.mgr; /* Get keys */ - op_ret = meta_mgr->get(metadata_key, s->formatter, s->yield); + op_ret = meta_mgr->get(metadata_key, s->formatter, s->yield, s); if (op_ret < 0) { - dout(5) << "ERROR: can't get key: " << cpp_strerror(op_ret) << dendl; + ldpp_dout(s, 5) << "ERROR: can't get key: " << cpp_strerror(op_ret) << dendl; return; } @@ -77,7 +77,7 @@ void RGWOp_Metadata_Get_Myself::execute(optional_yield y) { void RGWOp_Metadata_List::execute(optional_yield y) { string marker; - ldout(s->cct, 16) << __func__ + ldpp_dout(this, 16) << __func__ << " raw marker " << s->info.args.get("marker") << dendl; 
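// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the patch itself): the conversion pattern
// applied throughout the hunks above and below. A DoutPrefixProvider pointer
// is threaded through the call chain, and ldout/dout/lderr calls become
// ldpp_dout so every log line carries the op/request prefix. The class and
// function names below (SampleClient, fetch_info, do_fetch) are hypothetical.

#include "common/dout.h"           // DoutPrefixProvider, ldpp_dout, dendl

struct SampleClient {
  // after the change: the caller's prefix provider is passed in and used
  // for logging instead of a bare CephContext
  int fetch_info(const DoutPrefixProvider *dpp) {
    int r = do_fetch(dpp);
    if (r < 0) {
      // was: ldout(cct, 5) << ... << dendl;  (no per-op prefix)
      ldpp_dout(dpp, 5) << "fetch_info failed, r=" << r << dendl;
    }
    return r;
  }

  int do_fetch(const DoutPrefixProvider *dpp) {
    ldpp_dout(dpp, 20) << "do_fetch called" << dendl;
    return 0;
  }
};

// RGWOp-derived handlers are themselves DoutPrefixProviders, which is why the
// hunks in this file pass `this` (or the req_state `s`) as the provider:
//   ldpp_dout(this, 5) << "Error parsing max-entries " << max_entries_str << dendl;
// ----------------------------------------------------------------------------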
@@ -86,7 +86,7 @@ void RGWOp_Metadata_List::execute(optional_yield y) { if (!marker.empty()) { marker = rgw::from_base64(marker); } - ldout(s->cct, 16) << __func__ + ldpp_dout(this, 16) << __func__ << " marker " << marker << dendl; } catch (...) { marker = std::string(""); @@ -104,7 +104,7 @@ void RGWOp_Metadata_List::execute(optional_yield y) { string err; max_entries = (unsigned)strict_strtol(max_entries_str.c_str(), 10, &err); if (!err.empty()) { - dout(5) << "Error parsing max-entries " << max_entries_str << dendl; + ldpp_dout(this, 5) << "Error parsing max-entries " << max_entries_str << dendl; op_ret = -EINVAL; return; } @@ -123,9 +123,9 @@ void RGWOp_Metadata_List::execute(optional_yield y) { marker = "3:bf885d8f:root::sorry_janefonda_665:head"; */ - op_ret = store->ctl()->meta.mgr->list_keys_init(metadata_key, marker, &handle); + op_ret = store->ctl()->meta.mgr->list_keys_init(s, metadata_key, marker, &handle); if (op_ret < 0) { - dout(5) << "ERROR: can't get key: " << cpp_strerror(op_ret) << dendl; + ldpp_dout(this, 5) << "ERROR: can't get key: " << cpp_strerror(op_ret) << dendl; return; } @@ -146,7 +146,7 @@ void RGWOp_Metadata_List::execute(optional_yield y) { left = (max_entries_specified ? max_entries - count : max); op_ret = meta_mgr->list_keys_next(handle, left, keys, &truncated); if (op_ret < 0) { - dout(5) << "ERROR: lists_keys_next(): " << cpp_strerror(op_ret) + ldpp_dout(this, 5) << "ERROR: lists_keys_next(): " << cpp_strerror(op_ret) << dendl; return; } @@ -190,7 +190,7 @@ int RGWOp_Metadata_Put::get_data(bufferlist& bl) { } read_len = recv_body(s, data, cl); if (cl != (size_t)read_len) { - dout(10) << "recv_body incomplete" << dendl; + ldpp_dout(this, 10) << "recv_body incomplete" << dendl; } if (read_len < 0) { free(data); @@ -263,10 +263,10 @@ void RGWOp_Metadata_Put::execute(optional_yield y) { } } - op_ret = store->ctl()->meta.mgr->put(metadata_key, bl, s->yield, sync_type, + op_ret = store->ctl()->meta.mgr->put(metadata_key, bl, s->yield, s, sync_type, false, &ondisk_version); if (op_ret < 0) { - dout(5) << "ERROR: can't put key: " << cpp_strerror(op_ret) << dendl; + ldpp_dout(s, 5) << "ERROR: can't put key: " << cpp_strerror(op_ret) << dendl; return; } // translate internal codes into return header @@ -294,9 +294,9 @@ void RGWOp_Metadata_Delete::execute(optional_yield y) { string metadata_key; frame_metadata_key(s, metadata_key); - op_ret = store->ctl()->meta.mgr->remove(metadata_key, s->yield); + op_ret = store->ctl()->meta.mgr->remove(metadata_key, s->yield, s); if (op_ret < 0) { - dout(5) << "ERROR: can't remove key: " << cpp_strerror(op_ret) << dendl; + ldpp_dout(s, 5) << "ERROR: can't remove key: " << cpp_strerror(op_ret) << dendl; return; } op_ret = 0; diff --git a/src/rgw/rgw_rest_oidc_provider.cc b/src/rgw/rgw_rest_oidc_provider.cc index 6e619033f7607..d9fb66b63f3da 100644 --- a/src/rgw/rgw_rest_oidc_provider.cc +++ b/src/rgw/rgw_rest_oidc_provider.cc @@ -28,7 +28,7 @@ int RGWRestOIDCProvider::verify_permission(optional_yield y) provider_arn = s->info.args.get("OpenIDConnectProviderArn"); if (provider_arn.empty()) { - ldout(s->cct, 20) << "ERROR: Provider ARN is empty"<< dendl; + ldpp_dout(this, 20) << "ERROR: Provider ARN is empty"<< dendl; return -EINVAL; } @@ -107,7 +107,7 @@ int RGWCreateOIDCProvider::get_params() } if (provider_url.empty() || thumbprints.empty()) { - ldout(s->cct, 20) << "ERROR: one of url or thumbprints is empty" << dendl; + ldpp_dout(this, 20) << "ERROR: one of url or thumbprints is empty" << dendl; return -EINVAL; } @@ -123,7 
+123,7 @@ void RGWCreateOIDCProvider::execute(optional_yield y) RGWOIDCProvider provider(s->cct, store->getRados()->pctl, provider_url, s->user->get_tenant(), client_ids, thumbprints); - op_ret = provider.create(true, y); + op_ret = provider.create(s, true, y); if (op_ret == 0) { s->formatter->open_object_section("CreateOpenIDConnectProviderResponse"); @@ -141,7 +141,7 @@ void RGWCreateOIDCProvider::execute(optional_yield y) void RGWDeleteOIDCProvider::execute(optional_yield y) { RGWOIDCProvider provider(s->cct, store->getRados()->pctl, provider_arn, s->user->get_tenant()); - op_ret = provider.delete_obj(y); + op_ret = provider.delete_obj(s, y); if (op_ret < 0 && op_ret != -ENOENT && op_ret != -EINVAL) { op_ret = ERR_INTERNAL_ERROR; @@ -159,7 +159,7 @@ void RGWDeleteOIDCProvider::execute(optional_yield y) void RGWGetOIDCProvider::execute(optional_yield y) { RGWOIDCProvider provider(s->cct, store->getRados()->pctl, provider_arn, s->user->get_tenant()); - op_ret = provider.get(); + op_ret = provider.get(s); if (op_ret < 0 && op_ret != -ENOENT && op_ret != -EINVAL) { op_ret = ERR_INTERNAL_ERROR; @@ -200,7 +200,7 @@ int RGWListOIDCProviders::verify_permission(optional_yield y) void RGWListOIDCProviders::execute(optional_yield y) { vector result; - op_ret = RGWOIDCProvider::get_providers(store->getRados(), s->user->get_tenant(), result); + op_ret = RGWOIDCProvider::get_providers(s, store->getRados(), s->user->get_tenant(), result); if (op_ret == 0) { s->formatter->open_array_section("ListOpenIDConnectProvidersResponse"); @@ -212,7 +212,7 @@ void RGWListOIDCProviders::execute(optional_yield y) for (const auto& it : result) { s->formatter->open_object_section("Arn"); auto& arn = it.get_arn(); - ldout(s->cct, 0) << "ARN: " << arn << dendl; + ldpp_dout(s, 0) << "ARN: " << arn << dendl; s->formatter->dump_string("Arn", arn); s->formatter->close_section(); } diff --git a/src/rgw/rgw_rest_pubsub.cc b/src/rgw/rgw_rest_pubsub.cc index 17a75be0674a4..272d98e62d056 100644 --- a/src/rgw/rgw_rest_pubsub.cc +++ b/src/rgw/rgw_rest_pubsub.cc @@ -31,7 +31,7 @@ public: int get_params() override { topic_name = s->info.args.get("Name"); if (topic_name.empty()) { - ldout(s->cct, 1) << "CreateTopic Action 'Name' argument is missing" << dendl; + ldpp_dout(this, 1) << "CreateTopic Action 'Name' argument is missing" << dendl; return -EINVAL; } @@ -57,7 +57,7 @@ public: if (!dest.push_endpoint.empty() && dest.persistent) { const auto ret = rgw::notify::add_persistent_topic(topic_name, s->yield); if (ret < 0) { - ldout(s->cct, 1) << "CreateTopic Action failed to create queue for persistent topics. error:" << ret << dendl; + ldpp_dout(this, 1) << "CreateTopic Action failed to create queue for persistent topics. 
error:" << ret << dendl; return ret; } } @@ -137,7 +137,7 @@ public: const auto topic_arn = rgw::ARN::parse((s->info.args.get("TopicArn"))); if (!topic_arn || topic_arn->resource.empty()) { - ldout(s->cct, 1) << "GetTopic Action 'TopicArn' argument is missing or invalid" << dendl; + ldpp_dout(this, 1) << "GetTopic Action 'TopicArn' argument is missing or invalid" << dendl; return -EINVAL; } @@ -178,7 +178,7 @@ public: const auto topic_arn = rgw::ARN::parse((s->info.args.get("TopicArn"))); if (!topic_arn || topic_arn->resource.empty()) { - ldout(s->cct, 1) << "GetTopicAttribute Action 'TopicArn' argument is missing or invalid" << dendl; + ldpp_dout(this, 1) << "GetTopicAttribute Action 'TopicArn' argument is missing or invalid" << dendl; return -EINVAL; } @@ -219,7 +219,7 @@ public: const auto topic_arn = rgw::ARN::parse((s->info.args.get("TopicArn"))); if (!topic_arn || topic_arn->resource.empty()) { - ldout(s->cct, 1) << "DeleteTopic Action 'TopicArn' argument is missing or invalid" << dendl; + ldpp_dout(this, 1) << "DeleteTopic Action 'TopicArn' argument is missing or invalid" << dendl; return -EINVAL; } @@ -233,7 +233,7 @@ public: return 0; } if (ret < 0) { - ldout(s->cct, 1) << "DeleteTopic Action failed to remove queue for persistent topics. error:" << ret << dendl; + ldpp_dout(this, 1) << "DeleteTopic Action failed to remove queue for persistent topics. error:" << ret << dendl; return ret; } @@ -343,7 +343,7 @@ void update_attribute_map(const std::string& input, AttributeMap& map) { void RGWHandler_REST_PSTopic_AWS::rgw_topic_parse_input() { if (post_body.size() > 0) { - ldout(s->cct, 10) << "Content of POST: " << post_body << dendl; + ldpp_dout(s, 10) << "Content of POST: " << post_body << dendl; if (post_body.find("Action") != string::npos) { const boost::char_separator sep("&"); @@ -434,29 +434,29 @@ class RGWPSCreateNotif_ObjStore_S3 : public RGWPSCreateNotifOp { std::tie(r, data) = rgw_rest_read_all_input(s, max_size, false); if (r < 0) { - ldout(s->cct, 1) << "failed to read XML payload" << dendl; + ldpp_dout(this, 1) << "failed to read XML payload" << dendl; return r; } if (data.length() == 0) { - ldout(s->cct, 1) << "XML payload missing" << dendl; + ldpp_dout(this, 1) << "XML payload missing" << dendl; return -EINVAL; } RGWXMLDecoder::XMLParser parser; if (!parser.init()){ - ldout(s->cct, 1) << "failed to initialize XML parser" << dendl; + ldpp_dout(this, 1) << "failed to initialize XML parser" << dendl; return -EINVAL; } if (!parser.parse(data.c_str(), data.length(), 1)) { - ldout(s->cct, 1) << "failed to parse XML payload" << dendl; + ldpp_dout(this, 1) << "failed to parse XML payload" << dendl; return -ERR_MALFORMED_XML; } try { // NotificationConfigurations is mandatory RGWXMLDecoder::decode_xml("NotificationConfiguration", configurations, &parser, true); } catch (RGWXMLDecoder::err& err) { - ldout(s->cct, 1) << "failed to parse XML payload. error: " << err << dendl; + ldpp_dout(this, 1) << "failed to parse XML payload. 
error: " << err << dendl; return -ERR_MALFORMED_XML; } return 0; @@ -466,15 +466,15 @@ class RGWPSCreateNotif_ObjStore_S3 : public RGWPSCreateNotifOp { bool exists; const auto no_value = s->info.args.get("notification", &exists); if (!exists) { - ldout(s->cct, 1) << "missing required param 'notification'" << dendl; + ldpp_dout(this, 1) << "missing required param 'notification'" << dendl; return -EINVAL; } if (no_value.length() > 0) { - ldout(s->cct, 1) << "param 'notification' should not have any value" << dendl; + ldpp_dout(this, 1) << "param 'notification' should not have any value" << dendl; return -EINVAL; } if (s->bucket_name.empty()) { - ldout(s->cct, 1) << "request must be on a bucket" << dendl; + ldpp_dout(this, 1) << "request must be on a bucket" << dendl; return -EINVAL; } bucket_name = s->bucket_name; @@ -512,25 +512,25 @@ void RGWPSCreateNotif_ObjStore_S3::execute(optional_yield y) { for (const auto& c : configurations.list) { const auto& notif_name = c.id; if (notif_name.empty()) { - ldout(s->cct, 1) << "missing notification id" << dendl; + ldpp_dout(this, 1) << "missing notification id" << dendl; op_ret = -EINVAL; return; } if (c.topic_arn.empty()) { - ldout(s->cct, 1) << "missing topic ARN in notification: '" << notif_name << "'" << dendl; + ldpp_dout(this, 1) << "missing topic ARN in notification: '" << notif_name << "'" << dendl; op_ret = -EINVAL; return; } const auto arn = rgw::ARN::parse(c.topic_arn); if (!arn || arn->resource.empty()) { - ldout(s->cct, 1) << "topic ARN has invalid format: '" << c.topic_arn << "' in notification: '" << notif_name << "'" << dendl; + ldpp_dout(this, 1) << "topic ARN has invalid format: '" << c.topic_arn << "' in notification: '" << notif_name << "'" << dendl; op_ret = -EINVAL; return; } if (std::find(c.events.begin(), c.events.end(), rgw::notify::UnknownEvent) != c.events.end()) { - ldout(s->cct, 1) << "unknown event type in notification: '" << notif_name << "'" << dendl; + ldpp_dout(this, 1) << "unknown event type in notification: '" << notif_name << "'" << dendl; op_ret = -EINVAL; return; } @@ -541,7 +541,7 @@ void RGWPSCreateNotif_ObjStore_S3::execute(optional_yield y) { rgw_pubsub_topic topic_info; op_ret = ps->get_topic(topic_name, &topic_info); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to get topic '" << topic_name << "', ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to get topic '" << topic_name << "', ret=" << op_ret << dendl; return; } // make sure that full topic configuration match @@ -554,24 +554,24 @@ void RGWPSCreateNotif_ObjStore_S3::execute(optional_yield y) { // generate the internal topic. 
destination is stored here for the "push-only" case // when no subscription exists // ARN is cached to make the "GET" method faster - op_ret = ps->create_topic(unique_topic_name, topic_info.dest, topic_info.arn, topic_info.opaque_data, y); + op_ret = ps->create_topic(this, unique_topic_name, topic_info.dest, topic_info.arn, topic_info.opaque_data, y); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to auto-generate unique topic '" << unique_topic_name << + ldpp_dout(this, 1) << "failed to auto-generate unique topic '" << unique_topic_name << "', ret=" << op_ret << dendl; return; } - ldout(s->cct, 20) << "successfully auto-generated unique topic '" << unique_topic_name << "'" << dendl; + ldpp_dout(this, 20) << "successfully auto-generated unique topic '" << unique_topic_name << "'" << dendl; // generate the notification rgw::notify::EventTypeList events; - op_ret = b->create_notification(unique_topic_name, c.events, std::make_optional(c.filter), notif_name, y); + op_ret = b->create_notification(this, unique_topic_name, c.events, std::make_optional(c.filter), notif_name, y); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to auto-generate notification for unique topic '" << unique_topic_name << + ldpp_dout(this, 1) << "failed to auto-generate notification for unique topic '" << unique_topic_name << "', ret=" << op_ret << dendl; // rollback generated topic (ignore return value) - ps->remove_topic(unique_topic_name, y); + ps->remove_topic(this, unique_topic_name, y); return; } - ldout(s->cct, 20) << "successfully auto-generated notification for unique topic '" << unique_topic_name << "'" << dendl; + ldpp_dout(this, 20) << "successfully auto-generated notification for unique topic '" << unique_topic_name << "'" << dendl; if (!push_only) { // generate the subscription with destination information from the original topic @@ -579,16 +579,16 @@ void RGWPSCreateNotif_ObjStore_S3::execute(optional_yield y) { dest.bucket_name = data_bucket_prefix + s->owner.get_id().to_str() + "-" + unique_topic_name; dest.oid_prefix = data_oid_prefix + notif_name + "/"; auto sub = ps->get_sub(notif_name); - op_ret = sub->subscribe(unique_topic_name, dest, y, notif_name); + op_ret = sub->subscribe(this, unique_topic_name, dest, y, notif_name); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to auto-generate subscription '" << notif_name << "', ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to auto-generate subscription '" << notif_name << "', ret=" << op_ret << dendl; // rollback generated notification (ignore return value) - b->remove_notification(unique_topic_name, y); + b->remove_notification(this, unique_topic_name, y); // rollback generated topic (ignore return value) - ps->remove_topic(unique_topic_name, y); + ps->remove_topic(this, unique_topic_name, y); return; } - ldout(s->cct, 20) << "successfully auto-generated subscription '" << notif_name << "'" << dendl; + ldpp_dout(this, 20) << "successfully auto-generated subscription '" << notif_name << "'" << dendl; } } } @@ -602,11 +602,11 @@ private: bool exists; notif_name = s->info.args.get("notification", &exists); if (!exists) { - ldout(s->cct, 1) << "missing required param 'notification'" << dendl; + ldpp_dout(this, 1) << "missing required param 'notification'" << dendl; return -EINVAL; } if (s->bucket_name.empty()) { - ldout(s->cct, 1) << "request must be on a bucket" << dendl; + ldpp_dout(this, 1) << "request must be on a bucket" << dendl; return -EINVAL; } bucket_name = s->bucket_name; @@ -614,13 +614,13 @@ private: } void 
remove_notification_by_topic(const std::string& topic_name, const RGWPubSub::BucketRef& b, optional_yield y) { - op_ret = b->remove_notification(topic_name, y); + op_ret = b->remove_notification(this, topic_name, y); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to remove notification of topic '" << topic_name << "', ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to remove notification of topic '" << topic_name << "', ret=" << op_ret << dendl; } - op_ret = ps->remove_topic(topic_name, y); + op_ret = ps->remove_topic(this, topic_name, y); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to remove auto-generated topic '" << topic_name << "', ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to remove auto-generated topic '" << topic_name << "', ret=" << op_ret << dendl; } } @@ -643,7 +643,7 @@ void RGWPSDeleteNotif_ObjStore_S3::execute(optional_yield y) { rgw_pubsub_bucket_topics bucket_topics; op_ret = b->get_topics(&bucket_topics); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to get list of topics from bucket '" << bucket_info.bucket.name << "', ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to get list of topics from bucket '" << bucket_info.bucket.name << "', ret=" << op_ret << dendl; return; } @@ -654,16 +654,16 @@ void RGWPSDeleteNotif_ObjStore_S3::execute(optional_yield y) { // remove the auto generated subscription according to notification name (if exist) const auto unique_topic_name = unique_topic->get().topic.name; auto sub = ps->get_sub(notif_name); - op_ret = sub->unsubscribe(unique_topic_name, y); + op_ret = sub->unsubscribe(this, unique_topic_name, y); if (op_ret < 0 && op_ret != -ENOENT) { - ldout(s->cct, 1) << "failed to remove auto-generated subscription '" << notif_name << "', ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to remove auto-generated subscription '" << notif_name << "', ret=" << op_ret << dendl; return; } remove_notification_by_topic(unique_topic_name, b, y); return; } // notification to be removed is not found - considered success - ldout(s->cct, 20) << "notification '" << notif_name << "' already removed" << dendl; + ldpp_dout(this, 20) << "notification '" << notif_name << "' already removed" << dendl; return; } @@ -677,15 +677,15 @@ void RGWPSDeleteNotif_ObjStore_S3::execute(optional_yield y) { rgw_pubsub_sub_config sub_conf; op_ret = sub->get_conf(&sub_conf); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to get subscription '" << topic_sub_name << "' info, ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to get subscription '" << topic_sub_name << "' info, ret=" << op_ret << dendl; return; } if (!sub_conf.s3_id.empty()) { // S3 notification, has autogenerated subscription const auto& sub_topic_name = sub_conf.topic; - op_ret = sub->unsubscribe(sub_topic_name, y); + op_ret = sub->unsubscribe(this, sub_topic_name, y); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to remove auto-generated subscription '" << topic_sub_name << "', ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to remove auto-generated subscription '" << topic_sub_name << "', ret=" << op_ret << dendl; return; } } @@ -704,11 +704,11 @@ private: bool exists; notif_name = s->info.args.get("notification", &exists); if (!exists) { - ldout(s->cct, 1) << "missing required param 'notification'" << dendl; + ldpp_dout(this, 1) << "missing required param 'notification'" << dendl; return -EINVAL; } if (s->bucket_name.empty()) { - ldout(s->cct, 1) << "request must be on a bucket" << dendl; + ldpp_dout(this, 1) << "request must be on a 
bucket" << dendl; return -EINVAL; } bucket_name = s->bucket_name; @@ -742,7 +742,7 @@ void RGWPSListNotifs_ObjStore_S3::execute(optional_yield y) { rgw_pubsub_bucket_topics bucket_topics; op_ret = b->get_topics(&bucket_topics); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to get list of topics from bucket '" << bucket_info.bucket.name << "', ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to get list of topics from bucket '" << bucket_info.bucket.name << "', ret=" << op_ret << dendl; return; } if (!notif_name.empty()) { @@ -753,7 +753,7 @@ void RGWPSListNotifs_ObjStore_S3::execute(optional_yield y) { return; } op_ret = -ENOENT; - ldout(s->cct, 1) << "failed to get notification info for '" << notif_name << "', ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to get notification info for '" << notif_name << "', ret=" << op_ret << dendl; return; } // loop through all topics of the bucket diff --git a/src/rgw/rgw_rest_pubsub_common.cc b/src/rgw/rgw_rest_pubsub_common.cc index 1d1311d2b9292..710dbbb671ae6 100644 --- a/src/rgw/rgw_rest_pubsub_common.cc +++ b/src/rgw/rgw_rest_pubsub_common.cc @@ -54,12 +54,12 @@ void RGWPSCreateTopicOp::execute(optional_yield y) { } ps.emplace(store, s->owner.get_id().tenant); - op_ret = ps->create_topic(topic_name, dest, topic_arn, opaque_data, y); + op_ret = ps->create_topic(this, topic_name, dest, topic_arn, opaque_data, y); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to create topic '" << topic_name << "', ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to create topic '" << topic_name << "', ret=" << op_ret << dendl; return; } - ldout(s->cct, 20) << "successfully created topic '" << topic_name << "'" << dendl; + ldpp_dout(this, 20) << "successfully created topic '" << topic_name << "'" << dendl; } void RGWPSListTopicsOp::execute(optional_yield y) { @@ -68,15 +68,15 @@ void RGWPSListTopicsOp::execute(optional_yield y) { // if there are no topics it is not considered an error op_ret = op_ret == -ENOENT ? 
0 : op_ret; if (op_ret < 0) { - ldout(s->cct, 1) << "failed to get topics, ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to get topics, ret=" << op_ret << dendl; return; } if (topics_has_endpoint_secret(result) && !rgw_transport_is_secure(s->cct, *(s->info.env))) { - ldout(s->cct, 1) << "topics contain secret and cannot be sent over insecure transport" << dendl; + ldpp_dout(this, 1) << "topics contain secret and cannot be sent over insecure transport" << dendl; op_ret = -EPERM; return; } - ldout(s->cct, 20) << "successfully got topics" << dendl; + ldpp_dout(this, 20) << "successfully got topics" << dendl; } void RGWPSGetTopicOp::execute(optional_yield y) { @@ -87,15 +87,15 @@ void RGWPSGetTopicOp::execute(optional_yield y) { ps.emplace(store, s->owner.get_id().tenant); op_ret = ps->get_topic(topic_name, &result); if (topic_has_endpoint_secret(result) && !rgw_transport_is_secure(s->cct, *(s->info.env))) { - ldout(s->cct, 1) << "topic '" << topic_name << "' contain secret and cannot be sent over insecure transport" << dendl; + ldpp_dout(this, 1) << "topic '" << topic_name << "' contain secret and cannot be sent over insecure transport" << dendl; op_ret = -EPERM; return; } if (op_ret < 0) { - ldout(s->cct, 1) << "failed to get topic '" << topic_name << "', ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to get topic '" << topic_name << "', ret=" << op_ret << dendl; return; } - ldout(s->cct, 1) << "successfully got topic '" << topic_name << "'" << dendl; + ldpp_dout(this, 1) << "successfully got topic '" << topic_name << "'" << dendl; } void RGWPSDeleteTopicOp::execute(optional_yield y) { @@ -104,12 +104,12 @@ void RGWPSDeleteTopicOp::execute(optional_yield y) { return; } ps.emplace(store, s->owner.get_id().tenant); - op_ret = ps->remove_topic(topic_name, y); + op_ret = ps->remove_topic(this, topic_name, y); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to remove topic '" << topic_name << ", ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to remove topic '" << topic_name << ", ret=" << op_ret << dendl; return; } - ldout(s->cct, 1) << "successfully removed topic '" << topic_name << "'" << dendl; + ldpp_dout(this, 1) << "successfully removed topic '" << topic_name << "'" << dendl; } void RGWPSCreateSubOp::execute(optional_yield y) { @@ -119,12 +119,12 @@ void RGWPSCreateSubOp::execute(optional_yield y) { } ps.emplace(store, s->owner.get_id().tenant); auto sub = ps->get_sub(sub_name); - op_ret = sub->subscribe(topic_name, dest, y); + op_ret = sub->subscribe(this, topic_name, dest, y); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to create subscription '" << sub_name << "', ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to create subscription '" << sub_name << "', ret=" << op_ret << dendl; return; } - ldout(s->cct, 20) << "successfully created subscription '" << sub_name << "'" << dendl; + ldpp_dout(this, 20) << "successfully created subscription '" << sub_name << "'" << dendl; } void RGWPSGetSubOp::execute(optional_yield y) { @@ -136,15 +136,15 @@ void RGWPSGetSubOp::execute(optional_yield y) { auto sub = ps->get_sub(sub_name); op_ret = sub->get_conf(&result); if (subscription_has_endpoint_secret(result) && !rgw_transport_is_secure(s->cct, *(s->info.env))) { - ldout(s->cct, 1) << "subscription '" << sub_name << "' contain secret and cannot be sent over insecure transport" << dendl; + ldpp_dout(this, 1) << "subscription '" << sub_name << "' contain secret and cannot be sent over insecure transport" << dendl; op_ret = -EPERM; return; } if (op_ret < 0) 
{ - ldout(s->cct, 1) << "failed to get subscription '" << sub_name << "', ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to get subscription '" << sub_name << "', ret=" << op_ret << dendl; return; } - ldout(s->cct, 20) << "successfully got subscription '" << sub_name << "'" << dendl; + ldpp_dout(this, 20) << "successfully got subscription '" << sub_name << "'" << dendl; } void RGWPSDeleteSubOp::execute(optional_yield y) { @@ -154,12 +154,12 @@ void RGWPSDeleteSubOp::execute(optional_yield y) { } ps.emplace(store, s->owner.get_id().tenant); auto sub = ps->get_sub(sub_name); - op_ret = sub->unsubscribe(topic_name, y); + op_ret = sub->unsubscribe(this, topic_name, y); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to remove subscription '" << sub_name << "', ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to remove subscription '" << sub_name << "', ret=" << op_ret << dendl; return; } - ldout(s->cct, 20) << "successfully removed subscription '" << sub_name << "'" << dendl; + ldpp_dout(this, 20) << "successfully removed subscription '" << sub_name << "'" << dendl; } void RGWPSAckSubEventOp::execute(optional_yield y) { @@ -169,12 +169,12 @@ void RGWPSAckSubEventOp::execute(optional_yield y) { } ps.emplace(store, s->owner.get_id().tenant); auto sub = ps->get_sub_with_events(sub_name); - op_ret = sub->remove_event(event_id); + op_ret = sub->remove_event(s, event_id); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to ack event on subscription '" << sub_name << "', ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to ack event on subscription '" << sub_name << "', ret=" << op_ret << dendl; return; } - ldout(s->cct, 20) << "successfully acked event on subscription '" << sub_name << "'" << dendl; + ldpp_dout(this, 20) << "successfully acked event on subscription '" << sub_name << "'" << dendl; } void RGWPSPullSubEventsOp::execute(optional_yield y) { @@ -186,15 +186,15 @@ void RGWPSPullSubEventsOp::execute(optional_yield y) { sub = ps->get_sub_with_events(sub_name); if (!sub) { op_ret = -ENOENT; - ldout(s->cct, 1) << "failed to get subscription '" << sub_name << "' for events, ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to get subscription '" << sub_name << "' for events, ret=" << op_ret << dendl; return; } - op_ret = sub->list_events(marker, max_entries); + op_ret = sub->list_events(s, marker, max_entries); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to get events from subscription '" << sub_name << "', ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to get events from subscription '" << sub_name << "', ret=" << op_ret << dendl; return; } - ldout(s->cct, 20) << "successfully got events from subscription '" << sub_name << "'" << dendl; + ldpp_dout(this, 20) << "successfully got events from subscription '" << sub_name << "'" << dendl; } @@ -209,12 +209,12 @@ int RGWPSCreateNotifOp::verify_permission(optional_yield y) { ret = store->getRados()->get_bucket_info(store->svc(), id.tenant, bucket_name, bucket_info, nullptr, y, nullptr); if (ret < 0) { - ldout(s->cct, 1) << "failed to get bucket info, cannot verify ownership" << dendl; + ldpp_dout(this, 1) << "failed to get bucket info, cannot verify ownership" << dendl; return ret; } if (bucket_info.owner != id) { - ldout(s->cct, 1) << "user doesn't own bucket, not allowed to create notification" << dendl; + ldpp_dout(this, 1) << "user doesn't own bucket, not allowed to create notification" << dendl; return -EPERM; } return 0; @@ -233,7 +233,7 @@ int 
RGWPSDeleteNotifOp::verify_permission(optional_yield y) { } if (bucket_info.owner != s->owner.get_id()) { - ldout(s->cct, 1) << "user doesn't own bucket, cannot remove notification" << dendl; + ldpp_dout(this, 1) << "user doesn't own bucket, cannot remove notification" << dendl; return -EPERM; } return 0; @@ -252,7 +252,7 @@ int RGWPSListNotifsOp::verify_permission(optional_yield y) { } if (bucket_info.owner != s->owner.get_id()) { - ldout(s->cct, 1) << "user doesn't own bucket, cannot get notification list" << dendl; + ldpp_dout(this, 1) << "user doesn't own bucket, cannot get notification list" << dendl; return -EPERM; } diff --git a/src/rgw/rgw_rest_realm.cc b/src/rgw/rgw_rest_realm.cc index 12aee6dcc7f82..704988af8c2ab 100644 --- a/src/rgw/rgw_rest_realm.cc +++ b/src/rgw/rgw_rest_realm.cc @@ -36,7 +36,7 @@ void RGWOp_Period_Base::send_response() if (op_ret < 0) { if (!s->err.message.empty()) { - ldout(s->cct, 4) << "Request failed with " << op_ret + ldpp_dout(this, 4) << "Request failed with " << op_ret << ": " << s->err.message << dendl; } end_header(s); @@ -73,9 +73,9 @@ void RGWOp_Period_Get::execute(optional_yield y) period.set_id(period_id); period.set_epoch(epoch); - op_ret = period.init(store->ctx(), store->svc()->sysobj, realm_id, y, realm_name); + op_ret = period.init(this, store->ctx(), store->svc()->sysobj, realm_id, y, realm_name); if (op_ret < 0) - ldout(store->ctx(), 5) << "failed to read period" << dendl; + ldpp_dout(this, 5) << "failed to read period" << dendl; } // POST /admin/realm/period @@ -96,14 +96,14 @@ void RGWOp_Period_Post::execute(optional_yield y) auto cct = store->ctx(); // initialize the period without reading from rados - period.init(cct, store->svc()->sysobj, y, false); + period.init(this, cct, store->svc()->sysobj, y, false); // decode the period from input const auto max_size = cct->_conf->rgw_max_put_param_size; bool empty; op_ret = rgw_rest_get_json_input(cct, s, period, max_size, &empty); if (op_ret < 0) { - lderr(cct) << "failed to decode period" << dendl; + ldpp_dout(this, -1) << "failed to decode period" << dendl; return; } @@ -119,55 +119,55 @@ void RGWOp_Period_Post::execute(optional_yield y) // period that we haven't restarted with yet. 
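The recurring change in the hunks above swaps ldout(s->cct, level) and lderr(cct) for ldpp_dout(this, level) and ldpp_dout(this, -1), so each log line is emitted through the op's DoutPrefixProvider and picks up a per-request prefix. The following is a minimal sketch of that pattern, not taken from the patch: the class and member names are invented, and the interface shape (gen_prefix, get_cct, get_subsys) is assumed to match what these hunks rely on from common/dout.h.

// Sketch only, not from the patch. "ExampleOp" and its members are invented;
// the DoutPrefixProvider interface shape is assumed to match common/dout.h
// as these hunks use it (gen_prefix / get_cct / get_subsys).
#include "common/dout.h"

class ExampleOp : public DoutPrefixProvider {
  CephContext* const cct;
public:
  explicit ExampleOp(CephContext* c) : cct(c) {}

  CephContext* get_cct() const override { return cct; }
  unsigned get_subsys() const override { return ceph_subsys_rgw; }
  std::ostream& gen_prefix(std::ostream& out) const override {
    return out << "example op: ";   // what ldpp_dout prepends to every line
  }

  void execute() {
    // old style, no per-op context:  ldout(cct, 20) << "..." << dendl;
    // new style, prefixed per op:
    ldpp_dout(this, 20) << "successfully did the thing" << dendl;
    // lderr(cct) maps to a level -1 ldpp_dout, as in the hunks above:
    ldpp_dout(this, -1) << "failed to do the thing" << dendl;
  }
};

Where no op object is handy, the hunks pass the request state instead, which keeps the prefix consistent across everything logged for one request.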
we also don't want to modify // the objects in use by RGWRados RGWRealm realm(period.get_realm()); - op_ret = realm.init(cct, store->svc()->sysobj, y); + op_ret = realm.init(this, cct, store->svc()->sysobj, y); if (op_ret < 0) { - lderr(cct) << "failed to read current realm: " + ldpp_dout(this, -1) << "failed to read current realm: " << cpp_strerror(-op_ret) << dendl; return; } RGWPeriod current_period; - op_ret = current_period.init(cct, store->svc()->sysobj, realm.get_id(), y); + op_ret = current_period.init(this, cct, store->svc()->sysobj, realm.get_id(), y); if (op_ret < 0) { - lderr(cct) << "failed to read current period: " + ldpp_dout(this, -1) << "failed to read current period: " << cpp_strerror(-op_ret) << dendl; return; } // if period id is empty, handle as 'period commit' if (period.get_id().empty()) { - op_ret = period.commit(store, realm, current_period, error_stream, y); + op_ret = period.commit(this, store, realm, current_period, error_stream, y); if (op_ret < 0) { - lderr(cct) << "master zone failed to commit period" << dendl; + ldpp_dout(this, -1) << "master zone failed to commit period" << dendl; } return; } // if it's not period commit, nobody is allowed to push to the master zone if (period.get_master_zone() == store->svc()->zone->get_zone_params().get_id()) { - ldout(cct, 10) << "master zone rejecting period id=" + ldpp_dout(this, 10) << "master zone rejecting period id=" << period.get_id() << " epoch=" << period.get_epoch() << dendl; op_ret = -EINVAL; // XXX: error code return; } // write the period to rados - op_ret = period.store_info(false, y); + op_ret = period.store_info(this, false, y); if (op_ret < 0) { - lderr(cct) << "failed to store period " << period.get_id() << dendl; + ldpp_dout(this, -1) << "failed to store period " << period.get_id() << dendl; return; } // set as latest epoch - op_ret = period.update_latest_epoch(period.get_epoch(), y); + op_ret = period.update_latest_epoch(this, period.get_epoch(), y); if (op_ret == -EEXIST) { // already have this epoch (or a more recent one) - ldout(cct, 4) << "already have epoch >= " << period.get_epoch() + ldpp_dout(this, 4) << "already have epoch >= " << period.get_epoch() << " for period " << period.get_id() << dendl; op_ret = 0; return; } if (op_ret < 0) { - lderr(cct) << "failed to set latest epoch" << dendl; + ldpp_dout(this, -1) << "failed to set latest epoch" << dendl; return; } @@ -178,7 +178,7 @@ void RGWOp_Period_Post::execute(optional_yield y) auto current_epoch = current_period.get_realm_epoch(); // discard periods in the past if (period.get_realm_epoch() < current_epoch) { - ldout(cct, 10) << "discarding period " << period.get_id() + ldpp_dout(this, 10) << "discarding period " << period.get_id() << " with realm epoch " << period.get_realm_epoch() << " older than current epoch " << current_epoch << dendl; // return success to ack that we have this period @@ -186,18 +186,18 @@ void RGWOp_Period_Post::execute(optional_yield y) } // discard periods too far in the future if (period.get_realm_epoch() > current_epoch + PERIOD_HISTORY_FETCH_MAX) { - lderr(cct) << "discarding period " << period.get_id() + ldpp_dout(this, -1) << "discarding period " << period.get_id() << " with realm epoch " << period.get_realm_epoch() << " too far in " "the future from current epoch " << current_epoch << dendl; op_ret = -ENOENT; // XXX: error code return; } // attach a copy of the period into the period history - auto cursor = period_history->attach(RGWPeriod{period}, y); + auto cursor = period_history->attach(this, 
RGWPeriod{period}, y); if (!cursor) { // we're missing some history between the new period and current_period op_ret = cursor.get_error(); - lderr(cct) << "failed to collect the periods between current period " + ldpp_dout(this, -1) << "failed to collect the periods between current period " << current_period.get_id() << " (realm epoch " << current_epoch << ") and the new period " << period.get_id() << " (realm epoch " << period.get_realm_epoch() @@ -206,33 +206,33 @@ void RGWOp_Period_Post::execute(optional_yield y) } if (cursor.has_next()) { // don't switch if we have a newer period in our history - ldout(cct, 4) << "attached period " << period.get_id() + ldpp_dout(this, 4) << "attached period " << period.get_id() << " to history, but the history contains newer periods" << dendl; return; } // set as current period - op_ret = realm.set_current_period(period, y); + op_ret = realm.set_current_period(this, period, y); if (op_ret < 0) { - lderr(cct) << "failed to update realm's current period" << dendl; + ldpp_dout(this, -1) << "failed to update realm's current period" << dendl; return; } - ldout(cct, 4) << "period " << period.get_id() + ldpp_dout(this, 4) << "period " << period.get_id() << " is newer than current period " << current_period.get_id() << ", updating realm's current period and notifying zone" << dendl; - realm.notify_new_period(period, y); + realm.notify_new_period(this, period, y); return; } // reflect the period into our local objects - op_ret = period.reflect(y); + op_ret = period.reflect(this, y); if (op_ret < 0) { - lderr(cct) << "failed to update local objects: " + ldpp_dout(this, -1) << "failed to update local objects: " << cpp_strerror(-op_ret) << dendl; return; } - ldout(cct, 4) << "period epoch " << period.get_epoch() + ldpp_dout(this, 4) << "period epoch " << period.get_epoch() << " is newer than current epoch " << current_period.get_epoch() << ", updating period's latest epoch and notifying zone" << dendl; - realm.notify_new_period(period, y); + realm.notify_new_period(this, period, y); // update the period history period_history->insert(RGWPeriod{period}); } @@ -280,9 +280,9 @@ void RGWOp_Realm_Get::execute(optional_yield y) // read realm realm.reset(new RGWRealm(id, name)); - op_ret = realm->init(g_ceph_context, store->svc()->sysobj, y); + op_ret = realm->init(this, g_ceph_context, store->svc()->sysobj, y); if (op_ret < 0) - lderr(store->ctx()) << "failed to read realm id=" << id + ldpp_dout(this, -1) << "failed to read realm id=" << id << " name=" << name << dendl; } @@ -322,11 +322,11 @@ void RGWOp_Realm_List::execute(optional_yield y) { // read default realm RGWRealm realm(store->ctx(), store->svc()->sysobj); - [[maybe_unused]] int ret = realm.read_default_id(default_id, y); + [[maybe_unused]] int ret = realm.read_default_id(this, default_id, y); } - op_ret = store->svc()->zone->list_realms(realms); + op_ret = store->svc()->zone->list_realms(this, realms); if (op_ret < 0) - lderr(store->ctx()) << "failed to list realms" << dendl; + ldpp_dout(this, -1) << "failed to list realms" << dendl; } void RGWOp_Realm_List::send_response() diff --git a/src/rgw/rgw_rest_role.cc b/src/rgw/rgw_rest_role.cc index 13e1c37e413bc..1e9cdf71b217d 100644 --- a/src/rgw/rgw_rest_role.cc +++ b/src/rgw/rgw_rest_role.cc @@ -27,7 +27,7 @@ int RGWRestRole::verify_permission(optional_yield y) string role_name = s->info.args.get("RoleName"); RGWRole role(s->cct, store->getRados()->pctl, role_name, s->user->get_tenant()); - if (op_ret = role.get(y); op_ret < 0) { + if (op_ret = role.get(s, y); 
op_ret < 0) { if (op_ret == -ENOENT) { op_ret = -ERR_NO_ROLE_FOUND; } @@ -107,7 +107,7 @@ int RGWCreateRole::get_params() max_session_duration = s->info.args.get("MaxSessionDuration"); if (role_name.empty() || trust_policy.empty()) { - ldout(s->cct, 20) << "ERROR: one of role name or assume role policy document is empty" + ldpp_dout(this, 20) << "ERROR: one of role name or assume role policy document is empty" << dendl; return -EINVAL; } @@ -117,7 +117,7 @@ int RGWCreateRole::get_params() const rgw::IAM::Policy p(s->cct, s->user->get_tenant(), bl); } catch (rgw::IAM::PolicyParseException& e) { - ldout(s->cct, 20) << "failed to parse policy: " << e.what() << dendl; + ldpp_dout(this, 20) << "failed to parse policy: " << e.what() << dendl; return -ERR_MALFORMED_DOC; } @@ -132,7 +132,7 @@ void RGWCreateRole::execute(optional_yield y) } RGWRole role(s->cct, store->getRados()->pctl, role_name, role_path, trust_policy, s->user->get_tenant(), max_session_duration); - op_ret = role.create(true, y); + op_ret = role.create(s, true, y); if (op_ret == -EEXIST) { op_ret = -ERR_ROLE_EXISTS; @@ -157,7 +157,7 @@ int RGWDeleteRole::get_params() role_name = s->info.args.get("RoleName"); if (role_name.empty()) { - ldout(s->cct, 20) << "ERROR: Role name is empty"<< dendl; + ldpp_dout(this, 20) << "ERROR: Role name is empty"<< dendl; return -EINVAL; } @@ -171,7 +171,7 @@ void RGWDeleteRole::execute(optional_yield y) return; } - op_ret = _role.delete_obj(y); + op_ret = _role.delete_obj(s, y); if (op_ret == -ENOENT) { op_ret = -ERR_NO_ROLE_FOUND; @@ -216,7 +216,7 @@ int RGWGetRole::get_params() role_name = s->info.args.get("RoleName"); if (role_name.empty()) { - ldout(s->cct, 20) << "ERROR: Role name is empty"<< dendl; + ldpp_dout(this, 20) << "ERROR: Role name is empty"<< dendl; return -EINVAL; } @@ -230,7 +230,7 @@ void RGWGetRole::execute(optional_yield y) return; } RGWRole role(s->cct, store->getRados()->pctl, role_name, s->user->get_tenant()); - op_ret = role.get(y); + op_ret = role.get(s, y); if (op_ret == -ENOENT) { op_ret = -ERR_NO_ROLE_FOUND; @@ -259,12 +259,12 @@ int RGWModifyRole::get_params() trust_policy = s->info.args.get("PolicyDocument"); if (role_name.empty() || trust_policy.empty()) { - ldout(s->cct, 20) << "ERROR: One of role name or trust policy is empty"<< dendl; + ldpp_dout(this, 20) << "ERROR: One of role name or trust policy is empty"<< dendl; return -EINVAL; } JSONParser p; if (!p.parse(trust_policy.c_str(), trust_policy.length())) { - ldout(s->cct, 20) << "ERROR: failed to parse assume role policy doc" << dendl; + ldpp_dout(this, 20) << "ERROR: failed to parse assume role policy doc" << dendl; return -ERR_MALFORMED_DOC; } @@ -279,7 +279,7 @@ void RGWModifyRole::execute(optional_yield y) } _role.update_trust_policy(trust_policy); - op_ret = _role.update(y); + op_ret = _role.update(this, y); s->formatter->open_object_section("UpdateAssumeRolePolicyResponse"); s->formatter->open_object_section("ResponseMetadata"); @@ -322,7 +322,7 @@ void RGWListRoles::execute(optional_yield y) return; } vector result; - op_ret = RGWRole::get_roles_by_path_prefix(store->getRados(), s->cct, path_prefix, s->user->get_tenant(), result, y); + op_ret = RGWRole::get_roles_by_path_prefix(s, store->getRados(), s->cct, path_prefix, s->user->get_tenant(), result, y); if (op_ret == 0) { s->formatter->open_array_section("ListRolesResponse"); @@ -349,7 +349,7 @@ int RGWPutRolePolicy::get_params() perm_policy = s->info.args.get("PolicyDocument"); if (role_name.empty() || policy_name.empty() || perm_policy.empty()) { - 
ldout(s->cct, 20) << "ERROR: One of role name, policy name or perm policy is empty"<< dendl; + ldpp_dout(this, 20) << "ERROR: One of role name, policy name or perm policy is empty"<< dendl; return -EINVAL; } bufferlist bl = bufferlist::static_from_string(perm_policy); @@ -357,7 +357,7 @@ int RGWPutRolePolicy::get_params() const rgw::IAM::Policy p(s->cct, s->user->get_tenant(), bl); } catch (rgw::IAM::PolicyParseException& e) { - ldout(s->cct, 20) << "failed to parse policy: " << e.what() << dendl; + ldpp_dout(this, 20) << "failed to parse policy: " << e.what() << dendl; return -ERR_MALFORMED_DOC; } return 0; @@ -371,7 +371,7 @@ void RGWPutRolePolicy::execute(optional_yield y) } _role.set_perm_policy(policy_name, perm_policy); - op_ret = _role.update(y); + op_ret = _role.update(this, y); if (op_ret == 0) { s->formatter->open_object_section("PutRolePolicyResponse"); @@ -388,7 +388,7 @@ int RGWGetRolePolicy::get_params() policy_name = s->info.args.get("PolicyName"); if (role_name.empty() || policy_name.empty()) { - ldout(s->cct, 20) << "ERROR: One of role name or policy name is empty"<< dendl; + ldpp_dout(this, 20) << "ERROR: One of role name or policy name is empty"<< dendl; return -EINVAL; } return 0; @@ -426,7 +426,7 @@ int RGWListRolePolicies::get_params() role_name = s->info.args.get("RoleName"); if (role_name.empty()) { - ldout(s->cct, 20) << "ERROR: Role name is empty"<< dendl; + ldpp_dout(this, 20) << "ERROR: Role name is empty"<< dendl; return -EINVAL; } return 0; @@ -460,7 +460,7 @@ int RGWDeleteRolePolicy::get_params() policy_name = s->info.args.get("PolicyName"); if (role_name.empty() || policy_name.empty()) { - ldout(s->cct, 20) << "ERROR: One of role name or policy name is empty"<< dendl; + ldpp_dout(this, 20) << "ERROR: One of role name or policy name is empty"<< dendl; return -EINVAL; } return 0; @@ -479,7 +479,7 @@ void RGWDeleteRolePolicy::execute(optional_yield y) } if (op_ret == 0) { - op_ret = _role.update(y); + op_ret = _role.update(this, y); } s->formatter->open_object_section("DeleteRolePoliciesResponse"); diff --git a/src/rgw/rgw_rest_s3.cc b/src/rgw/rgw_rest_s3.cc index 26606bc99d55f..7910c461ea75d 100644 --- a/src/rgw/rgw_rest_s3.cc +++ b/src/rgw/rgw_rest_s3.cc @@ -449,7 +449,7 @@ int RGWGetObj_ObjStore_S3::get_decrypt_filter(std::unique_ptr if (block_crypt != nullptr) { auto f = std::make_unique(s->cct, cb, std::move(block_crypt)); if (manifest_bl != nullptr) { - res = f->read_manifest(*manifest_bl); + res = f->read_manifest(this, *manifest_bl); if (res == 0) { *filter = std::move(f); } @@ -606,7 +606,7 @@ void RGWGetBucketTags_ObjStore_S3::send_response_data(bufferlist& bl) try { tagset.decode(iter); } catch (buffer::error& err) { - ldout(s->cct,0) << "ERROR: caught buffer::error, couldn't decode TagSet" << dendl; + ldpp_dout(this,0) << "ERROR: caught buffer::error, couldn't decode TagSet" << dendl; op_ret= -EIO; return; } @@ -618,7 +618,7 @@ void RGWGetBucketTags_ObjStore_S3::send_response_data(bufferlist& bl) } } -int RGWPutBucketTags_ObjStore_S3::get_params(optional_yield y) +int RGWPutBucketTags_ObjStore_S3::get_params(const DoutPrefixProvider *dpp, optional_yield y) { RGWXMLParser parser; @@ -644,7 +644,7 @@ int RGWPutBucketTags_ObjStore_S3::get_params(optional_yield y) RGWXMLDecoder::decode_xml("Tagging", tagging, &parser); } catch (RGWXMLDecoder::err& err) { - ldout(s->cct, 5) << "Malformed tagging request: " << err << dendl; + ldpp_dout(dpp, 5) << "Malformed tagging request: " << err << dendl; return -ERR_MALFORMED_XML; } @@ -654,7 +654,7 @@ int 
RGWPutBucketTags_ObjStore_S3::get_params(optional_yield y) return r; obj_tags.encode(tags_bl); - ldout(s->cct, 20) << "Read " << obj_tags.count() << "tags" << dendl; + ldpp_dout(dpp, 20) << "Read " << obj_tags.count() << "tags" << dendl; // forward bucket tags requests to meta master zone if (!store->svc()->zone->is_meta_master()) { @@ -1128,7 +1128,7 @@ struct ReplicationConfiguration { bool enabled; int r = rule.to_sync_policy_pipe(s, store, &pipe, &enabled); if (r < 0) { - ldout(s->cct, 5) << "NOTICE: failed to convert replication configuration into sync policy pipe (rule.id=" << rule.id << "): " << cpp_strerror(-r) << dendl; + ldpp_dout(s, 5) << "NOTICE: failed to convert replication configuration into sync policy pipe (rule.id=" << rule.id << "): " << cpp_strerror(-r) << dendl; return r; } @@ -1212,7 +1212,7 @@ int RGWPutBucketReplication_ObjStore_S3::get_params(optional_yield y) RGWXMLDecoder::decode_xml("ReplicationConfiguration", conf, &parser); } catch (RGWXMLDecoder::err& err) { - ldout(s->cct, 5) << "Malformed tagging request: " << err << dendl; + ldpp_dout(this, 5) << "Malformed tagging request: " << err << dendl; return -ERR_MALFORMED_XML; } @@ -1466,7 +1466,7 @@ int RGWListBucket_ObjStore_S3::get_common_params() string err; shard_id = strict_strtol(shard_id_str, 10, &err); if (!err.empty()) { - ldout(s->cct, 5) << "bad shard id specified: " << shard_id_str << dendl; + ldpp_dout(this, 5) << "bad shard id specified: " << shard_id_str << dendl; return -EINVAL; } } else { @@ -2073,16 +2073,16 @@ int RGWSetBucketWebsite_ObjStore_S3::get_params(optional_yield y) if (website_conf.is_redirect_all && website_conf.redirect_all.hostname.empty()) { s->err.message = "A host name must be provided to redirect all requests (e.g. \"example.com\")."; - ldout(s->cct, 5) << s->err.message << dendl; + ldpp_dout(this, 5) << s->err.message << dendl; return -EINVAL; } else if (!website_conf.is_redirect_all && !website_conf.is_set_index_doc) { s->err.message = "A value for IndexDocument Suffix must be provided if RedirectAllRequestsTo is empty"; - ldout(s->cct, 5) << s->err.message << dendl; + ldpp_dout(this, 5) << s->err.message << dendl; return -EINVAL; } else if (!website_conf.is_redirect_all && website_conf.is_set_index_doc && website_conf.index_doc_suffix.empty()) { s->err.message = "The IndexDocument Suffix is not well formed"; - ldout(s->cct, 5) << s->err.message << dendl; + ldpp_dout(this, 5) << s->err.message << dendl; return -EINVAL; } @@ -2181,7 +2181,7 @@ static int create_s3_policy(struct req_state *s, rgw::sal::RGWRadosStore *store, if (!s->canned_acl.empty()) return -ERR_INVALID_REQUEST; - return s3policy.create_from_headers(store->ctl()->user, s->info.env, owner); + return s3policy.create_from_headers(s, store->ctl()->user, s->info.env, owner); } return s3policy.create_canned(owner, s->bucket_owner, s->canned_acl); @@ -2545,7 +2545,7 @@ static inline int get_obj_attrs(rgw::sal::RGWRadosStore *store, struct req_state read_op.params.attrs = &attrs; - return read_op.prepare(s->yield); + return read_op.prepare(s->yield, s); } static inline void set_attr(map& attrs, const char* key, const std::string& value) @@ -2579,7 +2579,7 @@ int RGWPutObj_ObjStore_S3::get_decrypt_filter( //RGWGetObj_BlockDecrypt* f = new RGWGetObj_BlockDecrypt(s->cct, cb, std::move(block_crypt)); if (f != nullptr) { if (manifest_bl != nullptr) { - res = f->read_manifest(*manifest_bl); + res = f->read_manifest(this, *manifest_bl); if (res == 0) { *filter = std::move(f); } @@ -3463,7 +3463,7 @@ void 
RGWGetCORS_ObjStore_S3::send_response() int RGWPutCORS_ObjStore_S3::get_params(optional_yield y) { - RGWCORSXMLParser_S3 parser(s->cct); + RGWCORSXMLParser_S3 parser(this, s->cct); RGWCORSConfiguration_S3 *cors_config; const auto max_size = s->cct->_conf->rgw_max_put_param_size; @@ -3982,7 +3982,7 @@ void RGWGetObjLayout_ObjStore_S3::send_response() ::encode_json("head", head_obj, &f); ::encode_json("manifest", *manifest, &f); f.open_array_section("data_location"); - for (auto miter = manifest->obj_begin(); miter != manifest->obj_end(); ++miter) { + for (auto miter = manifest->obj_begin(this); miter != manifest->obj_end(this); ++miter) { f.open_object_section("obj"); rgw_raw_obj raw_loc = miter.get_location().get_raw_obj(store); uint64_t ofs = miter.get_ofs(); @@ -4582,7 +4582,7 @@ int RGWHandler_REST_S3::init_from_header(rgw::sal::RGWRadosStore *store, } s->info.args.set(p); - s->info.args.parse(); + s->info.args.parse(s); /* must be called after the args parsing */ int ret = allocate_formatter(s, default_formatter, configurable_format); @@ -4660,7 +4660,7 @@ static int verify_mfa(rgw::sal::RGWRadosStore *store, RGWUserInfo *user, return -EACCES; } - int ret = store->svc()->cls->mfa.check_mfa(user->user_id, serial, pin, y); + int ret = store->svc()->cls->mfa.check_mfa(dpp, user->user_id, serial, pin, y); if (ret < 0) { ldpp_dout(dpp, 20) << "NOTICE: failed to check MFA, serial=" << serial << dendl; return -EACCES; @@ -4682,7 +4682,7 @@ int RGWHandler_REST_S3::postauth_init(optional_yield y) s->bucket_tenant = s->auth.identity->get_role_tenant(); } - dout(10) << "s->object=" << s->object + ldpp_dout(s, 10) << "s->object=" << s->object << " s->bucket=" << rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name) << dendl; int ret; @@ -4741,7 +4741,8 @@ int RGWHandler_REST_S3::init(rgw::sal::RGWRadosStore *store, struct req_state *s ret = RGWCopyObj::parse_copy_location(copy_source, s->init_state.src_bucket, - key); + key, + s); if (!ret) { ldpp_dout(s, 0) << "failed to parse copy location" << dendl; return -EINVAL; // XXX why not -ERR_INVALID_BUCKET_NAME or -ERR_BAD_URL? @@ -4909,7 +4910,7 @@ bool RGWHandler_REST_S3Website::web_dir() const { obj_ctx.set_prefetch_data(obj); RGWObjState* state = nullptr; - if (store->getRados()->get_obj_state(&obj_ctx, s->bucket->get_info(), obj, &state, false, s->yield) < 0) { + if (store->getRados()->get_obj_state(s, &obj_ctx, s->bucket->get_info(), obj, &state, false, s->yield) < 0) { return false; } if (! state->exists) { @@ -4940,7 +4941,7 @@ int RGWHandler_REST_S3Website::retarget(RGWOp* op, RGWOp** new_op, optional_yiel if (!(s->prot_flags & RGW_REST_WEBSITE)) return 0; - int ret = store->get_bucket(nullptr, s->bucket_tenant, s->bucket_name, &s->bucket, y); + int ret = store->get_bucket(s, nullptr, s->bucket_tenant, s->bucket_name, &s->bucket, y); if (ret < 0) { // TODO-FUTURE: if the bucket does not exist, maybe expose it here? 
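The other recurring change in these hunks threads a DoutPrefixProvider through call signatures, for example period.init(this, ...), role.get(s, y), and get_params(const DoutPrefixProvider *dpp, optional_yield y), so that lower layers log with the caller's prefix rather than a bare CephContext; several call sites pass the request state s where a provider is expected, which suggests req_state implements the interface here. Below is a rough sketch of that shape with invented helper names, assuming only ldpp_dout and DoutPrefixProvider from common/dout.h.

// Sketch only: helper and argument names are invented to show the
// dpp-threading pattern; only ldpp_dout/DoutPrefixProvider from
// common/dout.h are assumed.
#include <cerrno>
#include <string>
#include "common/dout.h"

// The lower layer takes the caller's prefix provider as its first argument,
// mirroring period.init(this, ...), role.get(s, y), check_mfa(dpp, ...) above.
static int load_thing(const DoutPrefixProvider* dpp, const std::string& name)
{
  ldpp_dout(dpp, 20) << "loading " << name << dendl;
  int r = -ENOENT;              // placeholder result for the sketch
  if (r < 0) {
    ldpp_dout(dpp, 1) << "failed to load " << name << ", ret=" << r << dendl;
  }
  return r;
}

// An op (or the request state) that is itself a DoutPrefixProvider simply
// forwards itself, so nested calls log under one request prefix.
static int run_op(const DoutPrefixProvider* op)
{
  return load_thing(op, "example");
}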
return -ERR_NO_SUCH_BUCKET; @@ -5009,7 +5010,7 @@ RGWOp* RGWHandler_REST_S3Website::op_head() return get_obj_op(false); } -int RGWHandler_REST_S3Website::serve_errordoc(int http_ret, const string& errordoc_key, optional_yield y) { +int RGWHandler_REST_S3Website::serve_errordoc(const DoutPrefixProvider *dpp, int http_ret, const string& errordoc_key, optional_yield y) { int ret = 0; s->formatter->reset(); /* Try to throw it all away */ @@ -5122,7 +5123,7 @@ int RGWHandler_REST_S3Website::error_handler(int err_no, On success, it will return zero, and no further content should be sent to the socket On failure, we need the double-error handler */ - new_err_no = RGWHandler_REST_S3Website::serve_errordoc(http_error_code, s->bucket->get_info().website_conf.error_doc, y); + new_err_no = RGWHandler_REST_S3Website::serve_errordoc(s, http_error_code, s->bucket->get_info().website_conf.error_doc, y); if (new_err_no != -1) { err_no = new_err_no; } @@ -5222,7 +5223,8 @@ AWSGeneralAbstractor::get_auth_data_v4(const req_state* const s, client_signature, date, session_token, - using_qs); + using_qs, + s); if (ret < 0) { throw ret; } @@ -5287,20 +5289,23 @@ AWSGeneralAbstractor::get_auth_data_v4(const req_state* const s, std::move(canonical_qs), std::move(*canonical_headers), signed_hdrs, - exp_payload_hash); + exp_payload_hash, + s); auto string_to_sign = \ rgw::auth::s3::get_v4_string_to_sign(s->cct, AWS4_HMAC_SHA256_STR, date, credential_scope, - std::move(canonical_req_hash)); + std::move(canonical_req_hash), + s); const auto sig_factory = std::bind(rgw::auth::s3::get_v4_signature, credential_scope, std::placeholders::_1, std::placeholders::_2, - std::placeholders::_3); + std::placeholders::_3, + s); /* Requests authenticated with the Query Parameters are treated as unsigned. * From "Authenticating Requests: Using Query Parameters (AWS Signature @@ -5364,7 +5369,7 @@ AWSGeneralAbstractor::get_auth_data_v4(const req_state* const s, case RGW_OP_GET_OBJ://s3select its post-method(payload contain the query) , the request is get-object break; default: - dout(10) << "ERROR: AWS4 completion for this operation NOT IMPLEMENTED" << dendl; + ldpp_dout(s, 10) << "ERROR: AWS4 completion for this operation NOT IMPLEMENTED" << dendl; throw -ERR_NOT_IMPLEMENTED; } @@ -5383,7 +5388,7 @@ AWSGeneralAbstractor::get_auth_data_v4(const req_state* const s, /* IMHO "streamed" doesn't fit too good here. I would prefer to call * it "chunked" but let's be coherent with Amazon's terminology. */ - dout(10) << "body content detected in multiple chunks" << dendl; + ldpp_dout(s, 10) << "body content detected in multiple chunks" << dendl; /* payload in multiple chunks */ @@ -5392,11 +5397,11 @@ AWSGeneralAbstractor::get_auth_data_v4(const req_state* const s, case RGW_OP_PUT_OBJ: break; default: - dout(10) << "ERROR: AWS4 completion for this operation NOT IMPLEMENTED (streaming mode)" << dendl; + ldpp_dout(s, 10) << "ERROR: AWS4 completion for this operation NOT IMPLEMENTED (streaming mode)" << dendl; throw -ERR_NOT_IMPLEMENTED; } - dout(10) << "aws4 seed signature ok... delaying v4 auth" << dendl; + ldpp_dout(s, 10) << "aws4 seed signature ok... delaying v4 auth" << dendl; /* In the case of streamed payload client sets the x-amz-content-sha256 * to "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" but uses "UNSIGNED-PAYLOAD" @@ -5495,7 +5500,7 @@ AWSGeneralAbstractor::get_auth_data_v2(const req_state* const s) const /* Let's canonize the HTTP headers that are covered by the AWS auth v2. */ std::string string_to_sign; utime_t header_time; - if (! 
rgw_create_s3_canonical_header(s->info, &header_time, string_to_sign, + if (! rgw_create_s3_canonical_header(s, s->info, &header_time, string_to_sign, qsr)) { ldpp_dout(s, 10) << "failed to create the canonized auth header\n" << rgw::crypt_sanitize::auth{s,string_to_sign} << dendl; @@ -5541,17 +5546,18 @@ AWSBrowserUploadAbstractor::get_auth_data_v4(const req_state* const s) const /* grab access key id */ const size_t pos = credential.find("/"); const std::string_view access_key_id = credential.substr(0, pos); - dout(10) << "access key id = " << access_key_id << dendl; + ldpp_dout(s, 10) << "access key id = " << access_key_id << dendl; /* grab credential scope */ const std::string_view credential_scope = credential.substr(pos + 1); - dout(10) << "credential scope = " << credential_scope << dendl; + ldpp_dout(s, 10) << "credential scope = " << credential_scope << dendl; const auto sig_factory = std::bind(rgw::auth::s3::get_v4_signature, credential_scope, std::placeholders::_1, std::placeholders::_2, - std::placeholders::_3); + std::placeholders::_3, + s); return { access_key_id, @@ -5726,7 +5732,7 @@ rgw::auth::s3::LocalEngine::authenticate( RGWUserInfo user_info; /* TODO(rzarzynski): we need to have string-view taking variant. */ const std::string access_key_id(_access_key_id); - if (rgw_get_user_info_by_access_key(ctl->user, access_key_id, user_info, y) < 0) { + if (rgw_get_user_info_by_access_key(dpp, ctl->user, access_key_id, user_info, y) < 0) { ldpp_dout(dpp, 5) << "error reading user info, uid=" << access_key_id << " can't authenticate" << dendl; return result_t::deny(-ERR_INVALID_ACCESS_KEY); @@ -5825,7 +5831,7 @@ rgw::auth::s3::STSEngine::get_session_token(const DoutPrefixProvider* dpp, const auto iter = dec_output.cbegin(); decode(token, iter); } catch (const buffer::error& e) { - ldout(cct, 0) << "ERROR: decode SessionToken failed: " << error << dendl; + ldpp_dout(dpp, 0) << "ERROR: decode SessionToken failed: " << error << dendl; return -EINVAL; } } @@ -5900,7 +5906,7 @@ rgw::auth::s3::STSEngine::authenticate( rgw::auth::RoleApplier::Role r; if (! token.roleId.empty()) { RGWRole role(s->cct, ctl, token.roleId); - if (role.get_by_id(y) < 0) { + if (role.get_by_id(dpp, y) < 0) { return result_t::deny(-EPERM); } r.id = token.roleId; @@ -5920,7 +5926,7 @@ rgw::auth::s3::STSEngine::authenticate( if (! 
token.user.empty() && token.acct_type != TYPE_ROLE) { // get user info - int ret = rgw_get_user_info_by_uid(ctl->user, token.user, user_info, y, NULL); + int ret = rgw_get_user_info_by_uid(dpp, ctl->user, token.user, user_info, y, NULL); if (ret < 0) { ldpp_dout(dpp, 5) << "ERROR: failed reading user info: uid=" << token.user << dendl; return result_t::reject(-EPERM); @@ -5983,16 +5989,16 @@ int RGWSelectObj_ObjStore_S3::get_params(optional_yield y) int max_size = 4096; std::tie(ret, data) = rgw_rest_read_all_input(s, max_size, false); if (ret != 0) { - ldout(s->cct, 10) << "s3-select query: failed to retrieve query; ret = " << ret << dendl; + ldpp_dout(this, 10) << "s3-select query: failed to retrieve query; ret = " << ret << dendl; return ret; } m_s3select_query = data.to_str(); if (m_s3select_query.length() > 0) { - ldout(s->cct, 10) << "s3-select query: " << m_s3select_query << dendl; + ldpp_dout(this, 10) << "s3-select query: " << m_s3select_query << dendl; } else { - ldout(s->cct, 10) << "s3-select query: failed to retrieve query;" << dendl; + ldpp_dout(this, 10) << "s3-select query: failed to retrieve query;" << dendl; return -1; } @@ -6143,7 +6149,7 @@ int RGWSelectObj_ObjStore_S3::run_s3select(const char* query, const char* input, if (s3select_syntax->get_error_description().empty() == false) { m_result.append(s3select_syntax->get_error_description()); - ldout(s->cct, 10) << "s3-select query: failed to prase query; {" << s3select_syntax->get_error_description() << "}"<< dendl; + ldpp_dout(this, 10) << "s3-select query: failed to prase query; {" << s3select_syntax->get_error_description() << "}"<< dendl; status = -1; } else { @@ -6194,7 +6200,7 @@ int RGWSelectObj_ObjStore_S3::handle_aws_cli_parameters(std::string& sql_query) extract_by_tag("QuoteEscapeCharacter", m_escape_char); extract_by_tag("CompressionType", m_compression_type); if (m_compression_type.length()>0 && m_compression_type.compare("NONE") != 0) { - ldout(s->cct, 10) << "RGW supports currently only NONE option for compression type" << dendl; + ldpp_dout(this, 10) << "RGW supports currently only NONE option for compression type" << dendl; return -1; } diff --git a/src/rgw/rgw_rest_s3.h b/src/rgw/rgw_rest_s3.h index 8c59174efe0f2..14b4e6b61ea78 100644 --- a/src/rgw/rgw_rest_s3.h +++ b/src/rgw/rgw_rest_s3.h @@ -94,7 +94,7 @@ public: class RGWPutBucketTags_ObjStore_S3 : public RGWPutBucketTags_ObjStore { public: - int get_params(optional_yield y) override; + int get_params(const DoutPrefixProvider *dpp, optional_yield y) override; void send_response() override; }; diff --git a/src/rgw/rgw_rest_s3website.h b/src/rgw/rgw_rest_s3website.h index c363eb563b95e..e35514838f33a 100644 --- a/src/rgw/rgw_rest_s3website.h +++ b/src/rgw/rgw_rest_s3website.h @@ -35,7 +35,7 @@ protected: RGWOp *op_copy() override { return NULL; } RGWOp *op_options() override { return NULL; } - int serve_errordoc(int http_ret, const string &errordoc_key, optional_yield y); + int serve_errordoc(const DoutPrefixProvider *dpp, int http_ret, const string &errordoc_key, optional_yield y); public: using RGWHandler_REST_S3::RGWHandler_REST_S3; ~RGWHandler_REST_S3Website() override = default; diff --git a/src/rgw/rgw_rest_sts.cc b/src/rgw/rgw_rest_sts.cc index 3efee205067f1..af1b96ce0d8e7 100644 --- a/src/rgw/rgw_rest_sts.cc +++ b/src/rgw/rgw_rest_sts.cc @@ -61,7 +61,7 @@ WebTokenEngine::get_role_tenant(const string& role_arn) const } boost::optional -WebTokenEngine::get_provider(const string& role_arn, const string& iss) const 
+WebTokenEngine::get_provider(const DoutPrefixProvider *dpp, const string& role_arn, const string& iss) const { string tenant = get_role_tenant(role_arn); @@ -83,7 +83,7 @@ WebTokenEngine::get_provider(const string& role_arn, const string& iss) const auto provider_arn = rgw::ARN(idp_url, "oidc-provider", tenant); string p_arn = provider_arn.to_string(); RGWOIDCProvider provider(cct, ctl, p_arn, tenant); - auto ret = provider.get(); + auto ret = provider.get(dpp); if (ret < 0) { return boost::none; } @@ -158,7 +158,7 @@ WebTokenEngine::get_from_jwt(const DoutPrefixProvider* dpp, const std::string& t t.client_id = decoded.get_payload_claim("clientId").as_string(); } string role_arn = s->info.args.get("RoleArn"); - auto provider = get_provider(role_arn, t.iss); + auto provider = get_provider(dpp, role_arn, t.iss); if (! provider) { ldpp_dout(dpp, 0) << "Couldn't get oidc provider info using input iss" << t.iss << dendl; throw -EACCES; @@ -344,7 +344,7 @@ WebTokenEngine::authenticate( const DoutPrefixProvider* dpp, if (t) { string role_session = s->info.args.get("RoleSessionName"); if (role_session.empty()) { - ldout(s->cct, 0) << "Role Session Name is empty " << dendl; + ldpp_dout(dpp, 0) << "Role Session Name is empty " << dendl; return result_t::deny(-EACCES); } string role_arn = s->info.args.get("RoleArn"); @@ -363,9 +363,9 @@ int RGWREST_STS::verify_permission(optional_yield y) sts = std::move(_sts); string rArn = s->info.args.get("RoleArn"); - const auto& [ret, role] = sts.getRoleInfo(rArn, y); + const auto& [ret, role] = sts.getRoleInfo(s, rArn, y); if (ret < 0) { - ldout(s->cct, 0) << "failed to get role info using role arn: " << rArn << dendl; + ldpp_dout(this, 0) << "failed to get role info using role arn: " << rArn << dendl; return ret; } string policy = role.get_assume_role_policy(); @@ -379,16 +379,16 @@ int RGWREST_STS::verify_permission(optional_yield y) // If yes, then return 0, else -EPERM auto p_res = p.eval_principal(s->env, *s->auth.identity); if (p_res == rgw::IAM::Effect::Deny) { - ldout(s->cct, 0) << "evaluating principal returned deny" << dendl; + ldpp_dout(this, 0) << "evaluating principal returned deny" << dendl; return -EPERM; } auto c_res = p.eval_conditions(s->env); if (c_res == rgw::IAM::Effect::Deny) { - ldout(s->cct, 0) << "evaluating condition returned deny" << dendl; + ldpp_dout(this, 0) << "evaluating condition returned deny" << dendl; return -EPERM; } } catch (rgw::IAM::PolicyParseException& e) { - ldout(s->cct, 0) << "failed to parse policy: " << e.what() << dendl; + ldpp_dout(this, 0) << "failed to parse policy: " << e.what() << dendl; return -EPERM; } @@ -412,7 +412,7 @@ int RGWSTSGetSessionToken::verify_permission(optional_yield y) s, rgw::ARN(partition, service, "", s->user->get_tenant(), ""), rgw::IAM::stsGetSessionToken)) { - ldout(s->cct, 0) << "User does not have permssion to perform GetSessionToken" << dendl; + ldpp_dout(this, 0) << "User does not have permssion to perform GetSessionToken" << dendl; return -EACCES; } @@ -429,13 +429,13 @@ int RGWSTSGetSessionToken::get_params() string err; uint64_t duration_in_secs = strict_strtoll(duration.c_str(), 10, &err); if (!err.empty()) { - ldout(s->cct, 0) << "Invalid value of input duration: " << duration << dendl; + ldpp_dout(this, 0) << "Invalid value of input duration: " << duration << dendl; return -EINVAL; } if (duration_in_secs < STS::GetSessionTokenRequest::getMinDuration() || duration_in_secs > s->cct->_conf->rgw_sts_max_session_duration) { - ldout(s->cct, 0) << "Invalid duration in secs: " << 
duration_in_secs << dendl; + ldpp_dout(this, 0) << "Invalid duration in secs: " << duration_in_secs << dendl; return -EINVAL; } } @@ -478,7 +478,7 @@ int RGWSTSAssumeRoleWithWebIdentity::get_params() aud = s->info.args.get("aud"); if (roleArn.empty() || roleSessionName.empty() || sub.empty() || aud.empty()) { - ldout(s->cct, 0) << "ERROR: one of role arn or role session name or token is empty" << dendl; + ldpp_dout(this, 0) << "ERROR: one of role arn or role session name or token is empty" << dendl; return -EINVAL; } @@ -488,7 +488,7 @@ int RGWSTSAssumeRoleWithWebIdentity::get_params() const rgw::IAM::Policy p(s->cct, s->user->get_tenant(), bl); } catch (rgw::IAM::PolicyParseException& e) { - ldout(s->cct, 20) << "failed to parse policy: " << e.what() << "policy" << policy << dendl; + ldpp_dout(this, 20) << "failed to parse policy: " << e.what() << "policy" << policy << dendl; return -ERR_MALFORMED_DOC; } } @@ -537,7 +537,7 @@ int RGWSTSAssumeRole::get_params() tokenCode = s->info.args.get("TokenCode"); if (roleArn.empty() || roleSessionName.empty()) { - ldout(s->cct, 0) << "ERROR: one of role arn or role session name is empty" << dendl; + ldpp_dout(this, 0) << "ERROR: one of role arn or role session name is empty" << dendl; return -EINVAL; } @@ -547,7 +547,7 @@ int RGWSTSAssumeRole::get_params() const rgw::IAM::Policy p(s->cct, s->user->get_tenant(), bl); } catch (rgw::IAM::PolicyParseException& e) { - ldout(s->cct, 0) << "failed to parse policy: " << e.what() << "policy" << policy << dendl; + ldpp_dout(this, 0) << "failed to parse policy: " << e.what() << "policy" << policy << dendl; return -ERR_MALFORMED_DOC; } } @@ -563,7 +563,7 @@ void RGWSTSAssumeRole::execute(optional_yield y) STS::AssumeRoleRequest req(s->cct, duration, externalId, policy, roleArn, roleSessionName, serialNumber, tokenCode); - STS::AssumeRoleResponse response = sts.assumeRole(req, y); + STS::AssumeRoleResponse response = sts.assumeRole(s, req, y); op_ret = std::move(response.retCode); //Dump the output if (op_ret == 0) { @@ -592,7 +592,7 @@ int RGW_Auth_STS::authorize(const DoutPrefixProvider *dpp, void RGWHandler_REST_STS::rgw_sts_parse_input() { if (post_body.size() > 0) { - ldout(s->cct, 10) << "Content of POST: " << post_body << dendl; + ldpp_dout(s, 10) << "Content of POST: " << post_body << dendl; if (post_body.find("Action") != string::npos) { boost::char_separator sep("&"); @@ -635,7 +635,7 @@ int RGWHandler_REST_STS::init(rgw::sal::RGWRadosStore *store, s->dialect = "sts"; if (int ret = RGWHandler_REST_STS::init_from_header(s, RGW_FORMAT_XML, true); ret < 0) { - ldout(s->cct, 10) << "init_from_header returned err=" << ret << dendl; + ldpp_dout(s, 10) << "init_from_header returned err=" << ret << dendl; return ret; } @@ -667,7 +667,7 @@ int RGWHandler_REST_STS::init_from_header(struct req_state* s, } s->info.args.set(p); - s->info.args.parse(); + s->info.args.parse(s); /* must be called after the args parsing */ if (int ret = allocate_formatter(s, default_formatter, configurable_format); ret < 0) diff --git a/src/rgw/rgw_rest_sts.h b/src/rgw/rgw_rest_sts.h index 5ef1fa82adc08..810ff242d734f 100644 --- a/src/rgw/rgw_rest_sts.h +++ b/src/rgw/rgw_rest_sts.h @@ -29,7 +29,7 @@ class WebTokenEngine : public rgw::auth::Engine { bool is_cert_valid(const vector& thumbprints, const string& cert) const; - boost::optional get_provider(const string& role_arn, const string& iss) const; + boost::optional get_provider(const DoutPrefixProvider *dpp, const string& role_arn, const string& iss) const; std::string 
get_role_tenant(const string& role_arn) const; diff --git a/src/rgw/rgw_rest_swift.cc b/src/rgw/rgw_rest_swift.cc index 0a8d6cdf99325..be4050ee84687 100644 --- a/src/rgw/rgw_rest_swift.cc +++ b/src/rgw/rgw_rest_swift.cc @@ -545,7 +545,7 @@ static void dump_container_metadata(struct req_state *s, void RGWStatAccount_ObjStore_SWIFT::execute(optional_yield y) { RGWStatAccount_ObjStore::execute(y); - op_ret = store->ctl()->user->get_attrs_by_uid(s->user->get_id(), &attrs, s->yield); + op_ret = store->ctl()->user->get_attrs_by_uid(s, s->user->get_id(), &attrs, s->yield); } void RGWStatAccount_ObjStore_SWIFT::send_response() @@ -598,7 +598,7 @@ static int get_swift_container_settings(req_state * const s, if (read_list || write_list) { RGWAccessControlPolicy_SWIFT swift_policy(s->cct); - const auto r = swift_policy.create(store->ctl()->user, + const auto r = swift_policy.create(s, store->ctl()->user, s->user->get_id(), s->user->get_display_name(), read_list, @@ -623,7 +623,7 @@ static int get_swift_container_settings(req_state * const s, RGWCORSConfiguration_SWIFT *swift_cors = new RGWCORSConfiguration_SWIFT; int r = swift_cors->create_update(allow_origins, allow_headers, expose_headers, max_age); if (r < 0) { - dout(0) << "Error creating/updating the cors configuration" << dendl; + ldpp_dout(s, 0) << "Error creating/updating the cors configuration" << dendl; delete swift_cors; return r; } @@ -851,7 +851,7 @@ int RGWPutObj_ObjStore_SWIFT::update_slo_segment_size(rgw_slo_entry& entry) { map bucket_attrs; r = store->getRados()->get_bucket_info(store->svc(), s->user->get_id().tenant, bucket_name, bucket_info, nullptr, - s->yield, &bucket_attrs); + s->yield, s, &bucket_attrs); if (r < 0) { ldpp_dout(this, 0) << "could not get bucket info for bucket=" << bucket_name << dendl; @@ -881,7 +881,7 @@ int RGWPutObj_ObjStore_SWIFT::update_slo_segment_size(rgw_slo_entry& entry) { read_op.params.attrs = &attrs; read_op.params.obj_size = &size_bytes; - r = read_op.prepare(s->yield); + r = read_op.prepare(s->yield, s); if (r < 0) { return r; } @@ -1060,7 +1060,7 @@ static int get_swift_account_settings(req_state * const s, const char * const acl_attr = s->info.env->get("HTTP_X_ACCOUNT_ACCESS_CONTROL"); if (acl_attr) { RGWAccessControlPolicy_SWIFTAcct swift_acct_policy(s->cct); - const bool r = swift_acct_policy.create(store->ctl()->user, + const bool r = swift_acct_policy.create(s, store->ctl()->user, s->user->get_id(), s->user->get_display_name(), string(acl_attr)); @@ -2093,14 +2093,14 @@ void RGWFormPost::get_owner_info(const req_state* const s, if (uid.tenant.empty()) { const rgw_user tenanted_uid(uid.id, uid.id); - if (user_ctl->get_info_by_uid(tenanted_uid, &uinfo, s->yield) >= 0) { + if (user_ctl->get_info_by_uid(s, tenanted_uid, &uinfo, s->yield) >= 0) { /* Succeeded. 
*/ bucket_tenant = uinfo.user_id.tenant; found = true; } } - if (!found && user_ctl->get_info_by_uid(uid, &uinfo, s->yield) < 0) { + if (!found && user_ctl->get_info_by_uid(s, uid, &uinfo, s->yield) < 0) { throw -EPERM; } else { bucket_tenant = uinfo.user_id.tenant; @@ -2111,7 +2111,7 @@ void RGWFormPost::get_owner_info(const req_state* const s, RGWBucketInfo bucket_info; int ret = store->getRados()->get_bucket_info(store->svc(), bucket_tenant, bucket_name, - bucket_info, nullptr, s->yield); + bucket_info, nullptr, s->yield, s); if (ret < 0) { throw ret; } @@ -2119,7 +2119,7 @@ void RGWFormPost::get_owner_info(const req_state* const s, ldpp_dout(this, 20) << "temp url user (bucket owner): " << bucket_info.owner << dendl; - if (user_ctl->get_info_by_uid(bucket_info.owner, &owner_info, s->yield) < 0) { + if (user_ctl->get_info_by_uid(s, bucket_info.owner, &owner_info, s->yield) < 0) { throw -EPERM; } } @@ -2570,7 +2570,7 @@ bool RGWSwiftWebsiteHandler::is_web_dir() const obj.set_prefetch_data(&obj_ctx); RGWObjState* state = nullptr; - if (obj.get_obj_state(&obj_ctx, *s->bucket, &state, s->yield, false)) { + if (obj.get_obj_state(s, &obj_ctx, *s->bucket, &state, s->yield, false)) { return false; } @@ -2600,7 +2600,7 @@ bool RGWSwiftWebsiteHandler::is_index_present(const std::string& index) const obj.set_prefetch_data(&obj_ctx); RGWObjState* state = nullptr; - if (obj.get_obj_state(&obj_ctx, *s->bucket, &state, s->yield, false)) { + if (obj.get_obj_state(s, &obj_ctx, *s->bucket, &state, s->yield, false)) { return false; } @@ -2813,7 +2813,7 @@ int RGWHandler_REST_SWIFT::postauth_init(optional_yield y) s->object = store->get_object(rgw_obj_key()); } - dout(10) << "s->object=" << + ldpp_dout(s, 10) << "s->object=" << (!s->object->empty() ? s->object->get_key() : rgw_obj_key("")) << " s->bucket=" << rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name) @@ -2929,7 +2929,7 @@ int RGWHandler_REST_SWIFT::init_from_header(rgw::sal::RGWRadosStore* store, } s->info.args.set(p); - s->info.args.parse(); + s->info.args.parse(s); /* Skip the leading slash of URL hierarchy. */ if (req_name[0] != '/') { @@ -3015,7 +3015,7 @@ int RGWHandler_REST_SWIFT::init_from_header(rgw::sal::RGWRadosStore* store, next_tok(req, first, '/'); - dout(10) << "ver=" << ver << " first=" << first << " req=" << req << dendl; + ldpp_dout(s, 10) << "ver=" << ver << " first=" << first << " req=" << req << dendl; if (first.size() == 0) return 0; @@ -3043,7 +3043,7 @@ int RGWHandler_REST_SWIFT::init(rgw::sal::RGWRadosStore* store, struct req_state std::string copy_source = s->info.env->get("HTTP_X_COPY_FROM", ""); if (! 
copy_source.empty()) { rgw_obj_key key; - bool result = RGWCopyObj::parse_copy_location(copy_source, t->src_bucket, key); + bool result = RGWCopyObj::parse_copy_location(copy_source, t->src_bucket, key, s); if (!result) return -ERR_BAD_URL; s->src_object = store->get_object(key); @@ -3060,7 +3060,7 @@ int RGWHandler_REST_SWIFT::init(rgw::sal::RGWRadosStore* store, struct req_state rgw_obj_key dest_obj_key; bool result = RGWCopyObj::parse_copy_location(req_dest, dest_bucket_name, - dest_obj_key); + dest_obj_key, s); if (!result) return -ERR_BAD_URL; diff --git a/src/rgw/rgw_rest_usage.cc b/src/rgw/rgw_rest_usage.cc index e550a412227af..7b8e14bc99d55 100644 --- a/src/rgw/rgw_rest_usage.cc +++ b/src/rgw/rgw_rest_usage.cc @@ -53,7 +53,7 @@ void RGWOp_Usage_Get::execute(optional_yield y) { } } - op_ret = RGWUsage::show(store->getRados(), uid, bucket_name, start, end, show_entries, show_summary, &categories, flusher); + op_ret = RGWUsage::show(this, store->getRados(), uid, bucket_name, start, end, show_entries, show_summary, &categories, flusher); } class RGWOp_Usage_Delete : public RGWRESTOp { @@ -93,7 +93,7 @@ void RGWOp_Usage_Delete::execute(optional_yield y) { } } - op_ret = RGWUsage::trim(store->getRados(), uid, bucket_name, start, end); + op_ret = RGWUsage::trim(this, store->getRados(), uid, bucket_name, start, end); } RGWOp *RGWHandler_Usage::op_get() diff --git a/src/rgw/rgw_rest_user.cc b/src/rgw/rgw_rest_user.cc index 06fd022f11599..1cdce91da70c5 100644 --- a/src/rgw/rgw_rest_user.cc +++ b/src/rgw/rgw_rest_user.cc @@ -42,7 +42,7 @@ void RGWOp_User_List::execute(optional_yield y) op_state.max_entries = max_entries; op_state.marker = marker; - op_ret = RGWUserAdminOp_User::list(store, op_state, flusher); + op_ret = RGWUserAdminOp_User::list(this, store, op_state, flusher); } class RGWOp_User_Info : public RGWRESTOp { @@ -89,7 +89,7 @@ void RGWOp_User_Info::execute(optional_yield y) op_state.set_fetch_stats(fetch_stats); op_state.set_sync_stats(sync_stats); - op_ret = RGWUserAdminOp_User::info(store, op_state, flusher, y); + op_ret = RGWUserAdminOp_User::info(s, store, op_state, flusher, y); } class RGWOp_User_Create : public RGWRESTOp { @@ -151,7 +151,7 @@ void RGWOp_User_Create::execute(optional_yield y) RESTArgs::get_string(s, "placement-tags", placement_tags_str, &placement_tags_str); if (!s->user->get_info().system && system) { - ldout(s->cct, 0) << "cannot set system flag by non-system user" << dendl; + ldpp_dout(this, 0) << "cannot set system flag by non-system user" << dendl; op_ret = -EINVAL; return; } @@ -172,7 +172,7 @@ void RGWOp_User_Create::execute(optional_yield y) uint32_t op_mask; int ret = rgw_parse_op_type_list(op_mask_str, &op_mask); if (ret < 0) { - ldout(s->cct, 0) << "failed to parse op_mask: " << ret << dendl; + ldpp_dout(this, 0) << "failed to parse op_mask: " << ret << dendl; op_ret = -EINVAL; return; } @@ -211,7 +211,7 @@ void RGWOp_User_Create::execute(optional_yield y) rgw_placement_rule target_rule; target_rule.from_str(default_placement_str); if (!store->svc()->zone->get_zone_params().valid_placement(target_rule)) { - ldout(s->cct, 0) << "NOTICE: invalid dest placement: " << target_rule.to_str() << dendl; + ldpp_dout(this, 0) << "NOTICE: invalid dest placement: " << target_rule.to_str() << dendl; op_ret = -EINVAL; return; } @@ -225,12 +225,12 @@ void RGWOp_User_Create::execute(optional_yield y) } bufferlist data; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(s, 
s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; } - op_ret = RGWUserAdminOp_User::create(store, op_state, flusher, y); + op_ret = RGWUserAdminOp_User::create(s, store, op_state, flusher, y); } class RGWOp_User_Modify : public RGWRESTOp { @@ -288,7 +288,7 @@ void RGWOp_User_Modify::execute(optional_yield y) RESTArgs::get_string(s, "placement-tags", placement_tags_str, &placement_tags_str); if (!s->user->get_info().system && system) { - ldout(s->cct, 0) << "cannot set system flag by non-system user" << dendl; + ldpp_dout(this, 0) << "cannot set system flag by non-system user" << dendl; op_ret = -EINVAL; return; } @@ -325,7 +325,7 @@ void RGWOp_User_Modify::execute(optional_yield y) if (!op_mask_str.empty()) { uint32_t op_mask; if (rgw_parse_op_type_list(op_mask_str, &op_mask) < 0) { - ldout(s->cct, 0) << "failed to parse op_mask" << dendl; + ldpp_dout(this, 0) << "failed to parse op_mask" << dendl; op_ret = -EINVAL; return; } @@ -342,7 +342,7 @@ void RGWOp_User_Modify::execute(optional_yield y) uint32_t op_mask; int ret = rgw_parse_op_type_list(op_mask_str, &op_mask); if (ret < 0) { - ldout(s->cct, 0) << "failed to parse op_mask: " << ret << dendl; + ldpp_dout(this, 0) << "failed to parse op_mask: " << ret << dendl; op_ret = -EINVAL; return; } @@ -353,7 +353,7 @@ void RGWOp_User_Modify::execute(optional_yield y) rgw_placement_rule target_rule; target_rule.from_str(default_placement_str); if (!store->svc()->zone->get_zone_params().valid_placement(target_rule)) { - ldout(s->cct, 0) << "NOTICE: invalid dest placement: " << target_rule.to_str() << dendl; + ldpp_dout(this, 0) << "NOTICE: invalid dest placement: " << target_rule.to_str() << dendl; op_ret = -EINVAL; return; } @@ -367,12 +367,12 @@ void RGWOp_User_Modify::execute(optional_yield y) } bufferlist data; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; } - op_ret = RGWUserAdminOp_User::modify(store, op_state, flusher, y); + op_ret = RGWUserAdminOp_User::modify(s, store, op_state, flusher, y); } class RGWOp_User_Remove : public RGWRESTOp { @@ -408,12 +408,12 @@ void RGWOp_User_Remove::execute(optional_yield y) op_state.set_purge_data(purge_data); bufferlist data; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; } - op_ret = RGWUserAdminOp_User::remove(store, op_state, flusher, s->yield); + op_ret = RGWUserAdminOp_User::remove(s, store, op_state, flusher, s->yield); } class RGWOp_Subuser_Create : public RGWRESTOp { @@ -484,12 +484,12 @@ void RGWOp_Subuser_Create::execute(optional_yield y) op_state.set_key_type(key_type); bufferlist data; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; } - op_ret = RGWUserAdminOp_Subuser::create(store, op_state, flusher, y); + op_ret = 
RGWUserAdminOp_Subuser::create(s, store, op_state, flusher, y); } class RGWOp_Subuser_Modify : public RGWRESTOp { @@ -551,12 +551,12 @@ void RGWOp_Subuser_Modify::execute(optional_yield y) op_state.set_key_type(key_type); bufferlist data; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; } - op_ret = RGWUserAdminOp_Subuser::modify(store, op_state, flusher, y); + op_ret = RGWUserAdminOp_Subuser::modify(s, store, op_state, flusher, y); } class RGWOp_Subuser_Remove : public RGWRESTOp { @@ -594,12 +594,12 @@ void RGWOp_Subuser_Remove::execute(optional_yield y) op_state.set_purge_keys(); bufferlist data; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; } - op_ret = RGWUserAdminOp_Subuser::remove(store, op_state, flusher, y); + op_ret = RGWUserAdminOp_Subuser::remove(s, store, op_state, flusher, y); } class RGWOp_Key_Create : public RGWRESTOp { @@ -655,7 +655,7 @@ void RGWOp_Key_Create::execute(optional_yield y) op_state.set_key_type(key_type); } - op_ret = RGWUserAdminOp_Key::create(store, op_state, flusher, y); + op_ret = RGWUserAdminOp_Key::create(s, store, op_state, flusher, y); } class RGWOp_Key_Remove : public RGWRESTOp { @@ -702,7 +702,7 @@ void RGWOp_Key_Remove::execute(optional_yield y) op_state.set_key_type(key_type); } - op_ret = RGWUserAdminOp_Key::remove(store, op_state, flusher, y); + op_ret = RGWUserAdminOp_Key::remove(s, store, op_state, flusher, y); } class RGWOp_Caps_Add : public RGWRESTOp { @@ -735,12 +735,12 @@ void RGWOp_Caps_Add::execute(optional_yield y) op_state.set_caps(caps); bufferlist data; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; } - op_ret = RGWUserAdminOp_Caps::add(store, op_state, flusher, y); + op_ret = RGWUserAdminOp_Caps::add(s, store, op_state, flusher, y); } class RGWOp_Caps_Remove : public RGWRESTOp { @@ -773,12 +773,12 @@ void RGWOp_Caps_Remove::execute(optional_yield y) op_state.set_caps(caps); bufferlist data; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; } - op_ret = RGWUserAdminOp_Caps::remove(store, op_state, flusher, y); + op_ret = RGWUserAdminOp_Caps::remove(s, store, op_state, flusher, y); } struct UserQuotas { @@ -844,7 +844,7 @@ void RGWOp_Quota_Info::execute(optional_yield y) op_state.set_user_id(uid); RGWUser user; - op_ret = user.init(store, op_state, y); + op_ret = user.init(s, store, op_state, y); if (op_ret < 0) return; @@ -956,7 +956,7 @@ void RGWOp_Quota_Set::execute(optional_yield y) bool set_user = set_all || (quota_type == "user"); if (!(set_all || set_bucket || set_user)) { - ldout(store->ctx(), 20) << "invalid quota 
type" << dendl; + ldpp_dout(this, 20) << "invalid quota type" << dendl; op_ret = -EINVAL; return; } @@ -971,7 +971,7 @@ void RGWOp_Quota_Set::execute(optional_yield y) } if (use_http_params && set_all) { - ldout(store->ctx(), 20) << "quota type was not specified, can't set all quotas via http headers" << dendl; + ldpp_dout(this, 20) << "quota type was not specified, can't set all quotas via http headers" << dendl; op_ret = -EINVAL; return; } @@ -979,9 +979,9 @@ void RGWOp_Quota_Set::execute(optional_yield y) op_state.set_user_id(uid); RGWUser user; - op_ret = user.init(store, op_state, y); + op_ret = user.init(s, store, op_state, y); if (op_ret < 0) { - ldout(store->ctx(), 20) << "failed initializing user info: " << op_ret << dendl; + ldpp_dout(this, 20) << "failed initializing user info: " << op_ret << dendl; return; } @@ -995,7 +995,7 @@ void RGWOp_Quota_Set::execute(optional_yield y) UserQuotas quotas; if ((op_ret = rgw_rest_get_json_input(store->ctx(), s, quotas, QUOTA_INPUT_MAX_LEN, NULL)) < 0) { - ldout(store->ctx(), 20) << "failed to retrieve input" << dendl; + ldpp_dout(this, 20) << "failed to retrieve input" << dendl; return; } @@ -1008,7 +1008,7 @@ void RGWOp_Quota_Set::execute(optional_yield y) bool empty; op_ret = rgw_rest_get_json_input(store->ctx(), s, quota, QUOTA_INPUT_MAX_LEN, &empty); if (op_ret < 0) { - ldout(store->ctx(), 20) << "failed to retrieve input" << dendl; + ldpp_dout(this, 20) << "failed to retrieve input" << dendl; if (!empty) return; @@ -1022,7 +1022,7 @@ void RGWOp_Quota_Set::execute(optional_yield y) string err_msg; op_ret = user.info(info, &err_msg); if (op_ret < 0) { - ldout(store->ctx(), 20) << "failed to get user info: " << op_ret << dendl; + ldpp_dout(this, 20) << "failed to get user info: " << op_ret << dendl; return; } RGWQuotaInfo *old_quota; @@ -1051,9 +1051,9 @@ void RGWOp_Quota_Set::execute(optional_yield y) } string err; - op_ret = user.modify(op_state, y, &err); + op_ret = user.modify(s, op_state, y, &err); if (op_ret < 0) { - ldout(store->ctx(), 20) << "failed updating user info: " << op_ret << ": " << err << dendl; + ldpp_dout(this, 20) << "failed updating user info: " << op_ret << ": " << err << dendl; return; } } diff --git a/src/rgw/rgw_rest_user_policy.cc b/src/rgw/rgw_rest_user_policy.cc index 7c8429bdcb284..dca93cf5c0009 100644 --- a/src/rgw/rgw_rest_user_policy.cc +++ b/src/rgw/rgw_rest_user_policy.cc @@ -62,13 +62,13 @@ int RGWRestUserPolicy::verify_permission(optional_yield y) bool RGWRestUserPolicy::validate_input() { if (policy_name.length() > MAX_POLICY_NAME_LEN) { - ldout(s->cct, 0) << "ERROR: Invalid policy name length " << dendl; + ldpp_dout(this, 0) << "ERROR: Invalid policy name length " << dendl; return false; } std::regex regex_policy_name("[A-Za-z0-9:=,.@-]+"); if (! 
std::regex_match(policy_name, regex_policy_name)) { - ldout(s->cct, 0) << "ERROR: Invalid chars in policy name " << dendl; + ldpp_dout(this, 0) << "ERROR: Invalid chars in policy name " << dendl; return false; } @@ -97,7 +97,7 @@ int RGWPutUserPolicy::get_params() policy = url_decode(s->info.args.get("PolicyDocument"), true); if (policy_name.empty() || user_name.empty() || policy.empty()) { - ldout(s->cct, 20) << "ERROR: one of policy name, user name or policy document is empty" + ldpp_dout(this, 20) << "ERROR: one of policy name, user name or policy document is empty" << dendl; return -EINVAL; } @@ -120,21 +120,21 @@ void RGWPutUserPolicy::execute(optional_yield y) RGWUserInfo info; rgw_user user_id(user_name); - op_ret = store->ctl()->user->get_info_by_uid(user_id, &info, s->yield); + op_ret = store->ctl()->user->get_info_by_uid(s, user_id, &info, s->yield); if (op_ret < 0) { op_ret = -ERR_NO_SUCH_ENTITY; return; } map uattrs; - op_ret = store->ctl()->user->get_attrs_by_uid(user_id, &uattrs, s->yield); + op_ret = store->ctl()->user->get_attrs_by_uid(s, user_id, &uattrs, s->yield); if (op_ret == -ENOENT) { op_ret = -ERR_NO_SUCH_ENTITY; return; } ceph::bufferlist in_data; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "ERROR: forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -153,7 +153,7 @@ void RGWPutUserPolicy::execute(optional_yield y) uattrs[RGW_ATTR_USER_POLICY] = in_bl; RGWObjVersionTracker objv_tracker; - op_ret = store->ctl()->user->store_info(info, s->yield, + op_ret = store->ctl()->user->store_info(s, info, s->yield, RGWUserCtl::PutParams() .set_objv_tracker(&objv_tracker) .set_attrs(&uattrs)); @@ -161,7 +161,7 @@ void RGWPutUserPolicy::execute(optional_yield y) op_ret = -ERR_INTERNAL_ERROR; } } catch (rgw::IAM::PolicyParseException& e) { - ldout(s->cct, 20) << "failed to parse policy: " << e.what() << dendl; + ldpp_dout(this, 20) << "failed to parse policy: " << e.what() << dendl; op_ret = -ERR_MALFORMED_DOC; } @@ -185,7 +185,7 @@ int RGWGetUserPolicy::get_params() user_name = s->info.args.get("UserName"); if (policy_name.empty() || user_name.empty()) { - ldout(s->cct, 20) << "ERROR: one of policy name or user name is empty" + ldpp_dout(this, 20) << "ERROR: one of policy name or user name is empty" << dendl; return -EINVAL; } @@ -202,9 +202,9 @@ void RGWGetUserPolicy::execute(optional_yield y) rgw_user user_id(user_name); map uattrs; - op_ret = store->ctl()->user->get_attrs_by_uid(user_id, &uattrs, s->yield); + op_ret = store->ctl()->user->get_attrs_by_uid(s, user_id, &uattrs, s->yield); if (op_ret == -ENOENT) { - ldout(s->cct, 0) << "ERROR: attrs not found for user" << user_name << dendl; + ldpp_dout(this, 0) << "ERROR: attrs not found for user" << user_name << dendl; op_ret = -ERR_NO_SUCH_ENTITY; return; } @@ -223,12 +223,12 @@ void RGWGetUserPolicy::execute(optional_yield y) policy = policies[policy_name]; dump(s->formatter); } else { - ldout(s->cct, 0) << "ERROR: policy not found" << policy << dendl; + ldpp_dout(this, 0) << "ERROR: policy not found" << policy << dendl; op_ret = -ERR_NO_SUCH_ENTITY; return; } } else { - ldout(s->cct, 0) << "ERROR: RGW_ATTR_USER_POLICY not found" << dendl; + ldpp_dout(this, 0) << "ERROR: RGW_ATTR_USER_POLICY not found" << dendl; op_ret = -ERR_NO_SUCH_ENTITY; return; } @@ -250,7 +250,7 @@ int RGWListUserPolicies::get_params() 
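
The hunks above convert rgw_rest_user.cc and rgw_rest_user_policy.cc from ldout(s->cct, N) / ldout(store->ctx(), N) to ldpp_dout(this, N), so each log line carries the emitting op's prefix instead of only the global CephContext. The following is a minimal, self-contained sketch of that pattern; PrefixProvider, LDPP_DOUT and PutUserPolicyOp are invented stand-ins for DoutPrefixProvider, ldpp_dout() and the RGW op classes (the real interface lives in src/common/dout.h), not the actual Ceph code:

    // Hypothetical stand-ins illustrating the ldout -> ldpp_dout pattern.
    #include <iostream>
    #include <string>

    // Analogue of DoutPrefixProvider: anything that can describe itself for logging.
    struct PrefixProvider {
      virtual ~PrefixProvider() = default;
      virtual std::ostream& gen_prefix(std::ostream& out) const = 0;
    };

    // Analogue of ldpp_dout(dpp, level): the provider writes its prefix first,
    // so every line can be tied back to the op/request that emitted it.
    #define LDPP_DOUT(dpp, level) ((dpp)->gen_prefix(std::cerr) << (level) << " ")

    // A toy op modelled loosely on RGWPutUserPolicy: it *is* the prefix provider.
    class PutUserPolicyOp : public PrefixProvider {
      std::string req_id;
     public:
      explicit PutUserPolicyOp(std::string id) : req_id(std::move(id)) {}
      std::ostream& gen_prefix(std::ostream& out) const override {
        return out << "req " << req_id << " put_user_policy: ";
      }
      void execute() {
        // Before: ldout(s->cct, 20) << ...   -- no request context in the prefix.
        // After:  ldpp_dout(this, 20) << ... -- the op identifies itself.
        LDPP_DOUT(this, 20) << "ERROR: one of policy name, user name or policy "
                               "document is empty\n";
      }
    };

    int main() {
      PutUserPolicyOp op("42");
      op.execute();  // prints: "req 42 put_user_policy: 20 ERROR: ..."
    }

Because the REST op classes implement the provider interface, the converted call sites can simply pass `this`, which is exactly what the replacements above do.
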
user_name = s->info.args.get("UserName"); if (user_name.empty()) { - ldout(s->cct, 20) << "ERROR: user name is empty" << dendl; + ldpp_dout(this, 20) << "ERROR: user name is empty" << dendl; return -EINVAL; } @@ -266,9 +266,9 @@ void RGWListUserPolicies::execute(optional_yield y) rgw_user user_id(user_name); map uattrs; - op_ret = store->ctl()->user->get_attrs_by_uid(user_id, &uattrs, s->yield); + op_ret = store->ctl()->user->get_attrs_by_uid(s, user_id, &uattrs, s->yield); if (op_ret == -ENOENT) { - ldout(s->cct, 0) << "ERROR: attrs not found for user" << user_name << dendl; + ldpp_dout(this, 0) << "ERROR: attrs not found for user" << user_name << dendl; op_ret = -ERR_NO_SUCH_ENTITY; return; } @@ -291,7 +291,7 @@ void RGWListUserPolicies::execute(optional_yield y) s->formatter->close_section(); s->formatter->close_section(); } else { - ldout(s->cct, 0) << "ERROR: RGW_ATTR_USER_POLICY not found" << dendl; + ldpp_dout(this, 0) << "ERROR: RGW_ATTR_USER_POLICY not found" << dendl; op_ret = -ERR_NO_SUCH_ENTITY; return; } @@ -312,7 +312,7 @@ int RGWDeleteUserPolicy::get_params() user_name = s->info.args.get("UserName"); if (policy_name.empty() || user_name.empty()) { - ldout(s->cct, 20) << "ERROR: One of policy name or user name is empty"<< dendl; + ldpp_dout(this, 20) << "ERROR: One of policy name or user name is empty"<< dendl; return -EINVAL; } @@ -329,7 +329,7 @@ void RGWDeleteUserPolicy::execute(optional_yield y) RGWUserInfo info; map uattrs; rgw_user user_id(user_name); - op_ret = store->ctl()->user->get_info_by_uid(user_id, &info, s->yield, + op_ret = store->ctl()->user->get_info_by_uid(s, user_id, &info, s->yield, RGWUserCtl::GetParams() .set_attrs(&uattrs)); if (op_ret < 0) { @@ -338,7 +338,7 @@ void RGWDeleteUserPolicy::execute(optional_yield y) } ceph::bufferlist in_data; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { // a policy might've been uploaded to this site when there was no sync // req. 
in earlier releases, proceed deletion @@ -361,7 +361,7 @@ void RGWDeleteUserPolicy::execute(optional_yield y) uattrs[RGW_ATTR_USER_POLICY] = in_bl; RGWObjVersionTracker objv_tracker; - op_ret = store->ctl()->user->store_info(info, s->yield, + op_ret = store->ctl()->user->store_info(s, info, s->yield, RGWUserCtl::PutParams() .set_old_info(&info) .set_objv_tracker(&objv_tracker) diff --git a/src/rgw/rgw_role.cc b/src/rgw/rgw_role.cc index 9ed173131d559..f69134174fe65 100644 --- a/src/rgw/rgw_role.cc +++ b/src/rgw/rgw_role.cc @@ -30,7 +30,7 @@ const string RGWRole::role_oid_prefix = "roles."; const string RGWRole::role_path_oid_prefix = "role_paths."; const string RGWRole::role_arn_prefix = "arn:aws:iam::"; -int RGWRole::store_info(bool exclusive, optional_yield y) +int RGWRole::store_info(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y) { using ceph::encode; string oid = get_info_oid_prefix() + id; @@ -41,11 +41,11 @@ int RGWRole::store_info(bool exclusive, optional_yield y) auto svc = ctl->svc; auto obj_ctx = ctl->svc->sysobj->init_obj_ctx(); - return rgw_put_system_obj(obj_ctx, svc->zone->get_zone_params().roles_pool, oid, + return rgw_put_system_obj(dpp, obj_ctx, svc->zone->get_zone_params().roles_pool, oid, bl, exclusive, NULL, real_time(), y, NULL); } -int RGWRole::store_name(bool exclusive, optional_yield y) +int RGWRole::store_name(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y) { RGWNameToId nameToId; nameToId.obj_id = id; @@ -59,11 +59,11 @@ int RGWRole::store_name(bool exclusive, optional_yield y) auto svc = ctl->svc; auto obj_ctx = svc->sysobj->init_obj_ctx(); - return rgw_put_system_obj(obj_ctx, svc->zone->get_zone_params().roles_pool, oid, + return rgw_put_system_obj(dpp, obj_ctx, svc->zone->get_zone_params().roles_pool, oid, bl, exclusive, NULL, real_time(), y, NULL); } -int RGWRole::store_path(bool exclusive, optional_yield y) +int RGWRole::store_path(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y) { string oid = tenant + get_path_oid_prefix() + path + get_info_oid_prefix() + id; @@ -71,11 +71,11 @@ int RGWRole::store_path(bool exclusive, optional_yield y) bufferlist bl; auto obj_ctx = svc->sysobj->init_obj_ctx(); - return rgw_put_system_obj(obj_ctx, svc->zone->get_zone_params().roles_pool, oid, + return rgw_put_system_obj(dpp, obj_ctx, svc->zone->get_zone_params().roles_pool, oid, bl, exclusive, NULL, real_time(), y, NULL); } -int RGWRole::create(bool exclusive, optional_yield y) +int RGWRole::create(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y) { int ret; @@ -84,13 +84,13 @@ int RGWRole::create(bool exclusive, optional_yield y) } /* check to see the name is not used */ - ret = read_id(name, tenant, id, y); + ret = read_id(dpp, name, tenant, id, y); if (exclusive && ret == 0) { - ldout(cct, 0) << "ERROR: name " << name << " already in use for role id " + ldpp_dout(dpp, 0) << "ERROR: name " << name << " already in use for role id " << id << dendl; return -EEXIST; } else if ( ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << "failed reading role id " << id << ": " + ldpp_dout(dpp, 0) << "failed reading role id " << id << ": " << cpp_strerror(-ret) << dendl; return ret; } @@ -121,44 +121,44 @@ int RGWRole::create(bool exclusive, optional_yield y) auto svc = ctl->svc; auto& pool = svc->zone->get_zone_params().roles_pool; - ret = store_info(exclusive, y); + ret = store_info(dpp, exclusive, y); if (ret < 0) { - ldout(cct, 0) << "ERROR: storing role info in pool: " << pool.name << ": " + ldpp_dout(dpp, 0) << "ERROR: 
storing role info in pool: " << pool.name << ": " << id << ": " << cpp_strerror(-ret) << dendl; return ret; } - ret = store_name(exclusive, y); + ret = store_name(dpp, exclusive, y); if (ret < 0) { - ldout(cct, 0) << "ERROR: storing role name in pool: " << pool.name << ": " + ldpp_dout(dpp, 0) << "ERROR: storing role name in pool: " << pool.name << ": " << name << ": " << cpp_strerror(-ret) << dendl; //Delete the role info that was stored in the previous call string oid = get_info_oid_prefix() + id; - int info_ret = rgw_delete_system_obj(svc->sysobj, pool, oid, NULL, y); + int info_ret = rgw_delete_system_obj(dpp, svc->sysobj, pool, oid, NULL, y); if (info_ret < 0) { - ldout(cct, 0) << "ERROR: cleanup of role id from pool: " << pool.name << ": " + ldpp_dout(dpp, 0) << "ERROR: cleanup of role id from pool: " << pool.name << ": " << id << ": " << cpp_strerror(-info_ret) << dendl; } return ret; } - ret = store_path(exclusive, y); + ret = store_path(dpp, exclusive, y); if (ret < 0) { - ldout(cct, 0) << "ERROR: storing role path in pool: " << pool.name << ": " + ldpp_dout(dpp, 0) << "ERROR: storing role path in pool: " << pool.name << ": " << path << ": " << cpp_strerror(-ret) << dendl; //Delete the role info that was stored in the previous call string oid = get_info_oid_prefix() + id; - int info_ret = rgw_delete_system_obj(svc->sysobj, pool, oid, NULL, y); + int info_ret = rgw_delete_system_obj(dpp, svc->sysobj, pool, oid, NULL, y); if (info_ret < 0) { - ldout(cct, 0) << "ERROR: cleanup of role id from pool: " << pool.name << ": " + ldpp_dout(dpp, 0) << "ERROR: cleanup of role id from pool: " << pool.name << ": " << id << ": " << cpp_strerror(-info_ret) << dendl; } //Delete role name that was stored in previous call oid = tenant + get_names_oid_prefix() + name; - int name_ret = rgw_delete_system_obj(svc->sysobj, pool, oid, NULL, y); + int name_ret = rgw_delete_system_obj(dpp, svc->sysobj, pool, oid, NULL, y); if (name_ret < 0) { - ldout(cct, 0) << "ERROR: cleanup of role name from pool: " << pool.name << ": " + ldpp_dout(dpp, 0) << "ERROR: cleanup of role name from pool: " << pool.name << ": " << name << ": " << cpp_strerror(-name_ret) << dendl; } return ret; @@ -166,17 +166,17 @@ int RGWRole::create(bool exclusive, optional_yield y) return 0; } -int RGWRole::delete_obj(optional_yield y) +int RGWRole::delete_obj(const DoutPrefixProvider *dpp, optional_yield y) { auto svc = ctl->svc; auto& pool = svc->zone->get_zone_params().roles_pool; - int ret = read_name(y); + int ret = read_name(dpp, y); if (ret < 0) { return ret; } - ret = read_info(y); + ret = read_info(dpp, y); if (ret < 0) { return ret; } @@ -187,38 +187,38 @@ int RGWRole::delete_obj(optional_yield y) // Delete id string oid = get_info_oid_prefix() + id; - ret = rgw_delete_system_obj(svc->sysobj, pool, oid, NULL, y); + ret = rgw_delete_system_obj(dpp, svc->sysobj, pool, oid, NULL, y); if (ret < 0) { - ldout(cct, 0) << "ERROR: deleting role id from pool: " << pool.name << ": " + ldpp_dout(dpp, 0) << "ERROR: deleting role id from pool: " << pool.name << ": " << id << ": " << cpp_strerror(-ret) << dendl; } // Delete name oid = tenant + get_names_oid_prefix() + name; - ret = rgw_delete_system_obj(svc->sysobj, pool, oid, NULL, y); + ret = rgw_delete_system_obj(dpp, svc->sysobj, pool, oid, NULL, y); if (ret < 0) { - ldout(cct, 0) << "ERROR: deleting role name from pool: " << pool.name << ": " + ldpp_dout(dpp, 0) << "ERROR: deleting role name from pool: " << pool.name << ": " << name << ": " << cpp_strerror(-ret) << dendl; } // Delete path 
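
The RGWRole hunks thread a `const DoutPrefixProvider *dpp` argument through store_info()/store_name()/store_path() and on into rgw_put_system_obj()/rgw_delete_system_obj(), so the rollback paths in create() and the deletions in delete_obj() log with the caller's prefix. The sketch below mirrors that threading-plus-rollback shape with invented names (Logger, put_obj, delete_obj, create_role) and simulated failures; it is an illustration of the pattern under those assumptions, not the RGW implementation:

    // Hypothetical sketch: pass the caller's prefix provider down so helper
    // and rollback paths log in the caller's context.  All names are invented.
    #include <iostream>
    #include <string>

    struct Logger {                       // stand-in for const DoutPrefixProvider*
      std::string prefix;
      std::ostream& out(int level) const {
        return std::cerr << prefix << " " << level << " ";
      }
    };

    static bool put_obj(const Logger* dpp, const std::string& oid) {
      dpp->out(20) << "writing " << oid << "\n";
      return oid != "role_paths.bad";     // simulate a failure on the path object
    }

    static void delete_obj(const Logger* dpp, const std::string& oid) {
      dpp->out(20) << "cleanup: deleting " << oid << "\n";
    }

    // Mirrors the store_info -> store_name -> store_path sequence with rollback.
    static int create_role(const Logger* dpp, const std::string& id,
                           const std::string& name, const std::string& path) {
      if (!put_obj(dpp, "roles." + id)) return -1;
      if (!put_obj(dpp, "role_names." + name)) {
        delete_obj(dpp, "roles." + id);           // undo the info object
        return -1;
      }
      if (!put_obj(dpp, "role_paths." + path)) {
        delete_obj(dpp, "roles." + id);           // undo the info object
        delete_obj(dpp, "role_names." + name);    // and the name object
        return -1;
      }
      return 0;
    }

    int main() {
      Logger dpp{"rgw-admin role-create:"};
      int r = create_role(&dpp, "1234", "S3Access", "bad");
      dpp.out(0) << "create_role returned " << r << "\n";
      return 0;
    }
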
oid = tenant + get_path_oid_prefix() + path + get_info_oid_prefix() + id; - ret = rgw_delete_system_obj(svc->sysobj, pool, oid, NULL, y); + ret = rgw_delete_system_obj(dpp, svc->sysobj, pool, oid, NULL, y); if (ret < 0) { - ldout(cct, 0) << "ERROR: deleting role path from pool: " << pool.name << ": " + ldpp_dout(dpp, 0) << "ERROR: deleting role path from pool: " << pool.name << ": " << path << ": " << cpp_strerror(-ret) << dendl; } return ret; } -int RGWRole::get(optional_yield y) +int RGWRole::get(const DoutPrefixProvider *dpp, optional_yield y) { - int ret = read_name(y); + int ret = read_name(dpp, y); if (ret < 0) { return ret; } - ret = read_info(y); + ret = read_info(dpp, y); if (ret < 0) { return ret; } @@ -226,9 +226,9 @@ int RGWRole::get(optional_yield y) return 0; } -int RGWRole::get_by_id(optional_yield y) +int RGWRole::get_by_id(const DoutPrefixProvider *dpp, optional_yield y) { - int ret = read_info(y); + int ret = read_info(dpp, y); if (ret < 0) { return ret; } @@ -236,13 +236,13 @@ int RGWRole::get_by_id(optional_yield y) return 0; } -int RGWRole::update(optional_yield y) +int RGWRole::update(const DoutPrefixProvider *dpp, optional_yield y) { auto& pool = ctl->svc->zone->get_zone_params().roles_pool; - int ret = store_info(false, y); + int ret = store_info(dpp, false, y); if (ret < 0) { - ldout(cct, 0) << "ERROR: storing info in pool: " << pool.name << ": " + ldpp_dout(dpp, 0) << "ERROR: storing info in pool: " << pool.name << ": " << id << ": " << cpp_strerror(-ret) << dendl; return ret; } @@ -312,7 +312,7 @@ void RGWRole::decode_json(JSONObj *obj) JSONDecoder::decode_json("assume_role_policy_document", trust_policy, obj); } -int RGWRole::read_id(const string& role_name, const string& tenant, string& role_id, optional_yield y) +int RGWRole::read_id(const DoutPrefixProvider *dpp, const string& role_name, const string& tenant, string& role_id, optional_yield y) { auto svc = ctl->svc; auto& pool = svc->zone->get_zone_params().roles_pool; @@ -320,7 +320,7 @@ int RGWRole::read_id(const string& role_name, const string& tenant, string& role bufferlist bl; auto obj_ctx = svc->sysobj->init_obj_ctx(); - int ret = rgw_get_system_obj(obj_ctx, pool, oid, bl, NULL, NULL, y); + int ret = rgw_get_system_obj(obj_ctx, pool, oid, bl, NULL, NULL, y, dpp); if (ret < 0) { return ret; } @@ -331,7 +331,7 @@ int RGWRole::read_id(const string& role_name, const string& tenant, string& role using ceph::decode; decode(nameToId, iter); } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: failed to decode role from pool: " << pool.name << ": " + ldpp_dout(dpp, 0) << "ERROR: failed to decode role from pool: " << pool.name << ": " << role_name << dendl; return -EIO; } @@ -339,7 +339,7 @@ int RGWRole::read_id(const string& role_name, const string& tenant, string& role return 0; } -int RGWRole::read_info(optional_yield y) +int RGWRole::read_info(const DoutPrefixProvider *dpp, optional_yield y) { auto svc = ctl->svc; auto& pool = svc->zone->get_zone_params().roles_pool; @@ -347,9 +347,9 @@ int RGWRole::read_info(optional_yield y) bufferlist bl; auto obj_ctx = svc->sysobj->init_obj_ctx(); - int ret = rgw_get_system_obj(obj_ctx, pool, oid, bl, NULL, NULL, y); + int ret = rgw_get_system_obj(obj_ctx, pool, oid, bl, NULL, NULL, y, dpp); if (ret < 0) { - ldout(cct, 0) << "ERROR: failed reading role info from pool: " << pool.name << + ldpp_dout(dpp, 0) << "ERROR: failed reading role info from pool: " << pool.name << ": " << id << ": " << cpp_strerror(-ret) << dendl; return ret; } @@ -359,7 +359,7 @@ int 
RGWRole::read_info(optional_yield y) auto iter = bl.cbegin(); decode(*this, iter); } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: failed to decode role info from pool: " << pool.name << + ldpp_dout(dpp, 0) << "ERROR: failed to decode role info from pool: " << pool.name << ": " << id << dendl; return -EIO; } @@ -367,7 +367,7 @@ int RGWRole::read_info(optional_yield y) return 0; } -int RGWRole::read_name(optional_yield y) +int RGWRole::read_name(const DoutPrefixProvider *dpp, optional_yield y) { auto svc = ctl->svc; auto& pool = svc->zone->get_zone_params().roles_pool; @@ -375,9 +375,9 @@ int RGWRole::read_name(optional_yield y) bufferlist bl; auto obj_ctx = svc->sysobj->init_obj_ctx(); - int ret = rgw_get_system_obj(obj_ctx, pool, oid, bl, NULL, NULL, y); + int ret = rgw_get_system_obj(obj_ctx, pool, oid, bl, NULL, NULL, y, dpp); if (ret < 0) { - ldout(cct, 0) << "ERROR: failed reading role name from pool: " << pool.name << ": " + ldpp_dout(dpp, 0) << "ERROR: failed reading role name from pool: " << pool.name << ": " << name << ": " << cpp_strerror(-ret) << dendl; return ret; } @@ -388,7 +388,7 @@ int RGWRole::read_name(optional_yield y) auto iter = bl.cbegin(); decode(nameToId, iter); } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: failed to decode role name from pool: " << pool.name << ": " + ldpp_dout(dpp, 0) << "ERROR: failed to decode role name from pool: " << pool.name << ": " << name << dendl; return -EIO; } @@ -442,7 +442,8 @@ void RGWRole::update_trust_policy(string& trust_policy) this->trust_policy = trust_policy; } -int RGWRole::get_roles_by_path_prefix(RGWRados *store, +int RGWRole::get_roles_by_path_prefix(const DoutPrefixProvider *dpp, + RGWRados *store, CephContext *cct, const string& path_prefix, const string& tenant, @@ -465,9 +466,9 @@ int RGWRole::get_roles_by_path_prefix(RGWRados *store, RGWListRawObjsCtx ctx; do { list oids; - int r = store->list_raw_objects(pool, prefix, 1000, ctx, oids, &is_truncated); + int r = store->list_raw_objects(dpp, pool, prefix, 1000, ctx, oids, &is_truncated); if (r < 0) { - ldout(cct, 0) << "ERROR: listing filtered objects failed: " << pool.name << ": " + ldpp_dout(dpp, 0) << "ERROR: listing filtered objects failed: " << pool.name << ": " << prefix << ": " << cpp_strerror(-r) << dendl; return r; } @@ -493,7 +494,7 @@ int RGWRole::get_roles_by_path_prefix(RGWRados *store, RGWRole role(cct, store->pctl); role.set_id(id); - int ret = role.read_info(y); + int ret = role.read_info(dpp, y); if (ret < 0) { return ret; } diff --git a/src/rgw/rgw_role.h b/src/rgw/rgw_role.h index d72c5ecba4707..91b4846d00604 100644 --- a/src/rgw/rgw_role.h +++ b/src/rgw/rgw_role.h @@ -39,12 +39,12 @@ class RGWRole string tenant; uint64_t max_session_duration; - int store_info(bool exclusive, optional_yield y); - int store_name(bool exclusive, optional_yield y); - int store_path(bool exclusive, optional_yield y); - int read_id(const string& role_name, const string& tenant, string& role_id, optional_yield y); - int read_name(optional_yield y); - int read_info(optional_yield y); + int store_info(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y); + int store_name(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y); + int store_path(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y); + int read_id(const DoutPrefixProvider *dpp, const string& role_name, const string& tenant, string& role_id, optional_yield y); + int read_name(const DoutPrefixProvider *dpp, optional_yield y); + int read_info(const 
DoutPrefixProvider *dpp, optional_yield y); bool validate_input(); void extract_name_tenant(const std::string& str); @@ -141,11 +141,11 @@ public: void set_id(const string& id) { this->id = id; } - int create(bool exclusive, optional_yield y); - int delete_obj(optional_yield y); - int get(optional_yield y); - int get_by_id(optional_yield y); - int update(optional_yield y); + int create(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y); + int delete_obj(const DoutPrefixProvider *dpp, optional_yield y); + int get(const DoutPrefixProvider *dpp, optional_yield y); + int get_by_id(const DoutPrefixProvider *dpp, optional_yield y); + int update(const DoutPrefixProvider *dpp, optional_yield y); void update_trust_policy(string& trust_policy); void set_perm_policy(const string& policy_name, const string& perm_policy); vector get_role_policy_names(); @@ -157,7 +157,8 @@ public: static const string& get_names_oid_prefix(); static const string& get_info_oid_prefix(); static const string& get_path_oid_prefix(); - static int get_roles_by_path_prefix(RGWRados *store, + static int get_roles_by_path_prefix(const DoutPrefixProvider *dpp, + RGWRados *store, CephContext *cct, const string& path_prefix, const string& tenant, diff --git a/src/rgw/rgw_sal.h b/src/rgw/rgw_sal.h index 42363e28ca9f5..7af83d6c03a15 100644 --- a/src/rgw/rgw_sal.h +++ b/src/rgw/rgw_sal.h @@ -66,17 +66,18 @@ enum AttrsMod { using RGWAttrs = std::map; -class RGWStore : public DoutPrefixProvider { +class RGWStore { public: RGWStore() {} virtual ~RGWStore() = default; virtual std::unique_ptr get_user(const rgw_user& u) = 0; virtual std::unique_ptr get_object(const rgw_obj_key& k) = 0; - virtual int get_bucket(RGWUser* u, const rgw_bucket& b, std::unique_ptr* bucket, optional_yield y) = 0; + virtual int get_bucket(const DoutPrefixProvider *dpp, RGWUser* u, const rgw_bucket& b, std::unique_ptr* bucket, optional_yield y) = 0; virtual int get_bucket(RGWUser* u, const RGWBucketInfo& i, std::unique_ptr* bucket) = 0; - virtual int get_bucket(RGWUser* u, const std::string& tenant, const std::string& name, std::unique_ptr* bucket, optional_yield y) = 0; - virtual int create_bucket(RGWUser& u, const rgw_bucket& b, + virtual int get_bucket(const DoutPrefixProvider *dpp, RGWUser* u, const std::string& tenant, const std::string& name, std::unique_ptr* bucket, optional_yield y) = 0; + virtual int create_bucket(const DoutPrefixProvider *dpp, + RGWUser& u, const rgw_bucket& b, const std::string& zonegroup_id, rgw_placement_rule& placement_rule, std::string& swift_ver_location, @@ -93,19 +94,19 @@ class RGWStore : public DoutPrefixProvider { optional_yield y) = 0; virtual RGWBucketList* list_buckets(void) = 0; virtual bool is_meta_master() = 0; - virtual int forward_request_to_master(RGWUser* user, obj_version *objv, + virtual int forward_request_to_master(const DoutPrefixProvider *dpp, RGWUser* user, obj_version *objv, bufferlist& in_data, JSONParser *jp, req_info& info, optional_yield y) = 0; - virtual int defer_gc(RGWObjectCtx *rctx, RGWBucket* bucket, RGWObject* obj, + virtual int defer_gc(const DoutPrefixProvider *dpp, RGWObjectCtx *rctx, RGWBucket* bucket, RGWObject* obj, optional_yield y) = 0; virtual const RGWZoneGroup& get_zonegroup() = 0; virtual int get_zonegroup(const string& id, RGWZoneGroup& zonegroup) = 0; virtual int cluster_stat(RGWClusterStat& stats) = 0; virtual std::unique_ptr get_lifecycle(void) = 0; virtual RGWLC* get_rgwlc(void) = 0; - virtual int delete_raw_obj(const rgw_raw_obj& obj) = 0; + virtual int 
delete_raw_obj(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj) = 0; virtual void get_raw_obj(const rgw_placement_rule& placement_rule, const rgw_obj& obj, rgw_raw_obj* raw_obj) = 0; - virtual int get_raw_chunk_size(const rgw_raw_obj& obj, uint64_t* chunk_size) = 0; + virtual int get_raw_chunk_size(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, uint64_t* chunk_size) = 0; virtual void finalize(void)=0; @@ -127,7 +128,8 @@ class RGWUser { RGWUser(const RGWUserInfo& _i) : info(_i) {} virtual ~RGWUser() = default; - virtual int list_buckets(const std::string& marker, const std::string& end_marker, + virtual int list_buckets(const DoutPrefixProvider *dpp, + const std::string& marker, const std::string& end_marker, uint64_t max, bool need_stats, RGWBucketList& buckets, optional_yield y) = 0; virtual RGWBucket* create_bucket(rgw_bucket& bucket, ceph::real_time creation_time) = 0; @@ -142,7 +144,7 @@ class RGWUser { static bool empty(RGWUser* u) { return (!u || u->info.user_id.id.empty()); } /* Placeholders */ - virtual int load_by_id(optional_yield y) = 0; + virtual int load_by_id(const DoutPrefixProvider *dpp, optional_yield y) = 0; /* dang temporary; will be removed when User is complete */ rgw_user& get_user() { return info.user_id; } @@ -224,37 +226,37 @@ class RGWBucket { } virtual ~RGWBucket() = default; - virtual int load_by_name(const std::string& tenant, const std::string& bucket_name, const std::string bucket_instance_id, RGWSysObjectCtx *rctx, optional_yield y) = 0; + virtual int load_by_name(const DoutPrefixProvider *dpp, const std::string& tenant, const std::string& bucket_name, const std::string bucket_instance_id, RGWSysObjectCtx *rctx, optional_yield y) = 0; virtual std::unique_ptr get_object(const rgw_obj_key& key) = 0; - virtual int list(ListParams&, int, ListResults&, optional_yield y) = 0; + virtual int list(const DoutPrefixProvider *dpp, ListParams&, int, ListResults&, optional_yield y) = 0; virtual RGWObject* create_object(const rgw_obj_key& key /* Attributes */) = 0; virtual RGWAttrs& get_attrs(void) { return attrs; } virtual int set_attrs(RGWAttrs a) { attrs = a; return 0; } - virtual int remove_bucket(bool delete_children, std::string prefix, std::string delimiter, bool forward_to_master, req_info* req_info, optional_yield y) = 0; + virtual int remove_bucket(const DoutPrefixProvider *dpp, bool delete_children, std::string prefix, std::string delimiter, bool forward_to_master, req_info* req_info, optional_yield y) = 0; virtual RGWAccessControlPolicy& get_acl(void) = 0; - virtual int set_acl(RGWAccessControlPolicy& acl, optional_yield y) = 0; - virtual int get_bucket_info(optional_yield y) = 0; - virtual int get_bucket_stats(RGWBucketInfo& bucket_info, int shard_id, + virtual int set_acl(const DoutPrefixProvider *dpp, RGWAccessControlPolicy& acl, optional_yield y) = 0; + virtual int get_bucket_info(const DoutPrefixProvider *dpp, optional_yield y) = 0; + virtual int get_bucket_stats(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, int shard_id, std::string *bucket_ver, std::string *master_ver, std::map& stats, std::string *max_marker = nullptr, bool *syncstopped = nullptr) = 0; - virtual int read_bucket_stats(optional_yield y) = 0; - virtual int sync_user_stats(optional_yield y) = 0; - virtual int update_container_stats(void) = 0; - virtual int check_bucket_shards(void) = 0; - virtual int link(RGWUser* new_user, optional_yield y) = 0; + virtual int read_bucket_stats(const DoutPrefixProvider *dpp, optional_yield y) = 0; + virtual int 
sync_user_stats(const DoutPrefixProvider *dpp, optional_yield y) = 0; + virtual int update_container_stats(const DoutPrefixProvider *dpp) = 0; + virtual int check_bucket_shards(const DoutPrefixProvider *dpp) = 0; + virtual int link(const DoutPrefixProvider *dpp, RGWUser* new_user, optional_yield y) = 0; virtual int unlink(RGWUser* new_user, optional_yield y) = 0; - virtual int chown(RGWUser* new_user, RGWUser* old_user, optional_yield y) = 0; - virtual int put_instance_info(bool exclusive, ceph::real_time mtime) = 0; + virtual int chown(RGWUser* new_user, RGWUser* old_user, optional_yield y, const DoutPrefixProvider *dpp) = 0; + virtual int put_instance_info(const DoutPrefixProvider *dpp, bool exclusive, ceph::real_time mtime) = 0; virtual bool is_owner(RGWUser* user) = 0; virtual RGWUser* get_owner(void) { return owner; }; virtual ACLOwner get_acl_owner(void) { return ACLOwner(info.owner); }; - virtual int check_empty(optional_yield y) = 0; + virtual int check_empty(const DoutPrefixProvider *dpp, optional_yield y) = 0; virtual int check_quota(RGWQuotaInfo& user_quota, RGWQuotaInfo& bucket_quota, uint64_t obj_size, optional_yield y, bool check_size_only = false) = 0; - virtual int set_instance_attrs(RGWAttrs& attrs, optional_yield y) = 0; - virtual int try_refresh_info(ceph::real_time *pmtime) = 0; - virtual int read_usage(uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, + virtual int set_instance_attrs(const DoutPrefixProvider *dpp, RGWAttrs& attrs, optional_yield y) = 0; + virtual int try_refresh_info(const DoutPrefixProvider *dpp, ceph::real_time *pmtime) = 0; + virtual int read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, bool *is_truncated, RGWUsageIter& usage_iter, map& usage) = 0; @@ -376,11 +378,11 @@ class RGWObject { virtual ~ReadOp() = default; - virtual int prepare(optional_yield y) = 0; - virtual int read(int64_t ofs, int64_t end, bufferlist& bl, optional_yield y) = 0; - virtual int iterate(int64_t ofs, int64_t end, RGWGetDataCB *cb, optional_yield y) = 0; - virtual int get_manifest(RGWObjManifest **pmanifest, optional_yield y) = 0; - virtual int get_attr(const char *name, bufferlist& dest, optional_yield y) = 0; + virtual int prepare(optional_yield y, const DoutPrefixProvider *dpp) = 0; + virtual int read(int64_t ofs, int64_t end, bufferlist& bl, optional_yield y, const DoutPrefixProvider *dpp) = 0; + virtual int iterate(const DoutPrefixProvider *dpp, int64_t ofs, int64_t end, RGWGetDataCB *cb, optional_yield y) = 0; + virtual int get_manifest(const DoutPrefixProvider *dpp, RGWObjManifest **pmanifest, optional_yield y) = 0; + virtual int get_attr(const DoutPrefixProvider *dpp, const char *name, bufferlist& dest, optional_yield y) = 0; }; struct WriteOp { @@ -412,7 +414,7 @@ class RGWObject { virtual ~WriteOp() = default; virtual int prepare(optional_yield y) = 0; - virtual int write_meta(uint64_t size, uint64_t accounted_size, optional_yield y) = 0; + virtual int write_meta(const DoutPrefixProvider *dpp, uint64_t size, uint64_t accounted_size, optional_yield y) = 0; //virtual int write_data(const char *data, uint64_t ofs, uint64_t len, bool exclusive) = 0; }; @@ -443,7 +445,7 @@ class RGWObject { virtual int read(off_t offset, off_t length, std::iostream& stream) = 0; virtual int write(off_t offset, off_t length, std::iostream& stream) = 0; - virtual int delete_object(RGWObjectCtx* obj_ctx, ACLOwner obj_owner, + virtual int delete_object(const DoutPrefixProvider *dpp, RGWObjectCtx* obj_ctx, ACLOwner 
obj_owner, ACLOwner bucket_owner, ceph::real_time unmod_since, bool high_precision_time, uint64_t epoch, std::string& version_id, optional_yield y) = 0; @@ -470,17 +472,17 @@ class RGWObject { bool empty() const { return key.empty(); } const std::string &get_name() const { return key.name; } - virtual int get_obj_state(RGWObjectCtx *rctx, RGWBucket& bucket, RGWObjState **state, optional_yield y, bool follow_olh = false) = 0; - virtual int set_obj_attrs(RGWObjectCtx* rctx, RGWAttrs* setattrs, RGWAttrs* delattrs, optional_yield y, rgw_obj* target_obj = NULL) = 0; - virtual int get_obj_attrs(RGWObjectCtx *rctx, optional_yield y, rgw_obj* target_obj = NULL) = 0; - virtual int modify_obj_attrs(RGWObjectCtx *rctx, const char *attr_name, bufferlist& attr_val, optional_yield y) = 0; - virtual int delete_obj_attrs(RGWObjectCtx *rctx, const char *attr_name, optional_yield y) = 0; + virtual int get_obj_state(const DoutPrefixProvider *dpp, RGWObjectCtx *rctx, RGWBucket& bucket, RGWObjState **state, optional_yield y, bool follow_olh = false) = 0; + virtual int set_obj_attrs(const DoutPrefixProvider *dpp, RGWObjectCtx* rctx, RGWAttrs* setattrs, RGWAttrs* delattrs, optional_yield y, rgw_obj* target_obj = NULL) = 0; + virtual int get_obj_attrs(RGWObjectCtx *rctx, optional_yield y, const DoutPrefixProvider *dpp, rgw_obj* target_obj = NULL) = 0; + virtual int modify_obj_attrs(RGWObjectCtx *rctx, const char *attr_name, bufferlist& attr_val, optional_yield y, const DoutPrefixProvider *dpp) = 0; + virtual int delete_obj_attrs(const DoutPrefixProvider *dpp, RGWObjectCtx *rctx, const char *attr_name, optional_yield y) = 0; virtual int copy_obj_data(RGWObjectCtx& rctx, RGWBucket* dest_bucket, RGWObject* dest_obj, uint16_t olh_epoch, std::string* petag, const DoutPrefixProvider *dpp, optional_yield y) = 0; virtual bool is_expired() = 0; virtual void gen_rand_obj_instance_name() = 0; virtual void raw_obj_to_obj(const rgw_raw_obj& raw_obj) = 0; virtual void get_raw_obj(rgw_raw_obj* raw_obj) = 0; - virtual MPSerializer* get_serializer(const std::string& lock_name) = 0; + virtual MPSerializer* get_serializer(const DoutPrefixProvider *dpp, const std::string& lock_name) = 0; virtual int transition(RGWObjectCtx& rctx, RGWBucket* bucket, const rgw_placement_rule& placement_rule, @@ -488,7 +490,8 @@ class RGWObject { uint64_t olh_epoch, const DoutPrefixProvider *dpp, optional_yield y) = 0; - virtual int get_max_chunk_size(rgw_placement_rule placement_rule, + virtual int get_max_chunk_size(const DoutPrefixProvider *dpp, + rgw_placement_rule placement_rule, uint64_t* max_chunk_size, uint64_t* alignment = nullptr) = 0; virtual void get_max_aligned_size(uint64_t size, uint64_t alignment, uint64_t *max_size) = 0; @@ -530,10 +533,10 @@ class RGWObject { virtual std::unique_ptr get_write_op(RGWObjectCtx*) = 0; /* OMAP */ - virtual int omap_get_vals_by_keys(const std::string& oid, + virtual int omap_get_vals_by_keys(const DoutPrefixProvider *dpp, const std::string& oid, const std::set& keys, RGWAttrs *vals) = 0; - virtual int omap_set_val_by_key(const std::string& key, bufferlist& val, + virtual int omap_set_val_by_key(const DoutPrefixProvider *dpp, const std::string& key, bufferlist& val, bool must_exist, optional_yield y) = 0; static bool empty(RGWObject* o) { return (!o || o->empty()); } @@ -570,7 +573,7 @@ struct Serializer { Serializer() = default; virtual ~Serializer() = default; - virtual int try_lock(utime_t dur, optional_yield y) = 0; + virtual int try_lock(const DoutPrefixProvider *dpp, utime_t dur, optional_yield y) = 
0; virtual int unlock() = 0; }; diff --git a/src/rgw/rgw_sal_rados.cc b/src/rgw/rgw_sal_rados.cc index 157dc88f65c10..353c7808b33d9 100644 --- a/src/rgw/rgw_sal_rados.cc +++ b/src/rgw/rgw_sal_rados.cc @@ -41,7 +41,7 @@ namespace rgw::sal { -int RGWRadosUser::list_buckets(const string& marker, const string& end_marker, +int RGWRadosUser::list_buckets(const DoutPrefixProvider *dpp, const string& marker, const string& end_marker, uint64_t max, bool need_stats, RGWBucketList &buckets, optional_yield y) { @@ -50,7 +50,7 @@ int RGWRadosUser::list_buckets(const string& marker, const string& end_marker, int ret; buckets.clear(); - ret = store->ctl()->user->list_buckets(info.user_id, marker, end_marker, max, + ret = store->ctl()->user->list_buckets(dpp, info.user_id, marker, end_marker, max, need_stats, &ulist, &is_truncated, y); if (ret < 0) return ret; @@ -69,10 +69,10 @@ RGWBucket* RGWRadosUser::create_bucket(rgw_bucket& bucket, return NULL; } -int RGWRadosUser::load_by_id(optional_yield y) +int RGWRadosUser::load_by_id(const DoutPrefixProvider *dpp, optional_yield y) { - return store->ctl()->user->get_info_by_uid(info.user_id, &info, y); + return store->ctl()->user->get_info_by_uid(dpp, info.user_id, &info, y); } std::unique_ptr RGWRadosStore::get_object(const rgw_obj_key& k) @@ -86,12 +86,12 @@ RGWObject *RGWRadosBucket::create_object(const rgw_obj_key &key) return nullptr; } -int RGWRadosBucket::remove_bucket(bool delete_children, std::string prefix, std::string delimiter, bool forward_to_master, req_info* req_info, optional_yield y) +int RGWRadosBucket::remove_bucket(const DoutPrefixProvider *dpp, bool delete_children, std::string prefix, std::string delimiter, bool forward_to_master, req_info* req_info, optional_yield y) { int ret; // Refresh info - ret = get_bucket_info(y); + ret = get_bucket_info(dpp, y); if (ret < 0) return ret; @@ -105,12 +105,12 @@ int RGWRadosBucket::remove_bucket(bool delete_children, std::string prefix, std: do { results.objs.clear(); - ret = list(params, 1000, results, y); + ret = list(dpp, params, 1000, results, y); if (ret < 0) return ret; if (!results.objs.empty() && !delete_children) { - lderr(store->ctx()) << "ERROR: could not remove non-empty bucket " << info.bucket.name << + ldpp_dout(dpp, -1) << "ERROR: could not remove non-empty bucket " << info.bucket.name << dendl; return -ENOTEMPTY; } @@ -118,7 +118,7 @@ int RGWRadosBucket::remove_bucket(bool delete_children, std::string prefix, std: for (const auto& obj : results.objs) { rgw_obj_key key(obj.key); /* xxx dang */ - ret = rgw_remove_object(store, info, info.bucket, key); + ret = rgw_remove_object(dpp, store, info, info.bucket, key); if (ret < 0 && ret != -ENOENT) { return ret; } @@ -127,13 +127,13 @@ int RGWRadosBucket::remove_bucket(bool delete_children, std::string prefix, std: /* If there's a prefix, then we are aborting multiparts as well */ if (!prefix.empty()) { - ret = abort_bucket_multiparts(store, store->ctx(), info, prefix, delimiter); + ret = abort_bucket_multiparts(dpp, store, store->ctx(), info, prefix, delimiter); if (ret < 0) { return ret; } } - ret = store->ctl()->bucket->sync_user_stats(info.owner, info, y); + ret = store->ctl()->bucket->sync_user_stats(dpp, info.owner, info, y); if (ret < 0) { ldout(store->ctx(), 1) << "WARNING: failed sync user stats before bucket delete. 
ret=" << ret << dendl; } @@ -142,9 +142,9 @@ int RGWRadosBucket::remove_bucket(bool delete_children, std::string prefix, std: // if we deleted children above we will force delete, as any that // remain is detrius from a prior bug - ret = store->getRados()->delete_bucket(info, ot, y, !delete_children); + ret = store->getRados()->delete_bucket(info, ot, y, dpp, !delete_children); if (ret < 0) { - lderr(store->ctx()) << "ERROR: could not remove bucket " << + ldpp_dout(dpp, -1) << "ERROR: could not remove bucket " << info.bucket.name << dendl; return ret; } @@ -153,19 +153,19 @@ int RGWRadosBucket::remove_bucket(bool delete_children, std::string prefix, std: // they should be removed (note that any pending notifications on the bucket are still going to be sent) RGWPubSub ps(store, info.owner.tenant); RGWPubSub::Bucket ps_bucket(&ps, info.bucket); - const auto ps_ret = ps_bucket.remove_notifications(y); + const auto ps_ret = ps_bucket.remove_notifications(dpp, y); if (ps_ret < 0 && ps_ret != -ENOENT) { lderr(store->ctx()) << "ERROR: unable to remove notifications from bucket. ret=" << ps_ret << dendl; } - ret = store->ctl()->bucket->unlink_bucket(info.owner, info.bucket, y, false); + ret = store->ctl()->bucket->unlink_bucket(info.owner, info.bucket, y, dpp, false); if (ret < 0) { - lderr(store->ctx()) << "ERROR: unable to remove user bucket information" << dendl; + ldpp_dout(dpp, -1) << "ERROR: unable to remove user bucket information" << dendl; } if (forward_to_master) { bufferlist in_data; - ret = store->forward_request_to_master(owner, &ot.read_version, in_data, nullptr, *req_info, y); + ret = store->forward_request_to_master(dpp, owner, &ot.read_version, in_data, nullptr, *req_info, y); if (ret < 0) { if (ret == -ENOENT) { /* adjust error, we want to return with NoSuchBucket and not @@ -179,12 +179,12 @@ int RGWRadosBucket::remove_bucket(bool delete_children, std::string prefix, std: return ret; } -int RGWRadosBucket::get_bucket_info(optional_yield y) +int RGWRadosBucket::get_bucket_info(const DoutPrefixProvider *dpp, optional_yield y) { auto obj_ctx = store->svc()->sysobj->init_obj_ctx(); RGWSI_MetaBackend_CtxParams bectx_params = RGWSI_MetaBackend_CtxParams_SObj(&obj_ctx); RGWObjVersionTracker ep_ot; - int ret = store->ctl()->bucket->read_bucket_info(info.bucket, &info, y, + int ret = store->ctl()->bucket->read_bucket_info(info.bucket, &info, y, dpp, RGWBucketCtl::BucketInstance::GetParams() .set_mtime(&mtime) .set_attrs(&attrs) @@ -197,7 +197,7 @@ int RGWRadosBucket::get_bucket_info(optional_yield y) return ret; } -int RGWRadosBucket::load_by_name(const std::string& tenant, const std::string& bucket_name, const std::string bucket_instance_id, RGWSysObjectCtx *rctx, optional_yield y) +int RGWRadosBucket::load_by_name(const DoutPrefixProvider *dpp, const std::string& tenant, const std::string& bucket_name, const std::string bucket_instance_id, RGWSysObjectCtx *rctx, optional_yield y) { info.bucket.tenant = tenant; info.bucket.name = bucket_name; @@ -205,39 +205,39 @@ int RGWRadosBucket::load_by_name(const std::string& tenant, const std::string& b ent.bucket = info.bucket; if (bucket_instance_id.empty()) { - return get_bucket_info(y); + return get_bucket_info(dpp, y); } - return store->getRados()->get_bucket_instance_info(*rctx, info.bucket, info, NULL, &attrs, y); + return store->getRados()->get_bucket_instance_info(*rctx, info.bucket, info, NULL, &attrs, y, dpp); } -int RGWRadosBucket::get_bucket_stats(RGWBucketInfo& bucket_info, int shard_id, +int RGWRadosBucket::get_bucket_stats(const 
DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, int shard_id, std::string *bucket_ver, std::string *master_ver, std::map& stats, std::string *max_marker, bool *syncstopped) { - return store->getRados()->get_bucket_stats(bucket_info, shard_id, bucket_ver, master_ver, stats, max_marker, syncstopped); + return store->getRados()->get_bucket_stats(dpp, bucket_info, shard_id, bucket_ver, master_ver, stats, max_marker, syncstopped); } -int RGWRadosBucket::read_bucket_stats(optional_yield y) +int RGWRadosBucket::read_bucket_stats(const DoutPrefixProvider *dpp, optional_yield y) { - int ret = store->ctl()->bucket->read_bucket_stats(info.bucket, &ent, y); + int ret = store->ctl()->bucket->read_bucket_stats(info.bucket, &ent, y, dpp); info.placement_rule = ent.placement_rule; return ret; } -int RGWRadosBucket::sync_user_stats(optional_yield y) +int RGWRadosBucket::sync_user_stats(const DoutPrefixProvider *dpp, optional_yield y) { - return store->ctl()->bucket->sync_user_stats(owner->get_id(), info, y); + return store->ctl()->bucket->sync_user_stats(dpp, owner->get_id(), info, y); } -int RGWRadosBucket::update_container_stats(void) +int RGWRadosBucket::update_container_stats(const DoutPrefixProvider *dpp) { int ret; map m; m[info.bucket.name] = ent; - ret = store->getRados()->update_containers_stats(m); + ret = store->getRados()->update_containers_stats(m, dpp); if (!ret) return -EEXIST; if (ret < 0) @@ -259,12 +259,12 @@ int RGWRadosBucket::update_container_stats(void) return 0; } -int RGWRadosBucket::check_bucket_shards(void) +int RGWRadosBucket::check_bucket_shards(const DoutPrefixProvider *dpp) { - return store->getRados()->check_bucket_shards(info, info.bucket, get_count()); + return store->getRados()->check_bucket_shards(info, info.bucket, get_count(), dpp); } -int RGWRadosBucket::link(RGWUser* new_user, optional_yield y) +int RGWRadosBucket::link(const DoutPrefixProvider *dpp, RGWUser* new_user, optional_yield y) { RGWBucketEntryPoint ep; ep.bucket = info.bucket; @@ -275,7 +275,7 @@ int RGWRadosBucket::link(RGWUser* new_user, optional_yield y) rgw_ep_info ep_data{ep, ep_attrs}; return store->ctl()->bucket->link_bucket(new_user->get_user(), info.bucket, - ceph::real_time(), y, true, &ep_data); + ceph::real_time(), y, dpp, true, &ep_data); } int RGWRadosBucket::unlink(RGWUser* new_user, optional_yield y) @@ -283,18 +283,18 @@ int RGWRadosBucket::unlink(RGWUser* new_user, optional_yield y) return -1; } -int RGWRadosBucket::chown(RGWUser* new_user, RGWUser* old_user, optional_yield y) +int RGWRadosBucket::chown(RGWUser* new_user, RGWUser* old_user, optional_yield y, const DoutPrefixProvider *dpp) { string obj_marker; return store->ctl()->bucket->chown(store, info, new_user->get_user(), - old_user->get_display_name(), obj_marker, y); + old_user->get_display_name(), obj_marker, y, dpp); } -int RGWRadosBucket::put_instance_info(bool exclusive, ceph::real_time _mtime) +int RGWRadosBucket::put_instance_info(const DoutPrefixProvider *dpp, bool exclusive, ceph::real_time _mtime) { mtime = _mtime; - return store->getRados()->put_bucket_instance_info(info, exclusive, mtime, &attrs); + return store->getRados()->put_bucket_instance_info(info, exclusive, mtime, &attrs, dpp); } /* Make sure to call get_bucket_info() if you need it first */ @@ -303,9 +303,9 @@ bool RGWRadosBucket::is_owner(RGWUser* user) return (info.owner.compare(user->get_user()) == 0); } -int RGWRadosBucket::check_empty(optional_yield y) +int RGWRadosBucket::check_empty(const DoutPrefixProvider *dpp, optional_yield y) { - return 
store->getRados()->check_bucket_empty(info, y); + return store->getRados()->check_bucket_empty(dpp, info, y); } int RGWRadosBucket::check_quota(RGWQuotaInfo& user_quota, RGWQuotaInfo& bucket_quota, uint64_t obj_size, @@ -315,35 +315,35 @@ int RGWRadosBucket::check_quota(RGWQuotaInfo& user_quota, RGWQuotaInfo& bucket_q user_quota, bucket_quota, obj_size, y, check_size_only); } -int RGWRadosBucket::set_instance_attrs(RGWAttrs& attrs, optional_yield y) +int RGWRadosBucket::set_instance_attrs(const DoutPrefixProvider *dpp, RGWAttrs& attrs, optional_yield y) { return store->ctl()->bucket->set_bucket_instance_attrs(get_info(), - attrs, &get_info().objv_tracker, y); + attrs, &get_info().objv_tracker, y, dpp); } -int RGWRadosBucket::try_refresh_info(ceph::real_time *pmtime) +int RGWRadosBucket::try_refresh_info(const DoutPrefixProvider *dpp, ceph::real_time *pmtime) { - return store->getRados()->try_refresh_bucket_info(info, pmtime, &attrs); + return store->getRados()->try_refresh_bucket_info(info, pmtime, dpp, &attrs); } -int RGWRadosBucket::read_usage(uint64_t start_epoch, uint64_t end_epoch, +int RGWRadosBucket::read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, bool *is_truncated, RGWUsageIter& usage_iter, map& usage) { - return store->getRados()->read_usage(owner->get_id(), get_name(), start_epoch, + return store->getRados()->read_usage(dpp, owner->get_id(), get_name(), start_epoch, end_epoch, max_entries, is_truncated, usage_iter, usage); } -int RGWRadosBucket::set_acl(RGWAccessControlPolicy &acl, optional_yield y) +int RGWRadosBucket::set_acl(const DoutPrefixProvider *dpp, RGWAccessControlPolicy &acl, optional_yield y) { bufferlist aclbl; acls = acl; acl.encode(aclbl); - return store->ctl()->bucket->set_acl(acl.get_owner(), info.bucket, info, aclbl, y); + return store->ctl()->bucket->set_acl(acl.get_owner(), info.bucket, info, aclbl, y, dpp); } std::unique_ptr RGWRadosBucket::get_object(const rgw_obj_key& k) @@ -351,7 +351,7 @@ std::unique_ptr RGWRadosBucket::get_object(const rgw_obj_key& k) return std::unique_ptr(new RGWRadosObject(this->store, k, this)); } -int RGWRadosBucket::list(ListParams& params, int max, ListResults& results, optional_yield y) +int RGWRadosBucket::list(const DoutPrefixProvider *dpp, ListParams& params, int max, ListResults& results, optional_yield y) { RGWRados::Bucket target(store->getRados(), get_info()); if (params.shard_id >= 0) { @@ -367,7 +367,7 @@ int RGWRadosBucket::list(ListParams& params, int max, ListResults& results, opti list_op.params.list_versions = params.list_versions; list_op.params.allow_unordered = params.allow_unordered; - int ret = list_op.list_objects(max, &results.objs, &results.common_prefixes, &results.is_truncated, y); + int ret = list_op.list_objects(dpp, max, &results.objs, &results.common_prefixes, &results.is_truncated, y); if (ret >= 0) { results.next_marker = list_op.get_next_marker(); } @@ -417,24 +417,24 @@ int RGWObject::range_to_ofs(uint64_t obj_size, int64_t &ofs, int64_t &end) return 0; } -int RGWRadosObject::get_obj_state(RGWObjectCtx *rctx, RGWBucket& bucket, RGWObjState **state, optional_yield y, bool follow_olh) +int RGWRadosObject::get_obj_state(const DoutPrefixProvider *dpp, RGWObjectCtx *rctx, RGWBucket& bucket, RGWObjState **state, optional_yield y, bool follow_olh) { rgw_obj obj(bucket.get_key(), key.name); - return store->getRados()->get_obj_state(rctx, bucket.get_info(), obj, state, follow_olh, y); + return store->getRados()->get_obj_state(dpp, rctx, 
bucket.get_info(), obj, state, follow_olh, y); } -int RGWRadosObject::read_attrs(RGWRados::Object::Read &read_op, optional_yield y, rgw_obj *target_obj) +int RGWRadosObject::read_attrs(RGWRados::Object::Read &read_op, optional_yield y, const DoutPrefixProvider *dpp, rgw_obj *target_obj) { read_op.params.attrs = &attrs; read_op.params.target_obj = target_obj; read_op.params.obj_size = &obj_size; read_op.params.lastmod = &mtime; - return read_op.prepare(y); + return read_op.prepare(y, dpp); } -int RGWRadosObject::set_obj_attrs(RGWObjectCtx* rctx, RGWAttrs* setattrs, RGWAttrs* delattrs, optional_yield y, rgw_obj* target_obj) +int RGWRadosObject::set_obj_attrs(const DoutPrefixProvider *dpp, RGWObjectCtx* rctx, RGWAttrs* setattrs, RGWAttrs* delattrs, optional_yield y, rgw_obj* target_obj) { RGWAttrs empty; rgw_obj target = get_obj(); @@ -442,7 +442,7 @@ int RGWRadosObject::set_obj_attrs(RGWObjectCtx* rctx, RGWAttrs* setattrs, RGWAtt if (!target_obj) target_obj = ⌖ - return store->getRados()->set_attrs(rctx, + return store->getRados()->set_attrs(dpp, rctx, bucket->get_info(), *target_obj, setattrs ? *setattrs : empty, @@ -450,34 +450,34 @@ int RGWRadosObject::set_obj_attrs(RGWObjectCtx* rctx, RGWAttrs* setattrs, RGWAtt y); } -int RGWRadosObject::get_obj_attrs(RGWObjectCtx *rctx, optional_yield y, rgw_obj* target_obj) +int RGWRadosObject::get_obj_attrs(RGWObjectCtx *rctx, optional_yield y, const DoutPrefixProvider *dpp, rgw_obj* target_obj) { RGWRados::Object op_target(store->getRados(), bucket->get_info(), *rctx, get_obj()); RGWRados::Object::Read read_op(&op_target); - return read_attrs(read_op, y, target_obj); + return read_attrs(read_op, y, dpp, target_obj); } -int RGWRadosObject::modify_obj_attrs(RGWObjectCtx *rctx, const char *attr_name, bufferlist& attr_val, optional_yield y) +int RGWRadosObject::modify_obj_attrs(RGWObjectCtx *rctx, const char *attr_name, bufferlist& attr_val, optional_yield y, const DoutPrefixProvider *dpp) { rgw_obj target = get_obj(); - int r = get_obj_attrs(rctx, y, &target); + int r = get_obj_attrs(rctx, y, dpp, &target); if (r < 0) { return r; } set_atomic(rctx); attrs[attr_name] = attr_val; - return set_obj_attrs(rctx, &attrs, nullptr, y, &target); + return set_obj_attrs(dpp, rctx, &attrs, nullptr, y, &target); } -int RGWRadosObject::delete_obj_attrs(RGWObjectCtx *rctx, const char *attr_name, optional_yield y) +int RGWRadosObject::delete_obj_attrs(const DoutPrefixProvider *dpp, RGWObjectCtx *rctx, const char *attr_name, optional_yield y) { RGWAttrs rmattr; bufferlist bl; set_atomic(rctx); rmattr[attr_name] = bl; - return set_obj_attrs(rctx, nullptr, &rmattr, y); + return set_obj_attrs(dpp, rctx, nullptr, &rmattr, y); } int RGWRadosObject::copy_obj_data(RGWObjectCtx& rctx, RGWBucket* dest_bucket, @@ -491,7 +491,7 @@ int RGWRadosObject::copy_obj_data(RGWObjectCtx& rctx, RGWBucket* dest_bucket, RGWRados::Object op_target(store->getRados(), dest_bucket->get_info(), rctx, get_obj()); RGWRados::Object::Read read_op(&op_target); - int ret = read_attrs(read_op, y); + int ret = read_attrs(read_op, y, dpp); if (ret < 0) return ret; @@ -555,7 +555,8 @@ void RGWRadosObject::get_raw_obj(rgw_raw_obj* raw_obj) store->getRados()->obj_to_raw((bucket->get_info()).placement_rule, get_obj(), raw_obj); } -int RGWRadosObject::omap_get_vals_by_keys(const std::string& oid, +int RGWRadosObject::omap_get_vals_by_keys(const DoutPrefixProvider *dpp, + const std::string& oid, const std::set& keys, RGWAttrs *vals) { @@ -565,7 +566,7 @@ int RGWRadosObject::omap_get_vals_by_keys(const std::string& 
oid, rgw_obj obj = get_obj(); store->getRados()->obj_to_raw(bucket->get_placement_rule(), obj, &head_obj); - ret = store->get_obj_head_ioctx(bucket->get_info(), obj, &cur_ioctx); + ret = store->get_obj_head_ioctx(dpp, bucket->get_info(), obj, &cur_ioctx); if (ret < 0) { return ret; } @@ -573,7 +574,7 @@ int RGWRadosObject::omap_get_vals_by_keys(const std::string& oid, return cur_ioctx.omap_get_vals_by_keys(oid, keys, vals); } -int RGWRadosObject::omap_set_val_by_key(const std::string& key, bufferlist& val, +int RGWRadosObject::omap_set_val_by_key(const DoutPrefixProvider *dpp, const std::string& key, bufferlist& val, bool must_exist, optional_yield y) { rgw_raw_obj raw_meta_obj; @@ -584,12 +585,12 @@ int RGWRadosObject::omap_set_val_by_key(const std::string& key, bufferlist& val, auto obj_ctx = store->svc()->sysobj->init_obj_ctx(); auto sysobj = obj_ctx.get_obj(raw_meta_obj); - return sysobj.omap().set_must_exist(must_exist).set(key, val, y); + return sysobj.omap().set_must_exist(must_exist).set(dpp, key, val, y); } -MPSerializer* RGWRadosObject::get_serializer(const std::string& lock_name) +MPSerializer* RGWRadosObject::get_serializer(const DoutPrefixProvider *dpp, const std::string& lock_name) { - return new MPRadosSerializer(store, this, lock_name); + return new MPRadosSerializer(dpp, store, this, lock_name); } int RGWRadosObject::transition(RGWObjectCtx& rctx, @@ -603,9 +604,9 @@ int RGWRadosObject::transition(RGWObjectCtx& rctx, return store->getRados()->transition_obj(rctx, bucket, *this, placement_rule, mtime, olh_epoch, dpp, y); } -int RGWRadosObject::get_max_chunk_size(rgw_placement_rule placement_rule, uint64_t *max_chunk_size, uint64_t *alignment) +int RGWRadosObject::get_max_chunk_size(const DoutPrefixProvider *dpp, rgw_placement_rule placement_rule, uint64_t *max_chunk_size, uint64_t *alignment) { - return store->getRados()->get_max_chunk_size(placement_rule, get_obj(), max_chunk_size, alignment); + return store->getRados()->get_max_chunk_size(placement_rule, get_obj(), max_chunk_size, dpp, alignment); } void RGWRadosObject::get_max_aligned_size(uint64_t size, uint64_t alignment, @@ -649,7 +650,7 @@ RGWRadosObject::RadosReadOp::RadosReadOp(RGWRadosObject *_source, RGWObjectCtx * parent_op(&op_target) { } -int RGWRadosObject::RadosReadOp::prepare(optional_yield y) +int RGWRadosObject::RadosReadOp::prepare(optional_yield y, const DoutPrefixProvider *dpp) { uint64_t obj_size; @@ -665,7 +666,7 @@ int RGWRadosObject::RadosReadOp::prepare(optional_yield y) parent_op.params.obj_size = &obj_size; parent_op.params.attrs = &source->get_attrs(); - int ret = parent_op.prepare(y); + int ret = parent_op.prepare(y, dpp); if (ret < 0) return ret; @@ -676,23 +677,23 @@ int RGWRadosObject::RadosReadOp::prepare(optional_yield y) return ret; } -int RGWRadosObject::RadosReadOp::read(int64_t ofs, int64_t end, bufferlist& bl, optional_yield y) +int RGWRadosObject::RadosReadOp::read(int64_t ofs, int64_t end, bufferlist& bl, optional_yield y, const DoutPrefixProvider *dpp) { - return parent_op.read(ofs, end, bl, y); + return parent_op.read(ofs, end, bl, y, dpp); } -int RGWRadosObject::RadosReadOp::get_manifest(RGWObjManifest **pmanifest, +int RGWRadosObject::RadosReadOp::get_manifest(const DoutPrefixProvider *dpp, RGWObjManifest **pmanifest, optional_yield y) { - return op_target.get_manifest(pmanifest, y); + return op_target.get_manifest(dpp, pmanifest, y); } -int RGWRadosObject::RadosReadOp::get_attr(const char *name, bufferlist& dest, optional_yield y) +int 
RGWRadosObject::RadosReadOp::get_attr(const DoutPrefixProvider *dpp, const char *name, bufferlist& dest, optional_yield y) { - return parent_op.get_attr(name, dest, y); + return parent_op.get_attr(dpp, name, dest, y); } -int RGWRadosObject::delete_object(RGWObjectCtx* obj_ctx, ACLOwner obj_owner, ACLOwner bucket_owner, ceph::real_time unmod_since, bool high_precision_time, uint64_t epoch, string& version_id, optional_yield y) +int RGWRadosObject::delete_object(const DoutPrefixProvider *dpp, RGWObjectCtx* obj_ctx, ACLOwner obj_owner, ACLOwner bucket_owner, ceph::real_time unmod_since, bool high_precision_time, uint64_t epoch, string& version_id, optional_yield y) { int ret = 0; RGWRados::Object del_target(store->getRados(), bucket->get_info(), *obj_ctx, get_obj()); @@ -706,7 +707,7 @@ int RGWRadosObject::delete_object(RGWObjectCtx* obj_ctx, ACLOwner obj_owner, ACL del_op.params.unmod_since = unmod_since; del_op.params.high_precision_time = high_precision_time; - ret = del_op.delete_obj(y); + ret = del_op.delete_obj(y, dpp); if (ret >= 0) { delete_marker = del_op.result.delete_marker; version_id = del_op.result.version_id; @@ -775,9 +776,9 @@ int RGWRadosObject::copy_object(RGWObjectCtx& obj_ctx, y); } -int RGWRadosObject::RadosReadOp::iterate(int64_t ofs, int64_t end, RGWGetDataCB *cb, optional_yield y) +int RGWRadosObject::RadosReadOp::iterate(const DoutPrefixProvider *dpp, int64_t ofs, int64_t end, RGWGetDataCB *cb, optional_yield y) { - return parent_op.iterate(ofs, end, cb, y); + return parent_op.iterate(dpp, ofs, end, cb, y); } std::unique_ptr RGWRadosObject::get_write_op(RGWObjectCtx* ctx) @@ -822,9 +823,9 @@ int RGWRadosObject::RadosWriteOp::prepare(optional_yield y) return 0; } -int RGWRadosObject::RadosWriteOp::write_meta(uint64_t size, uint64_t accounted_size, optional_yield y) +int RGWRadosObject::RadosWriteOp::write_meta(const DoutPrefixProvider *dpp, uint64_t size, uint64_t accounted_size, optional_yield y) { - int ret = parent_op.write_meta(size, accounted_size, *params.attrs, y); + int ret = parent_op.write_meta(dpp, size, accounted_size, *params.attrs, y); params.canceled = parent_op.meta.canceled; return ret; @@ -854,13 +855,13 @@ int RGWRadosObject::swift_versioning_copy(RGWObjectCtx* obj_ctx, y); } -int RGWRadosStore::get_bucket(RGWUser* u, const rgw_bucket& b, std::unique_ptr* bucket, optional_yield y) +int RGWRadosStore::get_bucket(const DoutPrefixProvider *dpp, RGWUser* u, const rgw_bucket& b, std::unique_ptr* bucket, optional_yield y) { int ret; RGWBucket* bp; bp = new RGWRadosBucket(this, b, u); - ret = bp->get_bucket_info(y); + ret = bp->get_bucket_info(dpp, y); if (ret < 0) { delete bp; return ret; @@ -881,17 +882,18 @@ int RGWRadosStore::get_bucket(RGWUser* u, const RGWBucketInfo& i, std::unique_pt return 0; } -int RGWRadosStore::get_bucket(RGWUser* u, const std::string& tenant, const std::string&name, std::unique_ptr* bucket, optional_yield y) +int RGWRadosStore::get_bucket(const DoutPrefixProvider *dpp, RGWUser* u, const std::string& tenant, const std::string&name, std::unique_ptr* bucket, optional_yield y) { rgw_bucket b; b.tenant = tenant; b.name = name; - return get_bucket(u, b, bucket, y); + return get_bucket(dpp, u, b, bucket, y); } -static int decode_policy(CephContext *cct, +static int decode_policy(const DoutPrefixProvider *dpp, + CephContext *cct, bufferlist& bl, RGWAccessControlPolicy *policy) { @@ -899,11 +901,11 @@ static int decode_policy(CephContext *cct, try { policy->decode(iter); } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: could 
not decode policy, caught buffer::error" << dendl; + ldpp_dout(dpp, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl; return -EIO; } if (cct->_conf->subsys.should_gather()) { - ldout(cct, 15) << __func__ << " Read AccessControlPolicy"; + ldpp_dout(dpp, 15) << __func__ << " Read AccessControlPolicy"; RGWAccessControlPolicy_S3 *s3policy = static_cast(policy); s3policy->to_xml(*_dout); *_dout << dendl; @@ -911,7 +913,7 @@ static int decode_policy(CephContext *cct, return 0; } -static int rgw_op_get_bucket_policy_from_attr(RGWRadosStore *store, +static int rgw_op_get_bucket_policy_from_attr(const DoutPrefixProvider *dpp, RGWRadosStore *store, RGWUser& user, RGWAttrs& bucket_attrs, RGWAccessControlPolicy *policy, @@ -920,13 +922,13 @@ static int rgw_op_get_bucket_policy_from_attr(RGWRadosStore *store, auto aiter = bucket_attrs.find(RGW_ATTR_ACL); if (aiter != bucket_attrs.end()) { - int ret = decode_policy(store->ctx(), aiter->second, policy); + int ret = decode_policy(dpp, store->ctx(), aiter->second, policy); if (ret < 0) return ret; } else { ldout(store->ctx(), 0) << "WARNING: couldn't find acl header for bucket, generating default" << dendl; /* object exists, but policy is broken */ - int r = user.load_by_id(y); + int r = user.load_by_id(dpp, y); if (r < 0) return r; @@ -940,7 +942,8 @@ bool RGWRadosStore::is_meta_master() return svc()->zone->is_meta_master(); } -int RGWRadosStore::forward_request_to_master(RGWUser* user, obj_version *objv, +int RGWRadosStore::forward_request_to_master(const DoutPrefixProvider *dpp, + RGWUser* user, obj_version *objv, bufferlist& in_data, JSONParser *jp, req_info& info, optional_yield y) @@ -954,17 +957,17 @@ int RGWRadosStore::forward_request_to_master(RGWUser* user, obj_version *objv, ldout(ctx(), 0) << "rest connection is invalid" << dendl; return -EINVAL; } - ldout(ctx(), 0) << "sending request to master zonegroup" << dendl; + ldpp_dout(dpp, 0) << "sending request to master zonegroup" << dendl; bufferlist response; string uid_str = user->get_id().to_str(); #define MAX_REST_RESPONSE (128 * 1024) // we expect a very small response - int ret = svc()->zone->get_master_conn()->forward(rgw_user(uid_str), info, + int ret = svc()->zone->get_master_conn()->forward(dpp, rgw_user(uid_str), info, objv, MAX_REST_RESPONSE, &in_data, &response, y); if (ret < 0) return ret; - ldout(ctx(), 20) << "response: " << response.c_str() << dendl; + ldpp_dout(dpp, 20) << "response: " << response.c_str() << dendl; if (jp && !jp->parse(response.c_str(), response.length())) { ldout(ctx(), 0) << "failed parsing response from master zonegroup" << dendl; return -EINVAL; @@ -973,9 +976,9 @@ int RGWRadosStore::forward_request_to_master(RGWUser* user, obj_version *objv, return 0; } -int RGWRadosStore::defer_gc(RGWObjectCtx *rctx, RGWBucket* bucket, RGWObject* obj, optional_yield y) +int RGWRadosStore::defer_gc(const DoutPrefixProvider *dpp, RGWObjectCtx *rctx, RGWBucket* bucket, RGWObject* obj, optional_yield y) { - return rados->defer_gc(rctx, bucket->get_info(), obj->get_obj(), y); + return rados->defer_gc(dpp, rctx, bucket->get_info(), obj->get_obj(), y); } const RGWZoneGroup& RGWRadosStore::get_zonegroup() @@ -1005,7 +1008,8 @@ int RGWRadosStore::cluster_stat(RGWClusterStat& stats) return ret; } -int RGWRadosStore::create_bucket(RGWUser& u, const rgw_bucket& b, +int RGWRadosStore::create_bucket(const DoutPrefixProvider *dpp, + RGWUser& u, const rgw_bucket& b, const string& zonegroup_id, rgw_placement_rule& placement_rule, string& swift_ver_location, @@ -1031,7 
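The hunks above show the core pattern of this patch: helpers such as decode_policy() and forward_request_to_master() gain a const DoutPrefixProvider *dpp parameter and switch from ldout(cct, ...) to ldpp_dout(dpp, ...), so every line is emitted with the caller's prefix rather than only the CephContext. A minimal sketch of the same pattern, with a hypothetical helper name and message (not part of the patch):

    #include <cerrno>
    #include "common/dout.h"   // DoutPrefixProvider, ldpp_dout, dendl

    // Hypothetical helper following the conversion applied above: the
    // caller's prefix provider is threaded in and used for every log line.
    static int check_value(const DoutPrefixProvider *dpp, int value)
    {
      if (value < 0) {
        // was: ldout(cct, 0) << "ERROR: ..." << dendl;
        ldpp_dout(dpp, 0) << "ERROR: negative value " << value << dendl;
        return -EINVAL;
      }
      ldpp_dout(dpp, 20) << "value ok: " << value << dendl;
      return 0;
    }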
+1035,7 @@ int RGWRadosStore::create_bucket(RGWUser& u, const rgw_bucket& b, obj_version objv, *pobjv = NULL; /* If it exists, look it up; otherwise create it */ - ret = get_bucket(&u, b, &bucket, y); + ret = get_bucket(dpp, &u, b, &bucket, y); if (ret < 0 && ret != -ENOENT) return ret; @@ -1044,7 +1048,7 @@ int RGWRadosStore::create_bucket(RGWUser& u, const rgw_bucket& b, placement_rule.inherit_from(bucket->get_info().placement_rule); // don't allow changes to the acl policy - int r = rgw_op_get_bucket_policy_from_attr(this, u, bucket->get_attrs(), + int r = rgw_op_get_bucket_policy_from_attr(dpp, this, u, bucket->get_attrs(), &old_policy, y); if (r >= 0 && old_policy != policy) { bucket_out->swap(bucket); @@ -1058,7 +1062,7 @@ int RGWRadosStore::create_bucket(RGWUser& u, const rgw_bucket& b, if (!svc()->zone->is_meta_master()) { JSONParser jp; - ret = forward_request_to_master(&u, NULL, in_data, &jp, req_info, y); + ret = forward_request_to_master(dpp, &u, NULL, in_data, &jp, req_info, y); if (ret < 0) { return ret; } @@ -1066,9 +1070,9 @@ int RGWRadosStore::create_bucket(RGWUser& u, const rgw_bucket& b, JSONDecoder::decode_json("entry_point_object_ver", ep_objv, &jp); JSONDecoder::decode_json("object_ver", objv, &jp); JSONDecoder::decode_json("bucket_info", master_info, &jp); - ldpp_dout(this, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver=" << objv.ver << dendl; + ldpp_dout(dpp, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver=" << objv.ver << dendl; std::time_t ctime = ceph::real_clock::to_time_t(master_info.creation_time); - ldpp_dout(this, 20) << "got creation time: << " << std::put_time(std::localtime(&ctime), "%F %T") << dendl; + ldpp_dout(dpp, 20) << "got creation time: << " << std::put_time(std::localtime(&ctime), "%F %T") << dendl; pmaster_bucket= &master_info.bucket; creation_time = master_info.creation_time; pmaster_num_shards = &master_info.layout.current_index.layout.normal.num_shards; @@ -1090,7 +1094,7 @@ int RGWRadosStore::create_bucket(RGWUser& u, const rgw_bucket& b, if (*existed) { rgw_placement_rule selected_placement_rule; - ret = svc()->zone->select_bucket_placement(u.get_info(), + ret = svc()->zone->select_bucket_placement(dpp, u.get_info(), zid, placement_rule, &selected_placement_rule, nullptr, y); if (selected_placement_rule != info.placement_rule) { @@ -1104,7 +1108,7 @@ int RGWRadosStore::create_bucket(RGWUser& u, const rgw_bucket& b, zid, placement_rule, swift_ver_location, pquota_info, attrs, info, pobjv, &ep_objv, creation_time, - pmaster_bucket, pmaster_num_shards, y, exclusive); + pmaster_bucket, pmaster_num_shards, y, dpp, exclusive); if (ret == -EEXIST) { *existed = true; ret = 0; @@ -1126,9 +1130,9 @@ std::unique_ptr RGWRadosStore::get_lifecycle(void) return std::unique_ptr(new RadosLifecycle(this)); } -int RGWRadosStore::delete_raw_obj(const rgw_raw_obj& obj) +int RGWRadosStore::delete_raw_obj(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj) { - return rados->delete_raw_obj(obj); + return rados->delete_raw_obj(dpp, obj); } void RGWRadosStore::get_raw_obj(const rgw_placement_rule& placement_rule, const rgw_obj& obj, rgw_raw_obj* raw_obj) @@ -1136,12 +1140,12 @@ void RGWRadosStore::get_raw_obj(const rgw_placement_rule& placement_rule, const rados->obj_to_raw(placement_rule, obj, raw_obj); } -int RGWRadosStore::get_raw_chunk_size(const rgw_raw_obj& obj, uint64_t* chunk_size) +int RGWRadosStore::get_raw_chunk_size(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, uint64_t* chunk_size) { - return rados->get_max_chunk_size(obj.pool, 
chunk_size); + return rados->get_max_chunk_size(obj.pool, chunk_size, dpp); } -MPRadosSerializer::MPRadosSerializer(RGWRadosStore* store, RGWRadosObject* obj, const std::string& lock_name) : +MPRadosSerializer::MPRadosSerializer(const DoutPrefixProvider *dpp, RGWRadosStore* store, RGWRadosObject* obj, const std::string& lock_name) : lock(lock_name) { rgw_pool meta_pool; @@ -1151,15 +1155,15 @@ MPRadosSerializer::MPRadosSerializer(RGWRadosStore* store, RGWRadosObject* obj, oid = raw_obj.oid; store->getRados()->get_obj_data_pool(obj->get_bucket()->get_placement_rule(), obj->get_obj(), &meta_pool); - store->getRados()->open_pool_ctx(meta_pool, ioctx, true); + store->getRados()->open_pool_ctx(dpp, meta_pool, ioctx, true); } -int MPRadosSerializer::try_lock(utime_t dur, optional_yield y) +int MPRadosSerializer::try_lock(const DoutPrefixProvider *dpp, utime_t dur, optional_yield y) { op.assert_exists(); lock.set_duration(dur); lock.lock_exclusive(&op); - int ret = rgw_rados_operate(ioctx, oid, &op, y); + int ret = rgw_rados_operate(dpp, ioctx, oid, &op, y); if (! ret) { locked = true; } @@ -1173,7 +1177,7 @@ LCRadosSerializer::LCRadosSerializer(RGWRadosStore* store, const std::string& _o lock.set_cookie(cookie); } -int LCRadosSerializer::try_lock(utime_t dur, optional_yield y) +int LCRadosSerializer::try_lock(const DoutPrefixProvider *dpp, utime_t dur, optional_yield y) { lock.set_duration(dur); return lock.lock_exclusive(ioctx, oid); @@ -1274,7 +1278,7 @@ LCSerializer* RadosLifecycle::get_serializer(const std::string& lock_name, const } // namespace rgw::sal -rgw::sal::RGWRadosStore *RGWStoreManager::init_storage_provider(CephContext *cct, bool use_gc_thread, bool use_lc_thread, bool quota_threads, bool run_sync_thread, bool run_reshard_thread, bool use_cache) +rgw::sal::RGWRadosStore *RGWStoreManager::init_storage_provider(const DoutPrefixProvider *dpp, CephContext *cct, bool use_gc_thread, bool use_lc_thread, bool quota_threads, bool run_sync_thread, bool run_reshard_thread, bool use_cache) { RGWRados *rados = new RGWRados; rgw::sal::RGWRadosStore *store = new rgw::sal::RGWRadosStore(); @@ -1288,7 +1292,7 @@ rgw::sal::RGWRadosStore *RGWStoreManager::init_storage_provider(CephContext *cct .set_run_quota_threads(quota_threads) .set_run_sync_thread(run_sync_thread) .set_run_reshard_thread(run_reshard_thread) - .initialize(cct) < 0) { + .initialize(cct, dpp) < 0) { delete store; return NULL; } @@ -1296,7 +1300,7 @@ rgw::sal::RGWRadosStore *RGWStoreManager::init_storage_provider(CephContext *cct return store; } -rgw::sal::RGWRadosStore *RGWStoreManager::init_raw_storage_provider(CephContext *cct) +rgw::sal::RGWRadosStore *RGWStoreManager::init_raw_storage_provider(const DoutPrefixProvider *dpp, CephContext *cct) { RGWRados *rados = new RGWRados; rgw::sal::RGWRadosStore *store = new rgw::sal::RGWRadosStore(); @@ -1306,7 +1310,7 @@ rgw::sal::RGWRadosStore *RGWStoreManager::init_raw_storage_provider(CephContext rados->set_context(cct); - int ret = rados->init_svc(true); + int ret = rados->init_svc(true, dpp); if (ret < 0) { ldout(cct, 0) << "ERROR: failed to init services (ret=" << cpp_strerror(-ret) << ")" << dendl; delete store; @@ -1321,9 +1325,9 @@ rgw::sal::RGWRadosStore *RGWStoreManager::init_raw_storage_provider(CephContext return store; } -int rgw::sal::RGWRadosStore::get_obj_head_ioctx(const RGWBucketInfo& bucket_info, const rgw_obj& obj, librados::IoCtx *ioctx) +int rgw::sal::RGWRadosStore::get_obj_head_ioctx(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& 
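With MPRadosSerializer and LCRadosSerializer now taking the dpp in their constructors and in try_lock(), and rgw_rados_operate() taking it as the first argument, a call site looks roughly like the sketch below. The function name, lock name and duration are illustrative and error handling is trimmed:

    // Illustrative caller: obtain the object's multipart serializer and
    // take the lock, passing the same dpp so lock failures are logged
    // with the caller's prefix.
    int lock_for_completion(const DoutPrefixProvider *dpp,
                            rgw::sal::RGWObject *obj,
                            optional_yield y)
    {
      // get_serializer() and try_lock() both take the dpp after this patch.
      auto *serializer = obj->get_serializer(dpp, "example-mp-lock");
      int ret = serializer->try_lock(dpp, utime_t(30, 0), y);
      if (ret < 0) {
        ldpp_dout(dpp, 0) << "failed to take lock: " << ret << dendl;
        delete serializer;
        return ret;
      }
      // ... do the protected work, then unlock and delete the serializer.
      return 0;
    }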
obj, librados::IoCtx *ioctx) { - return rados->get_obj_head_ioctx(bucket_info, obj, ioctx); + return rados->get_obj_head_ioctx(dpp, bucket_info, obj, ioctx); } void RGWStoreManager::close_storage(rgw::sal::RGWRadosStore *store) diff --git a/src/rgw/rgw_sal_rados.h b/src/rgw/rgw_sal_rados.h index 3f22a52d4ce2e..e5edafeea050d 100644 --- a/src/rgw/rgw_sal_rados.h +++ b/src/rgw/rgw_sal_rados.h @@ -33,13 +33,13 @@ class RGWRadosUser : public RGWUser { RGWRadosUser(RGWRadosStore *_st) : store(_st) { } RGWRadosUser() {} - int list_buckets(const std::string& marker, const std::string& end_marker, + int list_buckets(const DoutPrefixProvider *dpp, const std::string& marker, const std::string& end_marker, uint64_t max, bool need_stats, RGWBucketList& buckets, optional_yield y) override; RGWBucket* create_bucket(rgw_bucket& bucket, ceph::real_time creation_time); /* Placeholders */ - virtual int load_by_id(optional_yield y); + virtual int load_by_id(const DoutPrefixProvider *dpp, optional_yield y); friend class RGWRadosBucket; }; @@ -61,11 +61,11 @@ class RGWRadosObject : public RGWObject { public: RadosReadOp(RGWRadosObject *_source, RGWObjectCtx *_rctx); - virtual int prepare(optional_yield y) override; - virtual int read(int64_t ofs, int64_t end, bufferlist& bl, optional_yield y) override; - virtual int iterate(int64_t ofs, int64_t end, RGWGetDataCB *cb, optional_yield y) override; - virtual int get_manifest(RGWObjManifest **pmanifest, optional_yield y) override; - virtual int get_attr(const char *name, bufferlist& dest, optional_yield y) override; + virtual int prepare(optional_yield y, const DoutPrefixProvider *dpp) override; + virtual int read(int64_t ofs, int64_t end, bufferlist& bl, optional_yield y, const DoutPrefixProvider *dpp) override; + virtual int iterate(const DoutPrefixProvider *dpp, int64_t ofs, int64_t end, RGWGetDataCB *cb, optional_yield y) override; + virtual int get_manifest(const DoutPrefixProvider *dpp, RGWObjManifest **pmanifest, optional_yield y) override; + virtual int get_attr(const DoutPrefixProvider *dpp, const char *name, bufferlist& dest, optional_yield y) override; }; struct RadosWriteOp : public WriteOp { @@ -79,7 +79,7 @@ class RGWRadosObject : public RGWObject { RadosWriteOp(RGWRadosObject* _source, RGWObjectCtx* _rctx); virtual int prepare(optional_yield y) override; - virtual int write_meta(uint64_t size, uint64_t accounted_size, optional_yield y) override; + virtual int write_meta(const DoutPrefixProvider *dpp, uint64_t size, uint64_t accounted_size, optional_yield y) override; //virtual int write_data(const char *data, uint64_t ofs, uint64_t len, bool exclusive) override; }; @@ -99,7 +99,7 @@ class RGWRadosObject : public RGWObject { int read(off_t offset, off_t length, std::iostream& stream) { return length; } int write(off_t offset, off_t length, std::iostream& stream) { return length; } - virtual int delete_object(RGWObjectCtx* obj_ctx, ACLOwner obj_owner, + virtual int delete_object(const DoutPrefixProvider *dpp, RGWObjectCtx* obj_ctx, ACLOwner obj_owner, ACLOwner bucket_owner, ceph::real_time unmod_since, bool high_precision_time, uint64_t epoch, std::string& version_id,optional_yield y) override; @@ -123,11 +123,11 @@ class RGWRadosObject : public RGWObject { virtual void set_atomic(RGWObjectCtx *rctx) const; virtual void set_prefetch_data(RGWObjectCtx *rctx); - virtual int get_obj_state(RGWObjectCtx *rctx, RGWBucket& bucket, RGWObjState **state, optional_yield y, bool follow_olh = true) override; - virtual int set_obj_attrs(RGWObjectCtx* rctx, RGWAttrs* 
setattrs, RGWAttrs* delattrs, optional_yield y, rgw_obj* target_obj = NULL) override; - virtual int get_obj_attrs(RGWObjectCtx *rctx, optional_yield y, rgw_obj* target_obj = NULL) override; - virtual int modify_obj_attrs(RGWObjectCtx *rctx, const char *attr_name, bufferlist& attr_val, optional_yield y) override; - virtual int delete_obj_attrs(RGWObjectCtx *rctx, const char *attr_name, optional_yield y) override; + virtual int get_obj_state(const DoutPrefixProvider *dpp, RGWObjectCtx *rctx, RGWBucket& bucket, RGWObjState **state, optional_yield y, bool follow_olh = true) override; + virtual int set_obj_attrs(const DoutPrefixProvider *dpp, RGWObjectCtx* rctx, RGWAttrs* setattrs, RGWAttrs* delattrs, optional_yield y, rgw_obj* target_obj = NULL) override; + virtual int get_obj_attrs(RGWObjectCtx *rctx, optional_yield y, const DoutPrefixProvider *dpp, rgw_obj* target_obj = NULL) override; + virtual int modify_obj_attrs(RGWObjectCtx *rctx, const char *attr_name, bufferlist& attr_val, optional_yield y, const DoutPrefixProvider *dpp) override; + virtual int delete_obj_attrs(const DoutPrefixProvider *dpp, RGWObjectCtx *rctx, const char *attr_name, optional_yield y) override; virtual int copy_obj_data(RGWObjectCtx& rctx, RGWBucket* dest_bucket, RGWObject* dest_obj, uint16_t olh_epoch, std::string* petag, const DoutPrefixProvider *dpp, optional_yield y) override; virtual bool is_expired() override; virtual void gen_rand_obj_instance_name() override; @@ -136,7 +136,7 @@ class RGWRadosObject : public RGWObject { virtual std::unique_ptr clone() { return std::unique_ptr(new RGWRadosObject(*this)); } - virtual MPSerializer* get_serializer(const std::string& lock_name) override; + virtual MPSerializer* get_serializer(const DoutPrefixProvider *dpp, const std::string& lock_name) override; virtual int transition(RGWObjectCtx& rctx, RGWBucket* bucket, const rgw_placement_rule& placement_rule, @@ -144,7 +144,8 @@ class RGWRadosObject : public RGWObject { uint64_t olh_epoch, const DoutPrefixProvider *dpp, optional_yield y) override; - virtual int get_max_chunk_size(rgw_placement_rule placement_rule, + virtual int get_max_chunk_size(const DoutPrefixProvider *dpp, + rgw_placement_rule placement_rule, uint64_t *max_chunk_size, uint64_t *alignment = nullptr) override; virtual void get_max_aligned_size(uint64_t size, uint64_t alignment, uint64_t *max_size) override; @@ -163,14 +164,14 @@ class RGWRadosObject : public RGWObject { virtual std::unique_ptr get_write_op(RGWObjectCtx *) override; /* OMAP */ - virtual int omap_get_vals_by_keys(const std::string& oid, + virtual int omap_get_vals_by_keys(const DoutPrefixProvider *dpp, const std::string& oid, const std::set& keys, RGWAttrs *vals) override; - virtual int omap_set_val_by_key(const std::string& key, bufferlist& val, + virtual int omap_set_val_by_key(const DoutPrefixProvider *dpp, const std::string& key, bufferlist& val, bool must_exist, optional_yield y) override; private: - int read_attrs(RGWRados::Object::Read &read_op, optional_yield y, rgw_obj *target_obj = nullptr); + int read_attrs(RGWRados::Object::Read &read_op, optional_yield y, const DoutPrefixProvider *dpp, rgw_obj *target_obj = nullptr); }; class RGWRadosBucket : public RGWBucket { @@ -222,34 +223,34 @@ class RGWRadosBucket : public RGWBucket { ~RGWRadosBucket() { } - virtual int load_by_name(const std::string& tenant, const std::string& bucket_name, const std::string bucket_instance_id, RGWSysObjectCtx *rctx, optional_yield y) override; + virtual int load_by_name(const DoutPrefixProvider *dpp, const 
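The attribute paths of RGWRadosObject carry the dpp as well, though its position varies between overrides (first argument for set_obj_attrs()/delete_obj_attrs(), after the optional_yield for get_obj_attrs() and modify_obj_attrs()), so call sites have to match each signature individually. A sketch of a caller under those signatures; the function, attribute name and value are made up:

    // Illustrative: read an object's attrs, then set one, reusing the
    // same dpp for both SAL calls so the resulting log lines share the
    // caller's prefix.
    int tag_object(const DoutPrefixProvider *dpp, RGWObjectCtx& rctx,
                   rgw::sal::RGWObject *obj, optional_yield y)
    {
      int ret = obj->get_obj_attrs(&rctx, y, dpp);   // dpp follows the yield token here
      if (ret < 0) {
        ldpp_dout(dpp, 4) << "get_obj_attrs failed: " << ret << dendl;
        return ret;
      }
      bufferlist bl;
      bl.append("example-value");
      return obj->modify_obj_attrs(&rctx, "user.rgw.example", bl, y, dpp);
    }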
std::string& tenant, const std::string& bucket_name, const std::string bucket_instance_id, RGWSysObjectCtx *rctx, optional_yield y) override; virtual std::unique_ptr get_object(const rgw_obj_key& k) override; RGWBucketList* list(void) { return new RGWBucketList(); } - virtual int list(ListParams&, int, ListResults&, optional_yield y) override; + virtual int list(const DoutPrefixProvider *dpp, ListParams&, int, ListResults&, optional_yield y) override; RGWObject* create_object(const rgw_obj_key& key /* Attributes */) override; - virtual int remove_bucket(bool delete_children, std::string prefix, std::string delimiter, bool forward_to_master, req_info* req_info, optional_yield y) override; + virtual int remove_bucket(const DoutPrefixProvider *dpp, bool delete_children, std::string prefix, std::string delimiter, bool forward_to_master, req_info* req_info, optional_yield y) override; RGWAccessControlPolicy& get_acl(void) { return acls; } - virtual int set_acl(RGWAccessControlPolicy& acl, optional_yield y) override; - virtual int get_bucket_info(optional_yield y) override; - virtual int get_bucket_stats(RGWBucketInfo& bucket_info, int shard_id, + virtual int set_acl(const DoutPrefixProvider *dpp, RGWAccessControlPolicy& acl, optional_yield y) override; + virtual int get_bucket_info(const DoutPrefixProvider *dpp, optional_yield y) override; + virtual int get_bucket_stats(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, int shard_id, std::string *bucket_ver, std::string *master_ver, std::map& stats, std::string *max_marker = nullptr, bool *syncstopped = nullptr) override; - virtual int read_bucket_stats(optional_yield y) override; - virtual int sync_user_stats(optional_yield y) override; - virtual int update_container_stats(void) override; - virtual int check_bucket_shards(void) override; - virtual int link(RGWUser* new_user, optional_yield y) override; + virtual int read_bucket_stats(const DoutPrefixProvider *dpp, optional_yield y) override; + virtual int sync_user_stats(const DoutPrefixProvider *dpp, optional_yield y) override; + virtual int update_container_stats(const DoutPrefixProvider *dpp) override; + virtual int check_bucket_shards(const DoutPrefixProvider *dpp) override; + virtual int link(const DoutPrefixProvider *dpp, RGWUser* new_user, optional_yield y) override; virtual int unlink(RGWUser* new_user, optional_yield y) override; - virtual int chown(RGWUser* new_user, RGWUser* old_user, optional_yield y) override; - virtual int put_instance_info(bool exclusive, ceph::real_time mtime) override; + virtual int chown(RGWUser* new_user, RGWUser* old_user, optional_yield y, const DoutPrefixProvider *dpp) override; + virtual int put_instance_info(const DoutPrefixProvider *dpp, bool exclusive, ceph::real_time mtime) override; virtual bool is_owner(RGWUser* user) override; - virtual int check_empty(optional_yield y) override; + virtual int check_empty(const DoutPrefixProvider *dpp, optional_yield y) override; virtual int check_quota(RGWQuotaInfo& user_quota, RGWQuotaInfo& bucket_quota, uint64_t obj_size, optional_yield y, bool check_size_only = false) override; - virtual int set_instance_attrs(RGWAttrs& attrs, optional_yield y) override; - virtual int try_refresh_info(ceph::real_time *pmtime) override; - virtual int read_usage(uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, + virtual int set_instance_attrs(const DoutPrefixProvider *dpp, RGWAttrs& attrs, optional_yield y) override; + virtual int try_refresh_info(const DoutPrefixProvider *dpp, ceph::real_time *pmtime) 
override; + virtual int read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, bool *is_truncated, RGWUsageIter& usage_iter, map& usage) override; virtual std::unique_ptr clone() { @@ -275,10 +276,11 @@ class RGWRadosStore : public RGWStore { virtual std::unique_ptr get_user(const rgw_user& u); virtual std::unique_ptr get_object(const rgw_obj_key& k) override; - virtual int get_bucket(RGWUser* u, const rgw_bucket& b, std::unique_ptr* bucket, optional_yield y) override; + virtual int get_bucket(const DoutPrefixProvider *dpp, RGWUser* u, const rgw_bucket& b, std::unique_ptr* bucket, optional_yield y) override; virtual int get_bucket(RGWUser* u, const RGWBucketInfo& i, std::unique_ptr* bucket) override; - virtual int get_bucket(RGWUser* u, const std::string& tenant, const std::string&name, std::unique_ptr* bucket, optional_yield y) override; - virtual int create_bucket(RGWUser& u, const rgw_bucket& b, + virtual int get_bucket(const DoutPrefixProvider *dpp, RGWUser* u, const std::string& tenant, const std::string&name, std::unique_ptr* bucket, optional_yield y) override; + virtual int create_bucket(const DoutPrefixProvider *dpp, + RGWUser& u, const rgw_bucket& b, const std::string& zonegroup_id, rgw_placement_rule& placement_rule, std::string& swift_ver_location, @@ -295,19 +297,19 @@ class RGWRadosStore : public RGWStore { optional_yield y); virtual RGWBucketList* list_buckets(void) { return new RGWBucketList(); } virtual bool is_meta_master() override; - virtual int forward_request_to_master(RGWUser* user, obj_version *objv, + virtual int forward_request_to_master(const DoutPrefixProvider *dpp, RGWUser* user, obj_version *objv, bufferlist& in_data, JSONParser *jp, req_info& info, optional_yield y) override; - virtual int defer_gc(RGWObjectCtx *rctx, RGWBucket* bucket, RGWObject* obj, + virtual int defer_gc(const DoutPrefixProvider *dpp, RGWObjectCtx *rctx, RGWBucket* bucket, RGWObject* obj, optional_yield y) override; virtual const RGWZoneGroup& get_zonegroup() override; virtual int get_zonegroup(const string& id, RGWZoneGroup& zonegroup) override; virtual int cluster_stat(RGWClusterStat& stats) override; virtual std::unique_ptr get_lifecycle(void) override; virtual RGWLC* get_rgwlc(void) { return rados->get_lc(); } - virtual int delete_raw_obj(const rgw_raw_obj& obj) override; + virtual int delete_raw_obj(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj) override; virtual void get_raw_obj(const rgw_placement_rule& placement_rule, const rgw_obj& obj, rgw_raw_obj* raw_obj) override; - virtual int get_raw_chunk_size(const rgw_raw_obj& obj, uint64_t* chunk_size) override; + virtual int get_raw_chunk_size(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, uint64_t* chunk_size) override; void setRados(RGWRados * st) { rados = st; } RGWRados *getRados(void) { return rados; } @@ -324,14 +326,9 @@ class RGWRadosStore : public RGWStore { virtual CephContext *ctx(void) { return rados->ctx(); } - int get_obj_head_ioctx(const RGWBucketInfo& bucket_info, const rgw_obj& obj, + int get_obj_head_ioctx(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, librados::IoCtx *ioctx); - // implements DoutPrefixProvider - std::ostream& gen_prefix(std::ostream& out) const { return out << "RGWRadosStore "; } - CephContext* get_cct() const override { return rados->ctx(); } - unsigned get_subsys() const override { return ceph_subsys_rgw; } - const std::string& get_luarocks_path() const override { return luarocks_path; } @@ 
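Note that RGWRadosStore stops implementing DoutPrefixProvider itself here (its gen_prefix()/get_cct()/get_subsys() overrides are deleted above), so code that used to log through the store must now be handed a provider. Anything implementing those three members will do; a minimal illustrative one, with a made-up class name and prefix string:

    #include "common/dout.h"   // DoutPrefixProvider
    // ceph_subsys_rgw is assumed to be visible, as it is in rgw translation units.

    // Every ldpp_dout issued with this provider logs under the rgw
    // subsystem and prepends "example-op: " to the message.
    struct ExampleDPP : public DoutPrefixProvider {
      CephContext *cct;
      explicit ExampleDPP(CephContext *c) : cct(c) {}

      CephContext *get_cct() const override { return cct; }
      unsigned get_subsys() const override { return ceph_subsys_rgw; }
      std::ostream &gen_prefix(std::ostream &out) const override {
        return out << "example-op: ";
      }
    };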
-347,9 +344,9 @@ class MPRadosSerializer : public MPSerializer { librados::ObjectWriteOperation op; public: - MPRadosSerializer(RGWRadosStore* store, RGWRadosObject* obj, const std::string& lock_name); + MPRadosSerializer(const DoutPrefixProvider *dpp, RGWRadosStore* store, RGWRadosObject* obj, const std::string& lock_name); - virtual int try_lock(utime_t dur, optional_yield y) override; + virtual int try_lock(const DoutPrefixProvider *dpp, utime_t dur, optional_yield y) override; int unlock() { return lock.unlock(&ioctx, oid); } @@ -363,7 +360,7 @@ class LCRadosSerializer : public LCSerializer { public: LCRadosSerializer(RGWRadosStore* store, const std::string& oid, const std::string& lock_name, const std::string& cookie); - virtual int try_lock(utime_t dur, optional_yield y) override; + virtual int try_lock(const DoutPrefixProvider *dpp, utime_t dur, optional_yield y) override; int unlock() { return lock.unlock(ioctx, oid); } @@ -391,18 +388,18 @@ public: class RGWStoreManager { public: RGWStoreManager() {} - static rgw::sal::RGWRadosStore *get_storage(CephContext *cct, bool use_gc_thread, bool use_lc_thread, bool quota_threads, + static rgw::sal::RGWRadosStore *get_storage(const DoutPrefixProvider *dpp, CephContext *cct, bool use_gc_thread, bool use_lc_thread, bool quota_threads, bool run_sync_thread, bool run_reshard_thread, bool use_cache = true) { - rgw::sal::RGWRadosStore *store = init_storage_provider(cct, use_gc_thread, use_lc_thread, + rgw::sal::RGWRadosStore *store = init_storage_provider(dpp, cct, use_gc_thread, use_lc_thread, quota_threads, run_sync_thread, run_reshard_thread, use_cache); return store; } - static rgw::sal::RGWRadosStore *get_raw_storage(CephContext *cct) { - rgw::sal::RGWRadosStore *rados = init_raw_storage_provider(cct); + static rgw::sal::RGWRadosStore *get_raw_storage(const DoutPrefixProvider *dpp, CephContext *cct) { + rgw::sal::RGWRadosStore *rados = init_raw_storage_provider(dpp, cct); return rados; } - static rgw::sal::RGWRadosStore *init_storage_provider(CephContext *cct, bool use_gc_thread, bool use_lc_thread, bool quota_threads, bool run_sync_thread, bool run_reshard_thread, bool use_metadata_cache); - static rgw::sal::RGWRadosStore *init_raw_storage_provider(CephContext *cct); + static rgw::sal::RGWRadosStore *init_storage_provider(const DoutPrefixProvider *dpp, CephContext *cct, bool use_gc_thread, bool use_lc_thread, bool quota_threads, bool run_sync_thread, bool run_reshard_thread, bool use_metadata_cache); + static rgw::sal::RGWRadosStore *init_raw_storage_provider(const DoutPrefixProvider *dpp, CephContext *cct); static void close_storage(rgw::sal::RGWRadosStore *store); }; diff --git a/src/rgw/rgw_service.cc b/src/rgw/rgw_service.cc index 7c7d8a02675d4..37d24fecc426d 100644 --- a/src/rgw/rgw_service.cc +++ b/src/rgw/rgw_service.cc @@ -47,7 +47,8 @@ int RGWServices_Def::init(CephContext *cct, bool have_cache, bool raw, bool run_sync, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { finisher = std::make_unique(cct); bucket_sobj = std::make_unique(cct); @@ -73,7 +74,7 @@ int RGWServices_Def::init(CephContext *cct, user_rados = std::make_unique(cct); if (have_cache) { - sysobj_cache = std::make_unique(cct); + sysobj_cache = std::make_unique(dpp, cct); } vector meta_bes{meta_be_sobj.get(), meta_be_otp.get()}; @@ -113,34 +114,34 @@ int RGWServices_Def::init(CephContext *cct, can_shutdown = true; - int r = finisher->start(y); + int r = finisher->start(y, dpp); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to start finisher 
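RGWStoreManager::get_storage() and get_raw_storage() likewise require a dpp up front. A sketch of a startup call site, assuming the ExampleDPP helper sketched earlier and the global g_ceph_context from global/global_context.h; the thread flags are arbitrary:

    ExampleDPP dpp(g_ceph_context);
    rgw::sal::RGWRadosStore *store =
        RGWStoreManager::get_storage(&dpp, g_ceph_context,
                                     true /* gc */, true /* lc */,
                                     true /* quota */, true /* sync */,
                                     true /* reshard */);
    if (!store) {
      ldpp_dout(&dpp, -1) << "failed to initialize the rados store" << dendl;
    }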
service (" << cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to start finisher service (" << cpp_strerror(-r) << dendl; return r; } if (!raw) { - r = notify->start(y); + r = notify->start(y, dpp); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to start notify service (" << cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to start notify service (" << cpp_strerror(-r) << dendl; return r; } } - r = rados->start(y); + r = rados->start(y, dpp); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to start rados service (" << cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to start rados service (" << cpp_strerror(-r) << dendl; return r; } if (!raw) { - r = zone->start(y); + r = zone->start(y, dpp); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to start zone service (" << cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to start zone service (" << cpp_strerror(-r) << dendl; return r; } - r = datalog_rados->start(&zone->get_zone(), + r = datalog_rados->start(dpp, &zone->get_zone(), zone->get_zone_params(), rados->get_rados_handle()); if (r < 0) { @@ -148,97 +149,97 @@ int RGWServices_Def::init(CephContext *cct, return r; } - r = mdlog->start(y); + r = mdlog->start(y, dpp); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to start mdlog service (" << cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to start mdlog service (" << cpp_strerror(-r) << dendl; return r; } - r = sync_modules->start(y); + r = sync_modules->start(y, dpp); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to start sync modules service (" << cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to start sync modules service (" << cpp_strerror(-r) << dendl; return r; } } - r = cls->start(y); + r = cls->start(y, dpp); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to start cls service (" << cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to start cls service (" << cpp_strerror(-r) << dendl; return r; } - r = config_key_rados->start(y); + r = config_key_rados->start(y, dpp); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to start config_key service (" << cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to start config_key service (" << cpp_strerror(-r) << dendl; return r; } - r = zone_utils->start(y); + r = zone_utils->start(y, dpp); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to start zone_utils service (" << cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to start zone_utils service (" << cpp_strerror(-r) << dendl; return r; } - r = quota->start(y); + r = quota->start(y, dpp); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to start quota service (" << cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to start quota service (" << cpp_strerror(-r) << dendl; return r; } - r = sysobj_core->start(y); + r = sysobj_core->start(y, dpp); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to start sysobj_core service (" << cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to start sysobj_core service (" << cpp_strerror(-r) << dendl; return r; } if (have_cache) { - r = sysobj_cache->start(y); + r = sysobj_cache->start(y, dpp); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to start sysobj_cache service (" << cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to start sysobj_cache service (" << cpp_strerror(-r) << dendl; return r; } } - r = sysobj->start(y); + r = sysobj->start(y, dpp); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to start sysobj service (" << 
cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to start sysobj service (" << cpp_strerror(-r) << dendl; return r; } if (!raw) { - r = meta_be_sobj->start(y); + r = meta_be_sobj->start(y, dpp); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to start meta_be_sobj service (" << cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to start meta_be_sobj service (" << cpp_strerror(-r) << dendl; return r; } - r = meta->start(y); + r = meta->start(y, dpp); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to start meta service (" << cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to start meta service (" << cpp_strerror(-r) << dendl; return r; } - r = bucket_sobj->start(y); + r = bucket_sobj->start(y, dpp); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to start bucket service (" << cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to start bucket service (" << cpp_strerror(-r) << dendl; return r; } - r = bucket_sync_sobj->start(y); + r = bucket_sync_sobj->start(y, dpp); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to start bucket_sync service (" << cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to start bucket_sync service (" << cpp_strerror(-r) << dendl; return r; } - r = user_rados->start(y); + r = user_rados->start(y, dpp); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to start user_rados service (" << cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to start user_rados service (" << cpp_strerror(-r) << dendl; return r; } - r = otp->start(y); + r = otp->start(y, dpp); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to start otp service (" << cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to start otp service (" << cpp_strerror(-r) << dendl; return r; } } @@ -274,11 +275,11 @@ void RGWServices_Def::shutdown() } -int RGWServices::do_init(CephContext *_cct, bool have_cache, bool raw, bool run_sync, optional_yield y) +int RGWServices::do_init(CephContext *_cct, bool have_cache, bool raw, bool run_sync, optional_yield y, const DoutPrefixProvider *dpp) { cct = _cct; - int r = _svc.init(cct, have_cache, raw, run_sync, y); + int r = _svc.init(cct, have_cache, raw, run_sync, y, dpp); if (r < 0) { return r; } @@ -314,7 +315,7 @@ int RGWServices::do_init(CephContext *_cct, bool have_cache, bool raw, bool run_ return 0; } -int RGWServiceInstance::start(optional_yield y) +int RGWServiceInstance::start(optional_yield y, const DoutPrefixProvider *dpp) { if (start_state != StateInit) { return 0; @@ -323,7 +324,7 @@ int RGWServiceInstance::start(optional_yield y) start_state = StateStarting;; /* setting started prior to do_start() on purpose so that circular references can call start() on each other */ - int r = do_start(y); + int r = do_start(y, dpp); if (r < 0) { return r; } @@ -339,7 +340,7 @@ RGWCtlDef::_meta::_meta() {} RGWCtlDef::_meta::~_meta() {} -int RGWCtlDef::init(RGWServices& svc) +int RGWCtlDef::init(RGWServices& svc, const DoutPrefixProvider *dpp) { meta.mgr.reset(new RGWMetadataManager(svc.meta)); @@ -376,21 +377,22 @@ int RGWCtlDef::init(RGWServices& svc) bucket->init(user.get(), (RGWBucketMetadataHandler *)bucket_meta_handler, (RGWBucketInstanceMetadataHandler *)bi_meta_handler, - svc.datalog_rados); + svc.datalog_rados, + dpp); otp->init((RGWOTPMetadataHandler *)meta.otp.get()); return 0; } -int RGWCtl::init(RGWServices *_svc) +int RGWCtl::init(RGWServices *_svc, const DoutPrefixProvider *dpp) { svc = _svc; cct = svc->cct; - int r = _ctl.init(*svc); + int r = _ctl.init(*svc, dpp); 
if (r < 0) { - ldout(cct, 0) << "ERROR: failed to start init ctls (" << cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to start init ctls (" << cpp_strerror(-r) << dendl; return r; } diff --git a/src/rgw/rgw_service.h b/src/rgw/rgw_service.h index 36a3fed618dfc..883eeae10255c 100644 --- a/src/rgw/rgw_service.h +++ b/src/rgw/rgw_service.h @@ -29,14 +29,14 @@ protected: } start_state{StateInit}; virtual void shutdown() {} - virtual int do_start(optional_yield) { + virtual int do_start(optional_yield, const DoutPrefixProvider *dpp) { return 0; } public: RGWServiceInstance(CephContext *_cct) : cct(_cct) {} virtual ~RGWServiceInstance() {} - int start(optional_yield y); + int start(optional_yield y, const DoutPrefixProvider *dpp); bool is_started() { return (start_state == StateStarted); } @@ -108,7 +108,7 @@ struct RGWServices_Def RGWServices_Def(); ~RGWServices_Def(); - int init(CephContext *cct, bool have_cache, bool raw_storage, bool run_sync, optional_yield y); + int init(CephContext *cct, bool have_cache, bool raw_storage, bool run_sync, optional_yield y, const DoutPrefixProvider *dpp); void shutdown(); }; @@ -147,14 +147,14 @@ struct RGWServices RGWSI_SysObj_Core *core{nullptr}; RGWSI_User *user{nullptr}; - int do_init(CephContext *cct, bool have_cache, bool raw_storage, bool run_sync, optional_yield y); + int do_init(CephContext *cct, bool have_cache, bool raw_storage, bool run_sync, optional_yield y, const DoutPrefixProvider *dpp); - int init(CephContext *cct, bool have_cache, bool run_sync, optional_yield y) { - return do_init(cct, have_cache, false, run_sync, y); + int init(CephContext *cct, bool have_cache, bool run_sync, optional_yield y, const DoutPrefixProvider *dpp) { + return do_init(cct, have_cache, false, run_sync, y, dpp); } - int init_raw(CephContext *cct, bool have_cache, optional_yield y) { - return do_init(cct, have_cache, true, false, y); + int init_raw(CephContext *cct, bool have_cache, optional_yield y, const DoutPrefixProvider *dpp) { + return do_init(cct, have_cache, true, false, y, dpp); } void shutdown() { _svc.shutdown(); @@ -186,7 +186,7 @@ struct RGWCtlDef { RGWCtlDef(); ~RGWCtlDef(); - int init(RGWServices& svc); + int init(RGWServices& svc, const DoutPrefixProvider *dpp); }; struct RGWCtl { @@ -208,7 +208,7 @@ struct RGWCtl { RGWBucketCtl *bucket{nullptr}; RGWOTPCtl *otp{nullptr}; - int init(RGWServices *_svc); + int init(RGWServices *_svc, const DoutPrefixProvider *dpp); }; #endif diff --git a/src/rgw/rgw_sts.cc b/src/rgw/rgw_sts.cc index 8149ba1cabd60..741c16d397c01 100644 --- a/src/rgw/rgw_sts.cc +++ b/src/rgw/rgw_sts.cc @@ -276,16 +276,17 @@ int AssumeRoleRequest::validate_input() const return AssumeRoleRequestBase::validate_input(); } -std::tuple STSService::getRoleInfo(const string& arn, +std::tuple STSService::getRoleInfo(const DoutPrefixProvider *dpp, + const string& arn, optional_yield y) { if (auto r_arn = rgw::ARN::parse(arn); r_arn) { auto pos = r_arn->resource.find_last_of('/'); string roleName = r_arn->resource.substr(pos + 1); RGWRole role(cct, store->getRados()->pctl, roleName, r_arn->account); - if (int ret = role.get(y); ret < 0) { + if (int ret = role.get(dpp, y); ret < 0) { if (ret == -ENOENT) { - ldout(cct, 0) << "Role doesn't exist: " << roleName << dendl; + ldpp_dout(dpp, 0) << "Role doesn't exist: " << roleName << dendl; ret = -ERR_NO_ROLE_FOUND; } return make_tuple(ret, this->role); @@ -299,30 +300,30 @@ std::tuple STSService::getRoleInfo(const string& arn, } string r_path = role.get_path(); if (path != r_path) { - 
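The rgw_service.h hunk above changes RGWServiceInstance::start() and its do_start() hook to take the dpp, which is how every svc->start(y, dpp) call in RGWServices_Def::init() gets its logging prefix. A hypothetical service showing the override; the class name and log text are made up:

    #include "rgw_service.h"   // RGWServiceInstance, as modified above

    // do_start() now receives the dpp that was handed to
    // RGWServiceInstance::start(), so startup messages are prefixed like
    // the "failed to start ..." errors in rgw_service.cc.
    class ExampleService : public RGWServiceInstance {
    public:
      explicit ExampleService(CephContext *cct) : RGWServiceInstance(cct) {}

    protected:
      int do_start(optional_yield, const DoutPrefixProvider *dpp) override {
        ldpp_dout(dpp, 5) << "ExampleService starting" << dendl;
        return 0;
      }
    };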
ldout(cct, 0) << "Invalid Role ARN: Path in ARN does not match with the role path: " << path << " " << r_path << dendl; + ldpp_dout(dpp, 0) << "Invalid Role ARN: Path in ARN does not match with the role path: " << path << " " << r_path << dendl; return make_tuple(-EACCES, this->role); } this->role = std::move(role); return make_tuple(0, this->role); } } else { - ldout(cct, 0) << "Invalid role arn: " << arn << dendl; + ldpp_dout(dpp, 0) << "Invalid role arn: " << arn << dendl; return make_tuple(-EINVAL, this->role); } } -int STSService::storeARN(string& arn, optional_yield y) +int STSService::storeARN(const DoutPrefixProvider *dpp, string& arn, optional_yield y) { int ret = 0; RGWUserInfo info; - if (ret = rgw_get_user_info_by_uid(store->ctl()->user, user_id, info, y); ret < 0) { + if (ret = rgw_get_user_info_by_uid(dpp, store->ctl()->user, user_id, info, y); ret < 0) { return -ERR_NO_SUCH_ENTITY; } info.assumed_role_arn = arn; RGWObjVersionTracker objv_tracker; - if (ret = rgw_store_user_info(store->ctl()->user, info, &info, &objv_tracker, real_time(), + if (ret = rgw_store_user_info(dpp, store->ctl()->user, info, &info, &objv_tracker, real_time(), false, y); ret < 0) { return -ERR_INTERNAL_ERROR; } @@ -392,7 +393,8 @@ AssumeRoleWithWebIdentityResponse STSService::assumeRoleWithWebIdentity(AssumeRo return response; } -AssumeRoleResponse STSService::assumeRole(AssumeRoleRequest& req, +AssumeRoleResponse STSService::assumeRole(const DoutPrefixProvider *dpp, + AssumeRoleRequest& req, optional_yield y) { AssumeRoleResponse response; @@ -401,7 +403,7 @@ AssumeRoleResponse STSService::assumeRole(AssumeRoleRequest& req, //Get the role info which is being assumed boost::optional r_arn = rgw::ARN::parse(req.getRoleARN()); if (r_arn == boost::none) { - ldout(cct, 0) << "Error in parsing role arn: " << req.getRoleARN() << dendl; + ldpp_dout(dpp, 0) << "Error in parsing role arn: " << req.getRoleARN() << dendl; response.retCode = -EINVAL; return response; } @@ -439,7 +441,7 @@ AssumeRoleResponse STSService::assumeRole(AssumeRoleRequest& req, //Save ARN with the user string arn = response.user.getARN(); - response.retCode = storeARN(arn, y); + response.retCode = storeARN(dpp, arn, y); if (response.retCode < 0) { return response; } diff --git a/src/rgw/rgw_sts.h b/src/rgw/rgw_sts.h index a54ef3e1028f4..14c783c1cde01 100644 --- a/src/rgw/rgw_sts.h +++ b/src/rgw/rgw_sts.h @@ -229,14 +229,14 @@ class STSService { rgw_user user_id; RGWRole role; rgw::auth::Identity* identity; - int storeARN(string& arn, optional_yield y); + int storeARN(const DoutPrefixProvider *dpp, string& arn, optional_yield y); public: STSService() = default; STSService(CephContext* cct, rgw::sal::RGWRadosStore *store, rgw_user user_id, rgw::auth::Identity* identity) : cct(cct), store(store), user_id(user_id), identity(identity) {} - std::tuple getRoleInfo(const string& arn, optional_yield y); - AssumeRoleResponse assumeRole(AssumeRoleRequest& req, optional_yield y); + std::tuple getRoleInfo(const DoutPrefixProvider *dpp, const string& arn, optional_yield y); + AssumeRoleResponse assumeRole(const DoutPrefixProvider *dpp, AssumeRoleRequest& req, optional_yield y); GetSessionTokenResponse getSessionToken(GetSessionTokenRequest& req); AssumeRoleWithWebIdentityResponse assumeRoleWithWebIdentity(AssumeRoleWithWebIdentityRequest& req); }; diff --git a/src/rgw/rgw_swift_auth.cc b/src/rgw/rgw_swift_auth.cc index 136f9df0556c9..da0a370b71ab7 100644 --- a/src/rgw/rgw_swift_auth.cc +++ b/src/rgw/rgw_swift_auth.cc @@ -92,14 +92,14 @@ void 
TempURLEngine::get_owner_info(const DoutPrefixProvider* dpp, const req_stat if (uid.tenant.empty()) { const rgw_user tenanted_uid(uid.id, uid.id); - if (ctl->user->get_info_by_uid(tenanted_uid, &uinfo, s->yield) >= 0) { + if (ctl->user->get_info_by_uid(dpp, tenanted_uid, &uinfo, s->yield) >= 0) { /* Succeeded. */ bucket_tenant = uinfo.user_id.tenant; found = true; } } - if (!found && ctl->user->get_info_by_uid(uid, &uinfo, s->yield) < 0) { + if (!found && ctl->user->get_info_by_uid(dpp, uid, &uinfo, s->yield) < 0) { throw -EPERM; } else { bucket_tenant = uinfo.user_id.tenant; @@ -113,7 +113,7 @@ void TempURLEngine::get_owner_info(const DoutPrefixProvider* dpp, const req_stat /* Need to get user info of bucket owner. */ RGWBucketInfo bucket_info; RGWSI_MetaBackend_CtxParams bectx_params = RGWSI_MetaBackend_CtxParams_SObj(s->sysobj_ctx); - int ret = ctl->bucket->read_bucket_info(b, &bucket_info, y, RGWBucketCtl::BucketInstance::GetParams().set_bectx_params(bectx_params)); + int ret = ctl->bucket->read_bucket_info(b, &bucket_info, y, dpp, RGWBucketCtl::BucketInstance::GetParams().set_bectx_params(bectx_params)); if (ret < 0) { throw ret; } @@ -121,7 +121,7 @@ void TempURLEngine::get_owner_info(const DoutPrefixProvider* dpp, const req_stat ldpp_dout(dpp, 20) << "temp url user (bucket owner): " << bucket_info.owner << dendl; - if (ctl->user->get_info_by_uid(bucket_info.owner, &owner_info, s->yield) < 0) { + if (ctl->user->get_info_by_uid(dpp, bucket_info.owner, &owner_info, s->yield) < 0) { throw -EPERM; } } @@ -449,7 +449,7 @@ ExternalTokenEngine::authenticate(const DoutPrefixProvider* dpp, ldpp_dout(dpp, 10) << "swift user=" << swift_user << dendl; RGWUserInfo tmp_uinfo; - ret = ctl->user->get_info_by_swift(swift_user, &tmp_uinfo, s->yield); + ret = ctl->user->get_info_by_swift(dpp, swift_user, &tmp_uinfo, s->yield); if (ret < 0) { ldpp_dout(dpp, 0) << "NOTICE: couldn't map swift user" << dendl; throw ret; @@ -570,7 +570,7 @@ SignedTokenEngine::authenticate(const DoutPrefixProvider* dpp, } RGWUserInfo user_info; - ret = ctl->user->get_info_by_swift(swift_user, &user_info, s->yield); + ret = ctl->user->get_info_by_swift(dpp, swift_user, &user_info, s->yield); if (ret < 0) { throw ret; } @@ -687,7 +687,7 @@ void RGW_SWIFT_Auth_Get::execute(optional_yield y) user_str = user; - if ((ret = store->ctl()->user->get_info_by_swift(user_str, &info, s->yield)) < 0) + if ((ret = store->ctl()->user->get_info_by_swift(s, user_str, &info, s->yield)) < 0) { ret = -EACCES; goto done; diff --git a/src/rgw/rgw_sync.cc b/src/rgw/rgw_sync.cc index a934a2e28a50d..ac15956c6da99 100644 --- a/src/rgw/rgw_sync.cc +++ b/src/rgw/rgw_sync.cc @@ -52,7 +52,7 @@ string RGWSyncErrorLogger::get_shard_oid(const string& oid_prefix, int shard_id) return string(buf); } -RGWCoroutine *RGWSyncErrorLogger::log_error_cr(const string& source_zone, const string& section, const string& name, uint32_t error_code, const string& message) { +RGWCoroutine *RGWSyncErrorLogger::log_error_cr(const DoutPrefixProvider *dpp, const string& source_zone, const string& section, const string& name, uint32_t error_code, const string& message) { cls_log_entry entry; rgw_sync_error_info info(source_zone, error_code, message); @@ -63,7 +63,7 @@ RGWCoroutine *RGWSyncErrorLogger::log_error_cr(const string& source_zone, const uint32_t shard_id = ++counter % num_shards; - return new RGWRadosTimelogAddCR(store, oids[shard_id], entry); + return new RGWRadosTimelogAddCR(dpp, store, oids[shard_id], entry); } void RGWSyncBackoff::update_wait_time() @@ -90,7 +90,7 
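In the Swift auth hunks above, the call sites simply pass `s` where a dpp is expected (get_info_by_swift(s, ...)), since req_state provides the DoutPrefixProvider interface; any request-scoped helper can do the same. A tiny illustration with a hypothetical helper:

    // req_state can be handed to anything that takes a
    // const DoutPrefixProvider*, so per-request helpers log like this:
    static void note_request(req_state *s)
    {
      ldpp_dout(s, 20) << "processing trans_id=" << s->trans_id << dendl;
    }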
@@ void RGWSyncBackoff::backoff(RGWCoroutine *op) op->wait(utime_t(cur_wait, 0)); } -int RGWBackoffControlCR::operate() { +int RGWBackoffControlCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { // retry the operation until it succeeds while (true) { @@ -153,7 +153,7 @@ void rgw_mdlog_shard_data::decode_json(JSONObj *obj) { JSONDecoder::decode_json("entries", entries, obj); }; -int RGWShardCollectCR::operate() { +int RGWShardCollectCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { while (spawn_next()) { current_running++; @@ -239,12 +239,12 @@ RGWRemoteMetaLog::~RGWRemoteMetaLog() delete error_logger; } -int RGWRemoteMetaLog::read_log_info(rgw_mdlog_info *log_info) +int RGWRemoteMetaLog::read_log_info(const DoutPrefixProvider *dpp, rgw_mdlog_info *log_info) { rgw_http_param_pair pairs[] = { { "type", "metadata" }, { NULL, NULL } }; - int ret = conn->get_json_resource("/admin/log", pairs, null_yield, *log_info); + int ret = conn->get_json_resource(dpp, "/admin/log", pairs, null_yield, *log_info); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: failed to fetch mdlog info" << dendl; return ret; @@ -255,28 +255,28 @@ int RGWRemoteMetaLog::read_log_info(rgw_mdlog_info *log_info) return 0; } -int RGWRemoteMetaLog::read_master_log_shards_info(const string &master_period, map *shards_info) +int RGWRemoteMetaLog::read_master_log_shards_info(const DoutPrefixProvider *dpp, const string &master_period, map *shards_info) { if (store->svc()->zone->is_meta_master()) { return 0; } rgw_mdlog_info log_info; - int ret = read_log_info(&log_info); + int ret = read_log_info(dpp, &log_info); if (ret < 0) { return ret; } - return run(new RGWReadRemoteMDLogInfoCR(&sync_env, master_period, log_info.num_shards, shards_info)); + return run(dpp, new RGWReadRemoteMDLogInfoCR(&sync_env, master_period, log_info.num_shards, shards_info)); } -int RGWRemoteMetaLog::read_master_log_shards_next(const string& period, map shard_markers, map *result) +int RGWRemoteMetaLog::read_master_log_shards_next(const DoutPrefixProvider *dpp, const string& period, map shard_markers, map *result) { if (store->svc()->zone->is_meta_master()) { return 0; } - return run(new RGWListRemoteMDLogCR(&sync_env, period, shard_markers, 1, result)); + return run(dpp, new RGWListRemoteMDLogCR(&sync_env, period, shard_markers, 1, result)); } int RGWRemoteMetaLog::init() @@ -306,35 +306,35 @@ void RGWRemoteMetaLog::finish() #define CLONE_MAX_ENTRIES 100 -int RGWMetaSyncStatusManager::init() +int RGWMetaSyncStatusManager::init(const DoutPrefixProvider *dpp) { if (store->svc()->zone->is_meta_master()) { return 0; } if (!store->svc()->zone->get_master_conn()) { - lderr(store->ctx()) << "no REST connection to master zone" << dendl; + ldpp_dout(dpp, -1) << "no REST connection to master zone" << dendl; return -EIO; } - int r = rgw_init_ioctx(store->getRados()->get_rados_handle(), store->svc()->zone->get_zone_params().log_pool, ioctx, true); + int r = rgw_init_ioctx(dpp, store->getRados()->get_rados_handle(), store->svc()->zone->get_zone_params().log_pool, ioctx, true); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed to open log pool (" << store->svc()->zone->get_zone_params().log_pool << " ret=" << r << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to open log pool (" << store->svc()->zone->get_zone_params().log_pool << " ret=" << r << dendl; return r; } r = master_log.init(); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed to init remote log, r=" << r << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to init remote log, r=" << r << 
dendl; return r; } RGWMetaSyncEnv& sync_env = master_log.get_sync_env(); rgw_meta_sync_status sync_status; - r = read_sync_status(&sync_status); + r = read_sync_status(dpp, &sync_status); if (r < 0 && r != -ENOENT) { - lderr(store->ctx()) << "ERROR: failed to read sync status, r=" << r << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to read sync status, r=" << r << dendl; return r; } @@ -392,13 +392,14 @@ string RGWMetaSyncEnv::shard_obj_name(int shard_id) } class RGWAsyncReadMDLogEntries : public RGWAsyncRadosRequest { + const DoutPrefixProvider *dpp; rgw::sal::RGWRadosStore *store; RGWMetadataLog *mdlog; int shard_id; int max_entries; protected: - int _send_request() override { + int _send_request(const DoutPrefixProvider *dpp) override { real_time from_time; real_time end_time; @@ -406,7 +407,7 @@ protected: mdlog->init_list_entries(shard_id, from_time, end_time, marker, &handle); - int ret = mdlog->list_entries(handle, max_entries, entries, &marker, &truncated); + int ret = mdlog->list_entries(dpp, handle, max_entries, entries, &marker, &truncated); mdlog->complete_list_entries(handle); @@ -417,10 +418,10 @@ public: list entries; bool truncated; - RGWAsyncReadMDLogEntries(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *_store, + RGWAsyncReadMDLogEntries(const DoutPrefixProvider *_dpp, RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *_store, RGWMetadataLog* mdlog, int _shard_id, std::string _marker, int _max_entries) - : RGWAsyncRadosRequest(caller, cn), store(_store), mdlog(mdlog), + : RGWAsyncRadosRequest(caller, cn), dpp(_dpp), store(_store), mdlog(mdlog), shard_id(_shard_id), max_entries(_max_entries), marker(std::move(_marker)) {} }; @@ -450,9 +451,9 @@ public: } } - int send_request() override { + int send_request(const DoutPrefixProvider *dpp) override { marker = *pmarker; - req = new RGWAsyncReadMDLogEntries(this, stack->create_completion_notifier(), + req = new RGWAsyncReadMDLogEntries(dpp, this, stack->create_completion_notifier(), sync_env->store, mdlog, shard_id, marker, max_entries); sync_env->async_rados->queue(req); @@ -482,7 +483,7 @@ public: : RGWCoroutine(env->store->ctx()), env(env), http_op(NULL), period(period), shard_id(_shard_id), shard_info(_shard_info) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { auto store = env->store; RGWRESTConn *conn = store->svc()->zone->get_master_conn(); reenter(this) { @@ -502,7 +503,7 @@ public: init_new_io(http_op); - int ret = http_op->aio_read(); + int ret = http_op->aio_read(dpp); if (ret < 0) { ldpp_dout(env->dpp, 0) << "ERROR: failed to read from " << p << dendl; log_error() << "failed to send http operation: " << http_op->to_str() << " ret=" << ret << std::endl; @@ -550,7 +551,7 @@ public: : RGWSimpleCoroutine(env->store->ctx()), sync_env(env), http_op(NULL), period(period), shard_id(_shard_id), marker(_marker), max_entries(_max_entries), result(_result) {} - int send_request() override { + int send_request(const DoutPrefixProvider *dpp) override { RGWRESTConn *conn = sync_env->conn; char buf[32]; @@ -573,9 +574,9 @@ public: http_op = new RGWRESTReadResource(conn, p, pairs, NULL, sync_env->http_manager); init_new_io(http_op); - int ret = http_op->aio_read(); + int ret = http_op->aio_read(dpp); if (ret < 0) { - ldpp_dout(sync_env->dpp, 0) << "ERROR: failed to read from " << p << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to read from " << p << dendl; log_error() << "failed to send http operation: " << http_op->to_str() << " ret=" 
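RGWMetaSyncStatusManager::init() above also replaces lderr(store->ctx()) with ldpp_dout(dpp, -1); -1 is the severity lderr logs at, so the messages keep their error priority and additionally gain the caller's prefix. Side by side, the pattern is:

    // before: error logged against the CephContext only
    lderr(store->ctx()) << "ERROR: failed to read sync status, r=" << r << dendl;

    // after: same -1 (error) level, prefixed through the DoutPrefixProvider
    ldpp_dout(dpp, -1) << "ERROR: failed to read sync status, r=" << r << dendl;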
<< ret << std::endl; http_op->put(); return ret; @@ -645,7 +646,7 @@ public: } } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { int ret; reenter(this) { yield { @@ -660,7 +661,7 @@ public: } while (!lease_cr->is_locked()) { if (lease_cr->is_done()) { - ldpp_dout(sync_env->dpp, 5) << "lease cr failed, done early " << dendl; + ldpp_dout(dpp, 5) << "lease cr failed, done early " << dendl; set_status("lease lock failed, early abort"); return set_cr_error(lease_cr->get_ret_status()); } @@ -670,14 +671,14 @@ public: yield { set_status("writing sync status"); rgw::sal::RGWRadosStore *store = sync_env->store; - call(new RGWSimpleRadosWriteCR(sync_env->async_rados, store->svc()->sysobj, + call(new RGWSimpleRadosWriteCR(dpp, sync_env->async_rados, store->svc()->sysobj, rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env->status_oid()), status)); } if (retcode < 0) { set_status("failed to write sync status"); - ldpp_dout(sync_env->dpp, 0) << "ERROR: failed to write sync status, retcode=" << retcode << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to write sync status, retcode=" << retcode << dendl; yield lease_cr->go_down(); return set_cr_error(retcode); } @@ -700,7 +701,8 @@ public: marker.next_step_marker = info.marker; marker.timestamp = info.last_update; rgw::sal::RGWRadosStore *store = sync_env->store; - spawn(new RGWSimpleRadosWriteCR(sync_env->async_rados, + spawn(new RGWSimpleRadosWriteCR(dpp, + sync_env->async_rados, store->svc()->sysobj, rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env->shard_obj_name(i)), marker), true); @@ -710,7 +712,7 @@ public: set_status("changing sync state: build full sync maps"); status.state = rgw_meta_sync_info::StateBuildingFullSyncMaps; rgw::sal::RGWRadosStore *store = sync_env->store; - call(new RGWSimpleRadosWriteCR(sync_env->async_rados, store->svc()->sysobj, + call(new RGWSimpleRadosWriteCR(dpp, sync_env->async_rados, store->svc()->sysobj, rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env->status_oid()), status)); } @@ -754,7 +756,7 @@ bool RGWReadSyncStatusMarkersCR::spawn_next() using CR = RGWSimpleRadosReadCR; rgw_raw_obj obj{env->store->svc()->zone->get_zone_params().log_pool, env->shard_obj_name(shard_id)}; - spawn(new CR(env->async_rados, env->store->svc()->sysobj, obj, &markers[shard_id]), false); + spawn(new CR(env->dpp, env->async_rados, env->store->svc()->sysobj, obj, &markers[shard_id]), false); shard_id++; return true; } @@ -768,10 +770,10 @@ public: rgw_meta_sync_status *_status) : RGWCoroutine(_sync_env->cct), sync_env(_sync_env), sync_status(_status) {} - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int RGWReadSyncStatusCoroutine::operate() +int RGWReadSyncStatusCoroutine::operate(const DoutPrefixProvider *dpp) { reenter(this) { // read sync info @@ -780,11 +782,11 @@ int RGWReadSyncStatusCoroutine::operate() bool empty_on_enoent = false; // fail on ENOENT rgw_raw_obj obj{sync_env->store->svc()->zone->get_zone_params().log_pool, sync_env->status_oid()}; - call(new ReadInfoCR(sync_env->async_rados, sync_env->store->svc()->sysobj, obj, + call(new ReadInfoCR(dpp, sync_env->async_rados, sync_env->store->svc()->sysobj, obj, &sync_status->sync_info, empty_on_enoent)); } if (retcode < 0) { - ldpp_dout(sync_env->dpp, 4) << "failed to read sync status info with " + ldpp_dout(dpp, 4) << "failed to read sync status info with " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); } @@ -793,7 +795,7 @@ int 
RGWReadSyncStatusCoroutine::operate() yield call(new ReadMarkersCR(sync_env, sync_status->sync_info.num_shards, sync_status->sync_markers)); if (retcode < 0) { - ldpp_dout(sync_env->dpp, 4) << "failed to read sync status markers with " + ldpp_dout(dpp, 4) << "failed to read sync status markers with " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); } @@ -879,7 +881,7 @@ public: std::back_inserter(sections)); } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { RGWRESTConn *conn = sync_env->conn; reenter(this) { @@ -895,7 +897,7 @@ public: } while (!lease_cr->is_locked()) { if (lease_cr->is_done()) { - ldpp_dout(sync_env->dpp, 5) << "lease cr failed, done early " << dendl; + ldpp_dout(dpp, 5) << "lease cr failed, done early " << dendl; set_status("failed acquiring lock"); return set_cr_error(lease_cr->get_ret_status()); } @@ -910,7 +912,7 @@ public: "/admin/metadata", NULL, §ions)); } if (get_ret_status() < 0) { - ldpp_dout(sync_env->dpp, 0) << "ERROR: failed to fetch metadata sections" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to fetch metadata sections" << dendl; yield entries_index->finish(); yield lease_cr->go_down(); drain_all(); @@ -976,7 +978,7 @@ public: int shard_id = (int)iter->first; rgw_meta_sync_marker& marker = iter->second; marker.total_entries = entries_index->get_total_entries(shard_id); - spawn(new RGWSimpleRadosWriteCR(sync_env->async_rados, sync_env->store->svc()->sysobj, + spawn(new RGWSimpleRadosWriteCR(dpp, sync_env->async_rados, sync_env->store->svc()->sysobj, rgw_raw_obj(sync_env->store->svc()->zone->get_zone_params().log_pool, sync_env->shard_obj_name(shard_id)), marker), true); } @@ -1042,7 +1044,7 @@ public: section + ":" + key); } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { RGWRESTConn *conn = sync_env->conn; reenter(this) { yield { @@ -1057,9 +1059,9 @@ public: init_new_io(http_op); - int ret = http_op->aio_read(); + int ret = http_op->aio_read(dpp); if (ret < 0) { - ldpp_dout(sync_env->dpp, 0) << "ERROR: failed to fetch mdlog data" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to fetch mdlog data" << dendl; log_error() << "failed to send http operation: " << http_op->to_str() << " ret=" << ret << std::endl; http_op->put(); return set_cr_error(ret); @@ -1084,11 +1086,12 @@ class RGWAsyncMetaStoreEntry : public RGWAsyncRadosRequest { rgw::sal::RGWRadosStore *store; string raw_key; bufferlist bl; + const DoutPrefixProvider *dpp; protected: - int _send_request() override { - int ret = store->ctl()->meta.mgr->put(raw_key, bl, null_yield, RGWMDLogSyncType::APPLY_ALWAYS, true); + int _send_request(const DoutPrefixProvider *dpp) override { + int ret = store->ctl()->meta.mgr->put(raw_key, bl, null_yield, dpp, RGWMDLogSyncType::APPLY_ALWAYS, true); if (ret < 0) { - ldout(store->ctx(), 0) << "ERROR: can't store key: " << raw_key << " ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: can't store key: " << raw_key << " ret=" << ret << dendl; return ret; } return 0; @@ -1096,8 +1099,9 @@ protected: public: RGWAsyncMetaStoreEntry(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *_store, const string& _raw_key, - bufferlist& _bl) : RGWAsyncRadosRequest(caller, cn), store(_store), - raw_key(_raw_key), bl(_bl) {} + bufferlist& _bl, + const DoutPrefixProvider *dpp) : RGWAsyncRadosRequest(caller, cn), store(_store), + raw_key(_raw_key), bl(_bl), dpp(dpp) {} }; @@ -1121,9 +1125,9 @@ public: } } - int send_request() override { + int send_request(const 
DoutPrefixProvider *dpp) override { req = new RGWAsyncMetaStoreEntry(this, stack->create_completion_notifier(), - sync_env->store, raw_key, bl); + sync_env->store, raw_key, bl, dpp); sync_env->async_rados->queue(req); return 0; } @@ -1136,19 +1140,20 @@ public: class RGWAsyncMetaRemoveEntry : public RGWAsyncRadosRequest { rgw::sal::RGWRadosStore *store; string raw_key; + const DoutPrefixProvider *dpp; protected: - int _send_request() override { - int ret = store->ctl()->meta.mgr->remove(raw_key, null_yield); + int _send_request(const DoutPrefixProvider *dpp) override { + int ret = store->ctl()->meta.mgr->remove(raw_key, null_yield, dpp); if (ret < 0) { - ldout(store->ctx(), 0) << "ERROR: can't remove key: " << raw_key << " ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: can't remove key: " << raw_key << " ret=" << ret << dendl; return ret; } return 0; } public: RGWAsyncMetaRemoveEntry(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *_store, - const string& _raw_key) : RGWAsyncRadosRequest(caller, cn), store(_store), - raw_key(_raw_key) {} + const string& _raw_key, const DoutPrefixProvider *dpp) : RGWAsyncRadosRequest(caller, cn), store(_store), + raw_key(_raw_key), dpp(dpp) {} }; @@ -1170,9 +1175,9 @@ public: } } - int send_request() override { + int send_request(const DoutPrefixProvider *dpp) override { req = new RGWAsyncMetaRemoveEntry(this, stack->create_completion_notifier(), - sync_env->store, raw_key); + sync_env->store, raw_key, dpp); sync_env->async_rados->queue(req); return 0; } @@ -1189,7 +1194,7 @@ public: #define META_SYNC_UPDATE_MARKER_WINDOW 10 -int RGWLastCallerWinsCR::operate() { +int RGWLastCallerWinsCR::operate(const DoutPrefixProvider *dpp) { RGWCoroutine *call_cr; reenter(this) { while (cr) { @@ -1234,7 +1239,7 @@ public: ldpp_dout(sync_env->dpp, 20) << __func__ << "(): updating marker marker_oid=" << marker_oid << " marker=" << new_marker << " realm_epoch=" << sync_marker.realm_epoch << dendl; tn->log(20, SSTR("new marker=" << new_marker)); rgw::sal::RGWRadosStore *store = sync_env->store; - return new RGWSimpleRadosWriteCR(sync_env->async_rados, + return new RGWSimpleRadosWriteCR(sync_env->dpp, sync_env->async_rados, store->svc()->sysobj, rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, marker_oid), sync_marker); @@ -1258,7 +1263,7 @@ RGWMetaSyncSingleEntryCR::RGWMetaSyncSingleEntryCR(RGWMetaSyncEnv *_sync_env, tn = sync_env->sync_tracer->add_node(_tn_parent, "entry", raw_key); } -int RGWMetaSyncSingleEntryCR::operate() { +int RGWMetaSyncSingleEntryCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { #define NUM_TRANSIENT_ERROR_RETRIES 10 @@ -1293,14 +1298,14 @@ int RGWMetaSyncSingleEntryCR::operate() { } if ((sync_status == -EAGAIN || sync_status == -ECANCELED) && (tries < NUM_TRANSIENT_ERROR_RETRIES - 1)) { - ldpp_dout(sync_env->dpp, 20) << *this << ": failed to fetch remote metadata: " << section << ":" << key << ", will retry" << dendl; + ldpp_dout(dpp, 20) << *this << ": failed to fetch remote metadata: " << section << ":" << key << ", will retry" << dendl; continue; } if (sync_status < 0) { tn->log(10, SSTR("failed to send read remote metadata entry: section=" << section << " key=" << key << " status=" << sync_status)); log_error() << "failed to send read remote metadata entry: section=" << section << " key=" << key << " status=" << sync_status << std::endl; - yield call(sync_env->error_logger->log_error_cr(sync_env->conn->get_remote_id(), section, key, -sync_status, + yield 
call(sync_env->error_logger->log_error_cr(dpp, sync_env->conn->get_remote_id(), section, key, -sync_status, string("failed to read remote metadata entry: ") + cpp_strerror(-sync_status))); return set_cr_error(sync_status); } @@ -1318,7 +1323,7 @@ int RGWMetaSyncSingleEntryCR::operate() { yield call(new RGWMetaRemoveEntryCR(sync_env, raw_key)); } if ((retcode == -EAGAIN || retcode == -ECANCELED) && (tries < NUM_TRANSIENT_ERROR_RETRIES - 1)) { - ldpp_dout(sync_env->dpp, 20) << *this << ": failed to store metadata: " << section << ":" << key << ", got retcode=" << retcode << dendl; + ldpp_dout(dpp, 20) << *this << ": failed to store metadata: " << section << ":" << key << ", got retcode=" << retcode << dendl; continue; } break; @@ -1378,12 +1383,12 @@ public: } } - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; int state_init(); int state_read_shard_status(); int state_read_shard_status_complete(); - int state_send_rest_request(); + int state_send_rest_request(const DoutPrefixProvider *dpp); int state_receive_rest_response(); int state_store_mdlog_entries(); int state_store_mdlog_entries_complete(); @@ -1470,21 +1475,21 @@ public: marker_tracker = mt; } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { int r; while (true) { switch (sync_marker.state) { case rgw_meta_sync_marker::FullSync: r = full_sync(); if (r < 0) { - ldpp_dout(sync_env->dpp, 10) << "sync: full_sync: shard_id=" << shard_id << " r=" << r << dendl; + ldpp_dout(dpp, 10) << "sync: full_sync: shard_id=" << shard_id << " r=" << r << dendl; return set_cr_error(r); } return 0; case rgw_meta_sync_marker::IncrementalSync: r = incremental_sync(); if (r < 0) { - ldpp_dout(sync_env->dpp, 10) << "sync: incremental_sync: shard_id=" << shard_id << " r=" << r << dendl; + ldpp_dout(dpp, 10) << "sync: incremental_sync: shard_id=" << shard_id << " r=" << r << dendl; return set_cr_error(r); } return 0; @@ -1646,7 +1651,7 @@ public: ldpp_dout(sync_env->dpp, 4) << *this << ": saving marker pos=" << temp_marker->marker << " realm_epoch=" << realm_epoch << dendl; using WriteMarkerCR = RGWSimpleRadosWriteCR; - yield call(new WriteMarkerCR(sync_env->async_rados, sync_env->store->svc()->sysobj, + yield call(new WriteMarkerCR(sync_env->dpp, sync_env->async_rados, sync_env->store->svc()->sysobj, rgw_raw_obj(pool, sync_env->shard_obj_name(shard_id)), *temp_marker)); } @@ -1890,7 +1895,7 @@ public: RGWCoroutine *alloc_finisher_cr() override { rgw::sal::RGWRadosStore *store = sync_env->store; - return new RGWSimpleRadosReadCR(sync_env->async_rados, store->svc()->sysobj, + return new RGWSimpleRadosReadCR(sync_env->dpp, sync_env->async_rados, store->svc()->sysobj, rgw_raw_obj(pool, sync_env->shard_obj_name(shard_id)), &sync_marker); } @@ -1925,7 +1930,7 @@ public: ~RGWMetaSyncCR() { } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { // loop through one period at a time tn->log(1, "start"); @@ -1933,15 +1938,15 @@ public: if (cursor == sync_env->store->svc()->mdlog->get_period_history()->get_current()) { next = RGWPeriodHistory::Cursor{}; if (cursor) { - ldpp_dout(sync_env->dpp, 10) << "RGWMetaSyncCR on current period=" + ldpp_dout(dpp, 10) << "RGWMetaSyncCR on current period=" << cursor.get_period().get_id() << dendl; } else { - ldpp_dout(sync_env->dpp, 10) << "RGWMetaSyncCR with no period" << dendl; + ldpp_dout(dpp, 10) << "RGWMetaSyncCR with no period" << dendl; } } else { next = cursor; next.next(); - ldpp_dout(sync_env->dpp, 10) << 
"RGWMetaSyncCR on period=" + ldpp_dout(dpp, 10) << "RGWMetaSyncCR on period=" << cursor.get_period().get_id() << ", next=" << next.get_period().get_id() << dendl; } @@ -1968,7 +1973,7 @@ public: period_marker = next.get_period().get_sync_status()[shard_id]; if (period_marker.empty()) { // no metadata changes have occurred on this shard, skip it - ldpp_dout(sync_env->dpp, 10) << "RGWMetaSyncCR: skipping shard " << shard_id + ldpp_dout(dpp, 10) << "RGWMetaSyncCR: skipping shard " << shard_id << " with empty period marker" << dendl; continue; } @@ -2003,7 +2008,7 @@ public: // write the updated sync info sync_status.sync_info.period = cursor.get_period().get_id(); sync_status.sync_info.realm_epoch = cursor.get_epoch(); - yield call(new RGWSimpleRadosWriteCR(sync_env->async_rados, + yield call(new RGWSimpleRadosWriteCR(dpp, sync_env->async_rados, sync_env->store->svc()->sysobj, rgw_raw_obj(pool, sync_env->status_oid()), sync_status.sync_info)); @@ -2033,7 +2038,7 @@ void RGWRemoteMetaLog::init_sync_env(RGWMetaSyncEnv *env) { env->sync_tracer = store->getRados()->get_sync_tracer(); } -int RGWRemoteMetaLog::read_sync_status(rgw_meta_sync_status *sync_status) +int RGWRemoteMetaLog::read_sync_status(const DoutPrefixProvider *dpp, rgw_meta_sync_status *sync_status) { if (store->svc()->zone->is_meta_master()) { return 0; @@ -2049,21 +2054,21 @@ int RGWRemoteMetaLog::read_sync_status(rgw_meta_sync_status *sync_status) RGWMetaSyncEnv sync_env_local = sync_env; sync_env_local.http_manager = &http_manager; tn->log(20, "read sync status"); - ret = crs.run(new RGWReadSyncStatusCoroutine(&sync_env_local, sync_status)); + ret = crs.run(dpp, new RGWReadSyncStatusCoroutine(&sync_env_local, sync_status)); http_manager.stop(); return ret; } -int RGWRemoteMetaLog::init_sync_status() +int RGWRemoteMetaLog::init_sync_status(const DoutPrefixProvider *dpp) { if (store->svc()->zone->is_meta_master()) { return 0; } rgw_mdlog_info mdlog_info; - int r = read_log_info(&mdlog_info); + int r = read_log_info(dpp, &mdlog_info); if (r < 0) { - lderr(store->ctx()) << "ERROR: fail to fetch master log info (r=" << r << ")" << dendl; + ldpp_dout(dpp, -1) << "ERROR: fail to fetch master log info (r=" << r << ")" << dendl; return r; } @@ -2075,19 +2080,20 @@ int RGWRemoteMetaLog::init_sync_status() sync_info.realm_epoch = cursor.get_epoch(); } - return run(new RGWInitSyncStatusCoroutine(&sync_env, sync_info)); + return run(dpp, new RGWInitSyncStatusCoroutine(&sync_env, sync_info)); } -int RGWRemoteMetaLog::store_sync_info(const rgw_meta_sync_info& sync_info) +int RGWRemoteMetaLog::store_sync_info(const DoutPrefixProvider *dpp, const rgw_meta_sync_info& sync_info) { tn->log(20, "store sync info"); - return run(new RGWSimpleRadosWriteCR(async_rados, store->svc()->sysobj, + return run(dpp, new RGWSimpleRadosWriteCR(dpp, async_rados, store->svc()->sysobj, rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env.status_oid()), sync_info)); } // return a cursor to the period at our sync position -static RGWPeriodHistory::Cursor get_period_at(rgw::sal::RGWRadosStore* store, +static RGWPeriodHistory::Cursor get_period_at(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore* store, const rgw_meta_sync_info& info, optional_yield y) { @@ -2102,7 +2108,7 @@ static RGWPeriodHistory::Cursor get_period_at(rgw::sal::RGWRadosStore* store, // verify that the period ids match auto& existing = cursor.get_period().get_id(); if (existing != info.period) { - lderr(store->ctx()) << "ERROR: sync status period=" << info.period + 
ldpp_dout(dpp, -1) << "ERROR: sync status period=" << info.period << " does not match period=" << existing << " in history at realm epoch=" << info.realm_epoch << dendl; return RGWPeriodHistory::Cursor{-EEXIST}; @@ -2112,23 +2118,23 @@ static RGWPeriodHistory::Cursor get_period_at(rgw::sal::RGWRadosStore* store, // read the period from rados or pull it from the master RGWPeriod period; - int r = store->svc()->mdlog->pull_period(info.period, period, y); + int r = store->svc()->mdlog->pull_period(dpp, info.period, period, y); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed to read period id " + ldpp_dout(dpp, -1) << "ERROR: failed to read period id " << info.period << ": " << cpp_strerror(r) << dendl; return RGWPeriodHistory::Cursor{r}; } // attach the period to our history - cursor = store->svc()->mdlog->get_period_history()->attach(std::move(period), y); + cursor = store->svc()->mdlog->get_period_history()->attach(dpp, std::move(period), y); if (!cursor) { r = cursor.get_error(); - lderr(store->ctx()) << "ERROR: failed to read period history back to " + ldpp_dout(dpp, -1) << "ERROR: failed to read period history back to " << info.period << ": " << cpp_strerror(r) << dendl; } return cursor; } -int RGWRemoteMetaLog::run_sync(optional_yield y) +int RGWRemoteMetaLog::run_sync(const DoutPrefixProvider *dpp, optional_yield y) { if (store->svc()->zone->is_meta_master()) { return 0; @@ -2143,7 +2149,7 @@ int RGWRemoteMetaLog::run_sync(optional_yield y) ldpp_dout(dpp, 1) << __func__ << "(): going down" << dendl; return 0; } - r = read_log_info(&mdlog_info); + r = read_log_info(dpp, &mdlog_info); if (r == -EIO || r == -ENOENT) { // keep retrying if master isn't alive or hasn't initialized the log ldpp_dout(dpp, 10) << __func__ << "(): waiting for master.." 
<< dendl; @@ -2152,7 +2158,7 @@ int RGWRemoteMetaLog::run_sync(optional_yield y) } backoff.reset(); if (r < 0) { - lderr(store->ctx()) << "ERROR: fail to fetch master log info (r=" << r << ")" << dendl; + ldpp_dout(dpp, -1) << "ERROR: fail to fetch master log info (r=" << r << ")" << dendl; return r; } break; @@ -2164,7 +2170,7 @@ int RGWRemoteMetaLog::run_sync(optional_yield y) ldpp_dout(dpp, 1) << __func__ << "(): going down" << dendl; return 0; } - r = run(new RGWReadSyncStatusCoroutine(&sync_env, &sync_status)); + r = run(dpp, new RGWReadSyncStatusCoroutine(&sync_env, &sync_status)); if (r < 0 && r != -ENOENT) { ldpp_dout(dpp, 0) << "ERROR: failed to fetch sync status r=" << r << dendl; return r; @@ -2199,7 +2205,7 @@ int RGWRemoteMetaLog::run_sync(optional_yield y) sync_status.sync_info.period = cursor.get_period().get_id(); sync_status.sync_info.realm_epoch = cursor.get_epoch(); } - r = run(new RGWInitSyncStatusCoroutine(&sync_env, sync_status.sync_info)); + r = run(dpp, new RGWInitSyncStatusCoroutine(&sync_env, sync_status.sync_info)); if (r == -EBUSY) { backoff.backoff_sleep(); continue; @@ -2214,13 +2220,13 @@ int RGWRemoteMetaLog::run_sync(optional_yield y) auto num_shards = sync_status.sync_info.num_shards; if (num_shards != mdlog_info.num_shards) { - lderr(store->ctx()) << "ERROR: can't sync, mismatch between num shards, master num_shards=" << mdlog_info.num_shards << " local num_shards=" << num_shards << dendl; + ldpp_dout(dpp, -1) << "ERROR: can't sync, mismatch between num shards, master num_shards=" << mdlog_info.num_shards << " local num_shards=" << num_shards << dendl; return -EINVAL; } RGWPeriodHistory::Cursor cursor; do { - r = run(new RGWReadSyncStatusCoroutine(&sync_env, &sync_status)); + r = run(dpp, new RGWReadSyncStatusCoroutine(&sync_env, &sync_status)); if (r < 0 && r != -ENOENT) { tn->log(0, SSTR("ERROR: failed to fetch sync status r=" << r)); return r; @@ -2229,7 +2235,7 @@ int RGWRemoteMetaLog::run_sync(optional_yield y) switch ((rgw_meta_sync_info::SyncState)sync_status.sync_info.state) { case rgw_meta_sync_info::StateBuildingFullSyncMaps: tn->log(20, "building full sync maps"); - r = run(new RGWFetchAllMetaCR(&sync_env, num_shards, sync_status.sync_markers, tn)); + r = run(dpp, new RGWFetchAllMetaCR(&sync_env, num_shards, sync_status.sync_markers, tn)); if (r == -EBUSY || r == -EAGAIN) { backoff.backoff_sleep(); continue; @@ -2241,7 +2247,7 @@ int RGWRemoteMetaLog::run_sync(optional_yield y) } sync_status.sync_info.state = rgw_meta_sync_info::StateSync; - r = store_sync_info(sync_status.sync_info); + r = store_sync_info(dpp, sync_status.sync_info); if (r < 0) { tn->log(0, SSTR("ERROR: failed to update sync status (r=" << r << ")")); return r; @@ -2250,13 +2256,13 @@ int RGWRemoteMetaLog::run_sync(optional_yield y) case rgw_meta_sync_info::StateSync: tn->log(20, "sync"); // find our position in the period history (if any) - cursor = get_period_at(store, sync_status.sync_info, y); + cursor = get_period_at(dpp, store, sync_status.sync_info, y); r = cursor.get_error(); if (r < 0) { return r; } meta_sync_cr = new RGWMetaSyncCR(&sync_env, cursor, sync_status, tn); - r = run(meta_sync_cr); + r = run(dpp, meta_sync_cr); if (r < 0) { tn->log(0, "ERROR: failed to fetch all metadata keys"); return r; @@ -2279,37 +2285,37 @@ void RGWRemoteMetaLog::wakeup(int shard_id) meta_sync_cr->wakeup(shard_id); } -int RGWCloneMetaLogCoroutine::operate() +int RGWCloneMetaLogCoroutine::operate(const DoutPrefixProvider *dpp) { reenter(this) { do { yield { - ldpp_dout(sync_env->dpp, 
20) << __func__ << ": shard_id=" << shard_id << ": init request" << dendl; + ldpp_dout(dpp, 20) << __func__ << ": shard_id=" << shard_id << ": init request" << dendl; return state_init(); } yield { - ldpp_dout(sync_env->dpp, 20) << __func__ << ": shard_id=" << shard_id << ": reading shard status" << dendl; + ldpp_dout(dpp, 20) << __func__ << ": shard_id=" << shard_id << ": reading shard status" << dendl; return state_read_shard_status(); } yield { - ldpp_dout(sync_env->dpp, 20) << __func__ << ": shard_id=" << shard_id << ": reading shard status complete" << dendl; + ldpp_dout(dpp, 20) << __func__ << ": shard_id=" << shard_id << ": reading shard status complete" << dendl; return state_read_shard_status_complete(); } yield { - ldpp_dout(sync_env->dpp, 20) << __func__ << ": shard_id=" << shard_id << ": sending rest request" << dendl; - return state_send_rest_request(); + ldpp_dout(dpp, 20) << __func__ << ": shard_id=" << shard_id << ": sending rest request" << dendl; + return state_send_rest_request(dpp); } yield { - ldpp_dout(sync_env->dpp, 20) << __func__ << ": shard_id=" << shard_id << ": receiving rest response" << dendl; + ldpp_dout(dpp, 20) << __func__ << ": shard_id=" << shard_id << ": receiving rest response" << dendl; return state_receive_rest_response(); } yield { - ldpp_dout(sync_env->dpp, 20) << __func__ << ": shard_id=" << shard_id << ": storing mdlog entries" << dendl; + ldpp_dout(dpp, 20) << __func__ << ": shard_id=" << shard_id << ": storing mdlog entries" << dendl; return state_store_mdlog_entries(); } } while (truncated); yield { - ldpp_dout(sync_env->dpp, 20) << __func__ << ": shard_id=" << shard_id << ": storing mdlog entries complete" << dendl; + ldpp_dout(dpp, 20) << __func__ << ": shard_id=" << shard_id << ": storing mdlog entries complete" << dendl; return state_store_mdlog_entries_complete(); } } @@ -2343,7 +2349,7 @@ int RGWCloneMetaLogCoroutine::state_read_shard_status() io_complete(); }), add_ref); - int ret = mdlog->get_info_async(shard_id, completion.get()); + int ret = mdlog->get_info_async(sync_env->dpp, shard_id, completion.get()); if (ret < 0) { ldpp_dout(sync_env->dpp, 0) << "ERROR: mdlog->get_info_async() returned ret=" << ret << dendl; return set_cr_error(ret); @@ -2363,7 +2369,7 @@ int RGWCloneMetaLogCoroutine::state_read_shard_status_complete() return 0; } -int RGWCloneMetaLogCoroutine::state_send_rest_request() +int RGWCloneMetaLogCoroutine::state_send_rest_request(const DoutPrefixProvider *dpp) { RGWRESTConn *conn = sync_env->conn; @@ -2386,9 +2392,9 @@ int RGWCloneMetaLogCoroutine::state_send_rest_request() init_new_io(http_op); - int ret = http_op->aio_read(); + int ret = http_op->aio_read(dpp); if (ret < 0) { - ldpp_dout(sync_env->dpp, 0) << "ERROR: failed to fetch mdlog data" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to fetch mdlog data" << dendl; log_error() << "failed to send http operation: " << http_op->to_str() << " ret=" << ret << std::endl; http_op->put(); http_op = NULL; @@ -2454,7 +2460,7 @@ int RGWCloneMetaLogCoroutine::state_store_mdlog_entries() RGWAioCompletionNotifier *cn = stack->create_completion_notifier(); - int ret = mdlog->store_entries_in_shard(dest_entries, shard_id, cn->completion()); + int ret = mdlog->store_entries_in_shard(sync_env->dpp, dest_entries, shard_id, cn->completion()); if (ret < 0) { cn->put(); ldpp_dout(sync_env->dpp, 10) << "failed to store md log entries shard_id=" << shard_id << " ret=" << ret << dendl; diff --git a/src/rgw/rgw_sync.h b/src/rgw/rgw_sync.h index 4d0d72c9839f9..1f1e191e4d14b 100644 --- 
a/src/rgw/rgw_sync.h +++ b/src/rgw/rgw_sync.h @@ -79,7 +79,7 @@ class RGWSyncErrorLogger { std::atomic counter = { 0 }; public: RGWSyncErrorLogger(rgw::sal::RGWRadosStore *_store, const string &oid_prefix, int _num_shards); - RGWCoroutine *log_error_cr(const string& source_zone, const string& section, const string& name, uint32_t error_code, const string& message); + RGWCoroutine *log_error_cr(const DoutPrefixProvider *dpp, const string& source_zone, const string& section, const string& name, uint32_t error_code, const string& message); static string get_shard_oid(const string& oid_prefix, int shard_id); }; @@ -170,7 +170,7 @@ public: virtual RGWCoroutine *alloc_cr() = 0; virtual RGWCoroutine *alloc_finisher_cr() { return NULL; } - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; struct RGWMetaSyncEnv { @@ -211,7 +211,7 @@ class RGWRemoteMetaLog : public RGWCoroutinesManager { RGWMetaSyncEnv sync_env; void init_sync_env(RGWMetaSyncEnv *env); - int store_sync_info(const rgw_meta_sync_info& sync_info); + int store_sync_info(const DoutPrefixProvider *dpp, const rgw_meta_sync_info& sync_info); std::atomic going_down = { false }; @@ -231,12 +231,12 @@ public: int init(); void finish(); - int read_log_info(rgw_mdlog_info *log_info); - int read_master_log_shards_info(const string& master_period, map *shards_info); - int read_master_log_shards_next(const string& period, map shard_markers, map *result); - int read_sync_status(rgw_meta_sync_status *sync_status); - int init_sync_status(); - int run_sync(optional_yield y); + int read_log_info(const DoutPrefixProvider *dpp, rgw_mdlog_info *log_info); + int read_master_log_shards_info(const DoutPrefixProvider *dpp, const string& master_period, map *shards_info); + int read_master_log_shards_next(const DoutPrefixProvider *dpp, const string& period, map shard_markers, map *result); + int read_sync_status(const DoutPrefixProvider *dpp, rgw_meta_sync_status *sync_status); + int init_sync_status(const DoutPrefixProvider *dpp); + int run_sync(const DoutPrefixProvider *dpp, optional_yield y); void wakeup(int shard_id); @@ -275,23 +275,23 @@ public: RGWMetaSyncStatusManager(rgw::sal::RGWRadosStore *_store, RGWAsyncRadosProcessor *async_rados) : store(_store), master_log(this, store, async_rados, this) {} - int init(); + int init(const DoutPrefixProvider *dpp); - int read_sync_status(rgw_meta_sync_status *sync_status) { - return master_log.read_sync_status(sync_status); + int read_sync_status(const DoutPrefixProvider *dpp, rgw_meta_sync_status *sync_status) { + return master_log.read_sync_status(dpp, sync_status); } - int init_sync_status() { return master_log.init_sync_status(); } - int read_log_info(rgw_mdlog_info *log_info) { - return master_log.read_log_info(log_info); + int init_sync_status(const DoutPrefixProvider *dpp) { return master_log.init_sync_status(dpp); } + int read_log_info(const DoutPrefixProvider *dpp, rgw_mdlog_info *log_info) { + return master_log.read_log_info(dpp, log_info); } - int read_master_log_shards_info(const string& master_period, map *shards_info) { - return master_log.read_master_log_shards_info(master_period, shards_info); + int read_master_log_shards_info(const DoutPrefixProvider *dpp, const string& master_period, map *shards_info) { + return master_log.read_master_log_shards_info(dpp, master_period, shards_info); } - int read_master_log_shards_next(const string& period, map shard_markers, map *result) { - return master_log.read_master_log_shards_next(period, shard_markers, result); + int 
read_master_log_shards_next(const DoutPrefixProvider *dpp, const string& period, map shard_markers, map *result) { + return master_log.read_master_log_shards_next(dpp, period, shard_markers, result); } - int run(optional_yield y) { return master_log.run_sync(y); } + int run(const DoutPrefixProvider *dpp, optional_yield y) { return master_log.run_sync(dpp, y); } // implements DoutPrefixProvider @@ -325,7 +325,7 @@ public: } } - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; void call_cr(RGWCoroutine *_cr) override { if (cr) { @@ -507,7 +507,7 @@ public: const RGWMDLogStatus& _op_status, RGWMetaSyncShardMarkerTrack *_marker_tracker, const RGWSyncTraceNodeRef& _tn_parent); - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; class RGWShardCollectCR : public RGWCoroutine { @@ -523,7 +523,7 @@ public: status(0) {} virtual bool spawn_next() = 0; - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; // factory functions for meta sync coroutines needed in mdlog trimming diff --git a/src/rgw/rgw_sync_checkpoint.cc b/src/rgw/rgw_sync_checkpoint.cc index d9ee136784aca..78fd802cfc469 100644 --- a/src/rgw/rgw_sync_checkpoint.cc +++ b/src/rgw/rgw_sync_checkpoint.cc @@ -126,7 +126,8 @@ int bucket_source_sync_checkpoint(const DoutPrefixProvider* dpp, return 0; } -int source_bilog_markers(RGWSI_Zone* zone_svc, +int source_bilog_markers(const DoutPrefixProvider *dpp, + RGWSI_Zone* zone_svc, const rgw_sync_bucket_pipe& pipe, BucketIndexShardsManager& remote_markers, optional_yield y) @@ -139,7 +140,7 @@ int source_bilog_markers(RGWSI_Zone* zone_svc, return -EINVAL; } - return rgw_read_remote_bilog_info(conn->second, *pipe.source.bucket, + return rgw_read_remote_bilog_info(dpp, conn->second, *pipe.source.bucket, remote_markers, y); } @@ -178,7 +179,7 @@ int rgw_bucket_sync_checkpoint(const DoutPrefixProvider* dpp, // fetch remote markers spawn::spawn(ioctx, [&] (spawn::yield_context yield) { auto y = optional_yield{ioctx, yield}; - int r = source_bilog_markers(store->svc()->zone, entry.pipe, + int r = source_bilog_markers(dpp, store->svc()->zone, entry.pipe, entry.remote_markers, y); if (r < 0) { ldpp_dout(dpp, 0) << "failed to fetch remote bilog markers: " @@ -192,7 +193,7 @@ int rgw_bucket_sync_checkpoint(const DoutPrefixProvider* dpp, auto obj_ctx = store->svc()->sysobj->init_obj_ctx(); int r = store->getRados()->get_bucket_instance_info( obj_ctx, *entry.pipe.source.bucket, entry.source_bucket_info, - nullptr, nullptr, y); + nullptr, nullptr, y, dpp); if (r < 0) { ldpp_dout(dpp, 0) << "failed to read source bucket info: " << cpp_strerror(r) << dendl; diff --git a/src/rgw/rgw_sync_error_repo.cc b/src/rgw/rgw_sync_error_repo.cc index e952ce912307c..1f332276d0d6b 100644 --- a/src/rgw/rgw_sync_error_repo.cc +++ b/src/rgw/rgw_sync_error_repo.cc @@ -65,13 +65,13 @@ class RGWErrorRepoWriteCR : public RGWSimpleCoroutine { key(key), timestamp(timestamp) {} - int send_request() override { + int send_request(const DoutPrefixProvider *dpp) override { librados::ObjectWriteOperation op; int r = rgw_error_repo_write(op, key, timestamp); if (r < 0) { return r; } - r = obj.open(); + r = obj.open(dpp); if (r < 0) { return r; } @@ -108,13 +108,13 @@ class RGWErrorRepoRemoveCR : public RGWSimpleCoroutine { key(key), timestamp(timestamp) {} - int send_request() override { + int send_request(const DoutPrefixProvider *dpp) override { librados::ObjectWriteOperation op; int r = rgw_error_repo_remove(op, key, timestamp); if (r < 0) { return 
r; } - r = obj.open(); + r = obj.open(dpp); if (r < 0) { return r; } diff --git a/src/rgw/rgw_sync_module.cc b/src/rgw/rgw_sync_module.cc index 7100646098f9b..9dd153c8660d1 100644 --- a/src/rgw/rgw_sync_module.cc +++ b/src/rgw/rgw_sync_module.cc @@ -39,7 +39,7 @@ RGWCallStatRemoteObjCR::RGWCallStatRemoteObjCR(RGWDataSyncCtx *_sc, src_bucket(_src_bucket), key(_key) { } -int RGWCallStatRemoteObjCR::operate() { +int RGWCallStatRemoteObjCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { yield { call(new RGWStatRemoteObjCR(sync_env->async_rados, sync_env->store, @@ -47,10 +47,10 @@ int RGWCallStatRemoteObjCR::operate() { src_bucket, key, &mtime, &size, &etag, &attrs, &headers)); } if (retcode < 0) { - ldout(sync_env->cct, 10) << "RGWStatRemoteObjCR() returned " << retcode << dendl; + ldpp_dout(dpp, 10) << "RGWStatRemoteObjCR() returned " << retcode << dendl; return set_cr_error(retcode); } - ldout(sync_env->cct, 20) << "stat of remote obj: z=" << sc->source_zone + ldpp_dout(dpp, 20) << "stat of remote obj: z=" << sc->source_zone << " b=" << src_bucket << " k=" << key << " size=" << size << " mtime=" << mtime << dendl; yield { @@ -61,7 +61,7 @@ int RGWCallStatRemoteObjCR::operate() { } } if (retcode < 0) { - ldout(sync_env->cct, 10) << "RGWStatRemoteObjCR() callback returned " << retcode << dendl; + ldpp_dout(dpp, 10) << "RGWStatRemoteObjCR() callback returned " << retcode << dendl; return set_cr_error(retcode); } return set_cr_done(); diff --git a/src/rgw/rgw_sync_module.h b/src/rgw/rgw_sync_module.h index c46d5fccae13a..5a3f62f741308 100644 --- a/src/rgw/rgw_sync_module.h +++ b/src/rgw/rgw_sync_module.h @@ -190,7 +190,7 @@ public: ~RGWCallStatRemoteObjCR() override {} - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; virtual RGWStatRemoteObjCBCR *allocate_callback() { return nullptr; diff --git a/src/rgw/rgw_sync_module_aws.cc b/src/rgw/rgw_sync_module_aws.cc index c837070c7e887..e57327b19f245 100644 --- a/src/rgw/rgw_sync_module_aws.cc +++ b/src/rgw/rgw_sync_module_aws.cc @@ -734,7 +734,7 @@ public: src_properties(_src_properties) { } - int init() override { + int init(const DoutPrefixProvider *dpp) override { /* init input connection */ @@ -753,15 +753,15 @@ public: } RGWRESTStreamRWRequest *in_req; - int ret = conn->get_obj(src_obj, req_params, false /* send */, &in_req); + int ret = conn->get_obj(dpp, src_obj, req_params, false /* send */, &in_req); if (ret < 0) { - ldout(sc->cct, 0) << "ERROR: " << __func__ << "(): conn->get_obj() returned ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): conn->get_obj() returned ret=" << ret << dendl; return ret; } set_req(in_req); - return RGWStreamReadHTTPResourceCRF::init(); + return RGWStreamReadHTTPResourceCRF::init(dpp); } int decode_rest_obj(map& headers, bufferlist& extra_data) override { @@ -956,7 +956,7 @@ public: } } - void send_ready(const rgw_rest_obj& rest_obj) override { + void send_ready(const DoutPrefixProvider *dpp, const rgw_rest_obj& rest_obj) override { RGWRESTStreamS3PutObj *r = static_cast(req); map new_attrs; @@ -968,7 +968,7 @@ public: RGWAccessControlPolicy policy; - r->send_ready(target->conn->get_key(), new_attrs, policy, false); + r->send_ready(dpp, target->conn->get_key(), new_attrs, policy, false); } void handle_headers(const map& headers) { @@ -1015,7 +1015,7 @@ public: dest_obj(_dest_obj), src_properties(_src_properties) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { /* init input */ in_crf.reset(new 
RGWRESTStreamGetCRF(cct, get_env(), this, sc, @@ -1076,7 +1076,7 @@ public: part_info(_part_info), petag(_petag) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { /* init input */ in_crf.reset(new RGWRESTStreamGetCRF(cct, get_env(), this, sc, @@ -1125,7 +1125,7 @@ public: dest_obj(_dest_obj), upload_id(_upload_id) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { yield { @@ -1136,7 +1136,7 @@ public: } if (retcode < 0) { - ldout(sc->cct, 0) << "ERROR: failed to abort multipart upload for dest object=" << dest_obj << " (retcode=" << retcode << ")" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to abort multipart upload for dest object=" << dest_obj << " (retcode=" << retcode << ")" << dendl; return set_cr_error(retcode); } @@ -1185,7 +1185,7 @@ public: attrs(_attrs), upload_id(_upload_id) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { yield { @@ -1196,7 +1196,7 @@ public: } if (retcode < 0) { - ldout(sc->cct, 0) << "ERROR: failed to initialize multipart upload for dest object=" << dest_obj << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to initialize multipart upload for dest object=" << dest_obj << dendl; return set_cr_error(retcode); } { @@ -1226,7 +1226,7 @@ public: } } - ldout(sc->cct, 20) << "init multipart result: bucket=" << result.bucket << " key=" << result.key << " upload_id=" << result.upload_id << dendl; + ldpp_dout(dpp, 20) << "init multipart result: bucket=" << result.bucket << " key=" << result.key << " upload_id=" << result.upload_id << dendl; *upload_id = result.upload_id; @@ -1287,7 +1287,7 @@ public: upload_id(_upload_id), req_enc(_parts) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { yield { @@ -1307,7 +1307,7 @@ public: } if (retcode < 0) { - ldout(sc->cct, 0) << "ERROR: failed to initialize multipart upload for dest object=" << dest_obj << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to initialize multipart upload for dest object=" << dest_obj << dendl; return set_cr_error(retcode); } { @@ -1337,7 +1337,7 @@ public: } } - ldout(sc->cct, 20) << "complete multipart result: location=" << result.location << " bucket=" << result.bucket << " key=" << result.key << " etag=" << result.etag << dendl; + ldpp_dout(dpp, 20) << "complete multipart result: location=" << result.location << " bucket=" << result.bucket << " key=" << result.key << " etag=" << result.etag << dendl; return set_cr_done(); } @@ -1367,16 +1367,16 @@ public: status_obj(_status_obj), upload_id(_upload_id) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { yield call(new RGWAWSAbortMultipartCR(sc, dest_conn, dest_obj, upload_id)); if (retcode < 0) { - ldout(sc->cct, 0) << "ERROR: failed to abort multipart upload dest obj=" << dest_obj << " upload_id=" << upload_id << " retcode=" << retcode << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to abort multipart upload dest obj=" << dest_obj << " upload_id=" << upload_id << " retcode=" << retcode << dendl; /* ignore error, best effort */ } yield call(new RGWRadosRemoveCR(sc->env->store, status_obj)); if (retcode < 0) { - ldout(sc->cct, 0) << "ERROR: failed to remove sync status obj obj=" << status_obj << " retcode=" << retcode << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to remove sync status obj obj=" << status_obj << " retcode=" << retcode << dendl; /* ignore error, best effort */ } return 
set_cr_done(); @@ -1436,13 +1436,13 @@ public: } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { - yield call(new RGWSimpleRadosReadCR(sync_env->async_rados, sync_env->svc->sysobj, + yield call(new RGWSimpleRadosReadCR(dpp, sync_env->async_rados, sync_env->svc->sysobj, status_obj, &status, false)); if (retcode < 0 && retcode != -ENOENT) { - ldout(sc->cct, 0) << "ERROR: failed to read sync status of object " << src_obj << " retcode=" << retcode << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to read sync status of object " << src_obj << " retcode=" << retcode << dendl; return retcode; } @@ -1495,15 +1495,15 @@ public: } if (retcode < 0) { - ldout(sc->cct, 0) << "ERROR: failed to sync obj=" << src_obj << ", sync via multipart upload, upload_id=" << status.upload_id << " part number " << status.cur_part << " (error: " << cpp_strerror(-retcode) << ")" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to sync obj=" << src_obj << ", sync via multipart upload, upload_id=" << status.upload_id << " part number " << status.cur_part << " (error: " << cpp_strerror(-retcode) << ")" << dendl; ret_err = retcode; yield call(new RGWAWSStreamAbortMultipartUploadCR(sc, target->conn.get(), dest_obj, status_obj, status.upload_id)); return set_cr_error(ret_err); } - yield call(new RGWSimpleRadosWriteCR(sync_env->async_rados, sync_env->svc->sysobj, status_obj, status)); + yield call(new RGWSimpleRadosWriteCR(dpp, sync_env->async_rados, sync_env->svc->sysobj, status_obj, status)); if (retcode < 0) { - ldout(sc->cct, 0) << "ERROR: failed to store multipart upload state, retcode=" << retcode << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to store multipart upload state, retcode=" << retcode << dendl; /* continue with upload anyway */ } ldout(sc->cct, 20) << "sync of object=" << src_obj << " via multipart upload, finished sending part #" << status.cur_part << " etag=" << pcur_part_info->etag << dendl; @@ -1511,7 +1511,7 @@ public: yield call(new RGWAWSCompleteMultipartCR(sc, target->conn.get(), dest_obj, status.upload_id, status.parts)); if (retcode < 0) { - ldout(sc->cct, 0) << "ERROR: failed to complete multipart upload of obj=" << src_obj << " (error: " << cpp_strerror(-retcode) << ")" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to complete multipart upload of obj=" << src_obj << " (error: " << cpp_strerror(-retcode) << ")" << dendl; ret_err = retcode; yield call(new RGWAWSStreamAbortMultipartUploadCR(sc, target->conn.get(), dest_obj, status_obj, status.upload_id)); return set_cr_error(ret_err); @@ -1520,7 +1520,7 @@ public: /* remove status obj */ yield call(new RGWRadosRemoveCR(sync_env->store, status_obj)); if (retcode < 0) { - ldout(sc->cct, 0) << "ERROR: failed to abort multipart upload obj=" << src_obj << " upload_id=" << status.upload_id << " part number " << status.cur_part << " (" << cpp_strerror(-retcode) << ")" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to abort multipart upload obj=" << src_obj << " upload_id=" << status.upload_id << " part number " << status.cur_part << " (" << cpp_strerror(-retcode) << ")" << dendl; /* ignore error, best effort */ } return set_cr_done(); @@ -1593,7 +1593,7 @@ public: ~RGWAWSHandleRemoteObjCBCR(){ } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { ret = decode_attr(attrs, RGW_ATTR_PG_VER, &src_pg_ver, (uint64_t)0); if (ret < 0) { @@ -1605,7 +1605,7 @@ public: src_pg_ver = 0; /* all or nothing */ } } - ldout(sc->cct, 4) << "AWS: download begin: z=" << 
sc->source_zone + ldpp_dout(dpp, 4) << "AWS: download begin: z=" << sc->source_zone << " b=" << src_bucket << " k=" << key << " size=" << size << " mtime=" << mtime << " etag=" << etag << " zone_short_id=" << src_zone_short_id << " pg_ver=" << src_pg_ver @@ -1736,7 +1736,7 @@ public: AWSSyncInstanceEnv& _instance) : RGWCoroutine(_sc->cct), sc(_sc), sync_pipe(_sync_pipe), key(_key), mtime(_mtime), instance(_instance) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { ldout(sc->cct, 0) << ": remove remote obj: z=" << sc->source_zone << " b=" <source_zone << dendl; + ldpp_dout(dpp, 5) << conf->id << ": get elasticsearch info for zone: " << sc->source_zone << dendl; yield call(new RGWReadRESTResourceCR (sync_env->cct, conf->conn.get(), sync_env->http_manager, @@ -657,11 +657,11 @@ public: &(conf->default_headers), &(conf->es_info))); if (retcode < 0) { - ldout(sync_env->cct, 5) << conf->id << ": get elasticsearch failed: " << retcode << dendl; + ldpp_dout(dpp, 5) << conf->id << ": get elasticsearch failed: " << retcode << dendl; return set_cr_error(retcode); } - ldout(sync_env->cct, 5) << conf->id << ": got elastic version=" << conf->es_info.get_version_str() << dendl; + ldpp_dout(dpp, 5) << conf->id << ": got elastic version=" << conf->es_info.get_version_str() << dendl; return set_cr_done(); } return 0; @@ -678,9 +678,9 @@ public: ElasticConfigRef _conf) : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env), conf(_conf) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { - ldout(sc->cct, 5) << conf->id << ": put elasticsearch index for zone: " << sc->source_zone << dendl; + ldpp_dout(dpp, 5) << conf->id << ": put elasticsearch index for zone: " << sc->source_zone << dendl; yield { string path = conf->get_index_path(); @@ -688,10 +688,10 @@ public: std::unique_ptr index_conf; if (conf->es_info.version >= ES_V5) { - ldout(sc->cct, 0) << "elasticsearch: index mapping: version >= 5" << dendl; + ldpp_dout(dpp, 0) << "elasticsearch: index mapping: version >= 5" << dendl; index_conf.reset(new es_index_config(settings, conf->es_info.version)); } else { - ldout(sc->cct, 0) << "elasticsearch: index mapping: version < 5" << dendl; + ldpp_dout(dpp, 0) << "elasticsearch: index mapping: version < 5" << dendl; index_conf.reset(new es_index_config(settings, conf->es_info.version)); } call(new RGWPutRESTResourceCR (sc->cct, @@ -705,11 +705,11 @@ public: if (err_response.error.type != "index_already_exists_exception" && err_response.error.type != "resource_already_exists_exception") { - ldout(sync_env->cct, 0) << "elasticsearch: failed to initialize index: response.type=" << err_response.error.type << " response.reason=" << err_response.error.reason << dendl; + ldpp_dout(dpp, 0) << "elasticsearch: failed to initialize index: response.type=" << err_response.error.type << " response.reason=" << err_response.error.reason << dendl; return set_cr_error(retcode); } - ldout(sync_env->cct, 0) << "elasticsearch: index already exists, assuming external initialization" << dendl; + ldpp_dout(dpp, 0) << "elasticsearch: index already exists, assuming external initialization" << dendl; } return set_cr_done(); } @@ -752,7 +752,7 @@ public: ElasticConfigRef _conf) : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env), conf(_conf) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { yield call(new RGWElasticGetESInfoCBCR(sc, conf)); @@ -782,9 +782,9 @@ public: ElasticConfigRef 
_conf, uint64_t _versioned_epoch) : RGWStatRemoteObjCBCR(_sc, _sync_pipe.info.source_bs.bucket, _key), sync_pipe(_sync_pipe), conf(_conf), versioned_epoch(_versioned_epoch) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { - ldout(sync_env->cct, 10) << ": stat of remote obj: z=" << sc->source_zone + ldpp_dout(dpp, 10) << ": stat of remote obj: z=" << sc->source_zone << " b=" << sync_pipe.info.source_bs.bucket << " k=" << key << " size=" << size << " mtime=" << mtime << dendl; @@ -840,9 +840,9 @@ public: ElasticConfigRef _conf) : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env), sync_pipe(_sync_pipe), key(_key), mtime(_mtime), conf(_conf) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { - ldout(sync_env->cct, 10) << ": remove remote obj: z=" << sc->source_zone + ldpp_dout(dpp, 10) << ": remove remote obj: z=" << sc->source_zone << " b=" << sync_pipe.info.source_bs.bucket << " k=" << key << " mtime=" << mtime << dendl; yield { string path = conf->get_obj_path(sync_pipe.dest_bucket_info, key); diff --git a/src/rgw/rgw_sync_module_es_rest.cc b/src/rgw/rgw_sync_module_es_rest.cc index d78fc895dc208..f040a166b3674 100644 --- a/src/rgw/rgw_sync_module_es_rest.cc +++ b/src/rgw/rgw_sync_module_es_rest.cc @@ -218,7 +218,7 @@ void RGWMetadataSearchOp::execute(optional_yield y) bool valid = es_query.compile(&err); if (!valid) { - ldout(s->cct, 10) << "invalid query, failed generating request json" << dendl; + ldpp_dout(this, 10) << "invalid query, failed generating request json" << dendl; op_ret = -EINVAL; return; } @@ -245,20 +245,20 @@ void RGWMetadataSearchOp::execute(optional_yield y) if (marker > 0) { params.push_back(param_pair_t("from", marker_str.c_str())); } - ldout(s->cct, 20) << "sending request to elasticsearch, payload=" << string(in.c_str(), in.length()) << dendl; + ldpp_dout(this, 20) << "sending request to elasticsearch, payload=" << string(in.c_str(), in.length()) << dendl; auto& extra_headers = es_module->get_request_headers(); - op_ret = conn->get_resource(resource, ¶ms, &extra_headers, + op_ret = conn->get_resource(s, resource, ¶ms, &extra_headers, out, &in, nullptr, y); if (op_ret < 0) { - ldout(s->cct, 0) << "ERROR: failed to fetch resource (r=" << resource << ", ret=" << op_ret << ")" << dendl; + ldpp_dout(this, 0) << "ERROR: failed to fetch resource (r=" << resource << ", ret=" << op_ret << ")" << dendl; return; } - ldout(s->cct, 20) << "response: " << string(out.c_str(), out.length()) << dendl; + ldpp_dout(this, 20) << "response: " << string(out.c_str(), out.length()) << dendl; JSONParser jparser; if (!jparser.parse(out.c_str(), out.length())) { - ldout(s->cct, 0) << "ERROR: failed to parse elasticsearch response" << dendl; + ldpp_dout(this, 0) << "ERROR: failed to parse elasticsearch response" << dendl; op_ret = -EINVAL; return; } @@ -266,7 +266,7 @@ void RGWMetadataSearchOp::execute(optional_yield y) try { decode_json_obj(response, &jparser); } catch (const JSONDecoder::err& e) { - ldout(s->cct, 0) << "ERROR: failed to decode JSON input: " << e.what() << dendl; + ldpp_dout(this, 0) << "ERROR: failed to decode JSON input: " << e.what() << dendl; op_ret = -EINVAL; return; } @@ -419,7 +419,7 @@ RGWHandler_REST* RGWRESTMgr_MDSearch_S3::get_handler(rgw::sal::RGWRadosStore *st RGWHandler_REST *handler = new RGWHandler_REST_MDSearch_S3(auth_registry); - ldout(s->cct, 20) << __func__ << " handler=" << typeid(*handler).name() + ldpp_dout(s, 20) << __func__ << " handler=" << 
typeid(*handler).name() << dendl; return handler; } diff --git a/src/rgw/rgw_sync_module_log.cc b/src/rgw/rgw_sync_module_log.cc index d0475509daea3..c85fd478f1c28 100644 --- a/src/rgw/rgw_sync_module_log.cc +++ b/src/rgw/rgw_sync_module_log.cc @@ -14,8 +14,8 @@ class RGWLogStatRemoteObjCBCR : public RGWStatRemoteObjCBCR { public: RGWLogStatRemoteObjCBCR(RGWDataSyncCtx *_sc, rgw_bucket& _src_bucket, rgw_obj_key& _key) : RGWStatRemoteObjCBCR(_sc, _src_bucket, _key) {} - int operate() override { - ldout(sync_env->cct, 0) << "SYNC_LOG: stat of remote obj: z=" << sc->source_zone + int operate(const DoutPrefixProvider *dpp) override { + ldpp_dout(dpp, 0) << "SYNC_LOG: stat of remote obj: z=" << sc->source_zone << " b=" << src_bucket << " k=" << key << " size=" << size << " mtime=" << mtime << " attrs=" << attrs << dendl; return set_cr_done(); diff --git a/src/rgw/rgw_sync_module_pubsub.cc b/src/rgw/rgw_sync_module_pubsub.cc index 11388eadf1a76..d241ec4b9e6ea 100644 --- a/src/rgw/rgw_sync_module_pubsub.cc +++ b/src/rgw/rgw_sync_module_pubsub.cc @@ -43,10 +43,10 @@ config: // utility function to convert the args list from string format // (ampresend separated with equal sign) to prased structure -RGWHTTPArgs string_to_args(const std::string& str_args) { +RGWHTTPArgs string_to_args(const std::string& str_args, const DoutPrefixProvider *dpp) { RGWHTTPArgs args; args.set(str_args); - args.parse(); + args.parse(dpp); return args; } @@ -61,7 +61,7 @@ struct PSSubConfig { std::string arn_topic; RGWPubSubEndpoint::Ptr push_endpoint; - void from_user_conf(CephContext *cct, const rgw_pubsub_sub_config& uc) { + void from_user_conf(CephContext *cct, const rgw_pubsub_sub_config& uc, const DoutPrefixProvider *dpp) { name = uc.name; topic = uc.topic; push_endpoint_name = uc.dest.push_endpoint; @@ -72,10 +72,10 @@ struct PSSubConfig { if (!push_endpoint_name.empty()) { push_endpoint_args = uc.dest.push_endpoint_args; try { - push_endpoint = RGWPubSubEndpoint::create(push_endpoint_name, arn_topic, string_to_args(push_endpoint_args), cct); - ldout(cct, 20) << "push endpoint created: " << push_endpoint->to_str() << dendl; + push_endpoint = RGWPubSubEndpoint::create(push_endpoint_name, arn_topic, string_to_args(push_endpoint_args, dpp), cct); + ldpp_dout(dpp, 20) << "push endpoint created: " << push_endpoint->to_str() << dendl; } catch (const RGWPubSubEndpoint::configuration_error& e) { - ldout(cct, 1) << "ERROR: failed to create push endpoint: " + ldpp_dout(dpp, 1) << "ERROR: failed to create push endpoint: " << push_endpoint_name << " due to: " << e.what() << dendl; } } @@ -366,20 +366,20 @@ class RGWSingletonCR : public RGWCoroutine { return true; } - int operate_wrapper() override { + int operate_wrapper(const DoutPrefixProvider *dpp) override { reenter(&wrapper_state) { while (!is_done()) { - ldout(cct, 20) << __func__ << "(): operate_wrapper() -> operate()" << dendl; - operate_ret = operate(); + ldpp_dout(dpp, 20) << __func__ << "(): operate_wrapper() -> operate()" << dendl; + operate_ret = operate(dpp); if (operate_ret < 0) { - ldout(cct, 20) << *this << ": operate() returned r=" << operate_ret << dendl; + ldpp_dout(dpp, 20) << *this << ": operate() returned r=" << operate_ret << dendl; } if (!is_done()) { yield; } } - ldout(cct, 20) << __func__ << "(): RGWSingletonCR: operate_wrapper() done, need to wake up " << waiters.size() << " waiters" << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): RGWSingletonCR: operate_wrapper() done, need to wake up " << waiters.size() << " waiters" << dendl; /* we're done, 
can't yield anymore */ WaiterInfoRef waiter; @@ -465,7 +465,7 @@ class PSSubscription { retention_days = conf->events_retention_days; } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { rule.init_simple_days_rule("Pubsub Expiration", "" /* all objects in bucket */, retention_days); @@ -479,7 +479,7 @@ class PSSubscription { try { old_config.decode(iter); } catch (const buffer::error& e) { - ldpp_dout(sync_env->dpp, 0) << __func__ << "(): decode life cycle config failed" << dendl; + ldpp_dout(dpp, 0) << __func__ << "(): decode life cycle config failed" << dendl; } } @@ -490,7 +490,7 @@ class PSSubscription { if (old_rule.get_prefix().empty() && old_rule.get_expiration().get_days() == retention_days && old_rule.is_enabled()) { - ldpp_dout(sync_env->dpp, 20) << "no need to set lifecycle rule on bucket, existing rule matches config" << dendl; + ldpp_dout(dpp, 20) << "no need to set lifecycle rule on bucket, existing rule matches config" << dendl; return set_cr_done(); } } @@ -500,9 +500,9 @@ class PSSubscription { yield call(new RGWBucketLifecycleConfigCR(sync_env->async_rados, sync_env->store, lc_config, - sync_env->dpp)); + dpp)); if (retcode < 0) { - ldpp_dout(sync_env->dpp, 1) << "ERROR: failed to set lifecycle on bucket: ret=" << retcode << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to set lifecycle on bucket: ret=" << retcode << dendl; return set_cr_error(retcode); } @@ -530,7 +530,7 @@ class PSSubscription { sub_conf(sub->sub_conf) { } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { get_bucket_info.tenant = conf->user.tenant; get_bucket_info.bucket_name = sub_conf->data_bucket_name; @@ -540,9 +540,10 @@ class PSSubscription { yield call(new RGWGetBucketInfoCR(sync_env->async_rados, sync_env->store, get_bucket_info, - sub->get_bucket_info_result)); + sub->get_bucket_info_result, + dpp)); if (retcode < 0 && retcode != -ENOENT) { - ldpp_dout(sync_env->dpp, 1) << "ERROR: failed to geting bucket info: " << "tenant=" + ldpp_dout(dpp, 1) << "ERROR: failed to get bucket info: " << "tenant=" << get_bucket_info.tenant << " name=" << get_bucket_info.bucket_name << ": ret=" << retcode << dendl; } if (retcode == 0) { @@ -552,7 +553,7 @@ class PSSubscription { int ret = sub->data_access->get_bucket(result->bucket_info, result->attrs, &sub->bucket); if (ret < 0) { - ldpp_dout(sync_env->dpp, 1) << "ERROR: data_access.get_bucket() bucket=" << result->bucket_info.bucket << " failed, ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: data_access.get_bucket() bucket=" << result->bucket_info.bucket << " failed, ret=" << ret << dendl; return set_cr_error(ret); } } @@ -561,7 +562,7 @@ class PSSubscription { sub->get_bucket_info_result->bucket_info, sub->get_bucket_info_result->attrs)); if (retcode < 0) { - ldpp_dout(sync_env->dpp, 1) << "ERROR: failed to init lifecycle on bucket (bucket=" << sub_conf->data_bucket_name << ") ret=" << retcode << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to init lifecycle on bucket (bucket=" << sub_conf->data_bucket_name << ") ret=" << retcode << dendl; return set_cr_error(retcode); } @@ -570,13 +571,13 @@ class PSSubscription { create_bucket.user_info = sub->env->data_user_info; create_bucket.bucket_name = sub_conf->data_bucket_name; - ldpp_dout(sync_env->dpp, 20) << "pubsub: bucket create: using user info: " << json_str("obj",
*sub->env->data_user_info, true) << dendl; yield call(new RGWBucketCreateLocalCR(sync_env->async_rados, sync_env->store, create_bucket, - sync_env->dpp)); + dpp)); if (retcode < 0) { - ldpp_dout(sync_env->dpp, 1) << "ERROR: failed to create bucket: " << "tenant=" + ldpp_dout(dpp, 1) << "ERROR: failed to create bucket: " << "tenant=" << get_bucket_info.tenant << " name=" << get_bucket_info.bucket_name << ": ret=" << retcode << dendl; return set_cr_error(retcode); } @@ -585,7 +586,7 @@ class PSSubscription { } /* failed twice on -ENOENT, unexpected */ - ldpp_dout(sync_env->dpp, 1) << "ERROR: failed to create bucket " << "tenant=" << get_bucket_info.tenant + ldpp_dout(dpp, 1) << "ERROR: failed to create bucket " << "tenant=" << get_bucket_info.tenant << " name=" << get_bucket_info.bucket_name << dendl; return set_cr_error(-EIO); } @@ -611,7 +612,7 @@ class PSSubscription { oid_prefix(sub->sub_conf->data_oid_prefix) { } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { rgw_object_simple_put_params put_obj; reenter(this) { @@ -631,12 +632,12 @@ class PSSubscription { yield call(new RGWObjectSimplePutCR(sync_env->async_rados, sync_env->store, put_obj, - sync_env->dpp)); + dpp)); if (retcode < 0) { - ldpp_dout(sync_env->dpp, 10) << "failed to store event: " << put_obj.bucket << "/" << put_obj.key << " ret=" << retcode << dendl; + ldpp_dout(dpp, 10) << "failed to store event: " << put_obj.bucket << "/" << put_obj.key << " ret=" << retcode << dendl; return set_cr_error(retcode); } else { - ldpp_dout(sync_env->dpp, 20) << "event stored: " << put_obj.bucket << "/" << put_obj.key << dendl; + ldpp_dout(dpp, 20) << "event stored: " << put_obj.bucket << "/" << put_obj.key << dendl; } return set_cr_done(); @@ -661,18 +662,18 @@ class PSSubscription { sub_conf(_sub->sub_conf) { } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { ceph_assert(sub_conf->push_endpoint); yield call(sub_conf->push_endpoint->send_to_completion_async(*event.get(), sync_env)); if (retcode < 0) { - ldout(sync_env->cct, 10) << "failed to push event: " << event->id << + ldpp_dout(dpp, 10) << "failed to push event: " << event->id << " to endpoint: " << sub_conf->push_endpoint_name << " ret=" << retcode << dendl; return set_cr_error(retcode); } - ldout(sync_env->cct, 20) << "event: " << event->id << + ldpp_dout(dpp, 20) << "event: " << event->id << " pushed to endpoint: " << sub_conf->push_endpoint_name << dendl; return set_cr_done(); } @@ -694,7 +695,7 @@ public: env(_env), sub_conf(std::make_shared()), data_access(std::make_shared(sync_env->store)) { - sub_conf->from_user_conf(sync_env->cct, user_sub_conf); + sub_conf->from_user_conf(sync_env->cct, user_sub_conf, sync_env->dpp); } virtual ~PSSubscription() { if (init_cr) { @@ -765,10 +766,10 @@ class PSManager } ~GetSubCR() { } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { if (owner.empty()) { - ldout(sync_env->cct, 1) << "ERROR: missing user info when getting subscription: " << sub_name << dendl; + ldpp_dout(dpp, 1) << "ERROR: missing user info when getting subscription: " << sub_name << dendl; mgr->remove_get_sub(owner, sub_name); return set_cr_error(-EINVAL); } else { @@ -778,7 +779,7 @@ class PSManager rgw_raw_obj obj; ps.get_sub_meta_obj(sub_name, &obj); bool empty_on_enoent = false; - call(new ReadInfoCR(sync_env->async_rados, sync_env->store->svc()->sysobj, + call(new ReadInfoCR(dpp, sync_env->async_rados, sync_env->store->svc()->sysobj, obj, 
&user_sub_conf, empty_on_enoent)); } @@ -792,7 +793,7 @@ class PSManager yield (*ref)->call_init_cr(this); if (retcode < 0) { - ldout(sync_env->cct, 1) << "ERROR: failed to init subscription when getting subscription: " << sub_name << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to init subscription when getting subscription: " << sub_name << dendl; mgr->remove_get_sub(owner, sub_name); return set_cr_error(retcode); } @@ -886,29 +887,29 @@ public: PSEnvRef& _env) : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env), env(_env), conf(env->conf) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { - ldpp_dout(sync_env->dpp, 1) << ": init pubsub config zone=" << sc->source_zone << dendl; + ldpp_dout(dpp, 1) << ": init pubsub config zone=" << sc->source_zone << dendl; /* nothing to do here right now */ create_user.user = conf->user; create_user.max_buckets = 0; /* unlimited */ create_user.display_name = "pubsub"; create_user.generate_key = false; - yield call(new RGWUserCreateCR(sync_env->async_rados, sync_env->store, create_user, sync_env->dpp)); + yield call(new RGWUserCreateCR(sync_env->async_rados, sync_env->store, create_user, dpp)); if (retcode < 0 && retcode != -ERR_USER_EXIST) { - ldpp_dout(sync_env->dpp, 1) << "ERROR: failed to create rgw user: ret=" << retcode << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to create rgw user: ret=" << retcode << dendl; return set_cr_error(retcode); } get_user_info.user = conf->user; - yield call(new RGWGetUserInfoCR(sync_env->async_rados, sync_env->store, get_user_info, env->data_user_info)); + yield call(new RGWGetUserInfoCR(sync_env->async_rados, sync_env->store, get_user_info, env->data_user_info, dpp)); if (retcode < 0) { - ldpp_dout(sync_env->dpp, 1) << "ERROR: failed to create rgw user: ret=" << retcode << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to get user info: ret=" << retcode << dendl; return set_cr_error(retcode); } - ldpp_dout(sync_env->dpp, 20) << "pubsub: get user info cr returned: " << json_str("obj", *env->data_user_info, true) << dendl; + ldpp_dout(dpp, 20) << "pubsub: get user info cr returned: " << json_str("obj", *env->data_user_info, true) << dendl; return set_cr_done(); @@ -961,7 +962,7 @@ public: topics(_topics) { *topics = std::make_shared >(); } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { ps.get_bucket_meta_obj(bucket, &bucket_obj); ps.get_meta_obj(&user_obj); @@ -969,7 +970,7 @@ public: using ReadInfoCR = RGWSimpleRadosReadCR; yield { bool empty_on_enoent = true; - call(new ReadInfoCR(sync_env->async_rados, sync_env->store->svc()->sysobj, + call(new ReadInfoCR(dpp, sync_env->async_rados, sync_env->store->svc()->sysobj, bucket_obj, &bucket_topics, empty_on_enoent)); } @@ -977,13 +978,13 @@ public: return set_cr_error(retcode); } - ldout(sync_env->cct, 20) << "RGWPSFindBucketTopicsCR(): found " << bucket_topics.topics.size() << " topics for bucket " << bucket << dendl; + ldpp_dout(dpp, 20) << "RGWPSFindBucketTopicsCR(): found " << bucket_topics.topics.size() << " topics for bucket " << bucket << dendl; if (!bucket_topics.topics.empty()) { using ReadUserTopicsInfoCR = RGWSimpleRadosReadCR; yield { bool empty_on_enoent = true; - call(new ReadUserTopicsInfoCR(sync_env->async_rados, sync_env->store->svc()->sysobj, + call(new ReadUserTopicsInfoCR(dpp, sync_env->async_rados, sync_env->store->svc()->sysobj, user_obj, &user_topics, empty_on_enoent)); } @@ -1040,13 +1041,13 @@ public: has_subscriptions(false),
event_handled(false) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { - ldout(sc->cct, 20) << ": handle event: obj: z=" << sc->source_zone + ldpp_dout(dpp, 20) << ": handle event: obj: z=" << sc->source_zone << " event=" << json_str("event", *event, false) << " owner=" << owner << dendl; - ldout(sc->cct, 20) << "pubsub: " << topics->size() << " topics found for path" << dendl; + ldpp_dout(dpp, 20) << "pubsub: " << topics->size() << " topics found for path" << dendl; // outside caller should check that ceph_assert(!topics->empty()); @@ -1055,17 +1056,17 @@ public: // loop over all topics related to the bucket/object for (titer = topics->begin(); titer != topics->end(); ++titer) { - ldout(sc->cct, 20) << ": notification for " << event->source << ": topic=" << + ldpp_dout(dpp, 20) << ": notification for " << event->source << ": topic=" << (*titer)->name << ", has " << (*titer)->subs.size() << " subscriptions" << dendl; // loop over all subscriptions of the topic for (siter = (*titer)->subs.begin(); siter != (*titer)->subs.end(); ++siter) { - ldout(sc->cct, 20) << ": subscription: " << *siter << dendl; + ldpp_dout(dpp, 20) << ": subscription: " << *siter << dendl; has_subscriptions = true; // try to read subscription configuration yield PSManager::call_get_subscription_cr(sc, env->manager, this, owner, *siter, &sub); if (retcode < 0) { if (perfcounter) perfcounter->inc(l_rgw_pubsub_missing_conf); - ldout(sc->cct, 1) << "ERROR: failed to find subscription config for subscription=" << *siter + ldpp_dout(dpp, 1) << "ERROR: failed to find subscription config for subscription=" << *siter << " ret=" << retcode << dendl; if (retcode == -ENOENT) { // missing subscription info should be reflected back as invalid argument @@ -1077,21 +1078,21 @@ public: } if (sub->sub_conf->s3_id.empty()) { // subscription was not made by S3 compatible API - ldout(sc->cct, 20) << "storing event for subscription=" << *siter << " owner=" << owner << " ret=" << retcode << dendl; + ldpp_dout(dpp, 20) << "storing event for subscription=" << *siter << " owner=" << owner << " ret=" << retcode << dendl; yield call(PSSubscription::store_event_cr(sc, sub, event)); if (retcode < 0) { if (perfcounter) perfcounter->inc(l_rgw_pubsub_store_fail); - ldout(sc->cct, 1) << "ERROR: failed to store event for subscription=" << *siter << " ret=" << retcode << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to store event for subscription=" << *siter << " ret=" << retcode << dendl; } else { if (perfcounter) perfcounter->inc(l_rgw_pubsub_store_ok); event_handled = true; } if (sub->sub_conf->push_endpoint) { - ldout(sc->cct, 20) << "push event for subscription=" << *siter << " owner=" << owner << " ret=" << retcode << dendl; + ldpp_dout(dpp, 20) << "push event for subscription=" << *siter << " owner=" << owner << " ret=" << retcode << dendl; yield call(PSSubscription::push_event_cr(sc, sub, event)); if (retcode < 0) { if (perfcounter) perfcounter->inc(l_rgw_pubsub_push_failed); - ldout(sc->cct, 1) << "ERROR: failed to push event for subscription=" << *siter << " ret=" << retcode << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to push event for subscription=" << *siter << " ret=" << retcode << dendl; } else { if (perfcounter) perfcounter->inc(l_rgw_pubsub_push_ok); event_handled = true; @@ -1099,23 +1100,23 @@ public: } } else { // subscription was made by S3 compatible API - ldout(sc->cct, 20) << "storing s3 event for subscription=" << *siter << " owner=" << owner << " ret=" << retcode << dendl; 
+ ldpp_dout(dpp, 20) << "storing s3 event for subscription=" << *siter << " owner=" << owner << " ret=" << retcode << dendl; s3_event->configurationId = sub->sub_conf->s3_id; s3_event->opaque_data = (*titer)->opaque_data; yield call(PSSubscription::store_event_cr(sc, sub, s3_event)); if (retcode < 0) { if (perfcounter) perfcounter->inc(l_rgw_pubsub_store_fail); - ldout(sc->cct, 1) << "ERROR: failed to store s3 event for subscription=" << *siter << " ret=" << retcode << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to store s3 event for subscription=" << *siter << " ret=" << retcode << dendl; } else { if (perfcounter) perfcounter->inc(l_rgw_pubsub_store_ok); event_handled = true; } if (sub->sub_conf->push_endpoint) { - ldout(sc->cct, 20) << "push s3 event for subscription=" << *siter << " owner=" << owner << " ret=" << retcode << dendl; + ldpp_dout(dpp, 20) << "push s3 event for subscription=" << *siter << " owner=" << owner << " ret=" << retcode << dendl; yield call(PSSubscription::push_event_cr(sc, sub, s3_event)); if (retcode < 0) { if (perfcounter) perfcounter->inc(l_rgw_pubsub_push_failed); - ldout(sc->cct, 1) << "ERROR: failed to push s3 event for subscription=" << *siter << " ret=" << retcode << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to push s3 event for subscription=" << *siter << " ret=" << retcode << dendl; } else { if (perfcounter) perfcounter->inc(l_rgw_pubsub_push_ok); event_handled = true; @@ -1158,9 +1159,9 @@ public: versioned_epoch(_versioned_epoch), topics(_topics) { } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { - ldout(sc->cct, 20) << ": stat of remote obj: z=" << sc->source_zone + ldpp_dout(dpp, 20) << ": stat of remote obj: z=" << sc->source_zone << " b=" << sync_pipe.info.source_bs.bucket << " k=" << key << " size=" << size << " mtime=" << mtime << " attrs=" << attrs << dendl; { @@ -1237,18 +1238,18 @@ public: ~RGWPSHandleObjCreateCR() override {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { yield call(new RGWPSFindBucketTopicsCR(sc, env, sync_pipe.dest_bucket_info.owner, sync_pipe.info.source_bs.bucket, key, rgw::notify::ObjectCreated, &topics)); if (retcode < 0) { - ldout(sc->cct, 1) << "ERROR: RGWPSFindBucketTopicsCR returned ret=" << retcode << dendl; + ldpp_dout(dpp, 1) << "ERROR: RGWPSFindBucketTopicsCR returned ret=" << retcode << dendl; return set_cr_error(retcode); } if (topics->empty()) { - ldout(sc->cct, 20) << "no topics found for " << sync_pipe.info.source_bs.bucket << "/" << key << dendl; + ldpp_dout(dpp, 20) << "no topics found for " << sync_pipe.info.source_bs.bucket << "/" << key << dendl; return set_cr_done(); } yield call(new RGWPSHandleRemoteObjCR(sc, sync_pipe, key, env, versioned_epoch, topics)); @@ -1284,17 +1285,17 @@ public: bucket(_sync_pipe.dest_bucket_info.bucket), key(_key), mtime(_mtime), event_type(_event_type) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { - ldout(sc->cct, 20) << ": remove remote obj: z=" << sc->source_zone + ldpp_dout(dpp, 20) << ": remove remote obj: z=" << sc->source_zone << " b=" << bucket << " k=" << key << " mtime=" << mtime << dendl; yield call(new RGWPSFindBucketTopicsCR(sc, env, owner, bucket, key, event_type, &topics)); if (retcode < 0) { - ldout(sc->cct, 1) << "ERROR: RGWPSFindBucketTopicsCR returned ret=" << retcode << dendl; + ldpp_dout(dpp, 1) << "ERROR: RGWPSFindBucketTopicsCR returned ret=" << retcode << dendl; return 
set_cr_error(retcode); } if (topics->empty()) { - ldout(sc->cct, 20) << "no topics found for " << bucket << "/" << key << dendl; + ldpp_dout(dpp, 20) << "no topics found for " << bucket << "/" << key << dendl; return set_cr_done(); } // at this point we don't know whether we need the ceph event or S3 event diff --git a/src/rgw/rgw_sync_module_pubsub_rest.cc b/src/rgw/rgw_sync_module_pubsub_rest.cc index c7feff500226b..9600a465dc88c 100644 --- a/src/rgw/rgw_sync_module_pubsub_rest.cc +++ b/src/rgw/rgw_sync_module_pubsub_rest.cc @@ -167,7 +167,7 @@ public: bool exists; topic_name = s->info.args.get("topic", &exists); if (!exists) { - ldout(s->cct, 1) << "missing required param 'topic'" << dendl; + ldpp_dout(this, 1) << "missing required param 'topic'" << dendl; return -EINVAL; } @@ -232,7 +232,7 @@ public: event_id = s->info.args.get("event-id", &exists); if (!exists) { - ldout(s->cct, 1) << "missing required param 'event-id'" << dendl; + ldpp_dout(this, 1) << "missing required param 'event-id'" << dendl; return -EINVAL; } return 0; @@ -248,7 +248,7 @@ public: const int ret = s->info.args.get_int("max-entries", &max_entries, RGWPubSub::Sub::DEFAULT_MAX_EVENTS); if (ret < 0) { - ldout(s->cct, 1) << "failed to parse 'max-entries' param" << dendl; + ldpp_dout(this, 1) << "failed to parse 'max-entries' param" << dendl; return -EINVAL; } return 0; @@ -350,7 +350,7 @@ private: bool exists; topic_name = s->info.args.get("topic", &exists); if (!exists) { - ldout(s->cct, 1) << "missing required param 'topic'" << dendl; + ldpp_dout(this, 1) << "missing required param 'topic'" << dendl; return -EINVAL; } @@ -361,7 +361,7 @@ private: } rgw::notify::from_string_list(events_str, events); if (std::find(events.begin(), events.end(), rgw::notify::UnknownEvent) != events.end()) { - ldout(s->cct, 1) << "invalid event type in list: " << events_str << dendl; + ldpp_dout(this, 1) << "invalid event type in list: " << events_str << dendl; return -EINVAL; } return notif_bucket_path(s->object->get_name(), bucket_name); @@ -377,12 +377,12 @@ void RGWPSCreateNotif_ObjStore::execute(optional_yield y) ps.emplace(store, s->owner.get_id().tenant); auto b = ps->get_bucket(bucket_info.bucket); - op_ret = b->create_notification(topic_name, events, y); + op_ret = b->create_notification(this, topic_name, events, y); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to create notification for topic '" << topic_name << "', ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to create notification for topic '" << topic_name << "', ret=" << op_ret << dendl; return; } - ldout(s->cct, 20) << "successfully created notification for topic '" << topic_name << "'" << dendl; + ldpp_dout(this, 20) << "successfully created notification for topic '" << topic_name << "'" << dendl; } // command: DELETE /notifications/bucket/?topic= @@ -394,7 +394,7 @@ private: bool exists; topic_name = s->info.args.get("topic", &exists); if (!exists) { - ldout(s->cct, 1) << "missing required param 'topic'" << dendl; + ldpp_dout(this, 1) << "missing required param 'topic'" << dendl; return -EINVAL; } return notif_bucket_path(s->object->get_name(), bucket_name); @@ -413,12 +413,12 @@ void RGWPSDeleteNotif_ObjStore::execute(optional_yield y) { ps.emplace(store, s->owner.get_id().tenant); auto b = ps->get_bucket(bucket_info.bucket); - op_ret = b->remove_notification(topic_name, y); + op_ret = b->remove_notification(this, topic_name, y); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to remove notification from topic '" << topic_name << "', ret=" << op_ret << 
dendl; + ldpp_dout(s, 1) << "failed to remove notification from topic '" << topic_name << "', ret=" << op_ret << dendl; return; } - ldout(s->cct, 20) << "successfully removed notification from topic '" << topic_name << "'" << dendl; + ldpp_dout(this, 20) << "successfully removed notification from topic '" << topic_name << "'" << dendl; } // command: GET /notifications/bucket/ @@ -454,7 +454,7 @@ void RGWPSListNotifs_ObjStore::execute(optional_yield y) auto b = ps->get_bucket(bucket_info.bucket); op_ret = b->get_topics(&result); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to get topics, ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to get topics, ret=" << op_ret << dendl; return; } } @@ -522,7 +522,7 @@ RGWHandler_REST* RGWRESTMgr_PubSub::get_handler(rgw::sal::RGWRadosStore *store, } } - ldout(s->cct, 20) << __func__ << " handler=" << (handler ? typeid(*handler).name() : "") << dendl; + ldpp_dout(s, 20) << __func__ << " handler=" << (handler ? typeid(*handler).name() : "") << dendl; return handler; } diff --git a/src/rgw/rgw_sync_trace.cc b/src/rgw/rgw_sync_trace.cc index ddcdea2495204..e99fdcf5030c3 100644 --- a/src/rgw/rgw_sync_trace.cc +++ b/src/rgw/rgw_sync_trace.cc @@ -15,7 +15,6 @@ #define dout_context g_ceph_context -#define dout_subsys ceph_subsys_rgw_sync RGWSyncTraceNode::RGWSyncTraceNode(CephContext *_cct, uint64_t _handle, const RGWSyncTraceNodeRef& _parent, @@ -65,10 +64,10 @@ public: RGWSyncTraceServiceMapThread(RGWRados *_store, RGWSyncTraceManager *_manager) : RGWRadosThread(_store, "sync-trace"), store(_store), manager(_manager) {} - int process() override; + int process(const DoutPrefixProvider *dpp) override; }; -int RGWSyncTraceServiceMapThread::process() +int RGWSyncTraceServiceMapThread::process(const DoutPrefixProvider *dpp) { map status; status["current_sync"] = manager->get_active_names(); diff --git a/src/rgw/rgw_tools.cc b/src/rgw/rgw_tools.cc index 82e0ecf546d60..4ed742ceffcbd 100644 --- a/src/rgw/rgw_tools.cc +++ b/src/rgw/rgw_tools.cc @@ -35,7 +35,8 @@ static std::map* ext_mime_map; -int rgw_init_ioctx(librados::Rados *rados, const rgw_pool& pool, +int rgw_init_ioctx(const DoutPrefixProvider *dpp, + librados::Rados *rados, const rgw_pool& pool, librados::IoCtx& ioctx, bool create, bool mostly_omap) { @@ -43,7 +44,7 @@ int rgw_init_ioctx(librados::Rados *rados, const rgw_pool& pool, if (r == -ENOENT && create) { r = rados->pool_create(pool.name.c_str()); if (r == -ERANGE) { - dout(0) + ldpp_dout(dpp, 0) << __func__ << " ERROR: librados::Rados::pool_create returned " << cpp_strerror(-r) << " (this can be due to a pool or placement group misconfiguration, e.g." 
@@ -74,7 +75,7 @@ int rgw_init_ioctx(librados::Rados *rados, const rgw_pool& pool, stringify(bias) + "\"}", inbl, NULL, NULL); if (r < 0) { - dout(10) << __func__ << " warning: failed to set pg_autoscale_bias on " + ldpp_dout(dpp, 10) << __func__ << " warning: failed to set pg_autoscale_bias on " << pool.name << dendl; } // set pg_num_min @@ -85,7 +86,7 @@ int rgw_init_ioctx(librados::Rados *rados, const rgw_pool& pool, stringify(min) + "\"}", inbl, NULL, NULL); if (r < 0) { - dout(10) << __func__ << " warning: failed to set pg_num_min on " + ldpp_dout(dpp, 10) << __func__ << " warning: failed to set pg_num_min on " << pool.name << dendl; } // set recovery_priority @@ -96,7 +97,7 @@ int rgw_init_ioctx(librados::Rados *rados, const rgw_pool& pool, stringify(p) + "\"}", inbl, NULL, NULL); if (r < 0) { - dout(10) << __func__ << " warning: failed to set recovery_priority on " + ldpp_dout(dpp, 10) << __func__ << " warning: failed to set recovery_priority on " << pool.name << dendl; } } @@ -155,7 +156,8 @@ int rgw_parse_list_of_flags(struct rgw_name_to_flag *mapping, return 0; } -int rgw_put_system_obj(RGWSysObjectCtx& obj_ctx, const rgw_pool& pool, const string& oid, bufferlist& data, bool exclusive, +int rgw_put_system_obj(const DoutPrefixProvider *dpp, + RGWSysObjectCtx& obj_ctx, const rgw_pool& pool, const string& oid, bufferlist& data, bool exclusive, RGWObjVersionTracker *objv_tracker, real_time set_mtime, optional_yield y, map *pattrs) { map no_attrs; @@ -171,13 +173,13 @@ int rgw_put_system_obj(RGWSysObjectCtx& obj_ctx, const rgw_pool& pool, const str .set_exclusive(exclusive) .set_mtime(set_mtime) .set_attrs(*pattrs) - .write(data, y); + .write(dpp, data, y); return ret; } int rgw_get_system_obj(RGWSysObjectCtx& obj_ctx, const rgw_pool& pool, const string& key, bufferlist& bl, - RGWObjVersionTracker *objv_tracker, real_time *pmtime, optional_yield y, map *pattrs, + RGWObjVersionTracker *objv_tracker, real_time *pmtime, optional_yield y, const DoutPrefixProvider *dpp, map *pattrs, rgw_cache_entry_info *cache_info, boost::optional refresh_version) { @@ -197,13 +199,13 @@ int rgw_get_system_obj(RGWSysObjectCtx& obj_ctx, const rgw_pool& pool, const str int ret = rop.set_attrs(pattrs) .set_last_mod(pmtime) .set_objv_tracker(objv_tracker) - .stat(y); + .stat(y, dpp); if (ret < 0) return ret; ret = rop.set_cache_info(cache_info) .set_refresh_version(refresh_version) - .read(&bl, y); + .read(dpp, &bl, y); if (ret == -ECANCELED) { /* raced, restart */ if (!original_readv.empty()) { @@ -228,7 +230,8 @@ int rgw_get_system_obj(RGWSysObjectCtx& obj_ctx, const rgw_pool& pool, const str return 0; } -int rgw_delete_system_obj(RGWSI_SysObj *sysobj_svc, const rgw_pool& pool, const string& oid, +int rgw_delete_system_obj(const DoutPrefixProvider *dpp, + RGWSI_SysObj *sysobj_svc, const rgw_pool& pool, const string& oid, RGWObjVersionTracker *objv_tracker, optional_yield y) { auto obj_ctx = sysobj_svc->init_obj_ctx(); @@ -236,12 +239,12 @@ int rgw_delete_system_obj(RGWSI_SysObj *sysobj_svc, const rgw_pool& pool, const rgw_raw_obj obj(pool, oid); return sysobj.wop() .set_objv_tracker(objv_tracker) - .remove(y); + .remove(dpp, y); } thread_local bool is_asio_thread = false; -int rgw_rados_operate(librados::IoCtx& ioctx, const std::string& oid, +int rgw_rados_operate(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx, const std::string& oid, librados::ObjectReadOperation *op, bufferlist* pbl, optional_yield y, int flags) { @@ -260,12 +263,12 @@ int rgw_rados_operate(librados::IoCtx& ioctx, const 
std::string& oid, } // work on asio threads should be asynchronous, so warn when they block if (is_asio_thread) { - dout(20) << "WARNING: blocking librados call" << dendl; + ldpp_dout(dpp, 20) << "WARNING: blocking librados call" << dendl; } return ioctx.operate(oid, op, nullptr, flags); } -int rgw_rados_operate(librados::IoCtx& ioctx, const std::string& oid, +int rgw_rados_operate(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx, const std::string& oid, librados::ObjectWriteOperation *op, optional_yield y, int flags) { @@ -277,12 +280,12 @@ int rgw_rados_operate(librados::IoCtx& ioctx, const std::string& oid, return -ec.value(); } if (is_asio_thread) { - dout(20) << "WARNING: blocking librados call" << dendl; + ldpp_dout(dpp, 20) << "WARNING: blocking librados call" << dendl; } return ioctx.operate(oid, op, flags); } -int rgw_rados_notify(librados::IoCtx& ioctx, const std::string& oid, +int rgw_rados_notify(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx, const std::string& oid, bufferlist& bl, uint64_t timeout_ms, bufferlist* pbl, optional_yield y) { @@ -298,7 +301,7 @@ int rgw_rados_notify(librados::IoCtx& ioctx, const std::string& oid, return -ec.value(); } if (is_asio_thread) { - dout(20) << "WARNING: blocking librados call" << dendl; + ldpp_dout(dpp, 20) << "WARNING: blocking librados call" << dendl; } return ioctx.notify2(oid, bl, timeout_ms, pbl); } @@ -432,13 +435,14 @@ int RGWDataAccess::Bucket::finish_init() return 0; } -int RGWDataAccess::Bucket::init(optional_yield y) +int RGWDataAccess::Bucket::init(const DoutPrefixProvider *dpp, optional_yield y) { int ret = sd->store->getRados()->get_bucket_info(sd->store->svc(), tenant, name, bucket_info, &mtime, y, + dpp, &attrs); if (ret < 0) { return ret; diff --git a/src/rgw/rgw_tools.h b/src/rgw/rgw_tools.h index cf586dabea9cf..c1da97c890892 100644 --- a/src/rgw/rgw_tools.h +++ b/src/rgw/rgw_tools.h @@ -26,7 +26,8 @@ namespace rgw { namespace sal { struct obj_version; -int rgw_init_ioctx(librados::Rados *rados, const rgw_pool& pool, +int rgw_init_ioctx(const DoutPrefixProvider *dpp, + librados::Rados *rados, const rgw_pool& pool, librados::IoCtx& ioctx, bool create = false, bool mostly_omap = false); @@ -71,13 +72,14 @@ struct rgw_name_to_flag { int rgw_parse_list_of_flags(struct rgw_name_to_flag *mapping, const string& str, uint32_t *perm); -int rgw_put_system_obj(RGWSysObjectCtx& obj_ctx, const rgw_pool& pool, const string& oid, bufferlist& data, bool exclusive, +int rgw_put_system_obj(const DoutPrefixProvider *dpp, RGWSysObjectCtx& obj_ctx, const rgw_pool& pool, const string& oid, bufferlist& data, bool exclusive, RGWObjVersionTracker *objv_tracker, real_time set_mtime, optional_yield y, map *pattrs = NULL); int rgw_get_system_obj(RGWSysObjectCtx& obj_ctx, const rgw_pool& pool, const string& key, bufferlist& bl, - RGWObjVersionTracker *objv_tracker, real_time *pmtime, optional_yield y, map *pattrs = NULL, + RGWObjVersionTracker *objv_tracker, real_time *pmtime, optional_yield y, const DoutPrefixProvider *dpp, map *pattrs = NULL, rgw_cache_entry_info *cache_info = NULL, boost::optional refresh_version = boost::none); -int rgw_delete_system_obj(RGWSI_SysObj *sysobj_svc, const rgw_pool& pool, const string& oid, +int rgw_delete_system_obj(const DoutPrefixProvider *dpp, + RGWSI_SysObj *sysobj_svc, const rgw_pool& pool, const string& oid, RGWObjVersionTracker *objv_tracker, optional_yield y); const char *rgw_find_mime_by_ext(string& ext); @@ -90,13 +92,13 @@ void rgw_filter_attrset(map& unfiltered_attrset, const strin extern 
thread_local bool is_asio_thread; /// perform the rados operation, using the yield context when given -int rgw_rados_operate(librados::IoCtx& ioctx, const std::string& oid, +int rgw_rados_operate(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx, const std::string& oid, librados::ObjectReadOperation *op, bufferlist* pbl, optional_yield y, int flags = 0); -int rgw_rados_operate(librados::IoCtx& ioctx, const std::string& oid, +int rgw_rados_operate(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx, const std::string& oid, librados::ObjectWriteOperation *op, optional_yield y, int flags = 0); -int rgw_rados_notify(librados::IoCtx& ioctx, const std::string& oid, +int rgw_rados_notify(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx, const std::string& oid, bufferlist& bl, uint64_t timeout_ms, bufferlist* pbl, optional_yield y); @@ -177,7 +179,7 @@ public: name(_name), bucket_id(_bucket_id) {} Bucket(RGWDataAccess *_sd) : sd(_sd) {} - int init(optional_yield y); + int init(const DoutPrefixProvider *dpp, optional_yield y); int init(const RGWBucketInfo& _bucket_info, const map& _attrs); public: int get_object(const rgw_obj_key& key, @@ -232,13 +234,14 @@ public: friend class Bucket; }; - int get_bucket(const string& tenant, + int get_bucket(const DoutPrefixProvider *dpp, + const string& tenant, const string name, const string bucket_id, BucketRef *bucket, optional_yield y) { bucket->reset(new Bucket(this, tenant, name, bucket_id)); - return (*bucket)->init(y); + return (*bucket)->init(dpp, y); } int get_bucket(const RGWBucketInfo& bucket_info, diff --git a/src/rgw/rgw_torrent.cc b/src/rgw/rgw_torrent.cc index 3470471d53542..53fbe9de4ddaf 100644 --- a/src/rgw/rgw_torrent.cc +++ b/src/rgw/rgw_torrent.cc @@ -66,17 +66,17 @@ int seed::get_torrent_file(rgw::sal::RGWObject* object, string oid, key; get_obj_bucket_and_oid_loc(obj, oid, key); - ldout(s->cct, 20) << "NOTICE: head obj oid= " << oid << dendl; + ldpp_dout(s, 20) << "NOTICE: head obj oid= " << oid << dendl; const set obj_key{RGW_OBJ_TORRENT}; map m; - const int r = object->omap_get_vals_by_keys(oid, obj_key, &m); + const int r = object->omap_get_vals_by_keys(s, oid, obj_key, &m); if (r < 0) { - ldout(s->cct, 0) << "ERROR: omap_get_vals_by_keys failed: " << r << dendl; + ldpp_dout(s, 0) << "ERROR: omap_get_vals_by_keys failed: " << r << dendl; return r; } if (m.size() != 1) { - ldout(s->cct, 0) << "ERROR: omap key " RGW_OBJ_TORRENT " not found" << dendl; + ldpp_dout(s, 0) << "ERROR: omap key " RGW_OBJ_TORRENT " not found" << dendl; return -EINVAL; } bl.append(std::move(m.begin()->second)); @@ -116,7 +116,7 @@ int seed::complete(optional_yield y) ret = save_torrent_file(y); if (0 != ret) { - ldout(s->cct, 0) << "ERROR: failed to save_torrent_file() ret= "<< ret << dendl; + ldpp_dout(s, 0) << "ERROR: failed to save_torrent_file() ret= "<< ret << dendl; return ret; } @@ -204,7 +204,7 @@ void seed::set_announce() if (announce_list.empty()) { - ldout(s->cct, 5) << "NOTICE: announce_list is empty " << dendl; + ldpp_dout(s, 5) << "NOTICE: announce_list is empty " << dendl; return; } @@ -257,10 +257,10 @@ int seed::save_torrent_file(optional_yield y) auto obj_ctx = store->svc()->sysobj->init_obj_ctx(); auto sysobj = obj_ctx.get_obj(raw_obj); - op_ret = sysobj.omap().set(key, bl, y); + op_ret = sysobj.omap().set(s, key, bl, y); if (op_ret < 0) { - ldout(s->cct, 0) << "ERROR: failed to omap_set() op_ret = " << op_ret << dendl; + ldpp_dout(s, 0) << "ERROR: failed to omap_set() op_ret = " << op_ret << dendl; return op_ret; } diff --git 
a/src/rgw/rgw_trim_bilog.cc b/src/rgw/rgw_trim_bilog.cc index 658b501f55598..1aa24517569cf 100644 --- a/src/rgw/rgw_trim_bilog.cc +++ b/src/rgw/rgw_trim_bilog.cc @@ -258,8 +258,8 @@ class BucketTrimWatcher : public librados::WatchCtx2 { stop(); } - int start() { - int r = store->getRados()->get_raw_obj_ref(obj, &ref); + int start(const DoutPrefixProvider *dpp) { + int r = store->getRados()->get_raw_obj_ref(dpp, obj, &ref); if (r < 0) { return r; } @@ -274,13 +274,13 @@ class BucketTrimWatcher : public librados::WatchCtx2 { } } if (r < 0) { - lderr(store->ctx()) << "Failed to watch " << ref.obj + ldpp_dout(dpp, -1) << "Failed to watch " << ref.obj << " with " << cpp_strerror(-r) << dendl; ref.pool.ioctx().close(); return r; } - ldout(store->ctx(), 10) << "Watching " << ref.obj.oid << dendl; + ldpp_dout(dpp, 10) << "Watching " << ref.obj.oid << dendl; return 0; } @@ -381,15 +381,17 @@ int take_min_status(CephContext *cct, Iter first, Iter last, /// concurrent requests class BucketTrimShardCollectCR : public RGWShardCollectCR { static constexpr int MAX_CONCURRENT_SHARDS = 16; + const DoutPrefixProvider *dpp; rgw::sal::RGWRadosStore *const store; const RGWBucketInfo& bucket_info; const std::vector& markers; //< shard markers to trim size_t i{0}; //< index of current shard marker public: - BucketTrimShardCollectCR(rgw::sal::RGWRadosStore *store, const RGWBucketInfo& bucket_info, + BucketTrimShardCollectCR(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, const RGWBucketInfo& bucket_info, const std::vector& markers) : RGWShardCollectCR(store->ctx(), MAX_CONCURRENT_SHARDS), - store(store), bucket_info(bucket_info), markers(markers) + dpp(dpp), store(store), bucket_info(bucket_info), markers(markers) {} bool spawn_next() override; }; @@ -402,9 +404,9 @@ bool BucketTrimShardCollectCR::spawn_next() // skip empty markers if (!marker.empty()) { - ldout(cct, 10) << "trimming bilog shard " << shard_id + ldpp_dout(dpp, 10) << "trimming bilog shard " << shard_id << " of " << bucket_info.bucket << " at marker " << marker << dendl; - spawn(new RGWRadosBILogTrimCR(store, bucket_info, shard_id, + spawn(new RGWRadosBILogTrimCR(dpp, store, bucket_info, shard_id, std::string{}, marker), false); return true; @@ -426,6 +428,7 @@ class BucketTrimInstanceCR : public RGWCoroutine { RGWBucketInfo _bucket_info; const RGWBucketInfo *pbucket_info; //< pointer to bucket instance info to locate bucket indices int child_ret = 0; + const DoutPrefixProvider *dpp; using StatusShards = std::vector; std::vector peer_status; //< sync status for each peer @@ -434,32 +437,35 @@ class BucketTrimInstanceCR : public RGWCoroutine { public: BucketTrimInstanceCR(rgw::sal::RGWRadosStore *store, RGWHTTPManager *http, BucketTrimObserver *observer, - const std::string& bucket_instance) + const std::string& bucket_instance, + const DoutPrefixProvider *dpp) : RGWCoroutine(store->ctx()), store(store), http(http), observer(observer), bucket_instance(bucket_instance), - zone_id(store->svc()->zone->get_zone().id) { + zone_id(store->svc()->zone->get_zone().id), + dpp(dpp) { rgw_bucket_parse_bucket_key(cct, bucket_instance, &bucket, nullptr); source_policy = make_shared(); } - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int BucketTrimInstanceCR::operate() +int BucketTrimInstanceCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { - ldout(cct, 4) << "starting trim on bucket=" << bucket_instance << dendl; + ldpp_dout(dpp, 4) << "starting trim on bucket=" << bucket_instance << dendl; 
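The hunks in this file repeat the signature change that runs through the whole series: each coroutine's operate() now receives a const DoutPrefixProvider* and logs through ldpp_dout(dpp, level) instead of ldout(cct, level), so every line carries the caller's prefix. A minimal sketch of that shape, using invented ToyBase/ToyTrimCR names in place of the real RGWCoroutine classes and reusing the "starting trim on bucket=" message from the hunk above:

  #include <string>
  #include "common/dout.h"

  struct ToyBase {
    virtual ~ToyBase() = default;
    // old form was: virtual int operate() = 0;
    virtual int operate(const DoutPrefixProvider *dpp) = 0;
  };

  struct ToyTrimCR : ToyBase {
    std::string bucket_instance{"tenant/bucket:instance-id"};
    int operate(const DoutPrefixProvider *dpp) override {
      // old form was: ldout(cct, 4) << "starting trim on bucket=" << ... << dendl;
      ldpp_dout(dpp, 4) << "starting trim on bucket=" << bucket_instance << dendl;
      return 0;
    }
  };

The caller that drives the coroutine supplies the provider, so a class no longer has to stash a CephContext (or reach through sync_env->dpp) just to be able to log.
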
get_policy_params.zone = zone_id; get_policy_params.bucket = bucket; yield call(new RGWBucketGetSyncPolicyHandlerCR(store->svc()->rados->get_async_processor(), store, get_policy_params, - source_policy)); + source_policy, + dpp)); if (retcode < 0) { if (retcode != -ENOENT) { - ldout(cct, 0) << "ERROR: failed to fetch policy handler for bucket=" << bucket << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to fetch policy handler for bucket=" << bucket << dendl; } return set_cr_error(retcode); @@ -508,7 +514,7 @@ int BucketTrimInstanceCR::operate() auto ziter = zone_conn_map.find(zid); if (ziter == zone_conn_map.end()) { - ldout(cct, 0) << "WARNING: no connection to zone " << zid << ", can't trim bucket: " << bucket << dendl; + ldpp_dout(dpp, 0) << "WARNING: no connection to zone " << zid << ", can't trim bucket: " << bucket << dendl; return set_cr_error(-ECANCELED); } using StatusCR = RGWReadRESTResourceCR; @@ -536,21 +542,21 @@ int BucketTrimInstanceCR::operate() retcode = take_min_status(cct, peer_status.begin(), peer_status.end(), &min_markers); if (retcode < 0) { - ldout(cct, 4) << "failed to correlate bucket sync status from peers" << dendl; + ldpp_dout(dpp, 4) << "failed to correlate bucket sync status from peers" << dendl; return set_cr_error(retcode); } // trim shards with a ShardCollectCR - ldout(cct, 10) << "trimming bilogs for bucket=" << pbucket_info->bucket + ldpp_dout(dpp, 10) << "trimming bilogs for bucket=" << pbucket_info->bucket << " markers=" << min_markers << ", shards=" << min_markers.size() << dendl; set_status("trimming bilog shards"); - yield call(new BucketTrimShardCollectCR(store, *pbucket_info, min_markers)); + yield call(new BucketTrimShardCollectCR(dpp, store, *pbucket_info, min_markers)); // ENODATA just means there were no keys to trim if (retcode == -ENODATA) { retcode = 0; } if (retcode < 0) { - ldout(cct, 4) << "failed to trim bilog shards: " + ldpp_dout(dpp, 4) << "failed to trim bilog shards: " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); } @@ -568,14 +574,17 @@ class BucketTrimInstanceCollectCR : public RGWShardCollectCR { BucketTrimObserver *const observer; std::vector::const_iterator bucket; std::vector::const_iterator end; + const DoutPrefixProvider *dpp; public: BucketTrimInstanceCollectCR(rgw::sal::RGWRadosStore *store, RGWHTTPManager *http, BucketTrimObserver *observer, const std::vector& buckets, - int max_concurrent) + int max_concurrent, + const DoutPrefixProvider *dpp) : RGWShardCollectCR(store->ctx(), max_concurrent), store(store), http(http), observer(observer), - bucket(buckets.begin()), end(buckets.end()) + bucket(buckets.begin()), end(buckets.end()), + dpp(dpp) {} bool spawn_next() override; }; @@ -585,7 +594,7 @@ bool BucketTrimInstanceCollectCR::spawn_next() if (bucket == end) { return false; } - spawn(new BucketTrimInstanceCR(store, http, observer, *bucket), false); + spawn(new BucketTrimInstanceCR(store, http, observer, *bucket, dpp), false); ++bucket; return true; } @@ -630,7 +639,7 @@ class AsyncMetadataList : public RGWAsyncRadosRequest { const std::string start_marker; MetadataListCallback callback; - int _send_request() override; + int _send_request(const DoutPrefixProvider *dpp) override; public: AsyncMetadataList(CephContext *cct, RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWMetadataManager *mgr, @@ -641,7 +650,7 @@ class AsyncMetadataList : public RGWAsyncRadosRequest { {} }; -int AsyncMetadataList::_send_request() +int AsyncMetadataList::_send_request(const DoutPrefixProvider *dpp) { void* handle = 
nullptr; std::list keys; @@ -649,15 +658,15 @@ int AsyncMetadataList::_send_request() std::string marker; // start a listing at the given marker - int r = mgr->list_keys_init(section, start_marker, &handle); + int r = mgr->list_keys_init(dpp, section, start_marker, &handle); if (r == -EINVAL) { // restart with empty marker below } else if (r < 0) { - ldout(cct, 10) << "failed to init metadata listing: " + ldpp_dout(dpp, 10) << "failed to init metadata listing: " << cpp_strerror(r) << dendl; return r; } else { - ldout(cct, 20) << "starting metadata listing at " << start_marker << dendl; + ldpp_dout(dpp, 20) << "starting metadata listing at " << start_marker << dendl; // release the handle when scope exits auto g = make_scope_guard([=] { mgr->list_keys_complete(handle); }); @@ -666,7 +675,7 @@ int AsyncMetadataList::_send_request() // get the next key and marker r = mgr->list_keys_next(handle, 1, keys, &truncated); if (r < 0) { - ldout(cct, 10) << "failed to list metadata: " + ldpp_dout(dpp, 10) << "failed to list metadata: " << cpp_strerror(r) << dendl; return r; } @@ -690,13 +699,13 @@ int AsyncMetadataList::_send_request() // restart the listing from the beginning (empty marker) handle = nullptr; - r = mgr->list_keys_init(section, "", &handle); + r = mgr->list_keys_init(dpp, section, "", &handle); if (r < 0) { - ldout(cct, 10) << "failed to restart metadata listing: " + ldpp_dout(dpp, 10) << "failed to restart metadata listing: " << cpp_strerror(r) << dendl; return r; } - ldout(cct, 20) << "restarting metadata listing" << dendl; + ldpp_dout(dpp, 20) << "restarting metadata listing" << dendl; // release the handle when scope exits auto g = make_scope_guard([=] { mgr->list_keys_complete(handle); }); @@ -704,7 +713,7 @@ int AsyncMetadataList::_send_request() // get the next key and marker r = mgr->list_keys_next(handle, 1, keys, &truncated); if (r < 0) { - ldout(cct, 10) << "failed to list metadata: " + ldpp_dout(dpp, 10) << "failed to list metadata: " << cpp_strerror(r) << dendl; return r; } @@ -746,7 +755,7 @@ class MetadataListCR : public RGWSimpleCoroutine { request_cleanup(); } - int send_request() override { + int send_request(const DoutPrefixProvider *dpp) override { req = new AsyncMetadataList(cct, this, stack->create_completion_notifier(), mgr, section, start_marker, callback); async_rados->queue(req); @@ -776,29 +785,30 @@ class BucketTrimCR : public RGWCoroutine { BucketTrimStatus status; RGWObjVersionTracker objv; //< version tracker for trim status object std::string last_cold_marker; //< position for next trim marker + const DoutPrefixProvider *dpp; static const std::string section; //< metadata section for bucket instances public: BucketTrimCR(rgw::sal::RGWRadosStore *store, RGWHTTPManager *http, const BucketTrimConfig& config, BucketTrimObserver *observer, - const rgw_raw_obj& obj) + const rgw_raw_obj& obj, const DoutPrefixProvider *dpp) : RGWCoroutine(store->ctx()), store(store), http(http), config(config), - observer(observer), obj(obj), counter(config.counter_size) + observer(observer), obj(obj), counter(config.counter_size), dpp(dpp) {} - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; const std::string BucketTrimCR::section{"bucket.instance"}; -int BucketTrimCR::operate() +int BucketTrimCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { start_time = ceph::mono_clock::now(); if (config.buckets_per_interval) { // query watch/notify for hot buckets - ldout(cct, 10) << "fetching active bucket counters" << dendl; + ldpp_dout(dpp, 
10) << "fetching active bucket counters" << dendl; set_status("fetching active bucket counters"); yield { // request the top bucket counters from each peer gateway @@ -811,7 +821,7 @@ int BucketTrimCR::operate() ¬ify_replies)); } if (retcode < 0) { - ldout(cct, 10) << "failed to fetch peer bucket counters" << dendl; + ldpp_dout(dpp, 10) << "failed to fetch peer bucket counters" << dendl; return set_cr_error(retcode); } @@ -835,17 +845,17 @@ int BucketTrimCR::operate() // read BucketTrimStatus for marker position set_status("reading trim status"); using ReadStatus = RGWSimpleRadosReadCR; - yield call(new ReadStatus(store->svc()->rados->get_async_processor(), store->svc()->sysobj, obj, + yield call(new ReadStatus(dpp, store->svc()->rados->get_async_processor(), store->svc()->sysobj, obj, &status, true, &objv)); if (retcode < 0) { - ldout(cct, 10) << "failed to read bilog trim status: " + ldpp_dout(dpp, 10) << "failed to read bilog trim status: " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); } if (status.marker == "MAX") { status.marker.clear(); // restart at the beginning } - ldout(cct, 10) << "listing cold buckets from marker=" + ldpp_dout(dpp, 10) << "listing cold buckets from marker=" << status.marker << dendl; set_status("listing cold buckets for trim"); @@ -883,21 +893,21 @@ int BucketTrimCR::operate() // trim bucket instances with limited concurrency set_status("trimming buckets"); - ldout(cct, 4) << "collected " << buckets.size() << " buckets for trim" << dendl; + ldpp_dout(dpp, 4) << "collected " << buckets.size() << " buckets for trim" << dendl; yield call(new BucketTrimInstanceCollectCR(store, http, observer, buckets, - config.concurrent_buckets)); + config.concurrent_buckets, dpp)); // ignore errors from individual buckets // write updated trim status if (!last_cold_marker.empty() && status.marker != last_cold_marker) { set_status("writing updated trim status"); status.marker = std::move(last_cold_marker); - ldout(cct, 20) << "writing bucket trim marker=" << status.marker << dendl; + ldpp_dout(dpp, 20) << "writing bucket trim marker=" << status.marker << dendl; using WriteStatus = RGWSimpleRadosWriteCR; - yield call(new WriteStatus(store->svc()->rados->get_async_processor(), store->svc()->sysobj, obj, + yield call(new WriteStatus(dpp, store->svc()->rados->get_async_processor(), store->svc()->sysobj, obj, status, &objv)); if (retcode < 0) { - ldout(cct, 4) << "failed to write updated trim status: " + ldpp_dout(dpp, 4) << "failed to write updated trim status: " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); } @@ -919,7 +929,7 @@ int BucketTrimCR::operate() return set_cr_error(retcode); } - ldout(cct, 4) << "bucket index log processing completed in " + ldpp_dout(dpp, 4) << "bucket index log processing completed in " << ceph::mono_clock::now() - start_time << dendl; return set_cr_done(); } @@ -934,20 +944,22 @@ class BucketTrimPollCR : public RGWCoroutine { const rgw_raw_obj& obj; const std::string name{"trim"}; //< lock name const std::string cookie; + const DoutPrefixProvider *dpp; public: BucketTrimPollCR(rgw::sal::RGWRadosStore *store, RGWHTTPManager *http, const BucketTrimConfig& config, - BucketTrimObserver *observer, const rgw_raw_obj& obj) + BucketTrimObserver *observer, const rgw_raw_obj& obj, + const DoutPrefixProvider *dpp) : RGWCoroutine(store->ctx()), store(store), http(http), config(config), observer(observer), obj(obj), - cookie(RGWSimpleRadosLockCR::gen_random_cookie(cct)) - {} + cookie(RGWSimpleRadosLockCR::gen_random_cookie(cct)), 
+ dpp(dpp) {} - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int BucketTrimPollCR::operate() +int BucketTrimPollCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { for (;;) { @@ -965,7 +977,7 @@ int BucketTrimPollCR::operate() } set_status("trimming"); - yield call(new BucketTrimCR(store, http, config, observer, obj)); + yield call(new BucketTrimCR(store, http, config, observer, obj, dpp)); if (retcode < 0) { // on errors, unlock so other gateways can try set_status("unlocking"); @@ -1116,7 +1128,7 @@ BucketTrimManager::~BucketTrimManager() = default; int BucketTrimManager::init() { - return impl->watcher.start(); + return impl->watcher.start(this); } void BucketTrimManager::on_bucket_changed(const std::string_view& bucket) @@ -1132,14 +1144,29 @@ void BucketTrimManager::on_bucket_changed(const std::string_view& bucket) RGWCoroutine* BucketTrimManager::create_bucket_trim_cr(RGWHTTPManager *http) { return new BucketTrimPollCR(impl->store, http, impl->config, - impl.get(), impl->status_obj); + impl.get(), impl->status_obj, this); } RGWCoroutine* BucketTrimManager::create_admin_bucket_trim_cr(RGWHTTPManager *http) { // return the trim coroutine without any polling return new BucketTrimCR(impl->store, http, impl->config, - impl.get(), impl->status_obj); + impl.get(), impl->status_obj, this); +} + +CephContext* BucketTrimManager::get_cct() const +{ + return impl->store->ctx(); +} + +unsigned BucketTrimManager::get_subsys() const +{ + return dout_subsys; +} + +std::ostream& BucketTrimManager::gen_prefix(std::ostream& out) const +{ + return out << "rgw bucket trim manager: "; } } // namespace rgw diff --git a/src/rgw/rgw_trim_bilog.h b/src/rgw/rgw_trim_bilog.h index 5bc5c3cfd9b89..a2d5f4c980c5a 100644 --- a/src/rgw/rgw_trim_bilog.h +++ b/src/rgw/rgw_trim_bilog.h @@ -23,6 +23,7 @@ #include "include/common_fwd.h" #include "include/encoding.h" #include "common/ceph_time.h" +#include "common/dout.h" class RGWCoroutine; class RGWHTTPManager; @@ -69,7 +70,7 @@ void configure_bucket_trim(CephContext *cct, BucketTrimConfig& config); /// input: the frequency of entries read from the data changes log, and a global /// listing of the bucket.instance metadata. 
This allows us to trim active /// buckets quickly, while also ensuring that all buckets will eventually trim -class BucketTrimManager : public BucketChangeObserver { +class BucketTrimManager : public BucketChangeObserver, public DoutPrefixProvider { class Impl; std::unique_ptr impl; public: @@ -86,6 +87,10 @@ class BucketTrimManager : public BucketChangeObserver { /// create a coroutine to trim buckets directly via radosgw-admin RGWCoroutine* create_admin_bucket_trim_cr(RGWHTTPManager *http); + + CephContext *get_cct() const override; + unsigned get_subsys() const; + std::ostream& gen_prefix(std::ostream& out) const; }; /// provides persistent storage for the trim manager's current position in the diff --git a/src/rgw/rgw_trim_datalog.cc b/src/rgw/rgw_trim_datalog.cc index 85c19a7c4437b..1cd9bb1942ec3 100644 --- a/src/rgw/rgw_trim_datalog.cc +++ b/src/rgw/rgw_trim_datalog.cc @@ -26,6 +26,7 @@ namespace { class DatalogTrimImplCR : public RGWSimpleCoroutine { + const DoutPrefixProvider *dpp; rgw::sal::RGWRadosStore *store; boost::intrusive_ptr cn; int shard; @@ -33,23 +34,23 @@ class DatalogTrimImplCR : public RGWSimpleCoroutine { std::string* last_trim_marker; public: - DatalogTrimImplCR(rgw::sal::RGWRadosStore* store, int shard, + DatalogTrimImplCR(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore* store, int shard, const std::string& marker, std::string* last_trim_marker) - : RGWSimpleCoroutine(store->ctx()), store(store), shard(shard), + : RGWSimpleCoroutine(store->ctx()), dpp(dpp), store(store), shard(shard), marker(marker), last_trim_marker(last_trim_marker) { set_description() << "Datalog trim shard=" << shard << " marker=" << marker; } - int send_request() override { + int send_request(const DoutPrefixProvider *dpp) override { set_status() << "sending request"; cn = stack->create_completion_notifier(); - return store->svc()->datalog_rados->trim_entries(shard, marker, + return store->svc()->datalog_rados->trim_entries(dpp, shard, marker, cn->completion()); } int request_complete() override { int r = cn->completion()->get_return_value(); - ldout(cct, 20) << __PRETTY_FUNCTION__ << "(): trim of shard=" << shard + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << "(): trim of shard=" << shard << " marker=" << marker << " returned r=" << r << dendl; set_status() << "request complete; ret=" << r; @@ -95,6 +96,7 @@ void take_min_markers(IterIn first, IterIn last, IterOut dest) class DataLogTrimCR : public RGWCoroutine { using TrimCR = DatalogTrimImplCR; + const DoutPrefixProvider *dpp; rgw::sal::RGWRadosStore *store; RGWHTTPManager *http; const int num_shards; @@ -105,9 +107,9 @@ class DataLogTrimCR : public RGWCoroutine { int ret{0}; public: - DataLogTrimCR(rgw::sal::RGWRadosStore *store, RGWHTTPManager *http, + DataLogTrimCR(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, RGWHTTPManager *http, int num_shards, std::vector& last_trim) - : RGWCoroutine(store->ctx()), store(store), http(http), + : RGWCoroutine(store->ctx()), dpp(dpp), store(store), http(http), num_shards(num_shards), zone_id(store->svc()->zone->get_zone().id), peer_status(store->svc()->zone->get_zone_data_notify_to_map().size()), @@ -116,13 +118,13 @@ class DataLogTrimCR : public RGWCoroutine { last_trim(last_trim) {} - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int DataLogTrimCR::operate() +int DataLogTrimCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { - ldout(cct, 10) << "fetching sync status for zone " << zone_id << dendl; + ldpp_dout(dpp, 10) << "fetching 
sync status for zone " << zone_id << dendl; set_status("fetching sync status"); yield { // query data sync status from each sync peer @@ -135,7 +137,7 @@ int DataLogTrimCR::operate() auto p = peer_status.begin(); for (auto& c : store->svc()->zone->get_zone_data_notify_to_map()) { - ldout(cct, 20) << "query sync status from " << c.first << dendl; + ldpp_dout(dpp, 20) << "query sync status from " << c.first << dendl; using StatusCR = RGWReadRESTResourceCR; spawn(new StatusCR(cct, c.second, http, "/admin/log/", params, &*p), false); @@ -152,11 +154,11 @@ int DataLogTrimCR::operate() drain_all(); if (ret < 0) { - ldout(cct, 4) << "failed to fetch sync status from all peers" << dendl; + ldpp_dout(dpp, 4) << "failed to fetch sync status from all peers" << dendl; return set_cr_error(ret); } - ldout(cct, 10) << "trimming log shards" << dendl; + ldpp_dout(dpp, 10) << "trimming log shards" << dendl; set_status("trimming log shards"); yield { // determine the minimum marker for each shard @@ -168,10 +170,10 @@ int DataLogTrimCR::operate() if (m <= last_trim[i]) { continue; } - ldout(cct, 10) << "trimming log shard " << i + ldpp_dout(dpp, 10) << "trimming log shard " << i << " at marker=" << m << " last_trim=" << last_trim[i] << dendl; - spawn(new TrimCR(store, i, m, &last_trim[i]), + spawn(new TrimCR(dpp, store, i, m, &last_trim[i]), true); } } @@ -180,15 +182,16 @@ int DataLogTrimCR::operate() return 0; } -RGWCoroutine* create_admin_data_log_trim_cr(rgw::sal::RGWRadosStore *store, +RGWCoroutine* create_admin_data_log_trim_cr(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, RGWHTTPManager *http, int num_shards, std::vector& markers) { - return new DataLogTrimCR(store, http, num_shards, markers); + return new DataLogTrimCR(dpp, store, http, num_shards, markers); } class DataLogTrimPollCR : public RGWCoroutine { + const DoutPrefixProvider *dpp; rgw::sal::RGWRadosStore *store; RGWHTTPManager *http; const int num_shards; @@ -198,19 +201,19 @@ class DataLogTrimPollCR : public RGWCoroutine { std::vector last_trim; //< last trimmed marker per shard public: - DataLogTrimPollCR(rgw::sal::RGWRadosStore *store, RGWHTTPManager *http, + DataLogTrimPollCR(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, RGWHTTPManager *http, int num_shards, utime_t interval) - : RGWCoroutine(store->ctx()), store(store), http(http), + : RGWCoroutine(store->ctx()), dpp(dpp), store(store), http(http), num_shards(num_shards), interval(interval), lock_oid(store->svc()->datalog_rados->get_oid(0, 0)), lock_cookie(RGWSimpleRadosLockCR::gen_random_cookie(cct)), last_trim(num_shards) {} - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int DataLogTrimPollCR::operate() +int DataLogTrimPollCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { for (;;) { @@ -226,13 +229,13 @@ int DataLogTrimPollCR::operate() interval.sec())); if (retcode < 0) { // if the lock is already held, go back to sleep and try again later - ldout(cct, 4) << "failed to lock " << lock_oid << ", trying again in " + ldpp_dout(dpp, 4) << "failed to lock " << lock_oid << ", trying again in " << interval.sec() << "s" << dendl; continue; } set_status("trimming"); - yield call(new DataLogTrimCR(store, http, num_shards, last_trim)); + yield call(new DataLogTrimCR(dpp, store, http, num_shards, last_trim)); // note that the lock is not released. 
this is intentional, as it avoids // duplicating this work in other gateways @@ -241,9 +244,9 @@ int DataLogTrimPollCR::operate() return 0; } -RGWCoroutine* create_data_log_trim_cr(rgw::sal::RGWRadosStore *store, +RGWCoroutine* create_data_log_trim_cr(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, RGWHTTPManager *http, int num_shards, utime_t interval) { - return new DataLogTrimPollCR(store, http, num_shards, interval); + return new DataLogTrimPollCR(dpp, store, http, num_shards, interval); } diff --git a/src/rgw/rgw_trim_datalog.h b/src/rgw/rgw_trim_datalog.h index ffdd2b38450c9..0b4de3578d1e3 100644 --- a/src/rgw/rgw_trim_datalog.h +++ b/src/rgw/rgw_trim_datalog.h @@ -6,6 +6,8 @@ #include #include +#include "common/dout.h" + class RGWCoroutine; class RGWRados; class RGWHTTPManager; @@ -15,12 +17,12 @@ namespace rgw { namespace sal { } } // DataLogTrimCR factory function -extern RGWCoroutine* create_data_log_trim_cr(rgw::sal::RGWRadosStore *store, +extern RGWCoroutine* create_data_log_trim_cr(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, RGWHTTPManager *http, int num_shards, utime_t interval); // factory function for datalog trim via radosgw-admin -RGWCoroutine* create_admin_data_log_trim_cr(rgw::sal::RGWRadosStore *store, +RGWCoroutine* create_admin_data_log_trim_cr(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, RGWHTTPManager *http, int num_shards, std::vector& markers); diff --git a/src/rgw/rgw_trim_mdlog.cc b/src/rgw/rgw_trim_mdlog.cc index d1df9bcf751de..f940f5c6e18d3 100644 --- a/src/rgw/rgw_trim_mdlog.cc +++ b/src/rgw/rgw_trim_mdlog.cc @@ -55,6 +55,7 @@ class PurgePeriodLogsCR : public RGWCoroutine { RGWSI_Zone *zone; RGWSI_MDLog *mdlog; } svc; + const DoutPrefixProvider *dpp; rgw::sal::RGWRadosStore *const store; RGWMetadataManager *const metadata; RGWObjVersionTracker objv; @@ -63,31 +64,31 @@ class PurgePeriodLogsCR : public RGWCoroutine { epoch_t *last_trim_epoch; //< update last trim on success public: - PurgePeriodLogsCR(rgw::sal::RGWRadosStore *store, epoch_t realm_epoch, epoch_t *last_trim) - : RGWCoroutine(store->ctx()), store(store), metadata(store->ctl()->meta.mgr), + PurgePeriodLogsCR(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, epoch_t realm_epoch, epoch_t *last_trim) + : RGWCoroutine(store->ctx()), dpp(dpp), store(store), metadata(store->ctl()->meta.mgr), realm_epoch(realm_epoch), last_trim_epoch(last_trim) { svc.zone = store->svc()->zone; svc.mdlog = store->svc()->mdlog; } - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int PurgePeriodLogsCR::operate() +int PurgePeriodLogsCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { // read our current oldest log period - yield call(svc.mdlog->read_oldest_log_period_cr(&cursor, &objv)); + yield call(svc.mdlog->read_oldest_log_period_cr(dpp, &cursor, &objv)); if (retcode < 0) { return set_cr_error(retcode); } ceph_assert(cursor); - ldout(cct, 20) << "oldest log realm_epoch=" << cursor.get_epoch() + ldpp_dout(dpp, 20) << "oldest log realm_epoch=" << cursor.get_epoch() << " period=" << cursor.get_period().get_id() << dendl; // trim -up to- the given realm_epoch while (cursor.get_epoch() < realm_epoch) { - ldout(cct, 4) << "purging log shards for realm_epoch=" << cursor.get_epoch() + ldpp_dout(dpp, 4) << "purging log shards for realm_epoch=" << cursor.get_epoch() << " period=" << cursor.get_period().get_id() << dendl; yield { const auto mdlog = svc.mdlog->get_log(cursor.get_period().get_id()); @@ -96,23 +97,23 @@ int 
PurgePeriodLogsCR::operate() call(new PurgeLogShardsCR(store, mdlog, pool, num_shards)); } if (retcode < 0) { - ldout(cct, 1) << "failed to remove log shards: " + ldpp_dout(dpp, 1) << "failed to remove log shards: " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); } - ldout(cct, 10) << "removed log shards for realm_epoch=" << cursor.get_epoch() + ldpp_dout(dpp, 10) << "removed log shards for realm_epoch=" << cursor.get_epoch() << " period=" << cursor.get_period().get_id() << dendl; // update our mdlog history - yield call(svc.mdlog->trim_log_period_cr(cursor, &objv)); + yield call(svc.mdlog->trim_log_period_cr(dpp, cursor, &objv)); if (retcode == -ENOENT) { // must have raced to update mdlog history. return success and allow the // winner to continue purging - ldout(cct, 10) << "already removed log shards for realm_epoch=" << cursor.get_epoch() + ldpp_dout(dpp, 10) << "already removed log shards for realm_epoch=" << cursor.get_epoch() << " period=" << cursor.get_period().get_id() << dendl; return set_cr_done(); } else if (retcode < 0) { - ldout(cct, 1) << "failed to remove log shards for realm_epoch=" + ldpp_dout(dpp, 1) << "failed to remove log shards for realm_epoch=" << cursor.get_epoch() << " period=" << cursor.get_period().get_id() << " with: " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); @@ -286,7 +287,7 @@ bool MetaMasterTrimShardCollectCR::spawn_next() if (stable <= last_trim) { // already trimmed - ldout(cct, 20) << "skipping log shard " << shard_id + ldpp_dout(env.dpp, 20) << "skipping log shard " << shard_id << " at marker=" << stable << " last_trim=" << last_trim << " realm_epoch=" << sync_status.sync_info.realm_epoch << dendl; @@ -296,11 +297,11 @@ bool MetaMasterTrimShardCollectCR::spawn_next() mdlog->get_shard_oid(shard_id, oid); - ldout(cct, 10) << "trimming log shard " << shard_id + ldpp_dout(env.dpp, 10) << "trimming log shard " << shard_id << " at marker=" << stable << " last_trim=" << last_trim << " realm_epoch=" << sync_status.sync_info.realm_epoch << dendl; - spawn(new RGWSyncLogTrimCR(env.store, oid, stable, &last_trim), false); + spawn(new RGWSyncLogTrimCR(env.dpp, env.store, oid, stable, &last_trim), false); shard_id++; return true; } @@ -351,25 +352,25 @@ class MetaMasterTrimCR : public RGWCoroutine { : RGWCoroutine(env.store->ctx()), env(env) {} - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int MetaMasterTrimCR::operate() +int MetaMasterTrimCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { // TODO: detect this and fail before we spawn the trim thread? 
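These mdlog-trim coroutines receive their provider from whoever calls operate(); the rgw_trim_bilog.cc hunks above show where such a provider ultimately comes from: BucketTrimManager derives from DoutPrefixProvider, implements get_cct(), get_subsys() and gen_prefix(), and can then pass `this` into watcher.start() and the trim coroutines it creates. A standalone sketch of that three-method interface, with an invented ToyTrimPrefix class standing in for BucketTrimManager:

  #include <ostream>
  #include "common/dout.h"

  #define dout_subsys ceph_subsys_rgw

  class ToyTrimPrefix : public DoutPrefixProvider {
    CephContext *cct;
   public:
    explicit ToyTrimPrefix(CephContext *c) : cct(c) {}
    CephContext *get_cct() const override { return cct; }
    unsigned get_subsys() const override { return dout_subsys; }
    std::ostream& gen_prefix(std::ostream& out) const override {
      return out << "toy trim prefix: ";  // prepended to every ldpp_dout line
    }
  };

  // usage sketch:
  //   ToyTrimPrefix prefix(cct);
  //   ldpp_dout(&prefix, 10) << "Watching obj" << dendl;
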
if (env.connections.empty()) { - ldout(cct, 4) << "no peers, exiting" << dendl; + ldpp_dout(dpp, 4) << "no peers, exiting" << dendl; return set_cr_done(); } - ldout(cct, 10) << "fetching sync status for zone " << env.zone << dendl; + ldpp_dout(dpp, 10) << "fetching sync status for zone " << env.zone << dendl; // query mdlog sync status from peers yield call(new MetaMasterStatusCollectCR(env)); // must get a successful reply from all peers to consider trimming if (ret < 0) { - ldout(cct, 4) << "failed to fetch sync status from all peers" << dendl; + ldpp_dout(dpp, 4) << "failed to fetch sync status from all peers" << dendl; return set_cr_error(ret); } @@ -377,19 +378,19 @@ int MetaMasterTrimCR::operate() ret = take_min_status(env.store->ctx(), env.peer_status.begin(), env.peer_status.end(), &min_status); if (ret < 0) { - ldout(cct, 4) << "failed to calculate min sync status from peers" << dendl; + ldpp_dout(dpp, 4) << "failed to calculate min sync status from peers" << dendl; return set_cr_error(ret); } yield { auto store = env.store; auto epoch = min_status.sync_info.realm_epoch; - ldout(cct, 4) << "realm epoch min=" << epoch + ldpp_dout(dpp, 4) << "realm epoch min=" << epoch << " current=" << env.current.get_epoch()<< dendl; if (epoch > env.last_trim_epoch + 1) { // delete any prior mdlog periods - spawn(new PurgePeriodLogsCR(store, epoch, &env.last_trim_epoch), true); + spawn(new PurgePeriodLogsCR(dpp, store, epoch, &env.last_trim_epoch), true); } else { - ldout(cct, 10) << "mdlogs already purged up to realm_epoch " + ldpp_dout(dpp, 10) << "mdlogs already purged up to realm_epoch " << env.last_trim_epoch << dendl; } @@ -425,17 +426,17 @@ class MetaPeerTrimShardCR : public RGWCoroutine { period_id(period_id), shard_id(shard_id), last_trim(last_trim) {} - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int MetaPeerTrimShardCR::operate() +int MetaPeerTrimShardCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { // query master's first mdlog entry for this shard yield call(create_list_remote_mdlog_shard_cr(&env, period_id, shard_id, "", 1, &result)); if (retcode < 0) { - ldpp_dout(env.dpp, 5) << "failed to read first entry from master's mdlog shard " + ldpp_dout(dpp, 5) << "failed to read first entry from master's mdlog shard " << shard_id << " for period " << period_id << ": " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); @@ -446,12 +447,12 @@ int MetaPeerTrimShardCR::operate() // this empty reply. 
query the mdlog shard info to read its max timestamp, // then retry the listing to make sure it's still empty before trimming to // that - ldpp_dout(env.dpp, 10) << "empty master mdlog shard " << shard_id + ldpp_dout(dpp, 10) << "empty master mdlog shard " << shard_id << ", reading last timestamp from shard info" << dendl; // read the mdlog shard info for the last timestamp yield call(create_read_remote_mdlog_shard_info_cr(&env, period_id, shard_id, &info)); if (retcode < 0) { - ldpp_dout(env.dpp, 5) << "failed to read info from master's mdlog shard " + ldpp_dout(dpp, 5) << "failed to read info from master's mdlog shard " << shard_id << " for period " << period_id << ": " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); @@ -459,13 +460,13 @@ int MetaPeerTrimShardCR::operate() if (ceph::real_clock::is_zero(info.last_update)) { return set_cr_done(); // nothing to trim } - ldpp_dout(env.dpp, 10) << "got mdlog shard info with last update=" + ldpp_dout(dpp, 10) << "got mdlog shard info with last update=" << info.last_update << dendl; // re-read the master's first mdlog entry to make sure it hasn't changed yield call(create_list_remote_mdlog_shard_cr(&env, period_id, shard_id, "", 1, &result)); if (retcode < 0) { - ldpp_dout(env.dpp, 5) << "failed to read first entry from master's mdlog shard " + ldpp_dout(dpp, 5) << "failed to read first entry from master's mdlog shard " << shard_id << " for period " << period_id << ": " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); @@ -486,22 +487,22 @@ int MetaPeerTrimShardCR::operate() } if (stable <= *last_trim) { - ldpp_dout(env.dpp, 10) << "skipping log shard " << shard_id + ldpp_dout(dpp, 10) << "skipping log shard " << shard_id << " at timestamp=" << stable << " last_trim=" << *last_trim << dendl; return set_cr_done(); } - ldpp_dout(env.dpp, 10) << "trimming log shard " << shard_id + ldpp_dout(dpp, 10) << "trimming log shard " << shard_id << " at timestamp=" << stable << " last_trim=" << *last_trim << dendl; yield { std::string oid; mdlog->get_shard_oid(shard_id, oid); - call(new RGWRadosTimelogTrimCR(env.store, oid, real_time{}, stable, "", "")); + call(new RGWRadosTimelogTrimCR(dpp, env.store, oid, real_time{}, stable, "", "")); } if (retcode < 0 && retcode != -ENODATA) { - ldpp_dout(env.dpp, 1) << "failed to trim mdlog shard " << shard_id + ldpp_dout(dpp, 1) << "failed to trim mdlog shard " << shard_id << ": " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); } @@ -552,13 +553,13 @@ class MetaPeerTrimCR : public RGWCoroutine { public: explicit MetaPeerTrimCR(PeerTrimEnv& env) : RGWCoroutine(env.store->ctx()), env(env) {} - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int MetaPeerTrimCR::operate() +int MetaPeerTrimCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { - ldout(cct, 10) << "fetching master mdlog info" << dendl; + ldpp_dout(dpp, 10) << "fetching master mdlog info" << dendl; yield { // query mdlog_info from master for oldest_log_period rgw_http_param_pair params[] = { @@ -571,7 +572,7 @@ int MetaPeerTrimCR::operate() "/admin/log/", params, &mdlog_info)); } if (retcode < 0) { - ldout(cct, 4) << "failed to read mdlog info from master" << dendl; + ldpp_dout(dpp, 4) << "failed to read mdlog info from master" << dendl; return set_cr_error(retcode); } // use master's shard count instead @@ -579,10 +580,10 @@ int MetaPeerTrimCR::operate() if (mdlog_info.realm_epoch > env.last_trim_epoch + 1) { // delete any prior mdlog periods - yield call(new 
PurgePeriodLogsCR(env.store, mdlog_info.realm_epoch, + yield call(new PurgePeriodLogsCR(dpp, env.store, mdlog_info.realm_epoch, &env.last_trim_epoch)); } else { - ldout(cct, 10) << "mdlogs already purged through realm_epoch " + ldpp_dout(dpp, 10) << "mdlogs already purged through realm_epoch " << env.last_trim_epoch << dendl; } @@ -617,10 +618,10 @@ class MetaTrimPollCR : public RGWCoroutine { cookie(RGWSimpleRadosLockCR::gen_random_cookie(cct)) {} - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int MetaTrimPollCR::operate() +int MetaTrimPollCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { for (;;) { diff --git a/src/rgw/rgw_usage.cc b/src/rgw/rgw_usage.cc index 34884e0b8234d..e1a1d1d6fd6d4 100644 --- a/src/rgw/rgw_usage.cc +++ b/src/rgw/rgw_usage.cc @@ -29,7 +29,7 @@ static void dump_usage_categories_info(Formatter *formatter, const rgw_usage_log formatter->close_section(); // categories } -int RGWUsage::show(RGWRados *store, const rgw_user& uid, const string& bucket_name, uint64_t start_epoch, +int RGWUsage::show(const DoutPrefixProvider *dpp, RGWRados *store, const rgw_user& uid, const string& bucket_name, uint64_t start_epoch, uint64_t end_epoch, bool show_log_entries, bool show_log_sum, map *categories, RGWFormatterFlusher& flusher) { @@ -52,7 +52,7 @@ int RGWUsage::show(RGWRados *store, const rgw_user& uid, const string& bucket_na bool user_section_open = false; map summary_map; while (is_truncated) { - int ret = store->read_usage(uid, bucket_name, start_epoch, end_epoch, max_entries, + int ret = store->read_usage(dpp, uid, bucket_name, start_epoch, end_epoch, max_entries, &is_truncated, usage_iter, usage); if (ret == -ENOENT) { @@ -139,13 +139,13 @@ int RGWUsage::show(RGWRados *store, const rgw_user& uid, const string& bucket_na return 0; } -int RGWUsage::trim(RGWRados *store, const rgw_user& uid, const string& bucket_name, uint64_t start_epoch, +int RGWUsage::trim(const DoutPrefixProvider *dpp, RGWRados *store, const rgw_user& uid, const string& bucket_name, uint64_t start_epoch, uint64_t end_epoch) { - return store->trim_usage(uid, bucket_name, start_epoch, end_epoch); + return store->trim_usage(dpp, uid, bucket_name, start_epoch, end_epoch); } -int RGWUsage::clear(RGWRados *store) +int RGWUsage::clear(const DoutPrefixProvider *dpp, RGWRados *store) { - return store->clear_usage(); + return store->clear_usage(dpp); } diff --git a/src/rgw/rgw_usage.h b/src/rgw/rgw_usage.h index bab4242f04c0e..8fdef7cc47fb7 100644 --- a/src/rgw/rgw_usage.h +++ b/src/rgw/rgw_usage.h @@ -8,6 +8,7 @@ #include #include "common/Formatter.h" +#include "common/dout.h" #include "rgw_formats.h" #include "rgw_user.h" @@ -17,14 +18,14 @@ class RGWRados; class RGWUsage { public: - static int show(RGWRados *store, const rgw_user& uid, const string& bucket_name, uint64_t start_epoch, + static int show(const DoutPrefixProvider *dpp, RGWRados *store, const rgw_user& uid, const string& bucket_name, uint64_t start_epoch, uint64_t end_epoch, bool show_log_entries, bool show_log_sum, std::map *categories, RGWFormatterFlusher& flusher); - static int trim(RGWRados *store, const rgw_user& uid, const string& bucket_name, uint64_t start_epoch, + static int trim(const DoutPrefixProvider *dpp, RGWRados *store, const rgw_user& uid, const string& bucket_name, uint64_t start_epoch, uint64_t end_epoch); - static int clear(RGWRados *store); + static int clear(const DoutPrefixProvider *dpp, RGWRados *store); }; diff --git a/src/rgw/rgw_user.cc b/src/rgw/rgw_user.cc index 
b526c77cc950e..41228237e78c4 100644 --- a/src/rgw/rgw_user.cc +++ b/src/rgw/rgw_user.cc @@ -48,7 +48,7 @@ void rgw_get_anon_user(RGWUserInfo& info) info.access_keys.clear(); } -int rgw_user_sync_all_stats(rgw::sal::RGWRadosStore *store, +int rgw_user_sync_all_stats(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, const rgw_user& user_id, optional_yield y) { rgw::sal::RGWBucketList user_buckets; @@ -60,9 +60,9 @@ int rgw_user_sync_all_stats(rgw::sal::RGWRadosStore *store, int ret; do { - ret = user.list_buckets(marker, string(), max_entries, false, user_buckets, y); + ret = user.list_buckets(dpp, marker, string(), max_entries, false, user_buckets, y); if (ret < 0) { - ldout(cct, 0) << "failed to read user buckets: ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "failed to read user buckets: ret=" << ret << dendl; return ret; } auto& buckets = user_buckets.get_buckets(); @@ -71,24 +71,24 @@ int rgw_user_sync_all_stats(rgw::sal::RGWRadosStore *store, auto& bucket = i->second; - ret = bucket->get_bucket_info(y); + ret = bucket->get_bucket_info(dpp, y); if (ret < 0) { - ldout(cct, 0) << "ERROR: could not read bucket info: bucket=" << bucket << " ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: could not read bucket info: bucket=" << bucket << " ret=" << ret << dendl; continue; } - ret = bucket->sync_user_stats(y); + ret = bucket->sync_user_stats(dpp, y); if (ret < 0) { ldout(cct, 0) << "ERROR: could not sync bucket stats: ret=" << ret << dendl; return ret; } - ret = bucket->check_bucket_shards(); + ret = bucket->check_bucket_shards(dpp); if (ret < 0) { - ldout(cct, 0) << "ERROR in check_bucket_shards: " << cpp_strerror(-ret)<< dendl; + ldpp_dout(dpp, 0) << "ERROR in check_bucket_shards: " << cpp_strerror(-ret)<< dendl; } } } while (user_buckets.is_truncated()); - ret = store->ctl()->user->complete_flush_stats(user.get_user(), y); + ret = store->ctl()->user->complete_flush_stats(dpp, user.get_user(), y); if (ret < 0) { cerr << "ERROR: failed to complete syncing user stats: ret=" << ret << std::endl; return ret; @@ -97,7 +97,8 @@ int rgw_user_sync_all_stats(rgw::sal::RGWRadosStore *store, return 0; } -int rgw_user_get_all_buckets_stats(rgw::sal::RGWRadosStore *store, +int rgw_user_get_all_buckets_stats(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, const rgw_user& user_id, map& buckets_usage_map, optional_yield y) @@ -110,10 +111,10 @@ int rgw_user_get_all_buckets_stats(rgw::sal::RGWRadosStore *store, do { rgw::sal::RGWBucketList buckets; - ret = rgw_read_user_buckets(store, user_id, buckets, marker, + ret = rgw_read_user_buckets(dpp, store, user_id, buckets, marker, string(), max_entries, false, y); if (ret < 0) { - ldout(cct, 0) << "failed to read user buckets: ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "failed to read user buckets: ret=" << ret << dendl; return ret; } auto& m = buckets.get_buckets(); @@ -121,9 +122,9 @@ int rgw_user_get_all_buckets_stats(rgw::sal::RGWRadosStore *store, marker = i.first; auto& bucket_ent = i.second; - ret = bucket_ent->read_bucket_stats(y); + ret = bucket_ent->read_bucket_stats(dpp, y); if (ret < 0) { - ldout(cct, 0) << "ERROR: could not get bucket stats: ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: could not get bucket stats: ret=" << ret << dendl; return ret; } cls_user_bucket_entry entry; @@ -140,7 +141,8 @@ int rgw_user_get_all_buckets_stats(rgw::sal::RGWRadosStore *store, * Save the given user information to storage. * Returns: 0 on success, -ERR# on failure. 
*/ -int rgw_store_user_info(RGWUserCtl *user_ctl, +int rgw_store_user_info(const DoutPrefixProvider *dpp, + RGWUserCtl *user_ctl, RGWUserInfo& info, RGWUserInfo *old_info, RGWObjVersionTracker *objv_tracker, @@ -149,7 +151,7 @@ int rgw_store_user_info(RGWUserCtl *user_ctl, optional_yield y, map *pattrs) { - return user_ctl->store_info(info, y, + return user_ctl->store_info(dpp, info, y, RGWUserCtl::PutParams() .set_old_info(old_info) .set_objv_tracker(objv_tracker) @@ -162,7 +164,8 @@ int rgw_store_user_info(RGWUserCtl *user_ctl, * Given a uid, finds the user info associated with it. * returns: 0 on success, -ERR# on failure (including nonexistence) */ -int rgw_get_user_info_by_uid(RGWUserCtl *user_ctl, +int rgw_get_user_info_by_uid(const DoutPrefixProvider *dpp, + RGWUserCtl *user_ctl, const rgw_user& uid, RGWUserInfo& info, optional_yield y, @@ -171,7 +174,7 @@ int rgw_get_user_info_by_uid(RGWUserCtl *user_ctl, rgw_cache_entry_info * const cache_info, map * const pattrs) { - return user_ctl->get_info_by_uid(uid, &info, y, + return user_ctl->get_info_by_uid(dpp, uid, &info, y, RGWUserCtl::GetParams() .set_objv_tracker(objv_tracker) .set_mtime(pmtime) @@ -183,12 +186,13 @@ int rgw_get_user_info_by_uid(RGWUserCtl *user_ctl, * Given an email, finds the user info associated with it. * returns: 0 on success, -ERR# on failure (including nonexistence) */ -int rgw_get_user_info_by_email(RGWUserCtl *user_ctl, string& email, +int rgw_get_user_info_by_email(const DoutPrefixProvider *dpp, + RGWUserCtl *user_ctl, string& email, RGWUserInfo& info, optional_yield y, RGWObjVersionTracker *objv_tracker, real_time *pmtime) { - return user_ctl->get_info_by_email(email, &info, y, + return user_ctl->get_info_by_email(dpp, email, &info, y, RGWUserCtl::GetParams() .set_objv_tracker(objv_tracker) .set_mtime(pmtime)); @@ -198,14 +202,15 @@ int rgw_get_user_info_by_email(RGWUserCtl *user_ctl, string& email, * Given an swift username, finds the user_info associated with it. * returns: 0 on success, -ERR# on failure (including nonexistence) */ -int rgw_get_user_info_by_swift(RGWUserCtl *user_ctl, +int rgw_get_user_info_by_swift(const DoutPrefixProvider *dpp, + RGWUserCtl *user_ctl, const string& swift_name, RGWUserInfo& info, /* out */ optional_yield y, RGWObjVersionTracker * const objv_tracker, real_time * const pmtime) { - return user_ctl->get_info_by_swift(swift_name, &info, y, + return user_ctl->get_info_by_swift(dpp, swift_name, &info, y, RGWUserCtl::GetParams() .set_objv_tracker(objv_tracker) .set_mtime(pmtime)); @@ -215,14 +220,15 @@ int rgw_get_user_info_by_swift(RGWUserCtl *user_ctl, * Given an access key, finds the user info associated with it. 
* returns: 0 on success, -ERR# on failure (including nonexistence) */ -extern int rgw_get_user_info_by_access_key(RGWUserCtl *user_ctl, +extern int rgw_get_user_info_by_access_key(const DoutPrefixProvider *dpp, + RGWUserCtl *user_ctl, const std::string& access_key, RGWUserInfo& info, optional_yield y, RGWObjVersionTracker* objv_tracker, real_time *pmtime) { - return user_ctl->get_info_by_access_key(access_key, &info, y, + return user_ctl->get_info_by_access_key(dpp, access_key, &info, y, RGWUserCtl::GetParams() .set_objv_tracker(objv_tracker) .set_mtime(pmtime)); @@ -595,7 +601,7 @@ int RGWAccessKeyPool::check_op(RGWUserAdminOpState& op_state, } // Generate a new random key -int RGWAccessKeyPool::generate_key(RGWUserAdminOpState& op_state, +int RGWAccessKeyPool::generate_key(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg) { std::string id; @@ -626,13 +632,13 @@ int RGWAccessKeyPool::generate_key(RGWUserAdminOpState& op_state, if (!id.empty()) { switch (key_type) { case KEY_TYPE_SWIFT: - if (rgw_get_user_info_by_swift(user_ctl, id, duplicate_check, y) >= 0) { + if (rgw_get_user_info_by_swift(dpp, user_ctl, id, duplicate_check, y) >= 0) { set_err_msg(err_msg, "existing swift key in RGW system:" + id); return -ERR_KEY_EXIST; } break; case KEY_TYPE_S3: - if (rgw_get_user_info_by_access_key(user_ctl, id, duplicate_check, y) >= 0) { + if (rgw_get_user_info_by_access_key(dpp, user_ctl, id, duplicate_check, y) >= 0) { set_err_msg(err_msg, "existing S3 key in RGW system:" + id); return -ERR_KEY_EXIST; } @@ -672,7 +678,7 @@ int RGWAccessKeyPool::generate_key(RGWUserAdminOpState& op_state, if (!validate_access_key(id)) continue; - } while (!rgw_get_user_info_by_access_key(user_ctl, id, duplicate_check, y)); + } while (!rgw_get_user_info_by_access_key(dpp, user_ctl, id, duplicate_check, y)); } if (key_type == KEY_TYPE_SWIFT) { @@ -683,7 +689,7 @@ int RGWAccessKeyPool::generate_key(RGWUserAdminOpState& op_state, } // check that the access key doesn't exist - if (rgw_get_user_info_by_swift(user_ctl, id, duplicate_check, y) >= 0) { + if (rgw_get_user_info_by_swift(dpp, user_ctl, id, duplicate_check, y) >= 0) { set_err_msg(err_msg, "cannot create existing swift key"); return -ERR_KEY_EXIST; } @@ -781,7 +787,8 @@ int RGWAccessKeyPool::modify_key(RGWUserAdminOpState& op_state, std::string *err return 0; } -int RGWAccessKeyPool::execute_add(RGWUserAdminOpState& op_state, +int RGWAccessKeyPool::execute_add(const DoutPrefixProvider *dpp, + RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_user_update, optional_yield y) { @@ -796,7 +803,7 @@ int RGWAccessKeyPool::execute_add(RGWUserAdminOpState& op_state, switch (key_op) { case GENERATE_KEY: - ret = generate_key(op_state, y, &subprocess_msg); + ret = generate_key(dpp, op_state, y, &subprocess_msg); break; case MODIFY_KEY: ret = modify_key(op_state, &subprocess_msg); @@ -810,7 +817,7 @@ int RGWAccessKeyPool::execute_add(RGWUserAdminOpState& op_state, // store the updated info if (!defer_user_update) - ret = user->update(op_state, err_msg, y); + ret = user->update(dpp, op_state, err_msg, y); if (ret < 0) return ret; @@ -818,13 +825,15 @@ int RGWAccessKeyPool::execute_add(RGWUserAdminOpState& op_state, return 0; } -int RGWAccessKeyPool::add(RGWUserAdminOpState& op_state, optional_yield y, +int RGWAccessKeyPool::add(const DoutPrefixProvider *dpp, + RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg) { - return add(op_state, err_msg, false, y); + return add(dpp, op_state, err_msg, 
false, y); } -int RGWAccessKeyPool::add(RGWUserAdminOpState& op_state, std::string *err_msg, +int RGWAccessKeyPool::add(const DoutPrefixProvider *dpp, + RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_user_update, optional_yield y) { int ret; @@ -836,7 +845,7 @@ int RGWAccessKeyPool::add(RGWUserAdminOpState& op_state, std::string *err_msg, return ret; } - ret = execute_add(op_state, &subprocess_msg, defer_user_update, y); + ret = execute_add(dpp, op_state, &subprocess_msg, defer_user_update, y); if (ret < 0) { set_err_msg(err_msg, "unable to add access key, " + subprocess_msg); return ret; @@ -845,7 +854,8 @@ int RGWAccessKeyPool::add(RGWUserAdminOpState& op_state, std::string *err_msg, return 0; } -int RGWAccessKeyPool::execute_remove(RGWUserAdminOpState& op_state, +int RGWAccessKeyPool::execute_remove(const DoutPrefixProvider *dpp, + RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_user_update, optional_yield y) @@ -881,7 +891,7 @@ int RGWAccessKeyPool::execute_remove(RGWUserAdminOpState& op_state, keys_map->erase(kiter); if (!defer_user_update) - ret = user->update(op_state, err_msg, y); + ret = user->update(dpp, op_state, err_msg, y); if (ret < 0) return ret; @@ -889,13 +899,14 @@ int RGWAccessKeyPool::execute_remove(RGWUserAdminOpState& op_state, return 0; } -int RGWAccessKeyPool::remove(RGWUserAdminOpState& op_state, optional_yield y, +int RGWAccessKeyPool::remove(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg) { - return remove(op_state, err_msg, false, y); + return remove(dpp, op_state, err_msg, false, y); } -int RGWAccessKeyPool::remove(RGWUserAdminOpState& op_state, +int RGWAccessKeyPool::remove(const DoutPrefixProvider *dpp, + RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_user_update, optional_yield y) { @@ -909,7 +920,7 @@ int RGWAccessKeyPool::remove(RGWUserAdminOpState& op_state, return ret; } - ret = execute_remove(op_state, &subprocess_msg, defer_user_update, y); + ret = execute_remove(dpp, op_state, &subprocess_msg, defer_user_update, y); if (ret < 0) { set_err_msg(err_msg, "unable to remove access key, " + subprocess_msg); return ret; @@ -919,7 +930,8 @@ int RGWAccessKeyPool::remove(RGWUserAdminOpState& op_state, } // remove all keys associated with a subuser -int RGWAccessKeyPool::remove_subuser_keys(RGWUserAdminOpState& op_state, +int RGWAccessKeyPool::remove_subuser_keys(const DoutPrefixProvider *dpp, + RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_user_update, optional_yield y) @@ -967,7 +979,7 @@ int RGWAccessKeyPool::remove_subuser_keys(RGWUserAdminOpState& op_state, } if (!defer_user_update) - ret = user->update(op_state, err_msg, y); + ret = user->update(dpp, op_state, err_msg, y); if (ret < 0) return ret; @@ -1067,7 +1079,8 @@ int RGWSubUserPool::check_op(RGWUserAdminOpState& op_state, return 0; } -int RGWSubUserPool::execute_add(RGWUserAdminOpState& op_state, +int RGWSubUserPool::execute_add(const DoutPrefixProvider *dpp, + RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_user_update, optional_yield y) { @@ -1082,7 +1095,7 @@ int RGWSubUserPool::execute_add(RGWUserAdminOpState& op_state, // assumes key should be created if (op_state.has_key_op()) { - ret = user->keys.add(op_state, &subprocess_msg, true, y); + ret = user->keys.add(dpp, op_state, &subprocess_msg, true, y); if (ret < 0) { set_err_msg(err_msg, "unable to create subuser key, " + subprocess_msg); return ret; @@ -1101,7 +1114,7 @@ int 
RGWSubUserPool::execute_add(RGWUserAdminOpState& op_state, // attempt to save the subuser if (!defer_user_update) - ret = user->update(op_state, err_msg, y); + ret = user->update(dpp, op_state, err_msg, y); if (ret < 0) return ret; @@ -1109,13 +1122,13 @@ int RGWSubUserPool::execute_add(RGWUserAdminOpState& op_state, return 0; } -int RGWSubUserPool::add(RGWUserAdminOpState& op_state, optional_yield y, +int RGWSubUserPool::add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg) { - return add(op_state, err_msg, false, y); + return add(dpp, op_state, err_msg, false, y); } -int RGWSubUserPool::add(RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_user_update, optional_yield y) +int RGWSubUserPool::add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_user_update, optional_yield y) { std::string subprocess_msg; int ret; @@ -1135,7 +1148,7 @@ int RGWSubUserPool::add(RGWUserAdminOpState& op_state, std::string *err_msg, boo op_state.set_gen_secret(); } - ret = execute_add(op_state, &subprocess_msg, defer_user_update, y); + ret = execute_add(dpp, op_state, &subprocess_msg, defer_user_update, y); if (ret < 0) { set_err_msg(err_msg, "unable to create subuser, " + subprocess_msg); return ret; @@ -1144,7 +1157,8 @@ int RGWSubUserPool::add(RGWUserAdminOpState& op_state, std::string *err_msg, boo return 0; } -int RGWSubUserPool::execute_remove(RGWUserAdminOpState& op_state, +int RGWSubUserPool::execute_remove(const DoutPrefixProvider *dpp, + RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_user_update, optional_yield y) { @@ -1165,14 +1179,14 @@ int RGWSubUserPool::execute_remove(RGWUserAdminOpState& op_state, } // always purge all associate keys - user->keys.remove_subuser_keys(op_state, &subprocess_msg, true, y); + user->keys.remove_subuser_keys(dpp, op_state, &subprocess_msg, true, y); // remove the subuser from the user info subuser_map->erase(siter); // attempt to save the subuser if (!defer_user_update) - ret = user->update(op_state, err_msg, y); + ret = user->update(dpp, op_state, err_msg, y); if (ret < 0) return ret; @@ -1180,13 +1194,13 @@ int RGWSubUserPool::execute_remove(RGWUserAdminOpState& op_state, return 0; } -int RGWSubUserPool::remove(RGWUserAdminOpState& op_state, optional_yield y, +int RGWSubUserPool::remove(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg) { - return remove(op_state, err_msg, false, y); + return remove(dpp, op_state, err_msg, false, y); } -int RGWSubUserPool::remove(RGWUserAdminOpState& op_state, std::string *err_msg, +int RGWSubUserPool::remove(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_user_update, optional_yield y) { std::string subprocess_msg; @@ -1198,7 +1212,7 @@ int RGWSubUserPool::remove(RGWUserAdminOpState& op_state, std::string *err_msg, return ret; } - ret = execute_remove(op_state, &subprocess_msg, defer_user_update, y); + ret = execute_remove(dpp, op_state, &subprocess_msg, defer_user_update, y); if (ret < 0) { set_err_msg(err_msg, "unable to remove subuser, " + subprocess_msg); return ret; @@ -1207,7 +1221,7 @@ int RGWSubUserPool::remove(RGWUserAdminOpState& op_state, std::string *err_msg, return 0; } -int RGWSubUserPool::execute_modify(RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_user_update, optional_yield y) +int RGWSubUserPool::execute_modify(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, 
std::string *err_msg, bool defer_user_update, optional_yield y) { int ret = 0; std::string subprocess_msg; @@ -1228,7 +1242,7 @@ int RGWSubUserPool::execute_modify(RGWUserAdminOpState& op_state, std::string *e subuser = siter->second; if (op_state.has_key_op()) { - ret = user->keys.add(op_state, &subprocess_msg, true, y); + ret = user->keys.add(dpp, op_state, &subprocess_msg, true, y); if (ret < 0) { set_err_msg(err_msg, "unable to create subuser keys, " + subprocess_msg); return ret; @@ -1245,7 +1259,7 @@ int RGWSubUserPool::execute_modify(RGWUserAdminOpState& op_state, std::string *e // attempt to save the subuser if (!defer_user_update) - ret = user->update(op_state, err_msg, y); + ret = user->update(dpp, op_state, err_msg, y); if (ret < 0) return ret; @@ -1253,12 +1267,12 @@ int RGWSubUserPool::execute_modify(RGWUserAdminOpState& op_state, std::string *e return 0; } -int RGWSubUserPool::modify(RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg) +int RGWSubUserPool::modify(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg) { - return RGWSubUserPool::modify(op_state, y, err_msg, false); + return RGWSubUserPool::modify(dpp, op_state, y, err_msg, false); } -int RGWSubUserPool::modify(RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg, bool defer_user_update) +int RGWSubUserPool::modify(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg, bool defer_user_update) { std::string subprocess_msg; int ret; @@ -1271,7 +1285,7 @@ int RGWSubUserPool::modify(RGWUserAdminOpState& op_state, optional_yield y, std: return ret; } - ret = execute_modify(op_state, &subprocess_msg, defer_user_update, y); + ret = execute_modify(dpp, op_state, &subprocess_msg, defer_user_update, y); if (ret < 0) { set_err_msg(err_msg, "unable to modify subuser, " + subprocess_msg); return ret; @@ -1313,13 +1327,13 @@ int RGWUserCapPool::init(RGWUserAdminOpState& op_state) return 0; } -int RGWUserCapPool::add(RGWUserAdminOpState& op_state, optional_yield y, +int RGWUserCapPool::add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg) { - return add(op_state, err_msg, false, y); + return add(dpp, op_state, err_msg, false, y); } -int RGWUserCapPool::add(RGWUserAdminOpState& op_state, std::string *err_msg, +int RGWUserCapPool::add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_save, optional_yield y) { int ret = 0; @@ -1347,7 +1361,7 @@ int RGWUserCapPool::add(RGWUserAdminOpState& op_state, std::string *err_msg, } if (!defer_save) - ret = user->update(op_state, err_msg, y); + ret = user->update(dpp, op_state, err_msg, y); if (ret < 0) return ret; @@ -1355,13 +1369,13 @@ int RGWUserCapPool::add(RGWUserAdminOpState& op_state, std::string *err_msg, return 0; } -int RGWUserCapPool::remove(RGWUserAdminOpState& op_state, optional_yield y, +int RGWUserCapPool::remove(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg) { - return remove(op_state, err_msg, false, y); + return remove(dpp, op_state, err_msg, false, y); } -int RGWUserCapPool::remove(RGWUserAdminOpState& op_state, std::string *err_msg, +int RGWUserCapPool::remove(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_save, optional_yield y) { int ret = 0; @@ -1390,7 +1404,7 @@ int RGWUserCapPool::remove(RGWUserAdminOpState& op_state, std::string *err_msg, } 
if (!defer_save) - ret = user->update(op_state, err_msg, y); + ret = user->update(dpp, op_state, err_msg, y); if (ret < 0) return ret; @@ -1403,7 +1417,7 @@ RGWUser::RGWUser() : caps(this), keys(this), subusers(this) init_default(); } -int RGWUser::init(rgw::sal::RGWRadosStore *storage, +int RGWUser::init(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *storage, RGWUserAdminOpState& op_state, optional_yield y) { init_default(); @@ -1411,7 +1425,7 @@ int RGWUser::init(rgw::sal::RGWRadosStore *storage, if (ret < 0) return ret; - ret = init(op_state, y); + ret = init(dpp, op_state, y); if (ret < 0) return ret; @@ -1446,7 +1460,7 @@ int RGWUser::init_storage(rgw::sal::RGWRadosStore *storage) return 0; } -int RGWUser::init(RGWUserAdminOpState& op_state, optional_yield y) +int RGWUser::init(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y) { bool found = false; std::string swift_user; @@ -1474,21 +1488,21 @@ int RGWUser::init(RGWUserAdminOpState& op_state, optional_yield y) } if (!user_id.empty() && (user_id.compare(RGW_USER_ANON_ID) != 0)) { - found = (rgw_get_user_info_by_uid(user_ctl, user_id, user_info, y, &op_state.objv) >= 0); + found = (rgw_get_user_info_by_uid(dpp, user_ctl, user_id, user_info, y, &op_state.objv) >= 0); op_state.found_by_uid = found; } if (store->ctx()->_conf.get_val("rgw_user_unique_email")) { if (!user_email.empty() && !found) { - found = (rgw_get_user_info_by_email(user_ctl, user_email, user_info, y, &op_state.objv) >= 0); + found = (rgw_get_user_info_by_email(dpp, user_ctl, user_email, user_info, y, &op_state.objv) >= 0); op_state.found_by_email = found; } } if (!swift_user.empty() && !found) { - found = (rgw_get_user_info_by_swift(user_ctl, swift_user, user_info, y, &op_state.objv) >= 0); + found = (rgw_get_user_info_by_swift(dpp, user_ctl, swift_user, user_info, y, &op_state.objv) >= 0); op_state.found_by_key = found; } if (!access_key.empty() && !found) { - found = (rgw_get_user_info_by_access_key(user_ctl, access_key, user_info, y, &op_state.objv) >= 0); + found = (rgw_get_user_info_by_access_key(dpp, user_ctl, access_key, user_info, y, &op_state.objv) >= 0); op_state.found_by_key = found; } @@ -1533,7 +1547,7 @@ int RGWUser::init_members(RGWUserAdminOpState& op_state) return 0; } -int RGWUser::update(RGWUserAdminOpState& op_state, std::string *err_msg, +int RGWUser::update(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, optional_yield y) { int ret; @@ -1547,7 +1561,7 @@ int RGWUser::update(RGWUserAdminOpState& op_state, std::string *err_msg, RGWUserInfo *pold_info = (is_populated() ? 
&old_info : nullptr); - ret = rgw_store_user_info(user_ctl, user_info, pold_info, &op_state.objv, + ret = rgw_store_user_info(dpp, user_ctl, user_info, pold_info, &op_state.objv, real_time(), false, y); if (ret < 0) { set_err_msg(err_msg, "unable to store user info"); @@ -1614,7 +1628,7 @@ static void rename_swift_keys(const rgw_user& user, } } -int RGWUser::execute_rename(RGWUserAdminOpState& op_state, std::string *err_msg, optional_yield y) +int RGWUser::execute_rename(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, optional_yield y) { int ret; bool populated = op_state.is_populated(); @@ -1625,7 +1639,7 @@ int RGWUser::execute_rename(RGWUserAdminOpState& op_state, std::string *err_msg, } if (!populated) { - ret = init(op_state, y); + ret = init(dpp, op_state, y); if (ret < 0) { set_err_msg(err_msg, "unable to retrieve user info"); return ret; @@ -1647,7 +1661,7 @@ int RGWUser::execute_rename(RGWUserAdminOpState& op_state, std::string *err_msg, RGWObjVersionTracker objv; const bool exclusive = !op_state.get_overwrite_new_user(); // overwrite if requested - ret = user_ctl->store_info(stub_user_info, y, + ret = user_ctl->store_info(dpp, stub_user_info, y, RGWUserCtl::PutParams() .set_objv_tracker(&objv) .set_exclusive(exclusive)); @@ -1670,7 +1684,7 @@ int RGWUser::execute_rename(RGWUserAdminOpState& op_state, std::string *err_msg, rgw::sal::RGWBucketList buckets; do { - ret = old_user.list_buckets(marker, "", max_buckets, false, buckets, y); + ret = old_user.list_buckets(dpp, marker, "", max_buckets, false, buckets, y); if (ret < 0) { set_err_msg(err_msg, "unable to list user buckets"); return ret; @@ -1682,25 +1696,25 @@ int RGWUser::execute_rename(RGWUserAdminOpState& op_state, std::string *err_msg, auto& bucket = it->second; marker = it->first; - ret = bucket->get_bucket_info(y); + ret = bucket->get_bucket_info(dpp, y); if (ret < 0) { set_err_msg(err_msg, "failed to fetch bucket info for bucket=" + bucket->get_name()); return ret; } - ret = bucket->set_acl(policy_instance, y); + ret = bucket->set_acl(dpp, policy_instance, y); if (ret < 0) { set_err_msg(err_msg, "failed to set acl on bucket " + bucket->get_name()); return ret; } - ret = bucket->link(&new_user, y); + ret = bucket->link(dpp, &new_user, y); if (ret < 0) { set_err_msg(err_msg, "failed to link bucket " + bucket->get_name()); return ret; } - ret = bucket->chown(&new_user, &old_user, y); + ret = bucket->chown(&new_user, &old_user, y, dpp); if (ret < 0) { set_err_msg(err_msg, "failed to run bucket chown" + cpp_strerror(-ret)); return ret; @@ -1717,10 +1731,10 @@ int RGWUser::execute_rename(RGWUserAdminOpState& op_state, std::string *err_msg, rename_swift_keys(new_user.get_user(), user_info.swift_keys); - return update(op_state, err_msg, y); + return update(dpp, op_state, err_msg, y); } -int RGWUser::execute_add(RGWUserAdminOpState& op_state, std::string *err_msg, +int RGWUser::execute_add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, optional_yield y) { std::string subprocess_msg; @@ -1827,7 +1841,7 @@ int RGWUser::execute_add(RGWUserAdminOpState& op_state, std::string *err_msg, // see if we need to add an access key if (op_state.has_key_op()) { - ret = keys.add(op_state, &subprocess_msg, defer_user_update, y); + ret = keys.add(dpp, op_state, &subprocess_msg, defer_user_update, y); if (ret < 0) { set_err_msg(err_msg, "unable to create access key, " + subprocess_msg); return ret; @@ -1836,14 +1850,14 @@ int RGWUser::execute_add(RGWUserAdminOpState& 
op_state, std::string *err_msg, // see if we need to add some caps if (op_state.has_caps_op()) { - ret = caps.add(op_state, &subprocess_msg, defer_user_update, y); + ret = caps.add(dpp, op_state, &subprocess_msg, defer_user_update, y); if (ret < 0) { set_err_msg(err_msg, "unable to add user capabilities, " + subprocess_msg); return ret; } } - ret = update(op_state, err_msg, y); + ret = update(dpp, op_state, err_msg, y); if (ret < 0) return ret; @@ -1851,7 +1865,7 @@ int RGWUser::execute_add(RGWUserAdminOpState& op_state, std::string *err_msg, } -int RGWUser::add(RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg) +int RGWUser::add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg) { std::string subprocess_msg; int ret; @@ -1862,7 +1876,7 @@ int RGWUser::add(RGWUserAdminOpState& op_state, optional_yield y, std::string *e return ret; } - ret = execute_add(op_state, &subprocess_msg, y); + ret = execute_add(dpp, op_state, &subprocess_msg, y); if (ret < 0) { set_err_msg(err_msg, "unable to create user, " + subprocess_msg); return ret; @@ -1871,7 +1885,7 @@ int RGWUser::add(RGWUserAdminOpState& op_state, optional_yield y, std::string *e return 0; } -int RGWUser::rename(RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg) +int RGWUser::rename(RGWUserAdminOpState& op_state, optional_yield y, const DoutPrefixProvider *dpp, std::string *err_msg) { std::string subprocess_msg; int ret; @@ -1882,7 +1896,7 @@ int RGWUser::rename(RGWUserAdminOpState& op_state, optional_yield y, std::string return ret; } - ret = execute_rename(op_state, &subprocess_msg, y); + ret = execute_rename(dpp, op_state, &subprocess_msg, y); if (ret < 0) { set_err_msg(err_msg, "unable to rename user, " + subprocess_msg); return ret; @@ -1891,7 +1905,7 @@ int RGWUser::rename(RGWUserAdminOpState& op_state, optional_yield y, std::string return 0; } -int RGWUser::execute_remove(RGWUserAdminOpState& op_state, std::string *err_msg, optional_yield y) +int RGWUser::execute_remove(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, optional_yield y) { int ret; @@ -1909,7 +1923,7 @@ int RGWUser::execute_remove(RGWUserAdminOpState& op_state, std::string *err_msg, CephContext *cct = store->ctx(); size_t max_buckets = cct->_conf->rgw_list_buckets_max_chunk; do { - ret = rgw_read_user_buckets(store, uid, buckets, marker, string(), + ret = rgw_read_user_buckets(dpp, store, uid, buckets, marker, string(), max_buckets, false, y); if (ret < 0) { set_err_msg(err_msg, "unable to read user bucket info"); @@ -1924,7 +1938,7 @@ int RGWUser::execute_remove(RGWUserAdminOpState& op_state, std::string *err_msg, std::string prefix, delimiter; for (auto it = m.begin(); it != m.end(); ++it) { - ret = it->second->remove_bucket(true, prefix, delimiter, false, nullptr, y); + ret = it->second->remove_bucket(dpp, true, prefix, delimiter, false, nullptr, y); if (ret < 0) { set_err_msg(err_msg, "unable to delete user data"); return ret; @@ -1935,7 +1949,7 @@ int RGWUser::execute_remove(RGWUserAdminOpState& op_state, std::string *err_msg, } while (buckets.is_truncated()); - ret = user_ctl->remove_info(user_info, y, RGWUserCtl::RemoveParams() + ret = user_ctl->remove_info(dpp, user_info, y, RGWUserCtl::RemoveParams() .set_objv_tracker(&op_state.objv)); if (ret < 0) { set_err_msg(err_msg, "unable to remove user from RADOS"); @@ -1948,7 +1962,7 @@ int RGWUser::execute_remove(RGWUserAdminOpState& op_state, std::string *err_msg, return 0; } -int 
RGWUser::remove(RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg) +int RGWUser::remove(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg) { std::string subprocess_msg; int ret; @@ -1959,7 +1973,7 @@ int RGWUser::remove(RGWUserAdminOpState& op_state, optional_yield y, std::string return ret; } - ret = execute_remove(op_state, &subprocess_msg, y); + ret = execute_remove(dpp, op_state, &subprocess_msg, y); if (ret < 0) { set_err_msg(err_msg, "unable to remove user, " + subprocess_msg); return ret; @@ -1968,7 +1982,7 @@ int RGWUser::remove(RGWUserAdminOpState& op_state, optional_yield y, std::string return 0; } -int RGWUser::execute_modify(RGWUserAdminOpState& op_state, std::string *err_msg, optional_yield y) +int RGWUser::execute_modify(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, optional_yield y) { bool populated = op_state.is_populated(); int ret = 0; @@ -1987,7 +2001,7 @@ int RGWUser::execute_modify(RGWUserAdminOpState& op_state, std::string *err_msg, // if the user hasn't already been populated...attempt to if (!populated) { - ret = init(op_state, y); + ret = init(dpp, op_state, y); if (ret < 0) { set_err_msg(err_msg, "unable to retrieve user info"); return ret; @@ -2006,7 +2020,7 @@ int RGWUser::execute_modify(RGWUserAdminOpState& op_state, std::string *err_msg, if (!op_email.empty()) { // make sure we are not adding a duplicate email if (old_email != op_email) { - ret = rgw_get_user_info_by_email(user_ctl, op_email, duplicate_check,y ); + ret = rgw_get_user_info_by_email(dpp, user_ctl, op_email, duplicate_check,y ); if (ret >= 0 && duplicate_check.user_id.compare(user_id) != 0) { set_err_msg(err_msg, "cannot add duplicate email"); return -ERR_EMAIL_EXIST; @@ -2014,7 +2028,7 @@ int RGWUser::execute_modify(RGWUserAdminOpState& op_state, std::string *err_msg, } user_info.user_email = op_email; } else if (op_email.empty() && op_state.user_email_specified) { - ldout(store->ctx(), 10) << "removing email index: " << user_info.user_email << dendl; + ldpp_dout(dpp, 10) << "removing email index: " << user_info.user_email << dendl; /* will be physically removed later when calling update() */ user_info.user_email.clear(); } @@ -2064,7 +2078,7 @@ int RGWUser::execute_modify(RGWUserAdminOpState& op_state, std::string *err_msg, CephContext *cct = store->ctx(); size_t max_buckets = cct->_conf->rgw_list_buckets_max_chunk; do { - ret = rgw_read_user_buckets(store, user_id, buckets, marker, string(), + ret = rgw_read_user_buckets(dpp, store, user_id, buckets, marker, string(), max_buckets, false, y); if (ret < 0) { set_err_msg(err_msg, "could not get buckets for uid: " + user_id.to_str()); @@ -2081,7 +2095,7 @@ int RGWUser::execute_modify(RGWUserAdminOpState& op_state, std::string *err_msg, marker = iter->first; } - ret = store->getRados()->set_buckets_enabled(bucket_names, !suspended); + ret = store->getRados()->set_buckets_enabled(bucket_names, !suspended, dpp); if (ret < 0) { set_err_msg(err_msg, "failed to modify bucket"); return ret; @@ -2106,21 +2120,21 @@ int RGWUser::execute_modify(RGWUserAdminOpState& op_state, std::string *err_msg, // if we're supposed to modify keys, do so if (op_state.has_key_op()) { - ret = keys.add(op_state, &subprocess_msg, true, y); + ret = keys.add(dpp, op_state, &subprocess_msg, true, y); if (ret < 0) { set_err_msg(err_msg, "unable to create or modify keys, " + subprocess_msg); return ret; } } - ret = update(op_state, err_msg, y); + ret = update(dpp, op_state, 
err_msg, y); if (ret < 0) return ret; return 0; } -int RGWUser::modify(RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg) +int RGWUser::modify(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg) { std::string subprocess_msg; int ret; @@ -2138,7 +2152,7 @@ int RGWUser::modify(RGWUserAdminOpState& op_state, optional_yield y, std::string return ret; } - ret = execute_modify(op_state, &subprocess_msg, y); + ret = execute_modify(dpp, op_state, &subprocess_msg, y); if (ret < 0) { set_err_msg(err_msg, "unable to modify user, " + subprocess_msg); return ret; @@ -2147,10 +2161,10 @@ int RGWUser::modify(RGWUserAdminOpState& op_state, optional_yield y, std::string return 0; } -int RGWUser::info(RGWUserAdminOpState& op_state, RGWUserInfo& fetched_info, +int RGWUser::info(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, RGWUserInfo& fetched_info, optional_yield y, std::string *err_msg) { - int ret = init(op_state, y); + int ret = init(dpp, op_state, y); if (ret < 0) { set_err_msg(err_msg, "unable to fetch user info"); return ret; @@ -2173,7 +2187,7 @@ int RGWUser::info(RGWUserInfo& fetched_info, std::string *err_msg) return 0; } -int RGWUser::list(RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher) +int RGWUser::list(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher) { Formatter *formatter = flusher.get_formatter(); void *handle = nullptr; @@ -2184,7 +2198,7 @@ int RGWUser::list(RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher) auto meta_mgr = store->ctl()->meta.mgr; - int ret = meta_mgr->list_keys_init(metadata_key, op_state.marker, &handle); + int ret = meta_mgr->list_keys_init(dpp, metadata_key, op_state.marker, &handle); if (ret < 0) { return ret; } @@ -2230,7 +2244,7 @@ int RGWUser::list(RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher) return 0; } -int RGWUserAdminOp_User::list(rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state, +int RGWUserAdminOp_User::list(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher) { RGWUser user; @@ -2239,21 +2253,22 @@ int RGWUserAdminOp_User::list(rgw::sal::RGWRadosStore *store, RGWUserAdminOpStat if (ret < 0) return ret; - ret = user.list(op_state, flusher); + ret = user.list(dpp, op_state, flusher); if (ret < 0) return ret; return 0; } -int RGWUserAdminOp_User::info(rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state, +int RGWUserAdminOp_User::info(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y) { RGWUserInfo info; RGWUser user; - int ret = user.init(store, op_state, y); + int ret = user.init(dpp, store, op_state, y); if (ret < 0) return ret; @@ -2267,7 +2282,7 @@ int RGWUserAdminOp_User::info(rgw::sal::RGWRadosStore *store, RGWUserAdminOpStat return ret; if (op_state.sync_stats) { - ret = rgw_user_sync_all_stats(store, info.user_id, y); + ret = rgw_user_sync_all_stats(dpp, store, info.user_id, y); if (ret < 0) { return ret; } @@ -2276,7 +2291,7 @@ int RGWUserAdminOp_User::info(rgw::sal::RGWRadosStore *store, RGWUserAdminOpStat RGWStorageStats stats; RGWStorageStats *arg_stats = NULL; if (op_state.fetch_stats) { - int ret = store->ctl()->user->read_stats(info.user_id, &stats, y); + int ret = store->ctl()->user->read_stats(dpp, info.user_id, &stats, y); if (ret < 0 && ret != -ENOENT) { return ret; } @@ -2294,19 +2309,20 @@ int 
RGWUserAdminOp_User::info(rgw::sal::RGWRadosStore *store, RGWUserAdminOpStat return 0; } -int RGWUserAdminOp_User::create(rgw::sal::RGWRadosStore *store, +int RGWUserAdminOp_User::create(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y) { RGWUserInfo info; RGWUser user; - int ret = user.init(store, op_state, y); + int ret = user.init(dpp, store, op_state, y); if (ret < 0) return ret; Formatter *formatter = flusher.get_formatter(); - ret = user.add(op_state, y, NULL); + ret = user.add(dpp, op_state, y, NULL); if (ret < 0) { if (ret == -EEXIST) ret = -ERR_USER_EXIST; @@ -2327,18 +2343,19 @@ int RGWUserAdminOp_User::create(rgw::sal::RGWRadosStore *store, return 0; } -int RGWUserAdminOp_User::modify(rgw::sal::RGWRadosStore *store, +int RGWUserAdminOp_User::modify(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y) { RGWUserInfo info; RGWUser user; - int ret = user.init(store, op_state, y); + int ret = user.init(dpp, store, op_state, y); if (ret < 0) return ret; Formatter *formatter = flusher.get_formatter(); - ret = user.modify(op_state, y, NULL); + ret = user.modify(dpp, op_state, y, NULL); if (ret < 0) { if (ret == -ENOENT) ret = -ERR_NO_SUCH_USER; @@ -2359,31 +2376,32 @@ int RGWUserAdminOp_User::modify(rgw::sal::RGWRadosStore *store, return 0; } -int RGWUserAdminOp_User::remove(rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state, +int RGWUserAdminOp_User::remove(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y) { RGWUserInfo info; RGWUser user; - int ret = user.init(store, op_state, y); + int ret = user.init(dpp, store, op_state, y); if (ret < 0) return ret; - ret = user.remove(op_state, y, NULL); + ret = user.remove(dpp, op_state, y, NULL); if (ret == -ENOENT) ret = -ERR_NO_SUCH_USER; return ret; } -int RGWUserAdminOp_Subuser::create(rgw::sal::RGWRadosStore *store, +int RGWUserAdminOp_Subuser::create(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y) { RGWUserInfo info; RGWUser user; - int ret = user.init(store, op_state, y); + int ret = user.init(dpp, store, op_state, y); if (ret < 0) return ret; @@ -2392,7 +2410,7 @@ int RGWUserAdminOp_Subuser::create(rgw::sal::RGWRadosStore *store, Formatter *formatter = flusher.get_formatter(); - ret = user.subusers.add(op_state, y, NULL); + ret = user.subusers.add(dpp, op_state, y, NULL); if (ret < 0) return ret; @@ -2410,12 +2428,13 @@ int RGWUserAdminOp_Subuser::create(rgw::sal::RGWRadosStore *store, return 0; } -int RGWUserAdminOp_Subuser::modify(rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state, +int RGWUserAdminOp_Subuser::modify(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y) { RGWUserInfo info; RGWUser user; - int ret = user.init(store, op_state, y); + int ret = user.init(dpp, store, op_state, y); if (ret < 0) return ret; @@ -2424,7 +2443,7 @@ int RGWUserAdminOp_Subuser::modify(rgw::sal::RGWRadosStore *store, RGWUserAdminO Formatter *formatter = flusher.get_formatter(); - ret = user.subusers.modify(op_state, y, NULL); + ret = user.subusers.modify(dpp, op_state, y, NULL); if (ret < 0) return ret; @@ -2442,14 +2461,15 @@ int 
RGWUserAdminOp_Subuser::modify(rgw::sal::RGWRadosStore *store, RGWUserAdminO return 0; } -int RGWUserAdminOp_Subuser::remove(rgw::sal::RGWRadosStore *store, +int RGWUserAdminOp_Subuser::remove(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y) { RGWUserInfo info; RGWUser user; - int ret = user.init(store, op_state, y); + int ret = user.init(dpp, store, op_state, y); if (ret < 0) return ret; @@ -2457,20 +2477,21 @@ int RGWUserAdminOp_Subuser::remove(rgw::sal::RGWRadosStore *store, if (!op_state.has_existing_user()) return -ERR_NO_SUCH_USER; - ret = user.subusers.remove(op_state, y, NULL); + ret = user.subusers.remove(dpp, op_state, y, NULL); if (ret < 0) return ret; return 0; } -int RGWUserAdminOp_Key::create(rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state, +int RGWUserAdminOp_Key::create(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y) { RGWUserInfo info; RGWUser user; - int ret = user.init(store, op_state, y); + int ret = user.init(dpp, store, op_state, y); if (ret < 0) return ret; @@ -2479,7 +2500,7 @@ int RGWUserAdminOp_Key::create(rgw::sal::RGWRadosStore *store, RGWUserAdminOpSta Formatter *formatter = flusher.get_formatter(); - ret = user.keys.add(op_state, y, NULL); + ret = user.keys.add(dpp, op_state, y, NULL); if (ret < 0) return ret; @@ -2504,14 +2525,15 @@ int RGWUserAdminOp_Key::create(rgw::sal::RGWRadosStore *store, RGWUserAdminOpSta return 0; } -int RGWUserAdminOp_Key::remove(rgw::sal::RGWRadosStore *store, +int RGWUserAdminOp_Key::remove(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y) { RGWUserInfo info; RGWUser user; - int ret = user.init(store, op_state, y); + int ret = user.init(dpp, store, op_state, y); if (ret < 0) return ret; @@ -2519,20 +2541,21 @@ int RGWUserAdminOp_Key::remove(rgw::sal::RGWRadosStore *store, return -ERR_NO_SUCH_USER; - ret = user.keys.remove(op_state, y, NULL); + ret = user.keys.remove(dpp, op_state, y, NULL); if (ret < 0) return ret; return 0; } -int RGWUserAdminOp_Caps::add(rgw::sal::RGWRadosStore *store, +int RGWUserAdminOp_Caps::add(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y) { RGWUserInfo info; RGWUser user; - int ret = user.init(store, op_state, y); + int ret = user.init(dpp, store, op_state, y); if (ret < 0) return ret; @@ -2541,7 +2564,7 @@ int RGWUserAdminOp_Caps::add(rgw::sal::RGWRadosStore *store, Formatter *formatter = flusher.get_formatter(); - ret = user.caps.add(op_state, y, NULL); + ret = user.caps.add(dpp, op_state, y, NULL); if (ret < 0) return ret; @@ -2560,13 +2583,14 @@ int RGWUserAdminOp_Caps::add(rgw::sal::RGWRadosStore *store, } -int RGWUserAdminOp_Caps::remove(rgw::sal::RGWRadosStore *store, +int RGWUserAdminOp_Caps::remove(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y) { RGWUserInfo info; RGWUser user; - int ret = user.init(store, op_state, y); + int ret = user.init(dpp, store, op_state, y); if (ret < 0) return ret; @@ -2575,7 +2599,7 @@ int RGWUserAdminOp_Caps::remove(rgw::sal::RGWRadosStore *store, Formatter *formatter = flusher.get_formatter(); - ret = user.caps.remove(op_state, y, NULL); + ret = user.caps.remove(dpp, op_state, y, 
NULL); if (ret < 0) return ret; @@ -2606,7 +2630,7 @@ public: string get_type() override { return "user"; } - int do_get(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWMetadataObject **obj, optional_yield y) override { + int do_get(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWMetadataObject **obj, optional_yield y, const DoutPrefixProvider *dpp) override { RGWUserCompleteInfo uci; RGWObjVersionTracker objv_tracker; real_time mtime; @@ -2615,7 +2639,7 @@ public: int ret = svc.user->read_user_info(op->ctx(), user, &uci.info, &objv_tracker, &mtime, nullptr, &uci.attrs, - y); + y, dpp); if (ret < 0) { return ret; } @@ -2641,24 +2665,24 @@ public: int do_put(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWMetadataObject *obj, RGWObjVersionTracker& objv_tracker, - optional_yield y, + optional_yield y, const DoutPrefixProvider *dpp, RGWMDLogSyncType type, bool from_remote_zone) override; int do_remove(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWObjVersionTracker& objv_tracker, - optional_yield y) override { + optional_yield y, const DoutPrefixProvider *dpp) override { RGWUserInfo info; rgw_user user = RGWSI_User::user_from_meta_key(entry); int ret = svc.user->read_user_info(op->ctx(), user, &info, nullptr, nullptr, nullptr, nullptr, - y); + y, dpp); if (ret < 0) { return ret; } return svc.user->remove_user_info(op->ctx(), info, &objv_tracker, - y); + y, dpp); } }; @@ -2676,20 +2700,20 @@ public: uobj = static_cast(obj); } - int put_checked() override; + int put_checked(const DoutPrefixProvider *dpp) override; }; int RGWUserMetadataHandler::do_put(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWMetadataObject *obj, RGWObjVersionTracker& objv_tracker, - optional_yield y, + optional_yield y, const DoutPrefixProvider *dpp, RGWMDLogSyncType type, bool from_remote_zone) { RGWMetadataHandlerPut_User put_op(this, op, entry, obj, objv_tracker, y, type, from_remote_zone); - return do_put_operate(&put_op); + return do_put_operate(&put_op, dpp); } -int RGWMetadataHandlerPut_User::put_checked() +int RGWMetadataHandlerPut_User::put_checked(const DoutPrefixProvider *dpp) { RGWUserMetadataObject *orig_obj = static_cast(old_obj); RGWUserCompleteInfo& uci = uobj->get_uci(); @@ -2705,7 +2729,7 @@ int RGWMetadataHandlerPut_User::put_checked() int ret = uhandler->svc.user->store_user_info(op->ctx(), uci.info, pold_info, &objv_tracker, mtime, - false, pattrs, y); + false, pattrs, y, dpp); if (ret < 0) { return ret; } @@ -2747,7 +2771,8 @@ public: } }; -int RGWUserCtl::get_info_by_uid(const rgw_user& uid, +int RGWUserCtl::get_info_by_uid(const DoutPrefixProvider *dpp, + const rgw_user& uid, RGWUserInfo *info, optional_yield y, const GetParams& params) @@ -2761,11 +2786,13 @@ int RGWUserCtl::get_info_by_uid(const rgw_user& uid, params.mtime, params.cache_info, params.attrs, - y); + y, + dpp); }); } -int RGWUserCtl::get_info_by_email(const string& email, +int RGWUserCtl::get_info_by_email(const DoutPrefixProvider *dpp, + const string& email, RGWUserInfo *info, optional_yield y, const GetParams& params) @@ -2775,11 +2802,13 @@ int RGWUserCtl::get_info_by_email(const string& email, info, params.objv_tracker, params.mtime, - y); + y, + dpp); }); } -int RGWUserCtl::get_info_by_swift(const string& swift_name, +int RGWUserCtl::get_info_by_swift(const DoutPrefixProvider *dpp, + const string& swift_name, RGWUserInfo *info, optional_yield y, const GetParams& params) @@ -2789,11 +2818,13 @@ int RGWUserCtl::get_info_by_swift(const string& swift_name, info, params.objv_tracker, params.mtime, - y); + 
y, + dpp); }); } -int RGWUserCtl::get_info_by_access_key(const string& access_key, +int RGWUserCtl::get_info_by_access_key(const DoutPrefixProvider *dpp, + const string& access_key, RGWUserInfo *info, optional_yield y, const GetParams& params) @@ -2803,23 +2834,26 @@ int RGWUserCtl::get_info_by_access_key(const string& access_key, info, params.objv_tracker, params.mtime, - y); + y, + dpp); }); } -int RGWUserCtl::get_attrs_by_uid(const rgw_user& user_id, +int RGWUserCtl::get_attrs_by_uid(const DoutPrefixProvider *dpp, + const rgw_user& user_id, map *pattrs, optional_yield y, RGWObjVersionTracker *objv_tracker) { RGWUserInfo user_info; - return get_info_by_uid(user_id, &user_info, y, RGWUserCtl::GetParams() + return get_info_by_uid(dpp, user_id, &user_info, y, RGWUserCtl::GetParams() .set_attrs(pattrs) .set_objv_tracker(objv_tracker)); } -int RGWUserCtl::store_info(const RGWUserInfo& info, optional_yield y, +int RGWUserCtl::store_info(const DoutPrefixProvider *dpp, + const RGWUserInfo& info, optional_yield y, const PutParams& params) { string key = RGWSI_User::get_meta_key(info.user_id); @@ -2831,11 +2865,13 @@ int RGWUserCtl::store_info(const RGWUserInfo& info, optional_yield y, params.mtime, params.exclusive, params.attrs, - y); + y, + dpp); }); } -int RGWUserCtl::remove_info(const RGWUserInfo& info, optional_yield y, +int RGWUserCtl::remove_info(const DoutPrefixProvider *dpp, + const RGWUserInfo& info, optional_yield y, const RemoveParams& params) { @@ -2844,32 +2880,35 @@ int RGWUserCtl::remove_info(const RGWUserInfo& info, optional_yield y, return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) { return svc.user->remove_user_info(op->ctx(), info, params.objv_tracker, - y); + y, dpp); }); } -int RGWUserCtl::add_bucket(const rgw_user& user, +int RGWUserCtl::add_bucket(const DoutPrefixProvider *dpp, + const rgw_user& user, const rgw_bucket& bucket, ceph::real_time creation_time, optional_yield y) { return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) { - return svc.user->add_bucket(op->ctx(), user, bucket, creation_time, y); + return svc.user->add_bucket(dpp, op->ctx(), user, bucket, creation_time, y); }); } -int RGWUserCtl::remove_bucket(const rgw_user& user, +int RGWUserCtl::remove_bucket(const DoutPrefixProvider *dpp, + const rgw_user& user, const rgw_bucket& bucket, optional_yield y) { return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) { - return svc.user->remove_bucket(op->ctx(), user, bucket, y); + return svc.user->remove_bucket(dpp, op->ctx(), user, bucket, y); }); } -int RGWUserCtl::list_buckets(const rgw_user& user, +int RGWUserCtl::list_buckets(const DoutPrefixProvider *dpp, + const rgw_user& user, const string& marker, const string& end_marker, uint64_t max, @@ -2884,16 +2923,16 @@ int RGWUserCtl::list_buckets(const rgw_user& user, } return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) { - int ret = svc.user->list_buckets(op->ctx(), user, marker, end_marker, + int ret = svc.user->list_buckets(dpp, op->ctx(), user, marker, end_marker, max, buckets, is_truncated, y); if (ret < 0) { return ret; } if (need_stats) { map& m = buckets->get_buckets(); - ret = ctl.bucket->read_buckets_stats(m, y); + ret = ctl.bucket->read_buckets_stats(m, y, dpp); if (ret < 0 && ret != -ENOENT) { - ldout(svc.user->ctx(), 0) << "ERROR: could not get stats for buckets" << dendl; + ldpp_dout(dpp, 0) << "ERROR: could not get stats for buckets" << dendl; return ret; } } @@ -2901,44 +2940,46 @@ int RGWUserCtl::list_buckets(const rgw_user& user, }); } -int 
RGWUserCtl::flush_bucket_stats(const rgw_user& user, +int RGWUserCtl::flush_bucket_stats(const DoutPrefixProvider *dpp, + const rgw_user& user, const RGWBucketEnt& ent, optional_yield y) { return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) { - return svc.user->flush_bucket_stats(op->ctx(), user, ent, y); + return svc.user->flush_bucket_stats(dpp, op->ctx(), user, ent, y); }); } -int RGWUserCtl::complete_flush_stats(const rgw_user& user, optional_yield y) +int RGWUserCtl::complete_flush_stats(const DoutPrefixProvider *dpp, const rgw_user& user, optional_yield y) { return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) { - return svc.user->complete_flush_stats(op->ctx(), user, y); + return svc.user->complete_flush_stats(dpp, op->ctx(), user, y); }); } -int RGWUserCtl::reset_stats(const rgw_user& user, optional_yield y) +int RGWUserCtl::reset_stats(const DoutPrefixProvider *dpp, const rgw_user& user, optional_yield y) { return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) { - return svc.user->reset_bucket_stats(op->ctx(), user, y); + return svc.user->reset_bucket_stats(dpp, op->ctx(), user, y); }); } -int RGWUserCtl::read_stats(const rgw_user& user, RGWStorageStats *stats, +int RGWUserCtl::read_stats(const DoutPrefixProvider *dpp, + const rgw_user& user, RGWStorageStats *stats, optional_yield y, ceph::real_time *last_stats_sync, ceph::real_time *last_stats_update) { return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) { - return svc.user->read_stats(op->ctx(), user, stats, + return svc.user->read_stats(dpp, op->ctx(), user, stats, last_stats_sync, last_stats_update, y); }); } -int RGWUserCtl::read_stats_async(const rgw_user& user, RGWGetUserStats_CB *cb) +int RGWUserCtl::read_stats_async(const DoutPrefixProvider *dpp, const rgw_user& user, RGWGetUserStats_CB *cb) { return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) { - return svc.user->read_stats_async(op->ctx(), user, cb); + return svc.user->read_stats_async(dpp, op->ctx(), user, cb); }); } diff --git a/src/rgw/rgw_user.h b/src/rgw/rgw_user.h index 5063ea3fccbb2..1f138dcd7820a 100644 --- a/src/rgw/rgw_user.h +++ b/src/rgw/rgw_user.h @@ -57,8 +57,8 @@ struct RGWUID }; WRITE_CLASS_ENCODER(RGWUID) -extern int rgw_user_sync_all_stats(rgw::sal::RGWRadosStore *store, const rgw_user& user_id, optional_yield y); -extern int rgw_user_get_all_buckets_stats( +extern int rgw_user_sync_all_stats(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, const rgw_user& user_id, optional_yield y); +extern int rgw_user_get_all_buckets_stats(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store,const rgw_user& user_id, map& buckets_usage_map, optional_yield y); @@ -71,7 +71,8 @@ extern void rgw_get_anon_user(RGWUserInfo& info); * Save the given user information to storage. * Returns: 0 on success, -ERR# on failure. */ -extern int rgw_store_user_info(RGWUserCtl *user_ctl, +extern int rgw_store_user_info(const DoutPrefixProvider *dpp, + RGWUserCtl *user_ctl, RGWUserInfo& info, RGWUserInfo *old_info, RGWObjVersionTracker *objv_tracker, @@ -84,7 +85,8 @@ extern int rgw_store_user_info(RGWUserCtl *user_ctl, * Given an user_id, finds the user info associated with it. 
* returns: 0 on success, -ERR# on failure (including nonexistence) */ -extern int rgw_get_user_info_by_uid(RGWUserCtl *user_ctl, +extern int rgw_get_user_info_by_uid(const DoutPrefixProvider *dpp, + RGWUserCtl *user_ctl, const rgw_user& user_id, RGWUserInfo& info, optional_yield y, @@ -96,7 +98,8 @@ extern int rgw_get_user_info_by_uid(RGWUserCtl *user_ctl, * Given an email, finds the user info associated with it. * returns: 0 on success, -ERR# on failure (including nonexistence) */ -extern int rgw_get_user_info_by_email(RGWUserCtl *user_ctl, +extern int rgw_get_user_info_by_email(const DoutPrefixProvider *dpp, + RGWUserCtl *user_ctl, string& email, RGWUserInfo& info, optional_yield y, RGWObjVersionTracker *objv_tracker = NULL, @@ -105,7 +108,8 @@ extern int rgw_get_user_info_by_email(RGWUserCtl *user_ctl, * Given an swift username, finds the user info associated with it. * returns: 0 on success, -ERR# on failure (including nonexistence) */ -extern int rgw_get_user_info_by_swift(RGWUserCtl *user_ctl, +extern int rgw_get_user_info_by_swift(const DoutPrefixProvider *dpp, + RGWUserCtl *user_ctl, const string& swift_name, RGWUserInfo& info, /* out */ optional_yield y, @@ -115,7 +119,8 @@ extern int rgw_get_user_info_by_swift(RGWUserCtl *user_ctl, * Given an access key, finds the user info associated with it. * returns: 0 on success, -ERR# on failure (including nonexistence) */ -extern int rgw_get_user_info_by_access_key(RGWUserCtl *user_ctl, +extern int rgw_get_user_info_by_access_key(const DoutPrefixProvider *dpp, + RGWUserCtl *user_ctl, const std::string& access_key, RGWUserInfo& info, optional_yield y, @@ -589,7 +594,8 @@ class RGWAccessKeyPool private: int create_key(RGWUserAdminOpState& op_state, std::string *err_msg = NULL); - int generate_key(RGWUserAdminOpState& op_state, optional_yield y, + int generate_key(const DoutPrefixProvider *dpp, + RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg = NULL); int modify_key(RGWUserAdminOpState& op_state, std::string *err_msg = NULL); @@ -598,16 +604,18 @@ private: int check_op(RGWUserAdminOpState& op_state, std::string *err_msg = NULL); /* API Contract Fulfilment */ - int execute_add(RGWUserAdminOpState& op_state, std::string *err_msg, + int execute_add(const DoutPrefixProvider *dpp, + RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_save, optional_yield y); - int execute_remove(RGWUserAdminOpState& op_state, std::string *err_msg, + int execute_remove(const DoutPrefixProvider *dpp, + RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_save, optional_yield y); - int remove_subuser_keys(RGWUserAdminOpState& op_state, std::string *err_msg, + int remove_subuser_keys(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_save, optional_yield y); - int add(RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_save, + int add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_save, optional_yield y); - int remove(RGWUserAdminOpState& op_state, std::string *err_msg, + int remove(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_save, optional_yield y); public: explicit RGWAccessKeyPool(RGWUser* usr); @@ -615,9 +623,9 @@ public: int init(RGWUserAdminOpState& op_state); /* API Contracted Methods */ - int add(RGWUserAdminOpState& op_state, optional_yield y, + int add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg = NULL); 
- int remove(RGWUserAdminOpState& op_state, optional_yield y, + int remove(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg = NULL); friend class RGWUser; @@ -639,14 +647,14 @@ private: int check_op(RGWUserAdminOpState& op_state, std::string *err_msg = NULL); /* API Contract Fulfillment */ - int execute_add(RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_save, optional_yield y); - int execute_remove(RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_save, optional_yield y); - int execute_modify(RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_save, optional_yield y); + int execute_add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_save, optional_yield y); + int execute_remove(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_save, optional_yield y); + int execute_modify(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_save, optional_yield y); - int add(RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_save, + int add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_save, optional_yield y); - int remove(RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_save, optional_yield y); - int modify(RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg, bool defer_save); + int remove(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_save, optional_yield y); + int modify(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg, bool defer_save); public: explicit RGWSubUserPool(RGWUser *user); @@ -654,10 +662,10 @@ public: int init(RGWUserAdminOpState& op_state); /* API contracted methods */ - int add(RGWUserAdminOpState& op_state, optional_yield y, + int add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg = NULL); - int remove(RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg = NULL); - int modify(RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg = NULL); + int remove(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg = NULL); + int modify(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg = NULL); friend class RGWUser; }; @@ -669,9 +677,9 @@ class RGWUserCapPool RGWUser *user{nullptr}; private: - int add(RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_save, + int add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_save, optional_yield y); - int remove(RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_save, + int remove(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_save, optional_yield y); public: @@ -680,9 +688,9 @@ public: int init(RGWUserAdminOpState& op_state); /* API contracted methods */ - int add(RGWUserAdminOpState& op_state, optional_yield y, + int add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg = NULL); - int remove(RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg = NULL); + int remove(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg = NULL); 
friend class RGWUser; }; @@ -703,27 +711,27 @@ private: bool is_populated() { return info_stored; } int check_op(RGWUserAdminOpState& req, std::string *err_msg); - int update(RGWUserAdminOpState& op_state, std::string *err_msg, optional_yield y); + int update(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, optional_yield y); void clear_members(); void init_default(); /* API Contract Fulfillment */ - int execute_add(RGWUserAdminOpState& op_state, std::string *err_msg, + int execute_add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, optional_yield y); - int execute_remove(RGWUserAdminOpState& op_state, + int execute_remove(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, optional_yield y); - int execute_modify(RGWUserAdminOpState& op_state, std::string *err_msg, optional_yield y); - int execute_rename(RGWUserAdminOpState& op_state, std::string *err_msg, optional_yield y); + int execute_modify(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, optional_yield y); + int execute_rename(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, optional_yield y); public: RGWUser(); - int init(rgw::sal::RGWRadosStore *storage, RGWUserAdminOpState& op_state, + int init(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *storage, RGWUserAdminOpState& op_state, optional_yield y); int init_storage(rgw::sal::RGWRadosStore *storage); - int init(RGWUserAdminOpState& op_state, optional_yield y); + int init(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y); int init_members(RGWUserAdminOpState& op_state); rgw::sal::RGWRadosStore *get_store() { return store; } @@ -735,26 +743,26 @@ public: RGWSubUserPool subusers; /* API Contracted Methods */ - int add(RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg = NULL); + int add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg = NULL); - int remove(RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg = NULL); + int remove(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg = NULL); - int rename(RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg = NULL); + int rename(RGWUserAdminOpState& op_state, optional_yield y, const DoutPrefixProvider *dpp, std::string *err_msg = NULL); /* remove an already populated RGWUser */ int remove(std::string *err_msg = NULL); - int modify(RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg = NULL); + int modify(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg = NULL); /* retrieve info from an existing user in the RGW system */ - int info(RGWUserAdminOpState& op_state, RGWUserInfo& fetched_info, optional_yield y, + int info(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, RGWUserInfo& fetched_info, optional_yield y, std::string *err_msg = NULL); /* info from an already populated RGWUser */ int info (RGWUserInfo& fetched_info, std::string *err_msg = NULL); /* list the existing users */ - int list(RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher); + int list(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher); friend class RGWAccessKeyPool; friend class RGWSubUserPool; @@ -766,36 +774,41 @@ public: class RGWUserAdminOp_User { public: - static int 
list(rgw::sal::RGWRadosStore *store, + static int list(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher); - static int info(rgw::sal::RGWRadosStore *store, + static int info(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y); - static int create(rgw::sal::RGWRadosStore *store, + static int create(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y); - static int modify(rgw::sal::RGWRadosStore *store, + static int modify(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y); - static int remove(rgw::sal::RGWRadosStore *store, + static int remove(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y); }; class RGWUserAdminOp_Subuser { public: - static int create(rgw::sal::RGWRadosStore *store, + static int create(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y); - static int modify(rgw::sal::RGWRadosStore *store, + static int modify(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y); - static int remove(rgw::sal::RGWRadosStore *store, + static int remove(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y); }; @@ -803,11 +816,12 @@ public: class RGWUserAdminOp_Key { public: - static int create(rgw::sal::RGWRadosStore *store, + static int create(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y); - static int remove(rgw::sal::RGWRadosStore *store, + static int remove(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y); }; @@ -815,11 +829,13 @@ public: class RGWUserAdminOp_Caps { public: - static int add(rgw::sal::RGWRadosStore *store, + static int add(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y); - static int remove(rgw::sal::RGWRadosStore *store, + static int remove(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y); }; @@ -963,32 +979,42 @@ public: } }; - int get_info_by_uid(const rgw_user& uid, RGWUserInfo *info, + int get_info_by_uid(const DoutPrefixProvider *dpp, + const rgw_user& uid, RGWUserInfo *info, optional_yield y, const GetParams& params = {}); - int get_info_by_email(const string& email, RGWUserInfo *info, + int get_info_by_email(const DoutPrefixProvider *dpp, + const string& email, RGWUserInfo *info, optional_yield y, const GetParams& params = {}); - int get_info_by_swift(const string& swift_name, RGWUserInfo *info, + int get_info_by_swift(const DoutPrefixProvider *dpp, + const string& swift_name, RGWUserInfo *info, optional_yield y, const GetParams& params = {}); - int get_info_by_access_key(const string& access_key, RGWUserInfo *info, + int get_info_by_access_key(const DoutPrefixProvider *dpp, + const string& 
access_key, RGWUserInfo *info, optional_yield y, const GetParams& params = {}); - int get_attrs_by_uid(const rgw_user& user_id, + int get_attrs_by_uid(const DoutPrefixProvider *dpp, + const rgw_user& user_id, map *attrs, optional_yield y, RGWObjVersionTracker *objv_tracker = nullptr); - int store_info(const RGWUserInfo& info, optional_yield y, + int store_info(const DoutPrefixProvider *dpp, + const RGWUserInfo& info, optional_yield y, const PutParams& params = {}); - int remove_info(const RGWUserInfo& info, optional_yield y, + int remove_info(const DoutPrefixProvider *dpp, + const RGWUserInfo& info, optional_yield y, const RemoveParams& params = {}); - int add_bucket(const rgw_user& user, + int add_bucket(const DoutPrefixProvider *dpp, + const rgw_user& user, const rgw_bucket& bucket, ceph::real_time creation_time, optional_yield y); - int remove_bucket(const rgw_user& user, + int remove_bucket(const DoutPrefixProvider *dpp, + const rgw_user& user, const rgw_bucket& bucket, optional_yield y); - int list_buckets(const rgw_user& user, + int list_buckets(const DoutPrefixProvider *dpp, + const rgw_user& user, const string& marker, const string& end_marker, uint64_t max, @@ -998,16 +1024,18 @@ public: optional_yield y, uint64_t default_max = 1000); - int flush_bucket_stats(const rgw_user& user, + int flush_bucket_stats(const DoutPrefixProvider *dpp, + const rgw_user& user, const RGWBucketEnt& ent, optional_yield y); - int complete_flush_stats(const rgw_user& user, optional_yield y); - int reset_stats(const rgw_user& user, optional_yield y); - int read_stats(const rgw_user& user, RGWStorageStats *stats, + int complete_flush_stats(const DoutPrefixProvider *dpp, const rgw_user& user, optional_yield y); + int reset_stats(const DoutPrefixProvider *dpp, const rgw_user& user, optional_yield y); + int read_stats(const DoutPrefixProvider *dpp, + const rgw_user& user, RGWStorageStats *stats, optional_yield y, ceph::real_time *last_stats_sync = nullptr, /* last time a full stats sync completed */ ceph::real_time *last_stats_update = nullptr); /* last time a stats update was done */ - int read_stats_async(const rgw_user& user, RGWGetUserStats_CB *ctx); + int read_stats_async(const DoutPrefixProvider *dpp, const rgw_user& user, RGWGetUserStats_CB *ctx); }; class RGWUserMetaHandlerAllocator { diff --git a/src/rgw/rgw_worker.h b/src/rgw/rgw_worker.h index 5df99dbecfb46..f878ff8a6d18d 100644 --- a/src/rgw/rgw_worker.h +++ b/src/rgw/rgw_worker.h @@ -24,10 +24,12 @@ #include "common/ceph_mutex.h" #include "include/common_fwd.h" +#define dout_subsys ceph_subsys_rgw + class RGWRados; class RGWRadosThread { - class Worker : public Thread { + class Worker : public Thread, public DoutPrefixProvider { CephContext *cct; RGWRadosThread *processor; ceph::mutex lock = ceph::make_mutex("RGWRadosThread::Worker"); @@ -50,6 +52,11 @@ class RGWRadosThread { std::lock_guard l{lock}; cond.notify_all(); } + + CephContext *get_cct() const { return cct; } + unsigned get_subsys() const { return dout_subsys; } + std::ostream& gen_prefix(std::ostream& out) const { return out << "rgw rados thread: "; } + }; Worker *worker; @@ -71,8 +78,8 @@ public: stop(); } - virtual int init() { return 0; } - virtual int process() = 0; + virtual int init(const DoutPrefixProvider *dpp) { return 0; } + virtual int process(const DoutPrefixProvider *dpp) = 0; bool going_down() { return down_flag; } diff --git a/src/rgw/rgw_zone.cc b/src/rgw/rgw_zone.cc index 51e2fd3e0e0b8..51bf3b3f8fc2a 100644 --- a/src/rgw/rgw_zone.cc +++ b/src/rgw/rgw_zone.cc @@ -67,7 
+67,7 @@ rgw_pool RGWZoneGroup::get_pool(CephContext *cct_) const return rgw_pool(cct_->_conf->rgw_zonegroup_root_pool); } -int RGWZoneGroup::create_default(optional_yield y, bool old_format) +int RGWZoneGroup::create_default(const DoutPrefixProvider *dpp, optional_yield y, bool old_format) { name = default_zonegroup_name; api_name = default_zonegroup_name; @@ -80,25 +80,25 @@ int RGWZoneGroup::create_default(optional_yield y, bool old_format) RGWZoneParams zone_params(default_zone_name); - int r = zone_params.init(cct, sysobj_svc, y, false); + int r = zone_params.init(dpp, cct, sysobj_svc, y, false); if (r < 0) { - ldout(cct, 0) << "create_default: error initializing zone params: " << cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "create_default: error initializing zone params: " << cpp_strerror(-r) << dendl; return r; } - r = zone_params.create_default(y); + r = zone_params.create_default(dpp, y); if (r < 0 && r != -EEXIST) { - ldout(cct, 0) << "create_default: error in create_default zone params: " << cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "create_default: error in create_default zone params: " << cpp_strerror(-r) << dendl; return r; } else if (r == -EEXIST) { - ldout(cct, 10) << "zone_params::create_default() returned -EEXIST, we raced with another default zone_params creation" << dendl; + ldpp_dout(dpp, 10) << "zone_params::create_default() returned -EEXIST, we raced with another default zone_params creation" << dendl; zone_params.clear_id(); - r = zone_params.init(cct, sysobj_svc, y); + r = zone_params.init(dpp, cct, sysobj_svc, y); if (r < 0) { - ldout(cct, 0) << "create_default: error in init existing zone params: " << cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "create_default: error in init existing zone params: " << cpp_strerror(-r) << dendl; return r; } - ldout(cct, 20) << "zone_params::create_default() " << zone_params.get_name() << " id " << zone_params.get_id() + ldpp_dout(dpp, 20) << "zone_params::create_default() " << zone_params.get_name() << " id " << zone_params.get_id() << dendl; } @@ -107,16 +107,16 @@ int RGWZoneGroup::create_default(optional_yield y, bool old_format) default_zone.id = zone_params.get_id(); master_zone = default_zone.id; - r = create(y); + r = create(dpp, y); if (r < 0 && r != -EEXIST) { - ldout(cct, 0) << "error storing zone group info: " << cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "error storing zone group info: " << cpp_strerror(-r) << dendl; return r; } if (r == -EEXIST) { - ldout(cct, 10) << "create_default() returned -EEXIST, we raced with another zonegroup creation" << dendl; + ldpp_dout(dpp, 10) << "create_default() returned -EEXIST, we raced with another zonegroup creation" << dendl; id.clear(); - r = init(cct, sysobj_svc, y); + r = init(dpp, cct, sysobj_svc, y); if (r < 0) { return r; } @@ -126,7 +126,7 @@ int RGWZoneGroup::create_default(optional_yield y, bool old_format) name = id; } - post_process_params(y); + post_process_params(dpp, y); return 0; } @@ -176,7 +176,8 @@ int RGWZoneGroup::equals(const string& other_zonegroup) const return (id == other_zonegroup); } -int RGWZoneGroup::add_zone(const RGWZoneParams& zone_params, bool *is_master, bool *read_only, +int RGWZoneGroup::add_zone(const DoutPrefixProvider *dpp, + const RGWZoneParams& zone_params, bool *is_master, bool *read_only, const list& endpoints, const string *ptier_type, bool *psync_from_all, list& sync_from, list& sync_from_rm, string *predirect_zone, std::optional bucket_index_max_shards, @@ -190,7 +191,7 @@ int RGWZoneGroup::add_zone(const 
RGWZoneParams& zone_params, bool *is_master, bo if (!zones.count(zone_id)) { for (const auto& zone : zones) { if (zone.second.name == zone_name) { - ldout(cct, 0) << "ERROR: found existing zone name " << zone_name + ldpp_dout(dpp, 0) << "ERROR: found existing zone name " << zone_name << " (" << zone.first << ") in zonegroup " << get_name() << dendl; return -EEXIST; } @@ -200,7 +201,7 @@ int RGWZoneGroup::add_zone(const RGWZoneParams& zone_params, bool *is_master, bo if (is_master) { if (*is_master) { if (!master_zone.empty() && master_zone != zone_id) { - ldout(cct, 0) << "NOTICE: overriding master zone: " << master_zone << dendl; + ldpp_dout(dpp, 0) << "NOTICE: overriding master zone: " << master_zone << dendl; } master_zone = zone_id; } else if (master_zone == zone_id) { @@ -220,7 +221,7 @@ int RGWZoneGroup::add_zone(const RGWZoneParams& zone_params, bool *is_master, bo if (ptier_type) { zone.tier_type = *ptier_type; if (!sync_mgr->get_module(*ptier_type, nullptr)) { - ldout(cct, 0) << "ERROR: could not found sync module: " << *ptier_type + ldpp_dout(dpp, 0) << "ERROR: could not found sync module: " << *ptier_type << ", valid sync modules: " << sync_mgr->get_registered_module_names() << dendl; @@ -248,22 +249,23 @@ int RGWZoneGroup::add_zone(const RGWZoneParams& zone_params, bool *is_master, bo zone.sync_from.erase(rm); } - post_process_params(y); + post_process_params(dpp, y); - return update(y); + return update(dpp,y); } -int RGWZoneGroup::rename_zone(const RGWZoneParams& zone_params, +int RGWZoneGroup::rename_zone(const DoutPrefixProvider *dpp, + const RGWZoneParams& zone_params, optional_yield y) { RGWZone& zone = zones[zone_params.get_id()]; zone.name = zone_params.get_name(); - return update(y); + return update(dpp, y); } -void RGWZoneGroup::post_process_params(optional_yield y) +void RGWZoneGroup::post_process_params(const DoutPrefixProvider *dpp, optional_yield y) { bool log_data = zones.size() > 1; @@ -279,9 +281,9 @@ void RGWZoneGroup::post_process_params(optional_yield y) zone.log_data = log_data; RGWZoneParams zone_params(zone.id, zone.name); - int ret = zone_params.init(cct, sysobj_svc, y); + int ret = zone_params.init(dpp, cct, sysobj_svc, y); if (ret < 0) { - ldout(cct, 0) << "WARNING: could not read zone params for zone id=" << zone.id << " name=" << zone.name << dendl; + ldpp_dout(dpp, 0) << "WARNING: could not read zone params for zone id=" << zone.id << " name=" << zone.name << dendl; continue; } @@ -300,53 +302,53 @@ void RGWZoneGroup::post_process_params(optional_yield y) } } -int RGWZoneGroup::remove_zone(const std::string& zone_id, optional_yield y) +int RGWZoneGroup::remove_zone(const DoutPrefixProvider *dpp, const std::string& zone_id, optional_yield y) { auto iter = zones.find(zone_id); if (iter == zones.end()) { - ldout(cct, 0) << "zone id " << zone_id << " is not a part of zonegroup " + ldpp_dout(dpp, 0) << "zone id " << zone_id << " is not a part of zonegroup " << name << dendl; return -ENOENT; } zones.erase(iter); - post_process_params(y); + post_process_params(dpp, y); - return update(y); + return update(dpp, y); } -int RGWZoneGroup::read_default_id(string& default_id, optional_yield y, +int RGWZoneGroup::read_default_id(const DoutPrefixProvider *dpp, string& default_id, optional_yield y, bool old_format) { if (realm_id.empty()) { /* try using default realm */ RGWRealm realm; - int ret = realm.init(cct, sysobj_svc, y); + int ret = realm.init(dpp, cct, sysobj_svc, y); // no default realm exist if (ret < 0) { - return read_id(default_zonegroup_name, 
default_id, y); + return read_id(dpp, default_zonegroup_name, default_id, y); } realm_id = realm.get_id(); } - return RGWSystemMetaObj::read_default_id(default_id, y, old_format); + return RGWSystemMetaObj::read_default_id(dpp, default_id, y, old_format); } -int RGWZoneGroup::set_as_default(optional_yield y, bool exclusive) +int RGWZoneGroup::set_as_default(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive) { if (realm_id.empty()) { /* try using default realm */ RGWRealm realm; - int ret = realm.init(cct, sysobj_svc, y); + int ret = realm.init(dpp, cct, sysobj_svc, y); if (ret < 0) { - ldout(cct, 10) << "could not read realm id: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 10) << "could not read realm id: " << cpp_strerror(-ret) << dendl; return -EINVAL; } realm_id = realm.get_id(); } - return RGWSystemMetaObj::set_as_default(y, exclusive); + return RGWSystemMetaObj::set_as_default(dpp, y, exclusive); } void RGWSystemMetaObj::reinit_instance(CephContext *_cct, RGWSI_SysObj *_sysobj_svc) @@ -356,7 +358,7 @@ void RGWSystemMetaObj::reinit_instance(CephContext *_cct, RGWSI_SysObj *_sysobj_ zone_svc = _sysobj_svc->get_zone_svc(); } -int RGWSystemMetaObj::init(CephContext *_cct, RGWSI_SysObj *_sysobj_svc, +int RGWSystemMetaObj::init(const DoutPrefixProvider *dpp, CephContext *_cct, RGWSI_SysObj *_sysobj_svc, optional_yield y, bool setup_obj, bool old_format) { @@ -375,25 +377,26 @@ int RGWSystemMetaObj::init(CephContext *_cct, RGWSI_SysObj *_sysobj_svc, name = get_predefined_name(cct); } if (name.empty()) { - r = use_default(y, old_format); + r = use_default(dpp, y, old_format); if (r < 0) { return r; } } else if (!old_format) { - r = read_id(name, id, y); + r = read_id(dpp, name, id, y); if (r < 0) { if (r != -ENOENT) { - ldout(cct, 0) << "error in read_id for object name: " << name << " : " << cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "error in read_id for object name: " << name << " : " << cpp_strerror(-r) << dendl; } return r; } } } - return read_info(id, y, old_format); + return read_info(dpp, id, y, old_format); } -int RGWSystemMetaObj::read_default(RGWDefaultSystemMetaObjInfo& default_info, +int RGWSystemMetaObj::read_default(const DoutPrefixProvider *dpp, + RGWDefaultSystemMetaObjInfo& default_info, const string& oid, optional_yield y) { using ceph::decode; @@ -402,7 +405,7 @@ int RGWSystemMetaObj::read_default(RGWDefaultSystemMetaObjInfo& default_info, auto obj_ctx = sysobj_svc->init_obj_ctx(); auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj(pool, oid)); - int ret = sysobj.rop().read(&bl, y); + int ret = sysobj.rop().read(dpp, &bl, y); if (ret < 0) return ret; @@ -410,19 +413,19 @@ int RGWSystemMetaObj::read_default(RGWDefaultSystemMetaObjInfo& default_info, auto iter = bl.cbegin(); decode(default_info, iter); } catch (buffer::error& err) { - ldout(cct, 0) << "error decoding data from " << pool << ":" << oid << dendl; + ldpp_dout(dpp, 0) << "error decoding data from " << pool << ":" << oid << dendl; return -EIO; } return 0; } -int RGWSystemMetaObj::read_default_id(string& default_id, optional_yield y, +int RGWSystemMetaObj::read_default_id(const DoutPrefixProvider *dpp, string& default_id, optional_yield y, bool old_format) { RGWDefaultSystemMetaObjInfo default_info; - int ret = read_default(default_info, get_default_oid(old_format), y); + int ret = read_default(dpp, default_info, get_default_oid(old_format), y); if (ret < 0) { return ret; } @@ -432,12 +435,12 @@ int RGWSystemMetaObj::read_default_id(string& default_id, optional_yield y, return 0; } 
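// --- Reviewer aside (illustrative only, not part of the patch): the hunks in this
// file all follow the same mechanical conversion -- each method grows a leading
// `const DoutPrefixProvider *dpp` argument, `ldout(cct, N)` becomes
// `ldpp_dout(dpp, N)`, and `dpp` is forwarded into the sysobj rop()/wop() calls.
// A minimal sketch of a provider that callers of the converted
// RGWSystemMetaObj/RGWZoneGroup methods could pass is below. The class name and
// prefix string are hypothetical; only the three virtuals come from
// common/dout.h (the same trio RGWRadosThread::Worker implements earlier in
// this patch), and the sketch assumes it is built inside the Ceph tree.

#include "common/dout.h"

class ExampleZoneAdminPrefix : public DoutPrefixProvider {
  CephContext* cct;
public:
  explicit ExampleZoneAdminPrefix(CephContext* c) : cct(c) {}
  CephContext* get_cct() const override { return cct; }
  unsigned get_subsys() const override { return ceph_subsys_rgw; }
  std::ostream& gen_prefix(std::ostream& out) const override {
    // every ldpp_dout(dpp, ...) line routed through this provider gets this prefix
    return out << "zone admin: ";
  }
};

// Usage sketch, matching the new signatures introduced in this file:
//   ExampleZoneAdminPrefix dpp(cct);
//   RGWZoneGroup zg;
//   int r = zg.init(&dpp, cct, sysobj_svc, y);
//   if (r < 0)
//     ldpp_dout(&dpp, 0) << "zonegroup init failed: " << cpp_strerror(-r) << dendl;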
-int RGWSystemMetaObj::use_default(optional_yield y, bool old_format) +int RGWSystemMetaObj::use_default(const DoutPrefixProvider *dpp, optional_yield y, bool old_format) { - return read_default_id(id, y, old_format); + return read_default_id(dpp, id, y, old_format); } -int RGWSystemMetaObj::set_as_default(optional_yield y, bool exclusive) +int RGWSystemMetaObj::set_as_default(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive) { using ceph::encode; string oid = get_default_oid(); @@ -454,14 +457,14 @@ int RGWSystemMetaObj::set_as_default(optional_yield y, bool exclusive) auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj(pool, oid)); int ret = sysobj.wop() .set_exclusive(exclusive) - .write(bl, y); + .write(dpp, bl, y); if (ret < 0) return ret; return 0; } -int RGWSystemMetaObj::read_id(const string& obj_name, string& object_id, +int RGWSystemMetaObj::read_id(const DoutPrefixProvider *dpp, const string& obj_name, string& object_id, optional_yield y) { using ceph::decode; @@ -472,7 +475,7 @@ int RGWSystemMetaObj::read_id(const string& obj_name, string& object_id, auto obj_ctx = sysobj_svc->init_obj_ctx(); auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj(pool, oid)); - int ret = sysobj.rop().read(&bl, y); + int ret = sysobj.rop().read(dpp, &bl, y); if (ret < 0) { return ret; } @@ -482,14 +485,14 @@ int RGWSystemMetaObj::read_id(const string& obj_name, string& object_id, auto iter = bl.cbegin(); decode(nameToId, iter); } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: failed to decode obj from " << pool << ":" << oid << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to decode obj from " << pool << ":" << oid << dendl; return -EIO; } object_id = nameToId.obj_id; return 0; } -int RGWSystemMetaObj::delete_obj(optional_yield y, bool old_format) +int RGWSystemMetaObj::delete_obj(const DoutPrefixProvider *dpp, optional_yield y, bool old_format) { rgw_pool pool(get_pool(cct)); @@ -497,16 +500,16 @@ int RGWSystemMetaObj::delete_obj(optional_yield y, bool old_format) /* check to see if obj is the default */ RGWDefaultSystemMetaObjInfo default_info; - int ret = read_default(default_info, get_default_oid(old_format), y); + int ret = read_default(dpp, default_info, get_default_oid(old_format), y); if (ret < 0 && ret != -ENOENT) return ret; if (default_info.default_id == id || (old_format && default_info.default_id == name)) { string oid = get_default_oid(old_format); rgw_raw_obj default_named_obj(pool, oid); auto sysobj = sysobj_svc->get_obj(obj_ctx, default_named_obj); - ret = sysobj.wop().remove(y); + ret = sysobj.wop().remove(dpp, y); if (ret < 0) { - ldout(cct, 0) << "Error delete default obj name " << name << ": " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "Error delete default obj name " << name << ": " << cpp_strerror(-ret) << dendl; return ret; } } @@ -514,9 +517,9 @@ int RGWSystemMetaObj::delete_obj(optional_yield y, bool old_format) string oid = get_names_oid_prefix() + name; rgw_raw_obj object_name(pool, oid); auto sysobj = sysobj_svc->get_obj(obj_ctx, object_name); - ret = sysobj.wop().remove(y); + ret = sysobj.wop().remove(dpp, y); if (ret < 0) { - ldout(cct, 0) << "Error delete obj name " << name << ": " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "Error delete obj name " << name << ": " << cpp_strerror(-ret) << dendl; return ret; } } @@ -530,15 +533,15 @@ int RGWSystemMetaObj::delete_obj(optional_yield y, bool old_format) rgw_raw_obj object_id(pool, oid); auto sysobj = sysobj_svc->get_obj(obj_ctx, object_id); - ret = 
sysobj.wop().remove(y); + ret = sysobj.wop().remove(dpp, y); if (ret < 0) { - ldout(cct, 0) << "Error delete object id " << id << ": " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "Error delete object id " << id << ": " << cpp_strerror(-ret) << dendl; } return ret; } -int RGWSystemMetaObj::store_name(bool exclusive, optional_yield y) +int RGWSystemMetaObj::store_name(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y) { rgw_pool pool(get_pool(cct)); string oid = get_names_oid_prefix() + name; @@ -553,30 +556,30 @@ int RGWSystemMetaObj::store_name(bool exclusive, optional_yield y) auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj(pool, oid)); return sysobj.wop() .set_exclusive(exclusive) - .write(bl, y); + .write(dpp, bl, y); } -int RGWSystemMetaObj::rename(const string& new_name, optional_yield y) +int RGWSystemMetaObj::rename(const DoutPrefixProvider *dpp, const string& new_name, optional_yield y) { string new_id; - int ret = read_id(new_name, new_id, y); + int ret = read_id(dpp, new_name, new_id, y); if (!ret) { return -EEXIST; } if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << "Error read_id " << new_name << ": " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "Error read_id " << new_name << ": " << cpp_strerror(-ret) << dendl; return ret; } string old_name = name; name = new_name; - ret = update(y); + ret = update(dpp, y); if (ret < 0) { - ldout(cct, 0) << "Error storing new obj info " << new_name << ": " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "Error storing new obj info " << new_name << ": " << cpp_strerror(-ret) << dendl; return ret; } - ret = store_name(true, y); + ret = store_name(dpp, true, y); if (ret < 0) { - ldout(cct, 0) << "Error storing new name " << new_name << ": " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "Error storing new name " << new_name << ": " << cpp_strerror(-ret) << dendl; return ret; } /* delete old name */ @@ -585,16 +588,16 @@ int RGWSystemMetaObj::rename(const string& new_name, optional_yield y) rgw_raw_obj old_name_obj(pool, oid); auto obj_ctx = sysobj_svc->init_obj_ctx(); auto sysobj = sysobj_svc->get_obj(obj_ctx, old_name_obj); - ret = sysobj.wop().remove(y); + ret = sysobj.wop().remove(dpp, y); if (ret < 0) { - ldout(cct, 0) << "Error delete old obj name " << old_name << ": " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "Error delete old obj name " << old_name << ": " << cpp_strerror(-ret) << dendl; return ret; } return ret; } -int RGWSystemMetaObj::read_info(const string& obj_id, optional_yield y, +int RGWSystemMetaObj::read_info(const DoutPrefixProvider *dpp, const string& obj_id, optional_yield y, bool old_format) { rgw_pool pool(get_pool(cct)); @@ -605,9 +608,9 @@ int RGWSystemMetaObj::read_info(const string& obj_id, optional_yield y, auto obj_ctx = sysobj_svc->init_obj_ctx(); auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj{pool, oid}); - int ret = sysobj.rop().read(&bl, y); + int ret = sysobj.rop().read(dpp, &bl, y); if (ret < 0) { - ldout(cct, 0) << "failed reading obj info from " << pool << ":" << oid << ": " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "failed reading obj info from " << pool << ":" << oid << ": " << cpp_strerror(-ret) << dendl; return ret; } using ceph::decode; @@ -616,34 +619,34 @@ int RGWSystemMetaObj::read_info(const string& obj_id, optional_yield y, auto iter = bl.cbegin(); decode(*this, iter); } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: failed to decode obj from " << pool << ":" << oid << dendl; + ldpp_dout(dpp, 0) << 
"ERROR: failed to decode obj from " << pool << ":" << oid << dendl; return -EIO; } return 0; } -int RGWSystemMetaObj::read(optional_yield y) +int RGWSystemMetaObj::read(const DoutPrefixProvider *dpp, optional_yield y) { - int ret = read_id(name, id, y); + int ret = read_id(dpp, name, id, y); if (ret < 0) { return ret; } - return read_info(id, y); + return read_info(dpp, id, y); } -int RGWSystemMetaObj::create(optional_yield y, bool exclusive) +int RGWSystemMetaObj::create(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive) { int ret; /* check to see the name is not used */ - ret = read_id(name, id, y); + ret = read_id(dpp, name, id, y); if (exclusive && ret == 0) { - ldout(cct, 10) << "ERROR: name " << name << " already in use for obj id " << id << dendl; + ldpp_dout(dpp, 10) << "ERROR: name " << name << " already in use for obj id " << id << dendl; return -EEXIST; } else if ( ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << "failed reading obj id " << id << ": " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "failed reading obj id " << id << ": " << cpp_strerror(-ret) << dendl; return ret; } @@ -656,16 +659,16 @@ int RGWSystemMetaObj::create(optional_yield y, bool exclusive) id = uuid_str; } - ret = store_info(exclusive, y); + ret = store_info(dpp, exclusive, y); if (ret < 0) { - ldout(cct, 0) << "ERROR: storing info for " << id << ": " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "ERROR: storing info for " << id << ": " << cpp_strerror(-ret) << dendl; return ret; } - return store_name(exclusive, y); + return store_name(dpp, exclusive, y); } -int RGWSystemMetaObj::store_info(bool exclusive, optional_yield y) +int RGWSystemMetaObj::store_info(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y) { rgw_pool pool(get_pool(cct)); @@ -678,19 +681,19 @@ int RGWSystemMetaObj::store_info(bool exclusive, optional_yield y) auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj{pool, oid}); return sysobj.wop() .set_exclusive(exclusive) - .write(bl, y); + .write(dpp, bl, y); } -int RGWSystemMetaObj::write(bool exclusive, optional_yield y) +int RGWSystemMetaObj::write(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y) { - int ret = store_info(exclusive, y); + int ret = store_info(dpp, exclusive, y); if (ret < 0) { - ldout(cct, 20) << __func__ << "(): store_info() returned ret=" << ret << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): store_info() returned ret=" << ret << dendl; return ret; } - ret = store_name(exclusive, y); + ret = store_name(dpp, exclusive, y); if (ret < 0) { - ldout(cct, 20) << __func__ << "(): store_name() returned ret=" << ret << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): store_name() returned ret=" << ret << dendl; return ret; } return 0; @@ -701,64 +704,64 @@ const string& RGWRealm::get_predefined_name(CephContext *cct) const { return cct->_conf->rgw_realm; } -int RGWRealm::create(optional_yield y, bool exclusive) +int RGWRealm::create(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive) { - int ret = RGWSystemMetaObj::create(y, exclusive); + int ret = RGWSystemMetaObj::create(dpp, y, exclusive); if (ret < 0) { - ldout(cct, 0) << "ERROR creating new realm object " << name << ": " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "ERROR creating new realm object " << name << ": " << cpp_strerror(-ret) << dendl; return ret; } // create the control object for watch/notify - ret = create_control(exclusive, y); + ret = create_control(dpp, exclusive, y); if (ret < 0) { - ldout(cct, 0) << "ERROR creating 
control for new realm " << name << ": " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "ERROR creating control for new realm " << name << ": " << cpp_strerror(-ret) << dendl; return ret; } RGWPeriod period; if (current_period.empty()) { /* create new period for the realm */ - ret = period.init(cct, sysobj_svc, id, y, name, false); + ret = period.init(dpp, cct, sysobj_svc, id, y, name, false); if (ret < 0 ) { return ret; } - ret = period.create(y, true); + ret = period.create(dpp, y, true); if (ret < 0) { - ldout(cct, 0) << "ERROR: creating new period for realm " << name << ": " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "ERROR: creating new period for realm " << name << ": " << cpp_strerror(-ret) << dendl; return ret; } } else { period = RGWPeriod(current_period, 0); - int ret = period.init(cct, sysobj_svc, id, y, name); + int ret = period.init(dpp, cct, sysobj_svc, id, y, name); if (ret < 0) { - ldout(cct, 0) << "ERROR: failed to init period " << current_period << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to init period " << current_period << dendl; return ret; } } - ret = set_current_period(period, y); + ret = set_current_period(dpp, period, y); if (ret < 0) { - ldout(cct, 0) << "ERROR: failed set current period " << current_period << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed set current period " << current_period << dendl; return ret; } // try to set as default. may race with another create, so pass exclusive=true // so we don't override an existing default - ret = set_as_default(y, true); + ret = set_as_default(dpp, y, true); if (ret < 0 && ret != -EEXIST) { - ldout(cct, 0) << "WARNING: failed to set realm as default realm, ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "WARNING: failed to set realm as default realm, ret=" << ret << dendl; } return 0; } -int RGWRealm::delete_obj(optional_yield y) +int RGWRealm::delete_obj(const DoutPrefixProvider *dpp, optional_yield y) { - int ret = RGWSystemMetaObj::delete_obj(y); + int ret = RGWSystemMetaObj::delete_obj(dpp, y); if (ret < 0) { return ret; } - return delete_control(y); + return delete_control(dpp, y); } -int RGWRealm::create_control(bool exclusive, optional_yield y) +int RGWRealm::create_control(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y) { auto pool = rgw_pool{get_pool(cct)}; auto oid = get_control_oid(); @@ -767,16 +770,16 @@ int RGWRealm::create_control(bool exclusive, optional_yield y) auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj{pool, oid}); return sysobj.wop() .set_exclusive(exclusive) - .write(bl, y); + .write(dpp, bl, y); } -int RGWRealm::delete_control(optional_yield y) +int RGWRealm::delete_control(const DoutPrefixProvider *dpp, optional_yield y) { auto pool = rgw_pool{get_pool(cct)}; auto obj = rgw_raw_obj{pool, get_control_oid()}; auto obj_ctx = sysobj_svc->init_obj_ctx(); auto sysobj = sysobj_svc->get_obj(obj_ctx, obj); - return sysobj.wop().remove(y); + return sysobj.wop().remove(dpp, y); } rgw_pool RGWRealm::get_pool(CephContext *cct) const @@ -805,16 +808,16 @@ const string& RGWRealm::get_info_oid_prefix(bool old_format) const return realm_info_oid_prefix; } -int RGWRealm::set_current_period(RGWPeriod& period, optional_yield y) +int RGWRealm::set_current_period(const DoutPrefixProvider *dpp, RGWPeriod& period, optional_yield y) { // update realm epoch to match the period's if (epoch > period.get_realm_epoch()) { - ldout(cct, 0) << "ERROR: set_current_period with old realm epoch " + ldpp_dout(dpp, 0) << "ERROR: set_current_period with old realm epoch " << 
period.get_realm_epoch() << ", current epoch=" << epoch << dendl; return -EINVAL; } if (epoch == period.get_realm_epoch() && current_period != period.get_id()) { - ldout(cct, 0) << "ERROR: set_current_period with same realm epoch " + ldpp_dout(dpp, 0) << "ERROR: set_current_period with same realm epoch " << period.get_realm_epoch() << ", but different period id " << period.get_id() << " != " << current_period << dendl; return -EINVAL; @@ -823,15 +826,15 @@ int RGWRealm::set_current_period(RGWPeriod& period, optional_yield y) epoch = period.get_realm_epoch(); current_period = period.get_id(); - int ret = update(y); + int ret = update(dpp, y); if (ret < 0) { - ldout(cct, 0) << "ERROR: period update: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "ERROR: period update: " << cpp_strerror(-ret) << dendl; return ret; } - ret = period.reflect(y); + ret = period.reflect(dpp, y); if (ret < 0) { - ldout(cct, 0) << "ERROR: period.reflect(): " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "ERROR: period.reflect(): " << cpp_strerror(-ret) << dendl; return ret; } @@ -843,19 +846,19 @@ string RGWRealm::get_control_oid() const return get_info_oid_prefix() + id + ".control"; } -int RGWRealm::notify_zone(bufferlist& bl, optional_yield y) +int RGWRealm::notify_zone(const DoutPrefixProvider *dpp, bufferlist& bl, optional_yield y) { rgw_pool pool{get_pool(cct)}; auto obj_ctx = sysobj_svc->init_obj_ctx(); auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj{pool, get_control_oid()}); - int ret = sysobj.wn().notify(bl, 0, nullptr, y); + int ret = sysobj.wn().notify(dpp, bl, 0, nullptr, y); if (ret < 0) { return ret; } return 0; } -int RGWRealm::notify_new_period(const RGWPeriod& period, optional_yield y) +int RGWRealm::notify_new_period(const DoutPrefixProvider *dpp, const RGWPeriod& period, optional_yield y) { bufferlist bl; using ceph::encode; @@ -865,7 +868,7 @@ int RGWRealm::notify_new_period(const RGWPeriod& period, optional_yield y) // reload the gateway with the new period encode(RGWRealmNotify::Reload, bl); - return notify_zone(bl, y); + return notify_zone(dpp, bl, y); } std::string RGWPeriodConfig::get_oid(const std::string& realm_id) @@ -885,7 +888,7 @@ rgw_pool RGWPeriodConfig::get_pool(CephContext *cct) return {pool_name}; } -int RGWPeriodConfig::read(RGWSI_SysObj *sysobj_svc, const std::string& realm_id, +int RGWPeriodConfig::read(const DoutPrefixProvider *dpp, RGWSI_SysObj *sysobj_svc, const std::string& realm_id, optional_yield y) { const auto& pool = get_pool(sysobj_svc->ctx()); @@ -894,7 +897,7 @@ int RGWPeriodConfig::read(RGWSI_SysObj *sysobj_svc, const std::string& realm_id, auto obj_ctx = sysobj_svc->init_obj_ctx(); auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj{pool, oid}); - int ret = sysobj.rop().read(&bl, y); + int ret = sysobj.rop().read(dpp, &bl, y); if (ret < 0) { return ret; } @@ -908,7 +911,8 @@ int RGWPeriodConfig::read(RGWSI_SysObj *sysobj_svc, const std::string& realm_id, return 0; } -int RGWPeriodConfig::write(RGWSI_SysObj *sysobj_svc, +int RGWPeriodConfig::write(const DoutPrefixProvider *dpp, + RGWSI_SysObj *sysobj_svc, const std::string& realm_id, optional_yield y) { const auto& pool = get_pool(sysobj_svc->ctx()); @@ -920,10 +924,10 @@ int RGWPeriodConfig::write(RGWSI_SysObj *sysobj_svc, auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj{pool, oid}); return sysobj.wop() .set_exclusive(false) - .write(bl, y); + .write(dpp, bl, y); } -int RGWPeriod::init(CephContext *_cct, RGWSI_SysObj *_sysobj_svc, +int RGWPeriod::init(const DoutPrefixProvider 
*dpp, CephContext *_cct, RGWSI_SysObj *_sysobj_svc, const string& period_realm_id, optional_yield y, const string& period_realm_name, bool setup_obj) { @@ -936,11 +940,12 @@ int RGWPeriod::init(CephContext *_cct, RGWSI_SysObj *_sysobj_svc, if (!setup_obj) return 0; - return init(_cct, _sysobj_svc, y, setup_obj); + return init(dpp, _cct, _sysobj_svc, y, setup_obj); } -int RGWPeriod::init(CephContext *_cct, RGWSI_SysObj *_sysobj_svc, +int RGWPeriod::init(const DoutPrefixProvider *dpp, + CephContext *_cct, RGWSI_SysObj *_sysobj_svc, optional_yield y, bool setup_obj) { cct = _cct; @@ -951,9 +956,9 @@ int RGWPeriod::init(CephContext *_cct, RGWSI_SysObj *_sysobj_svc, if (id.empty()) { RGWRealm realm(realm_id, realm_name); - int ret = realm.init(cct, sysobj_svc, y); + int ret = realm.init(dpp, cct, sysobj_svc, y); if (ret < 0) { - ldout(cct, 0) << "RGWPeriod::init failed to init realm " << realm_name << " id " << realm_id << " : " << + ldpp_dout(dpp, 0) << "RGWPeriod::init failed to init realm " << realm_name << " id " << realm_id << " : " << cpp_strerror(-ret) << dendl; return ret; } @@ -962,15 +967,15 @@ int RGWPeriod::init(CephContext *_cct, RGWSI_SysObj *_sysobj_svc, } if (!epoch) { - int ret = use_latest_epoch(y); + int ret = use_latest_epoch(dpp, y); if (ret < 0) { - ldout(cct, 0) << "failed to use_latest_epoch period id " << id << " realm " << realm_name << " id " << realm_id + ldpp_dout(dpp, 0) << "failed to use_latest_epoch period id " << id << " realm " << realm_name << " id " << realm_id << " : " << cpp_strerror(-ret) << dendl; return ret; } } - return read_info(y); + return read_info(dpp, y); } @@ -1019,7 +1024,8 @@ const string RGWPeriod::get_period_oid() const return oss.str(); } -int RGWPeriod::read_latest_epoch(RGWPeriodLatestEpochInfo& info, +int RGWPeriod::read_latest_epoch(const DoutPrefixProvider *dpp, + RGWPeriodLatestEpochInfo& info, optional_yield y, RGWObjVersionTracker *objv) { @@ -1029,9 +1035,9 @@ int RGWPeriod::read_latest_epoch(RGWPeriodLatestEpochInfo& info, bufferlist bl; auto obj_ctx = sysobj_svc->init_obj_ctx(); auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj{pool, oid}); - int ret = sysobj.rop().read(&bl, y); + int ret = sysobj.rop().read(dpp, &bl, y); if (ret < 0) { - ldout(cct, 1) << "error read_lastest_epoch " << pool << ":" << oid << dendl; + ldpp_dout(dpp, 1) << "error read_lastest_epoch " << pool << ":" << oid << dendl; return ret; } try { @@ -1039,18 +1045,18 @@ int RGWPeriod::read_latest_epoch(RGWPeriodLatestEpochInfo& info, using ceph::decode; decode(info, iter); } catch (buffer::error& err) { - ldout(cct, 0) << "error decoding data from " << pool << ":" << oid << dendl; + ldpp_dout(dpp, 0) << "error decoding data from " << pool << ":" << oid << dendl; return -EIO; } return 0; } -int RGWPeriod::get_latest_epoch(epoch_t& latest_epoch, optional_yield y) +int RGWPeriod::get_latest_epoch(const DoutPrefixProvider *dpp, epoch_t& latest_epoch, optional_yield y) { RGWPeriodLatestEpochInfo info; - int ret = read_latest_epoch(info, y); + int ret = read_latest_epoch(dpp, info, y); if (ret < 0) { return ret; } @@ -1060,10 +1066,10 @@ int RGWPeriod::get_latest_epoch(epoch_t& latest_epoch, optional_yield y) return 0; } -int RGWPeriod::use_latest_epoch(optional_yield y) +int RGWPeriod::use_latest_epoch(const DoutPrefixProvider *dpp, optional_yield y) { RGWPeriodLatestEpochInfo info; - int ret = read_latest_epoch(info, y); + int ret = read_latest_epoch(dpp, info, y); if (ret < 0) { return ret; } @@ -1073,7 +1079,8 @@ int RGWPeriod::use_latest_epoch(optional_yield 
y) return 0; } -int RGWPeriod::set_latest_epoch(optional_yield y, +int RGWPeriod::set_latest_epoch(const DoutPrefixProvider *dpp, + optional_yield y, epoch_t epoch, bool exclusive, RGWObjVersionTracker *objv) { @@ -1092,10 +1099,10 @@ int RGWPeriod::set_latest_epoch(optional_yield y, auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj(pool, oid)); return sysobj.wop() .set_exclusive(exclusive) - .write(bl, y); + .write(dpp, bl, y); } -int RGWPeriod::update_latest_epoch(epoch_t epoch, optional_yield y) +int RGWPeriod::update_latest_epoch(const DoutPrefixProvider *dpp, epoch_t epoch, optional_yield y) { static constexpr int MAX_RETRIES = 20; @@ -1105,33 +1112,33 @@ int RGWPeriod::update_latest_epoch(epoch_t epoch, optional_yield y) bool exclusive = false; // read existing epoch - int r = read_latest_epoch(info, y, &objv); + int r = read_latest_epoch(dpp, info, y, &objv); if (r == -ENOENT) { // use an exclusive create to set the epoch atomically exclusive = true; - ldout(cct, 20) << "creating initial latest_epoch=" << epoch + ldpp_dout(dpp, 20) << "creating initial latest_epoch=" << epoch << " for period=" << id << dendl; } else if (r < 0) { - ldout(cct, 0) << "ERROR: failed to read latest_epoch" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to read latest_epoch" << dendl; return r; } else if (epoch <= info.epoch) { r = -EEXIST; // fail with EEXIST if epoch is not newer - ldout(cct, 10) << "found existing latest_epoch " << info.epoch + ldpp_dout(dpp, 10) << "found existing latest_epoch " << info.epoch << " >= given epoch " << epoch << ", returning r=" << r << dendl; return r; } else { - ldout(cct, 20) << "updating latest_epoch from " << info.epoch + ldpp_dout(dpp, 20) << "updating latest_epoch from " << info.epoch << " -> " << epoch << " on period=" << id << dendl; } - r = set_latest_epoch(y, epoch, exclusive, &objv); + r = set_latest_epoch(dpp, y, epoch, exclusive, &objv); if (r == -EEXIST) { continue; // exclusive create raced with another update, retry } else if (r == -ECANCELED) { continue; // write raced with a conflicting version, retry } if (r < 0) { - ldout(cct, 0) << "ERROR: failed to write latest_epoch" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to write latest_epoch" << dendl; return r; } return 0; // return success @@ -1140,7 +1147,7 @@ int RGWPeriod::update_latest_epoch(epoch_t epoch, optional_yield y) return -ECANCELED; // fail after max retries } -int RGWPeriod::delete_obj(optional_yield y) +int RGWPeriod::delete_obj(const DoutPrefixProvider *dpp, optional_yield y) { rgw_pool pool(get_pool(cct)); @@ -1150,9 +1157,9 @@ int RGWPeriod::delete_obj(optional_yield y) rgw_raw_obj oid{pool, p.get_period_oid()}; auto obj_ctx = sysobj_svc->init_obj_ctx(); auto sysobj = sysobj_svc->get_obj(obj_ctx, oid); - int ret = sysobj.wop().remove(y); + int ret = sysobj.wop().remove(dpp, y); if (ret < 0) { - ldout(cct, 0) << "WARNING: failed to delete period object " << oid + ldpp_dout(dpp, 0) << "WARNING: failed to delete period object " << oid << ": " << cpp_strerror(-ret) << dendl; } } @@ -1161,15 +1168,15 @@ int RGWPeriod::delete_obj(optional_yield y) rgw_raw_obj oid{pool, get_period_oid_prefix() + get_latest_epoch_oid()}; auto obj_ctx = sysobj_svc->init_obj_ctx(); auto sysobj = sysobj_svc->get_obj(obj_ctx, oid); - int ret = sysobj.wop().remove(y); + int ret = sysobj.wop().remove(dpp, y); if (ret < 0) { - ldout(cct, 0) << "WARNING: failed to delete period object " << oid + ldpp_dout(dpp, 0) << "WARNING: failed to delete period object " << oid << ": " << cpp_strerror(-ret) << dendl; } 
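// --- Reviewer aside (illustrative only, not part of the patch): in
// RGWPeriod::update_latest_epoch() above only the logging changes from
// ldout(cct, N) to ldpp_dout(dpp, N); the optimistic-concurrency loop itself is
// untouched. Condensed from the hunk above, the shape it keeps is:
//
//   for (int i = 0; i < MAX_RETRIES; i++) {
//     RGWPeriodLatestEpochInfo info;
//     RGWObjVersionTracker objv;                      // remember the version we read
//     bool exclusive = false;
//     int r = read_latest_epoch(dpp, info, y, &objv);
//     if (r == -ENOENT)           exclusive = true;   // first writer uses exclusive create
//     else if (r < 0)             return r;
//     else if (epoch <= info.epoch) return -EEXIST;   // only move the epoch forward
//     r = set_latest_epoch(dpp, y, epoch, exclusive, &objv);
//     if (r == -EEXIST || r == -ECANCELED) continue;  // lost a race, re-read and retry
//     return r;                                       // success or hard error
//   }
//   return -ECANCELED;                                // give up after MAX_RETRIES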
return ret; } -int RGWPeriod::read_info(optional_yield y) +int RGWPeriod::read_info(const DoutPrefixProvider *dpp, optional_yield y) { rgw_pool pool(get_pool(cct)); @@ -1177,9 +1184,9 @@ int RGWPeriod::read_info(optional_yield y) auto obj_ctx = sysobj_svc->init_obj_ctx(); auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj{pool, get_period_oid()}); - int ret = sysobj.rop().read(&bl, y); + int ret = sysobj.rop().read(dpp, &bl, y); if (ret < 0) { - ldout(cct, 0) << "failed reading obj info from " << pool << ":" << get_period_oid() << ": " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "failed reading obj info from " << pool << ":" << get_period_oid() << ": " << cpp_strerror(-ret) << dendl; return ret; } @@ -1188,14 +1195,14 @@ int RGWPeriod::read_info(optional_yield y) auto iter = bl.cbegin(); decode(*this, iter); } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: failed to decode obj from " << pool << ":" << get_period_oid() << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to decode obj from " << pool << ":" << get_period_oid() << dendl; return -EIO; } return 0; } -int RGWPeriod::create(optional_yield y, bool exclusive) +int RGWPeriod::create(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive) { int ret; @@ -1210,21 +1217,21 @@ int RGWPeriod::create(optional_yield y, bool exclusive) period_map.id = id; - ret = store_info(exclusive, y); + ret = store_info(dpp, exclusive, y); if (ret < 0) { - ldout(cct, 0) << "ERROR: storing info for " << id << ": " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "ERROR: storing info for " << id << ": " << cpp_strerror(-ret) << dendl; return ret; } - ret = set_latest_epoch(y, epoch); + ret = set_latest_epoch(dpp, y, epoch); if (ret < 0) { - ldout(cct, 0) << "ERROR: setting latest epoch " << id << ": " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "ERROR: setting latest epoch " << id << ": " << cpp_strerror(-ret) << dendl; } return ret; } -int RGWPeriod::store_info(bool exclusive, optional_yield y) +int RGWPeriod::store_info(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y) { rgw_pool pool(get_pool(cct)); @@ -1237,7 +1244,7 @@ int RGWPeriod::store_info(bool exclusive, optional_yield y) auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj(pool, oid)); return sysobj.wop() .set_exclusive(exclusive) - .write(bl, y); + .write(dpp, bl, y); } rgw_pool RGWPeriod::get_pool(CephContext *cct) const @@ -1248,28 +1255,28 @@ rgw_pool RGWPeriod::get_pool(CephContext *cct) const return rgw_pool(cct->_conf->rgw_period_root_pool); } -int RGWPeriod::add_zonegroup(const RGWZoneGroup& zonegroup, optional_yield y) +int RGWPeriod::add_zonegroup(const DoutPrefixProvider *dpp, const RGWZoneGroup& zonegroup, optional_yield y) { if (zonegroup.realm_id != realm_id) { return 0; } int ret = period_map.update(zonegroup, cct); if (ret < 0) { - ldout(cct, 0) << "ERROR: updating period map: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "ERROR: updating period map: " << cpp_strerror(-ret) << dendl; return ret; } - return store_info(false, y); + return store_info(dpp, false, y); } -int RGWPeriod::update(optional_yield y) +int RGWPeriod::update(const DoutPrefixProvider *dpp, optional_yield y) { auto zone_svc = sysobj_svc->get_zone_svc(); - ldout(cct, 20) << __func__ << " realm " << realm_id << " period " << get_id() << dendl; + ldpp_dout(dpp, 20) << __func__ << " realm " << realm_id << " period " << get_id() << dendl; list zonegroups; - int ret = zone_svc->list_zonegroups(zonegroups); + int ret = 
zone_svc->list_zonegroups(dpp, zonegroups); if (ret < 0) { - ldout(cct, 0) << "ERROR: failed to list zonegroups: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to list zonegroups: " << cpp_strerror(-ret) << dendl; return ret; } @@ -1279,24 +1286,24 @@ int RGWPeriod::update(optional_yield y) for (auto& iter : zonegroups) { RGWZoneGroup zg(string(), iter); - ret = zg.init(cct, sysobj_svc, y); + ret = zg.init(dpp, cct, sysobj_svc, y); if (ret < 0) { - ldout(cct, 0) << "WARNING: zg.init() failed: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "WARNING: zg.init() failed: " << cpp_strerror(-ret) << dendl; continue; } if (zg.realm_id != realm_id) { - ldout(cct, 20) << "skipping zonegroup " << zg.get_name() << " zone realm id " << zg.realm_id << ", not on our realm " << realm_id << dendl; + ldpp_dout(dpp, 20) << "skipping zonegroup " << zg.get_name() << " zone realm id " << zg.realm_id << ", not on our realm " << realm_id << dendl; continue; } if (zg.master_zone.empty()) { - ldout(cct, 0) << "ERROR: zonegroup " << zg.get_name() << " should have a master zone " << dendl; + ldpp_dout(dpp, 0) << "ERROR: zonegroup " << zg.get_name() << " should have a master zone " << dendl; return -EINVAL; } if (zg.zones.find(zg.master_zone) == zg.zones.end()) { - ldout(cct,0) << "ERROR: zonegroup " << zg.get_name() + ldpp_dout(dpp, 0) << "ERROR: zonegroup " << zg.get_name() << " has a non existent master zone "<< dendl; return -EINVAL; } @@ -1312,38 +1319,38 @@ int RGWPeriod::update(optional_yield y) } } - ret = period_config.read(sysobj_svc, realm_id, y); + ret = period_config.read(dpp, sysobj_svc, realm_id, y); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << "ERROR: failed to read period config: " + ldpp_dout(dpp, 0) << "ERROR: failed to read period config: " << cpp_strerror(ret) << dendl; return ret; } return 0; } -int RGWPeriod::reflect(optional_yield y) +int RGWPeriod::reflect(const DoutPrefixProvider *dpp, optional_yield y) { for (auto& iter : period_map.zonegroups) { RGWZoneGroup& zg = iter.second; zg.reinit_instance(cct, sysobj_svc); - int r = zg.write(false, y); + int r = zg.write(dpp, false, y); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to store zonegroup info for zonegroup=" << iter.first << ": " << cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to store zonegroup info for zonegroup=" << iter.first << ": " << cpp_strerror(-r) << dendl; return r; } if (zg.is_master_zonegroup()) { // set master as default if no default exists - r = zg.set_as_default(y, true); + r = zg.set_as_default(dpp, y, true); if (r == 0) { - ldout(cct, 1) << "Set the period's master zonegroup " << zg.get_id() + ldpp_dout(dpp, 1) << "Set the period's master zonegroup " << zg.get_id() << " as the default" << dendl; } } } - int r = period_config.write(sysobj_svc, realm_id, y); + int r = period_config.write(dpp, sysobj_svc, realm_id, y); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to store period config: " + ldpp_dout(dpp, 0) << "ERROR: failed to store period config: " << cpp_strerror(-r) << dendl; return r; } @@ -1359,28 +1366,29 @@ void RGWPeriod::fork() realm_epoch++; } -static int read_sync_status(rgw::sal::RGWRadosStore *store, rgw_meta_sync_status *sync_status) +static int read_sync_status(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, rgw_meta_sync_status *sync_status) { // initialize a sync status manager to read the status RGWMetaSyncStatusManager mgr(store, store->svc()->rados->get_async_processor()); - int r = mgr.init(); + int r = mgr.init(dpp); if 
(r < 0) { return r; } - r = mgr.read_sync_status(sync_status); + r = mgr.read_sync_status(dpp, sync_status); mgr.stop(); return r; } -int RGWPeriod::update_sync_status(rgw::sal::RGWRadosStore *store, /* for now */ +int RGWPeriod::update_sync_status(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, /* for now */ const RGWPeriod &current_period, std::ostream& error_stream, bool force_if_stale) { rgw_meta_sync_status status; - int r = read_sync_status(store, &status); + int r = read_sync_status(dpp, store, &status); if (r < 0) { - ldout(cct, 0) << "period failed to read sync status: " + ldpp_dout(dpp, 0) << "period failed to read sync status: " << cpp_strerror(-r) << dendl; return r; } @@ -1422,13 +1430,14 @@ int RGWPeriod::update_sync_status(rgw::sal::RGWRadosStore *store, /* for now */ return 0; } -int RGWPeriod::commit(rgw::sal::RGWRadosStore *store, +int RGWPeriod::commit(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, RGWRealm& realm, const RGWPeriod& current_period, std::ostream& error_stream, optional_yield y, bool force_if_stale) { auto zone_svc = sysobj_svc->get_zone_svc(); - ldout(cct, 20) << __func__ << " realm " << realm.get_id() << " period " << current_period.get_id() << dendl; + ldpp_dout(dpp, 20) << __func__ << " realm " << realm.get_id() << " period " << current_period.get_id() << dendl; // gateway must be in the master zone to commit if (master_zone != zone_svc->get_zone_params().get_id()) { error_stream << "Cannot commit period on zone " @@ -1456,28 +1465,28 @@ int RGWPeriod::commit(rgw::sal::RGWRadosStore *store, // did the master zone change? if (master_zone != current_period.get_master_zone()) { // store the current metadata sync status in the period - int r = update_sync_status(store, current_period, error_stream, force_if_stale); + int r = update_sync_status(dpp, store, current_period, error_stream, force_if_stale); if (r < 0) { - ldout(cct, 0) << "failed to update metadata sync status: " + ldpp_dout(dpp, 0) << "failed to update metadata sync status: " << cpp_strerror(-r) << dendl; return r; } // create an object with a new period id - r = create(y, true); + r = create(dpp, y, true); if (r < 0) { - ldout(cct, 0) << "failed to create new period: " << cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "failed to create new period: " << cpp_strerror(-r) << dendl; return r; } // set as current period - r = realm.set_current_period(*this, y); + r = realm.set_current_period(dpp, *this, y); if (r < 0) { - ldout(cct, 0) << "failed to update realm's current period: " + ldpp_dout(dpp, 0) << "failed to update realm's current period: " << cpp_strerror(-r) << dendl; return r; } - ldout(cct, 4) << "Promoted to master zone and committed new period " + ldpp_dout(dpp, 4) << "Promoted to master zone and committed new period " << id << dendl; - realm.notify_new_period(*this, y); + realm.notify_new_period(dpp, *this, y); return 0; } // period must be based on current epoch @@ -1494,37 +1503,37 @@ int RGWPeriod::commit(rgw::sal::RGWRadosStore *store, set_predecessor(current_period.get_predecessor()); realm_epoch = current_period.get_realm_epoch(); // write the period to rados - int r = store_info(false, y); + int r = store_info(dpp, false, y); if (r < 0) { - ldout(cct, 0) << "failed to store period: " << cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "failed to store period: " << cpp_strerror(-r) << dendl; return r; } // set as latest epoch - r = update_latest_epoch(epoch, y); + r = update_latest_epoch(dpp, epoch, y); if (r == -EEXIST) { // already have this
epoch (or a more recent one) return 0; } if (r < 0) { - ldout(cct, 0) << "failed to set latest epoch: " << cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "failed to set latest epoch: " << cpp_strerror(-r) << dendl; return r; } - r = reflect(y); + r = reflect(dpp, y); if (r < 0) { - ldout(cct, 0) << "failed to update local objects: " << cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "failed to update local objects: " << cpp_strerror(-r) << dendl; return r; } - ldout(cct, 4) << "Committed new epoch " << epoch + ldpp_dout(dpp, 4) << "Committed new epoch " << epoch << " for period " << id << dendl; - realm.notify_new_period(*this, y); + realm.notify_new_period(dpp, *this, y); return 0; } -int RGWZoneParams::create_default(optional_yield y, bool old_format) +int RGWZoneParams::create_default(const DoutPrefixProvider *dpp, optional_yield y, bool old_format) { name = default_zone_name; - int r = create(y); + int r = create(dpp, y); if (r < 0) { return r; } @@ -1538,7 +1547,8 @@ int RGWZoneParams::create_default(optional_yield y, bool old_format) namespace { -int get_zones_pool_set(CephContext* cct, +int get_zones_pool_set(const DoutPrefixProvider *dpp, + CephContext* cct, RGWSI_SysObj* sysobj_svc, const list& zones, const string& my_zone_id, @@ -1547,9 +1557,9 @@ int get_zones_pool_set(CephContext* cct, { for(auto const& iter : zones) { RGWZoneParams zone(iter); - int r = zone.init(cct, sysobj_svc, y); + int r = zone.init(dpp, cct, sysobj_svc, y); if (r < 0) { - ldout(cct, 0) << "Error: init zone " << iter << ":" << cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "Error: init zone " << iter << ":" << cpp_strerror(-r) << dendl; return r; } if (zone.get_id() != my_zone_id) { @@ -1612,19 +1622,19 @@ rgw_pool fix_zone_pool_dup(set pools, } } -int RGWZoneParams::fix_pool_names(optional_yield y) +int RGWZoneParams::fix_pool_names(const DoutPrefixProvider *dpp, optional_yield y) { list zones; - int r = zone_svc->list_zones(zones); + int r = zone_svc->list_zones(dpp, zones); if (r < 0) { - ldout(cct, 10) << "WARNING: store->list_zones() returned r=" << r << dendl; + ldpp_dout(dpp, 10) << "WARNING: store->list_zones() returned r=" << r << dendl; } set pools; - r = get_zones_pool_set(cct, sysobj_svc, zones, id, pools, y); + r = get_zones_pool_set(dpp, cct, sysobj_svc, zones, id, pools, y); if (r < 0) { - ldout(cct, 0) << "Error: get_zones_pool_names" << r << dendl; + ldpp_dout(dpp, 0) << "Error: get_zones_pool_names" << r << dendl; return r; } @@ -1662,15 +1672,15 @@ int RGWZoneParams::fix_pool_names(optional_yield y) return 0; } -int RGWZoneParams::create(optional_yield y, bool exclusive) +int RGWZoneParams::create(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive) { /* check for old pools config */ rgw_raw_obj obj(domain_root, avail_pools); auto obj_ctx = sysobj_svc->init_obj_ctx(); auto sysobj = sysobj_svc->get_obj(obj_ctx, obj); - int r = sysobj.rop().stat(y); + int r = sysobj.rop().stat(y, dpp); if (r < 0) { - ldout(cct, 10) << "couldn't find old data placement pools config, setting up new ones for the zone" << dendl; + ldpp_dout(dpp, 10) << "couldn't find old data placement pools config, setting up new ones for the zone" << dendl; /* a new system, let's set new placement info */ RGWZonePlacementInfo default_placement; default_placement.index_pool = name + "." 
+ default_bucket_index_pool_suffix; @@ -1680,22 +1690,22 @@ int RGWZoneParams::create(optional_yield y, bool exclusive) placement_pools["default-placement"] = default_placement; } - r = fix_pool_names(y); + r = fix_pool_names(dpp, y); if (r < 0) { - ldout(cct, 0) << "ERROR: fix_pool_names returned r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: fix_pool_names returned r=" << r << dendl; return r; } - r = RGWSystemMetaObj::create(y, exclusive); + r = RGWSystemMetaObj::create(dpp, y, exclusive); if (r < 0) { return r; } // try to set as default. may race with another create, so pass exclusive=true // so we don't override an existing default - r = set_as_default(y, true); + r = set_as_default(dpp, y, true); if (r < 0 && r != -EEXIST) { - ldout(cct, 10) << "WARNING: failed to set zone as default, r=" << r << dendl; + ldpp_dout(dpp, 10) << "WARNING: failed to set zone as default, r=" << r << dendl; } return 0; @@ -1733,48 +1743,49 @@ const string& RGWZoneParams::get_predefined_name(CephContext *cct) const { return cct->_conf->rgw_zone; } -int RGWZoneParams::init(CephContext *cct, RGWSI_SysObj *sysobj_svc, +int RGWZoneParams::init(const DoutPrefixProvider *dpp, + CephContext *cct, RGWSI_SysObj *sysobj_svc, optional_yield y, bool setup_obj, bool old_format) { if (name.empty()) { name = cct->_conf->rgw_zone; } - return RGWSystemMetaObj::init(cct, sysobj_svc, y, setup_obj, old_format); + return RGWSystemMetaObj::init(dpp, cct, sysobj_svc, y, setup_obj, old_format); } -int RGWZoneParams::read_default_id(string& default_id, optional_yield y, +int RGWZoneParams::read_default_id(const DoutPrefixProvider *dpp, string& default_id, optional_yield y, bool old_format) { if (realm_id.empty()) { /* try using default realm */ RGWRealm realm; - int ret = realm.init(cct, sysobj_svc, y); + int ret = realm.init(dpp, cct, sysobj_svc, y); //no default realm exist if (ret < 0) { - return read_id(default_zone_name, default_id, y); + return read_id(dpp, default_zone_name, default_id, y); } realm_id = realm.get_id(); } - return RGWSystemMetaObj::read_default_id(default_id, y, old_format); + return RGWSystemMetaObj::read_default_id(dpp, default_id, y, old_format); } -int RGWZoneParams::set_as_default(optional_yield y, bool exclusive) +int RGWZoneParams::set_as_default(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive) { if (realm_id.empty()) { /* try using default realm */ RGWRealm realm; - int ret = realm.init(cct, sysobj_svc, y); + int ret = realm.init(dpp, cct, sysobj_svc, y); if (ret < 0) { - ldout(cct, 10) << "could not read realm id: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 10) << "could not read realm id: " << cpp_strerror(-ret) << dendl; return -EINVAL; } realm_id = realm.get_id(); } - return RGWSystemMetaObj::set_as_default(y, exclusive); + return RGWSystemMetaObj::set_as_default(dpp, y, exclusive); } const string& RGWZoneParams::get_compression_type(const rgw_placement_rule& placement_rule) const @@ -1890,11 +1901,11 @@ uint32_t RGWPeriodMap::get_zone_short_id(const string& zone_id) const return i->second; } -int RGWZoneGroupMap::read(CephContext *cct, RGWSI_SysObj *sysobj_svc, optional_yield y) +int RGWZoneGroupMap::read(const DoutPrefixProvider *dpp, CephContext *cct, RGWSI_SysObj *sysobj_svc, optional_yield y) { RGWPeriod period; - int ret = period.init(cct, sysobj_svc, y); + int ret = period.init(dpp, cct, sysobj_svc, y); if (ret < 0) { cerr << "failed to read current period info: " << cpp_strerror(ret); return ret; diff --git a/src/rgw/rgw_zone.h b/src/rgw/rgw_zone.h index 
32a588aea2adb..d430e9475b4da 100644 --- a/src/rgw/rgw_zone.h +++ b/src/rgw/rgw_zone.h @@ -86,15 +86,16 @@ protected: RGWSI_SysObj *sysobj_svc{nullptr}; RGWSI_Zone *zone_svc{nullptr}; - int store_name(bool exclusive, optional_yield y); - int store_info(bool exclusive, optional_yield y); - int read_info(const std::string& obj_id, optional_yield y, bool old_format = false); - int read_id(const std::string& obj_name, std::string& obj_id, optional_yield y); - int read_default(RGWDefaultSystemMetaObjInfo& default_info, + int store_name(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y); + int store_info(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y); + int read_info(const DoutPrefixProvider *dpp, const std::string& obj_id, optional_yield y, bool old_format = false); + int read_id(const DoutPrefixProvider *dpp, const std::string& obj_name, std::string& obj_id, optional_yield y); + int read_default(const DoutPrefixProvider *dpp, + RGWDefaultSystemMetaObjInfo& default_info, const std::string& oid, optional_yield y); /* read and use default id */ - int use_default(optional_yield y, bool old_format = false); + int use_default(const DoutPrefixProvider *dpp, optional_yield y, bool old_format = false); public: RGWSystemMetaObj() {} @@ -131,20 +132,20 @@ public: } void reinit_instance(CephContext *_cct, RGWSI_SysObj *_sysobj_svc); - int init(CephContext *_cct, RGWSI_SysObj *_sysobj_svc, + int init(const DoutPrefixProvider *dpp, CephContext *_cct, RGWSI_SysObj *_sysobj_svc, optional_yield y, bool setup_obj = true, bool old_format = false); - virtual int read_default_id(std::string& default_id, optional_yield y, + virtual int read_default_id(const DoutPrefixProvider *dpp, std::string& default_id, optional_yield y, bool old_format = false); - virtual int set_as_default(optional_yield y, bool exclusive = false); + virtual int set_as_default(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = false); int delete_default(); - virtual int create(optional_yield y, bool exclusive = true); - int delete_obj(optional_yield y, bool old_format = false); - int rename(const std::string& new_name, optional_yield y); - int update(optional_yield y) { return store_info(false, y);} - int update_name(optional_yield y) { return store_name(false, y);} - int read(optional_yield y); - int write(bool exclusive, optional_yield y); + virtual int create(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = true); + int delete_obj(const DoutPrefixProvider *dpp, optional_yield y, bool old_format = false); + int rename(const DoutPrefixProvider *dpp, const std::string& new_name, optional_yield y); + int update(const DoutPrefixProvider *dpp, optional_yield y) { return store_info(dpp, false, y);} + int update_name(const DoutPrefixProvider *dpp, optional_yield y) { return store_name(dpp, false, y);} + int read(const DoutPrefixProvider *dpp, optional_yield y); + int write(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y); virtual rgw_pool get_pool(CephContext *cct) const = 0; virtual const std::string get_default_oid(bool old_format = false) const = 0; @@ -400,14 +401,15 @@ struct RGWZoneParams : RGWSystemMetaObj { const std::string& get_info_oid_prefix(bool old_format = false) const override; const std::string& get_predefined_name(CephContext *cct) const override; - int init(CephContext *_cct, RGWSI_SysObj *_sysobj_svc, optional_yield y, + int init(const DoutPrefixProvider *dpp, + CephContext *_cct, RGWSI_SysObj *_sysobj_svc, optional_yield y, bool setup_obj = true, 
bool old_format = false); using RGWSystemMetaObj::init; - int read_default_id(std::string& default_id, optional_yield y, bool old_format = false) override; - int set_as_default(optional_yield y, bool exclusive = false) override; - int create_default(optional_yield y, bool old_format = false); - int create(optional_yield y, bool exclusive = true) override; - int fix_pool_names(optional_yield y); + int read_default_id(const DoutPrefixProvider *dpp, std::string& default_id, optional_yield y, bool old_format = false) override; + int set_as_default(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = false) override; + int create_default(const DoutPrefixProvider *dpp, optional_yield y, bool old_format = false); + int create(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = true) override; + int fix_pool_names(const DoutPrefixProvider *dpp, optional_yield y); const string& get_compression_type(const rgw_placement_rule& placement_rule) const; @@ -760,11 +762,11 @@ struct RGWZoneGroup : public RGWSystemMetaObj { realm_id(_realm_id) {} bool is_master_zonegroup() const { return is_master;} - void update_master(bool _is_master, optional_yield y) { + void update_master(const DoutPrefixProvider *dpp, bool _is_master, optional_yield y) { is_master = _is_master; - post_process_params(y); + post_process_params(dpp, y); } - void post_process_params(optional_yield y); + void post_process_params(const DoutPrefixProvider *dpp, optional_yield y); void encode(bufferlist& bl) const override { ENCODE_START(5, 1, bl); @@ -812,18 +814,19 @@ struct RGWZoneGroup : public RGWSystemMetaObj { DECODE_FINISH(bl); } - int read_default_id(std::string& default_id, optional_yield y, bool old_format = false) override; - int set_as_default(optional_yield y, bool exclusive = false) override; - int create_default(optional_yield y, bool old_format = false); + int read_default_id(const DoutPrefixProvider *dpp, std::string& default_id, optional_yield y, bool old_format = false) override; + int set_as_default(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = false) override; + int create_default(const DoutPrefixProvider *dpp, optional_yield y, bool old_format = false); int equals(const std::string& other_zonegroup) const; - int add_zone(const RGWZoneParams& zone_params, bool *is_master, bool *read_only, + int add_zone(const DoutPrefixProvider *dpp, + const RGWZoneParams& zone_params, bool *is_master, bool *read_only, const list& endpoints, const std::string *ptier_type, bool *psync_from_all, list& sync_from, list& sync_from_rm, std::string *predirect_zone, std::optional bucket_index_max_shards, RGWSyncModulesManager *sync_mgr, optional_yield y); - int remove_zone(const std::string& zone_id, optional_yield y); - int rename_zone(const RGWZoneParams& zone_params, optional_yield y); + int remove_zone(const DoutPrefixProvider *dpp, const std::string& zone_id, optional_yield y); + int rename_zone(const DoutPrefixProvider *dpp, const RGWZoneParams& zone_params, optional_yield y); rgw_pool get_pool(CephContext *cct) const override; const std::string get_default_oid(bool old_region_format = false) const override; const std::string& get_info_oid_prefix(bool old_region_format = false) const override; @@ -888,8 +891,8 @@ struct RGWPeriodConfig // the period config must be stored in a local object outside of the period, // so that it can be used in a default configuration where no realm/period // exists - int read(RGWSI_SysObj *sysobj_svc, const std::string& realm_id, optional_yield y); - int 
write(RGWSI_SysObj *sysobj_svc, const std::string& realm_id, optional_yield y); + int read(const DoutPrefixProvider *dpp, RGWSI_SysObj *sysobj_svc, const std::string& realm_id, optional_yield y); + int write(const DoutPrefixProvider *dpp, RGWSI_SysObj *sysobj_svc, const std::string& realm_id, optional_yield y); static std::string get_oid(const std::string& realm_id); static rgw_pool get_pool(CephContext *cct); @@ -925,7 +928,7 @@ struct RGWZoneGroupMap { RGWQuotaInfo user_quota; /* construct the map */ - int read(CephContext *cct, RGWSI_SysObj *sysobj_svc, optional_yield y); + int read(const DoutPrefixProvider *dpp, CephContext *cct, RGWSI_SysObj *sysobj_svc, optional_yield y); void encode(bufferlist& bl) const; void decode(bufferlist::const_iterator& bl); @@ -943,8 +946,8 @@ class RGWRealm : public RGWSystemMetaObj std::string current_period; epoch_t epoch{0}; //< realm epoch, incremented for each new period - int create_control(bool exclusive, optional_yield y); - int delete_control(optional_yield y); + int create_control(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y); + int delete_control(const DoutPrefixProvider *dpp, optional_yield y); public: RGWRealm() {} RGWRealm(const std::string& _id, const std::string& _name = "") : RGWSystemMetaObj(_id, _name) {} @@ -967,8 +970,8 @@ public: DECODE_FINISH(bl); } - int create(optional_yield y, bool exclusive = true) override; - int delete_obj(optional_yield y); + int create(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = true) override; + int delete_obj(const DoutPrefixProvider *dpp, optional_yield y); rgw_pool get_pool(CephContext *cct) const override; const std::string get_default_oid(bool old_format = false) const override; const std::string& get_names_oid_prefix() const override; @@ -984,7 +987,7 @@ public: const std::string& get_current_period() const { return current_period; } - int set_current_period(RGWPeriod& period, optional_yield y); + int set_current_period(const DoutPrefixProvider *dpp, RGWPeriod& period, optional_yield y); void clear_current_period_and_epoch() { current_period.clear(); epoch = 0; @@ -993,9 +996,9 @@ public: std::string get_control_oid() const; /// send a notify on the realm control object - int notify_zone(bufferlist& bl, optional_yield y); + int notify_zone(const DoutPrefixProvider *dpp, bufferlist& bl, optional_yield y); /// notify the zone of a new period - int notify_new_period(const RGWPeriod& period, optional_yield y); + int notify_new_period(const DoutPrefixProvider *dpp, const RGWPeriod& period, optional_yield y); }; WRITE_CLASS_ENCODER(RGWRealm) @@ -1059,18 +1062,20 @@ class RGWPeriod CephContext *cct{nullptr}; RGWSI_SysObj *sysobj_svc{nullptr}; - int read_info(optional_yield y); - int read_latest_epoch(RGWPeriodLatestEpochInfo& epoch_info, + int read_info(const DoutPrefixProvider *dpp, optional_yield y); + int read_latest_epoch(const DoutPrefixProvider *dpp, + RGWPeriodLatestEpochInfo& epoch_info, optional_yield y, RGWObjVersionTracker *objv = nullptr); - int use_latest_epoch(optional_yield y); + int use_latest_epoch(const DoutPrefixProvider *dpp, optional_yield y); int use_current_period(); const std::string get_period_oid() const; const std::string get_period_oid_prefix() const; // gather the metadata sync status for each shard; only for use on master zone - int update_sync_status(rgw::sal::RGWRadosStore *store, + int update_sync_status(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, const RGWPeriod &current_period, std::ostream& error_stream, bool
force_if_stale); @@ -1119,7 +1124,7 @@ public: realm_id = _realm_id; } - int reflect(optional_yield y); + int reflect(const DoutPrefixProvider *dpp, optional_yield y); int get_zonegroup(RGWZoneGroup& zonegroup, const std::string& zonegroup_id) const; @@ -1145,27 +1150,28 @@ public: return false; } - int get_latest_epoch(epoch_t& epoch, optional_yield y); - int set_latest_epoch(optional_yield y, + int get_latest_epoch(const DoutPrefixProvider *dpp, epoch_t& epoch, optional_yield y); + int set_latest_epoch(const DoutPrefixProvider *dpp, optional_yield y, epoch_t epoch, bool exclusive = false, RGWObjVersionTracker *objv = nullptr); // update latest_epoch if the given epoch is higher, else return -EEXIST - int update_latest_epoch(epoch_t epoch, optional_yield y); + int update_latest_epoch(const DoutPrefixProvider *dpp, epoch_t epoch, optional_yield y); - int init(CephContext *_cct, RGWSI_SysObj *_sysobj_svc, const std::string &period_realm_id, optional_yield y, + int init(const DoutPrefixProvider *dpp, CephContext *_cct, RGWSI_SysObj *_sysobj_svc, const std::string &period_realm_id, optional_yield y, const std::string &period_realm_name = "", bool setup_obj = true); - int init(CephContext *_cct, RGWSI_SysObj *_sysobj_svc, optional_yield y, bool setup_obj = true); + int init(const DoutPrefixProvider *dpp, CephContext *_cct, RGWSI_SysObj *_sysobj_svc, optional_yield y, bool setup_obj = true); - int create(optional_yield y, bool exclusive = true); - int delete_obj(optional_yield y); - int store_info(bool exclusive, optional_yield y); - int add_zonegroup(const RGWZoneGroup& zonegroup, optional_yield y); + int create(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = true); + int delete_obj(const DoutPrefixProvider *dpp, optional_yield y); + int store_info(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y); + int add_zonegroup(const DoutPrefixProvider *dpp, const RGWZoneGroup& zonegroup, optional_yield y); void fork(); - int update(optional_yield y); + int update(const DoutPrefixProvider *dpp, optional_yield y); // commit a staging period; only for use on master zone - int commit(rgw::sal::RGWRadosStore *store, + int commit(const DoutPrefixProvider *dpp, + rgw::sal::RGWRadosStore *store, RGWRealm& realm, const RGWPeriod &current_period, std::ostream& error_stream, optional_yield y, bool force_if_stale = false); diff --git a/src/rgw/services/svc_bi.h b/src/rgw/services/svc_bi.h index de348b40de450..abb68e3941840 100644 --- a/src/rgw/services/svc_bi.h +++ b/src/rgw/services/svc_bi.h @@ -29,14 +29,16 @@ public: RGWSI_BucketIndex(CephContext *cct) : RGWServiceInstance(cct) {} virtual ~RGWSI_BucketIndex() {} - virtual int init_index(RGWBucketInfo& bucket_info) = 0; - virtual int clean_index(RGWBucketInfo& bucket_info) = 0; + virtual int init_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info) = 0; + virtual int clean_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info) = 0; - virtual int read_stats(const RGWBucketInfo& bucket_info, + virtual int read_stats(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, RGWBucketEnt *stats, optional_yield y) = 0; - virtual int handle_overwrite(const RGWBucketInfo& info, + virtual int handle_overwrite(const DoutPrefixProvider *dpp, + const RGWBucketInfo& info, const RGWBucketInfo& orig_info) = 0; }; diff --git a/src/rgw/services/svc_bi_rados.cc b/src/rgw/services/svc_bi_rados.cc index 4e58438d50293..dd9bcc6add235 100644 --- a/src/rgw/services/svc_bi_rados.cc +++ b/src/rgw/services/svc_bi_rados.cc @@
-30,22 +30,24 @@ void RGWSI_BucketIndex_RADOS::init(RGWSI_Zone *zone_svc, svc.datalog_rados = datalog_rados_svc; } -int RGWSI_BucketIndex_RADOS::open_pool(const rgw_pool& pool, +int RGWSI_BucketIndex_RADOS::open_pool(const DoutPrefixProvider *dpp, + const rgw_pool& pool, RGWSI_RADOS::Pool *index_pool, bool mostly_omap) { *index_pool = svc.rados->pool(pool); - return index_pool->open(RGWSI_RADOS::OpenParams() + return index_pool->open(dpp, RGWSI_RADOS::OpenParams() .set_mostly_omap(mostly_omap)); } -int RGWSI_BucketIndex_RADOS::open_bucket_index_pool(const RGWBucketInfo& bucket_info, +int RGWSI_BucketIndex_RADOS::open_bucket_index_pool(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, RGWSI_RADOS::Pool *index_pool) { const rgw_pool& explicit_pool = bucket_info.bucket.explicit_placement.index_pool; if (!explicit_pool.empty()) { - return open_pool(explicit_pool, index_pool, false); + return open_pool(dpp, explicit_pool, index_pool, false); } auto& zonegroup = svc.zone->get_zonegroup(); @@ -57,28 +59,29 @@ int RGWSI_BucketIndex_RADOS::open_bucket_index_pool(const RGWBucketInfo& bucket_ } auto iter = zone_params.placement_pools.find(rule->name); if (iter == zone_params.placement_pools.end()) { - ldout(cct, 0) << "could not find placement rule " << *rule << " within zonegroup " << dendl; + ldpp_dout(dpp, 0) << "could not find placement rule " << *rule << " within zonegroup " << dendl; return -EINVAL; } - int r = open_pool(iter->second.index_pool, index_pool, true); + int r = open_pool(dpp, iter->second.index_pool, index_pool, true); if (r < 0) return r; return 0; } -int RGWSI_BucketIndex_RADOS::open_bucket_index_base(const RGWBucketInfo& bucket_info, +int RGWSI_BucketIndex_RADOS::open_bucket_index_base(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, RGWSI_RADOS::Pool *index_pool, string *bucket_oid_base) { const rgw_bucket& bucket = bucket_info.bucket; - int r = open_bucket_index_pool(bucket_info, index_pool); + int r = open_bucket_index_pool(dpp, bucket_info, index_pool); if (r < 0) return r; if (bucket.bucket_id.empty()) { - ldout(cct, 0) << "ERROR: empty bucket_id for bucket operation" << dendl; + ldpp_dout(dpp, 0) << "ERROR: empty bucket_id for bucket operation" << dendl; return -EIO; } @@ -89,20 +92,21 @@ int RGWSI_BucketIndex_RADOS::open_bucket_index_base(const RGWBucketInfo& bucket_ } -int RGWSI_BucketIndex_RADOS::open_bucket_index(const RGWBucketInfo& bucket_info, +int RGWSI_BucketIndex_RADOS::open_bucket_index(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, RGWSI_RADOS::Pool *index_pool, string *bucket_oid) { const rgw_bucket& bucket = bucket_info.bucket; - int r = open_bucket_index_pool(bucket_info, index_pool); + int r = open_bucket_index_pool(dpp, bucket_info, index_pool); if (r < 0) { - ldout(cct, 20) << __func__ << ": open_bucket_index_pool() returned " + ldpp_dout(dpp, 20) << __func__ << ": open_bucket_index_pool() returned " << r << dendl; return r; } if (bucket.bucket_id.empty()) { - ldout(cct, 0) << "ERROR: empty bucket id for bucket operation" << dendl; + ldpp_dout(dpp, 0) << "ERROR: empty bucket id for bucket operation" << dendl; return -EIO; } @@ -163,7 +167,8 @@ static void get_bucket_instance_ids(const RGWBucketInfo& bucket_info, } } -int RGWSI_BucketIndex_RADOS::open_bucket_index(const RGWBucketInfo& bucket_info, +int RGWSI_BucketIndex_RADOS::open_bucket_index(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, std::optional _shard_id, RGWSI_RADOS::Pool *index_pool, map *bucket_objs, @@ -171,9 +176,9 @@ 
int RGWSI_BucketIndex_RADOS::open_bucket_index(const RGWBucketInfo& bucket_info, { int shard_id = _shard_id.value_or(-1); string bucket_oid_base; - int ret = open_bucket_index_base(bucket_info, index_pool, &bucket_oid_base); + int ret = open_bucket_index_base(dpp, bucket_info, index_pool, &bucket_oid_base); if (ret < 0) { - ldout(cct, 20) << __func__ << ": open_bucket_index_pool() returned " + ldpp_dout(dpp, 20) << __func__ << ": open_bucket_index_pool() returned " << ret << dendl; return ret; } @@ -236,7 +241,8 @@ int RGWSI_BucketIndex_RADOS::get_bucket_index_object(const string& bucket_oid_ba return r; } -int RGWSI_BucketIndex_RADOS::open_bucket_index_shard(const RGWBucketInfo& bucket_info, +int RGWSI_BucketIndex_RADOS::open_bucket_index_shard(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, const string& obj_key, RGWSI_RADOS::Obj *bucket_obj, int *shard_id) @@ -245,9 +251,9 @@ int RGWSI_BucketIndex_RADOS::open_bucket_index_shard(const RGWBucketInfo& bucket RGWSI_RADOS::Pool pool; - int ret = open_bucket_index_base(bucket_info, &pool, &bucket_oid_base); + int ret = open_bucket_index_base(dpp, bucket_info, &pool, &bucket_oid_base); if (ret < 0) { - ldout(cct, 20) << __func__ << ": open_bucket_index_pool() returned " + ldpp_dout(dpp, 20) << __func__ << ": open_bucket_index_pool() returned " << ret << dendl; return ret; } @@ -257,7 +263,7 @@ int RGWSI_BucketIndex_RADOS::open_bucket_index_shard(const RGWBucketInfo& bucket ret = get_bucket_index_object(bucket_oid_base, obj_key, bucket_info.layout.current_index.layout.normal.num_shards, bucket_info.layout.current_index.layout.normal.hash_type, &oid, shard_id); if (ret < 0) { - ldout(cct, 10) << "get_bucket_index_object() returned ret=" << ret << dendl; + ldpp_dout(dpp, 10) << "get_bucket_index_object() returned ret=" << ret << dendl; return ret; } @@ -266,16 +272,17 @@ int RGWSI_BucketIndex_RADOS::open_bucket_index_shard(const RGWBucketInfo& bucket return 0; } -int RGWSI_BucketIndex_RADOS::open_bucket_index_shard(const RGWBucketInfo& bucket_info, +int RGWSI_BucketIndex_RADOS::open_bucket_index_shard(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, int shard_id, const rgw::bucket_index_layout_generation& idx_layout, RGWSI_RADOS::Obj *bucket_obj) { RGWSI_RADOS::Pool index_pool; string bucket_oid_base; - int ret = open_bucket_index_base(bucket_info, &index_pool, &bucket_oid_base); + int ret = open_bucket_index_base(dpp, bucket_info, &index_pool, &bucket_oid_base); if (ret < 0) { - ldout(cct, 20) << __func__ << ": open_bucket_index_pool() returned " + ldpp_dout(dpp, 20) << __func__ << ": open_bucket_index_pool() returned " << ret << dendl; return ret; } @@ -290,7 +297,8 @@ int RGWSI_BucketIndex_RADOS::open_bucket_index_shard(const RGWBucketInfo& bucket return 0; } -int RGWSI_BucketIndex_RADOS::cls_bucket_head(const RGWBucketInfo& bucket_info, +int RGWSI_BucketIndex_RADOS::cls_bucket_head(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, int shard_id, vector *headers, map *bucket_instance_ids, @@ -298,7 +306,7 @@ int RGWSI_BucketIndex_RADOS::cls_bucket_head(const RGWBucketInfo& bucket_info, { RGWSI_RADOS::Pool index_pool; map oids; - int r = open_bucket_index(bucket_info, shard_id, &index_pool, &oids, bucket_instance_ids); + int r = open_bucket_index(dpp, bucket_info, shard_id, &index_pool, &oids, bucket_instance_ids); if (r < 0) return r; @@ -319,12 +327,12 @@ int RGWSI_BucketIndex_RADOS::cls_bucket_head(const RGWBucketInfo& bucket_info, } -int RGWSI_BucketIndex_RADOS::init_index(RGWBucketInfo& 
bucket_info) +int RGWSI_BucketIndex_RADOS::init_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info) { RGWSI_RADOS::Pool index_pool; string dir_oid = dir_oid_prefix; - int r = open_bucket_index_pool(bucket_info, &index_pool); + int r = open_bucket_index_pool(dpp, bucket_info, &index_pool); if (r < 0) { return r; } @@ -339,12 +347,12 @@ int RGWSI_BucketIndex_RADOS::init_index(RGWBucketInfo& bucket_info) cct->_conf->rgw_bucket_index_max_aio)(); } -int RGWSI_BucketIndex_RADOS::clean_index(RGWBucketInfo& bucket_info) +int RGWSI_BucketIndex_RADOS::clean_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info) { RGWSI_RADOS::Pool index_pool; std::string dir_oid = dir_oid_prefix; - int r = open_bucket_index_pool(bucket_info, &index_pool); + int r = open_bucket_index_pool(dpp, bucket_info, &index_pool); if (r < 0) { return r; } @@ -359,14 +367,15 @@ int RGWSI_BucketIndex_RADOS::clean_index(RGWBucketInfo& bucket_info) cct->_conf->rgw_bucket_index_max_aio)(); } -int RGWSI_BucketIndex_RADOS::read_stats(const RGWBucketInfo& bucket_info, +int RGWSI_BucketIndex_RADOS::read_stats(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, RGWBucketEnt *result, optional_yield y) { vector headers; result->bucket = bucket_info.bucket; - int r = cls_bucket_head(bucket_info, RGW_NO_SHARD, &headers, nullptr, y); + int r = cls_bucket_head(dpp, bucket_info, RGW_NO_SHARD, &headers, nullptr, y); if (r < 0) { return r; } @@ -388,13 +397,13 @@ int RGWSI_BucketIndex_RADOS::read_stats(const RGWBucketInfo& bucket_info, return 0; } -int RGWSI_BucketIndex_RADOS::get_reshard_status(const RGWBucketInfo& bucket_info, list *status) +int RGWSI_BucketIndex_RADOS::get_reshard_status(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, list *status) { map bucket_objs; RGWSI_RADOS::Pool index_pool; - int r = open_bucket_index(bucket_info, + int r = open_bucket_index(dpp, bucket_info, std::nullopt, &index_pool, &bucket_objs, @@ -408,7 +417,7 @@ int RGWSI_BucketIndex_RADOS::get_reshard_status(const RGWBucketInfo& bucket_info int ret = cls_rgw_get_bucket_resharding(index_pool.ioctx(), i.second, &entry); if (ret < 0 && ret != -ENOENT) { - lderr(cct) << "ERROR: " << __func__ << ": cls_rgw_get_bucket_resharding() returned ret=" << ret << dendl; + ldpp_dout(dpp, -1) << "ERROR: " << __func__ << ": cls_rgw_get_bucket_resharding() returned ret=" << ret << dendl; return ret; } @@ -418,7 +427,8 @@ int RGWSI_BucketIndex_RADOS::get_reshard_status(const RGWBucketInfo& bucket_info return 0; } -int RGWSI_BucketIndex_RADOS::handle_overwrite(const RGWBucketInfo& info, +int RGWSI_BucketIndex_RADOS::handle_overwrite(const DoutPrefixProvider *dpp, + const RGWBucketInfo& info, const RGWBucketInfo& orig_info) { bool new_sync_enabled = info.datasync_flag_enabled(); @@ -430,19 +440,19 @@ int RGWSI_BucketIndex_RADOS::handle_overwrite(const RGWBucketInfo& info, int ret; if (!new_sync_enabled) { - ret = svc.bilog->log_stop(info, -1); + ret = svc.bilog->log_stop(dpp, info, -1); } else { - ret = svc.bilog->log_start(info, -1); + ret = svc.bilog->log_start(dpp, info, -1); } if (ret < 0) { - lderr(cct) << "ERROR: failed writing bilog (bucket=" << info.bucket << "); ret=" << ret << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed writing bilog (bucket=" << info.bucket << "); ret=" << ret << dendl; return ret; } for (int i = 0; i < shards_num; ++i, ++shard_id) { - ret = svc.datalog_rados->add_entry(info, shard_id); + ret = svc.datalog_rados->add_entry(dpp, info, shard_id); if (ret < 0) { - lderr(cct) << "ERROR: failed writing 
data log (info.bucket=" << info.bucket << ", shard_id=" << shard_id << ")" << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed writing data log (info.bucket=" << info.bucket << ", shard_id=" << shard_id << ")" << dendl; return ret; } } diff --git a/src/rgw/services/svc_bi_rados.h b/src/rgw/services/svc_bi_rados.h index b25f744927512..9037f43c8af35 100644 --- a/src/rgw/services/svc_bi_rados.h +++ b/src/rgw/services/svc_bi_rados.h @@ -36,13 +36,16 @@ class RGWSI_BucketIndex_RADOS : public RGWSI_BucketIndex { friend class RGWSI_BILog_RADOS; - int open_pool(const rgw_pool& pool, + int open_pool(const DoutPrefixProvider *dpp, + const rgw_pool& pool, RGWSI_RADOS::Pool *index_pool, bool mostly_omap); - int open_bucket_index_pool(const RGWBucketInfo& bucket_info, + int open_bucket_index_pool(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, RGWSI_RADOS::Pool *index_pool); - int open_bucket_index_base(const RGWBucketInfo& bucket_info, + int open_bucket_index_base(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, RGWSI_RADOS::Pool *index_pool, string *bucket_oid_base); @@ -55,7 +58,8 @@ class RGWSI_BucketIndex_RADOS : public RGWSI_BucketIndex uint32_t num_shards, rgw::BucketHashType hash_type, string *bucket_obj, int *shard_id); - int cls_bucket_head(const RGWBucketInfo& bucket_info, + int cls_bucket_head(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, int shard_id, vector *headers, map *bucket_instance_ids, @@ -92,37 +96,42 @@ public: return rgw_shards_mod(sid2, num_shards); } - int init_index(RGWBucketInfo& bucket_info); - int clean_index(RGWBucketInfo& bucket_info); + int init_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info); + int clean_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info); /* RADOS specific */ - int read_stats(const RGWBucketInfo& bucket_info, + int read_stats(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, RGWBucketEnt *stats, optional_yield y) override; - int get_reshard_status(const RGWBucketInfo& bucket_info, + int get_reshard_status(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, std::list *status); - int handle_overwrite(const RGWBucketInfo& info, + int handle_overwrite(const DoutPrefixProvider *dpp, const RGWBucketInfo& info, const RGWBucketInfo& orig_info) override; - int open_bucket_index_shard(const RGWBucketInfo& bucket_info, + int open_bucket_index_shard(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, const string& obj_key, RGWSI_RADOS::Obj *bucket_obj, int *shard_id); - int open_bucket_index_shard(const RGWBucketInfo& bucket_info, + int open_bucket_index_shard(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, int shard_id, const rgw::bucket_index_layout_generation& idx_layout, RGWSI_RADOS::Obj *bucket_obj); - int open_bucket_index(const RGWBucketInfo& bucket_info, + int open_bucket_index(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, RGWSI_RADOS::Pool *index_pool, string *bucket_oid); - int open_bucket_index(const RGWBucketInfo& bucket_info, + int open_bucket_index(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, std::optional shard_id, RGWSI_RADOS::Pool *index_pool, map *bucket_objs, diff --git a/src/rgw/services/svc_bilog_rados.cc b/src/rgw/services/svc_bilog_rados.cc index 13368e24d6cf5..06cf5ce7a67ed 100644 --- a/src/rgw/services/svc_bilog_rados.cc +++ b/src/rgw/services/svc_bilog_rados.cc @@ -18,7 +18,7 @@ void RGWSI_BILog_RADOS::init(RGWSI_BucketIndex_RADOS *bi_rados_svc) 
svc.bi = bi_rados_svc; } -int RGWSI_BILog_RADOS::log_trim(const RGWBucketInfo& bucket_info, int shard_id, string& start_marker, string& end_marker) +int RGWSI_BILog_RADOS::log_trim(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, int shard_id, string& start_marker, string& end_marker) { RGWSI_RADOS::Pool index_pool; map bucket_objs; @@ -26,7 +26,7 @@ int RGWSI_BILog_RADOS::log_trim(const RGWBucketInfo& bucket_info, int shard_id, BucketIndexShardsManager start_marker_mgr; BucketIndexShardsManager end_marker_mgr; - int r = svc.bi->open_bucket_index(bucket_info, shard_id, &index_pool, &bucket_objs, nullptr); + int r = svc.bi->open_bucket_index(dpp, bucket_info, shard_id, &index_pool, &bucket_objs, nullptr); if (r < 0) { return r; } @@ -45,22 +45,22 @@ int RGWSI_BILog_RADOS::log_trim(const RGWBucketInfo& bucket_info, int shard_id, cct->_conf->rgw_bucket_index_max_aio)(); } -int RGWSI_BILog_RADOS::log_start(const RGWBucketInfo& bucket_info, int shard_id) +int RGWSI_BILog_RADOS::log_start(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, int shard_id) { RGWSI_RADOS::Pool index_pool; map bucket_objs; - int r = svc.bi->open_bucket_index(bucket_info, shard_id, &index_pool, &bucket_objs, nullptr); + int r = svc.bi->open_bucket_index(dpp, bucket_info, shard_id, &index_pool, &bucket_objs, nullptr); if (r < 0) return r; return CLSRGWIssueResyncBucketBILog(index_pool.ioctx(), bucket_objs, cct->_conf->rgw_bucket_index_max_aio)(); } -int RGWSI_BILog_RADOS::log_stop(const RGWBucketInfo& bucket_info, int shard_id) +int RGWSI_BILog_RADOS::log_stop(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, int shard_id) { RGWSI_RADOS::Pool index_pool; map bucket_objs; - int r = svc.bi->open_bucket_index(bucket_info, shard_id, &index_pool, &bucket_objs, nullptr); + int r = svc.bi->open_bucket_index(dpp, bucket_info, shard_id, &index_pool, &bucket_objs, nullptr); if (r < 0) return r; @@ -77,16 +77,16 @@ static void build_bucket_index_marker(const string& shard_id_str, } } -int RGWSI_BILog_RADOS::log_list(const RGWBucketInfo& bucket_info, int shard_id, string& marker, uint32_t max, +int RGWSI_BILog_RADOS::log_list(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, int shard_id, string& marker, uint32_t max, std::list& result, bool *truncated) { - ldout(cct, 20) << __func__ << ": " << bucket_info.bucket << " marker " << marker << " shard_id=" << shard_id << " max " << max << dendl; + ldpp_dout(dpp, 20) << __func__ << ": " << bucket_info.bucket << " marker " << marker << " shard_id=" << shard_id << " max " << max << dendl; result.clear(); RGWSI_RADOS::Pool index_pool; map oids; map bi_log_lists; - int r = svc.bi->open_bucket_index(bucket_info, shard_id, &index_pool, &oids, nullptr); + int r = svc.bi->open_bucket_index(dpp, bucket_info, shard_id, &index_pool, &oids, nullptr); if (r < 0) return r; @@ -175,14 +175,15 @@ int RGWSI_BILog_RADOS::log_list(const RGWBucketInfo& bucket_info, int shard_id, return 0; } -int RGWSI_BILog_RADOS::get_log_status(const RGWBucketInfo& bucket_info, +int RGWSI_BILog_RADOS::get_log_status(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, int shard_id, map *markers, optional_yield y) { vector headers; map bucket_instance_ids; - int r = svc.bi->cls_bucket_head(bucket_info, shard_id, &headers, &bucket_instance_ids, y); + int r = svc.bi->cls_bucket_head(dpp, bucket_info, shard_id, &headers, &bucket_instance_ids, y); if (r < 0) return r; diff --git a/src/rgw/services/svc_bilog_rados.h b/src/rgw/services/svc_bilog_rados.h 
index 2691d209253d0..84f5679af1051 100644 --- a/src/rgw/services/svc_bilog_rados.h +++ b/src/rgw/services/svc_bilog_rados.h @@ -35,21 +35,24 @@ public: void init(RGWSI_BucketIndex_RADOS *bi_rados_svc); - int log_start(const RGWBucketInfo& bucket_info, int shard_id); - int log_stop(const RGWBucketInfo& bucket_info, int shard_id); + int log_start(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, int shard_id); + int log_stop(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, int shard_id); - int log_trim(const RGWBucketInfo& bucket_info, + int log_trim(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, int shard_id, std::string& start_marker, std::string& end_marker); - int log_list(const RGWBucketInfo& bucket_info, + int log_list(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, int shard_id, std::string& marker, uint32_t max, std::list& result, bool *truncated); - int get_log_status(const RGWBucketInfo& bucket_info, + int get_log_status(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, int shard_id, map *markers, optional_yield y); diff --git a/src/rgw/services/svc_bucket.h b/src/rgw/services/svc_bucket.h index 7e39302f43cce..fcdabca8b6f21 100644 --- a/src/rgw/services/svc_bucket.h +++ b/src/rgw/services/svc_bucket.h @@ -40,6 +40,7 @@ public: real_time *pmtime, map *pattrs, optional_yield y, + const DoutPrefixProvider *dpp, rgw_cache_entry_info *cache_info = nullptr, boost::optional refresh_version = boost::none) = 0; @@ -50,12 +51,14 @@ public: real_time mtime, map *pattrs, RGWObjVersionTracker *objv_tracker, - optional_yield y) = 0; + optional_yield y, + const DoutPrefixProvider *dpp) = 0; virtual int remove_bucket_entrypoint_info(RGWSI_Bucket_EP_Ctx& ctx, const string& key, RGWObjVersionTracker *objv_tracker, - optional_yield y) = 0; + optional_yield y, + const DoutPrefixProvider *dpp) = 0; virtual int read_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx, const string& key, @@ -63,6 +66,7 @@ public: real_time *pmtime, map *pattrs, optional_yield y, + const DoutPrefixProvider *dpp, rgw_cache_entry_info *cache_info = nullptr, boost::optional refresh_version = boost::none) = 0; @@ -72,7 +76,8 @@ public: real_time *pmtime, map *pattrs, boost::optional refresh_version, - optional_yield y) = 0; + optional_yield y, + const DoutPrefixProvider *dpp) = 0; virtual int store_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx, const string& key, @@ -82,21 +87,25 @@ public: bool exclusive, real_time mtime, map *pattrs, - optional_yield y) = 0; + optional_yield y, + const DoutPrefixProvider *dpp) = 0; virtual int remove_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx, const string& key, const RGWBucketInfo& bucket_info, RGWObjVersionTracker *objv_tracker, - optional_yield y) = 0; + optional_yield y, + const DoutPrefixProvider *dpp) = 0; virtual int read_bucket_stats(RGWSI_Bucket_X_Ctx& ctx, const rgw_bucket& bucket, RGWBucketEnt *ent, - optional_yield y) = 0; + optional_yield y, + const DoutPrefixProvider *dpp) = 0; virtual int read_buckets_stats(RGWSI_Bucket_X_Ctx& ctx, map& m, - optional_yield y) = 0; + optional_yield y, + const DoutPrefixProvider *dpp) = 0; }; diff --git a/src/rgw/services/svc_bucket_sobj.cc b/src/rgw/services/svc_bucket_sobj.cc index 8dea5461d113c..13aeedf03d6eb 100644 --- a/src/rgw/services/svc_bucket_sobj.cc +++ b/src/rgw/services/svc_bucket_sobj.cc @@ -159,7 +159,7 @@ void RGWSI_Bucket_SObj::init(RGWSI_Zone *_zone_svc, RGWSI_SysObj *_sysobj_svc, svc.bucket_sync = _bucket_sync_svc; } -int 
RGWSI_Bucket_SObj::do_start(optional_yield) +int RGWSI_Bucket_SObj::do_start(optional_yield, const DoutPrefixProvider *dpp) { binfo_cache.reset(new RGWChainedCacheImpl); binfo_cache->init(svc.cache); @@ -170,7 +170,7 @@ int RGWSI_Bucket_SObj::do_start(optional_yield) int r = svc.meta->create_be_handler(RGWSI_MetaBackend::Type::MDBE_SOBJ, &ep_handler); if (r < 0) { - ldout(ctx(), 0) << "ERROR: failed to create be handler: r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to create be handler: r=" << r << dendl; return r; } @@ -188,7 +188,7 @@ int RGWSI_Bucket_SObj::do_start(optional_yield) r = svc.meta->create_be_handler(RGWSI_MetaBackend::Type::MDBE_SOBJ, &bi_handler); if (r < 0) { - ldout(ctx(), 0) << "ERROR: failed to create be handler: r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to create be handler: r=" << r << dendl; return r; } @@ -210,6 +210,7 @@ int RGWSI_Bucket_SObj::read_bucket_entrypoint_info(RGWSI_Bucket_EP_Ctx& ctx, real_time *pmtime, map *pattrs, optional_yield y, + const DoutPrefixProvider *dpp, rgw_cache_entry_info *cache_info, boost::optional refresh_version) { @@ -218,7 +219,7 @@ int RGWSI_Bucket_SObj::read_bucket_entrypoint_info(RGWSI_Bucket_EP_Ctx& ctx, auto params = RGWSI_MBSObj_GetParams(&bl, pattrs, pmtime).set_cache_info(cache_info) .set_refresh_version(refresh_version); - int ret = svc.meta_be->get_entry(ctx.get(), key, params, objv_tracker, y); + int ret = svc.meta_be->get_entry(ctx.get(), key, params, objv_tracker, y, dpp); if (ret < 0) { return ret; } @@ -227,7 +228,7 @@ int RGWSI_Bucket_SObj::read_bucket_entrypoint_info(RGWSI_Bucket_EP_Ctx& ctx, try { decode(*entry_point, iter); } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: could not decode buffer info, caught buffer::error" << dendl; + ldpp_dout(dpp, 0) << "ERROR: could not decode buffer info, caught buffer::error" << dendl; return -EIO; } return 0; @@ -240,14 +241,15 @@ int RGWSI_Bucket_SObj::store_bucket_entrypoint_info(RGWSI_Bucket_EP_Ctx& ctx, real_time mtime, map *pattrs, RGWObjVersionTracker *objv_tracker, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { bufferlist bl; encode(info, bl); RGWSI_MBSObj_PutParams params(bl, pattrs, mtime, exclusive); - int ret = svc.meta_be->put(ctx.get(), key, params, objv_tracker, y); + int ret = svc.meta_be->put(ctx.get(), key, params, objv_tracker, y, dpp); if (ret < 0) { return ret; } @@ -258,10 +260,11 @@ int RGWSI_Bucket_SObj::store_bucket_entrypoint_info(RGWSI_Bucket_EP_Ctx& ctx, int RGWSI_Bucket_SObj::remove_bucket_entrypoint_info(RGWSI_Bucket_EP_Ctx& ctx, const string& key, RGWObjVersionTracker *objv_tracker, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { RGWSI_MBSObj_RemoveParams params; - return svc.meta_be->remove(ctx.get(), key, params, objv_tracker, y); + return svc.meta_be->remove(ctx.get(), key, params, objv_tracker, y, dpp); } int RGWSI_Bucket_SObj::read_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx, @@ -269,6 +272,7 @@ int RGWSI_Bucket_SObj::read_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx, RGWBucketInfo *info, real_time *pmtime, map *pattrs, optional_yield y, + const DoutPrefixProvider *dpp, rgw_cache_entry_info *cache_info, boost::optional refresh_version) { @@ -278,7 +282,7 @@ int RGWSI_Bucket_SObj::read_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx, if (auto e = binfo_cache->find(cache_key)) { if (refresh_version && e->info.objv_tracker.read_version.compare(&(*refresh_version))) { - lderr(cct) << "WARNING: The bucket info cache is inconsistent. 
This is " + ldpp_dout(dpp, -1) << "WARNING: The bucket info cache is inconsistent. This is " << "a failure that should be debugged. I am a nice machine, " << "so I will try to recover." << dendl; binfo_cache->invalidate(key); @@ -297,14 +301,14 @@ int RGWSI_Bucket_SObj::read_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx, int ret = do_read_bucket_instance_info(ctx, key, &e.info, &e.mtime, &e.attrs, - &ci, refresh_version, y); + &ci, refresh_version, y, dpp); *info = e.info; if (ret < 0) { if (ret != -ENOENT) { - lderr(cct) << "ERROR: do_read_bucket_instance_info failed: " << ret << dendl; + ldpp_dout(dpp, -1) << "ERROR: do_read_bucket_instance_info failed: " << ret << dendl; } else { - ldout(cct, 20) << "do_read_bucket_instance_info, bucket instance not found (key=" << key << ")" << dendl; + ldpp_dout(dpp, 20) << "do_read_bucket_instance_info, bucket instance not found (key=" << key << ")" << dendl; } return ret; } @@ -320,13 +324,13 @@ int RGWSI_Bucket_SObj::read_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx, } /* chain to only bucket instance and *not* bucket entrypoint */ - if (!binfo_cache->put(svc.cache, cache_key, &e, {&ci})) { - ldout(cct, 20) << "couldn't put binfo cache entry, might have raced with data changes" << dendl; + if (!binfo_cache->put(dpp, svc.cache, cache_key, &e, {&ci})) { + ldpp_dout(dpp, 20) << "couldn't put binfo cache entry, might have raced with data changes" << dendl; } if (refresh_version && refresh_version->compare(&info->objv_tracker.read_version)) { - lderr(cct) << "WARNING: The OSD has the same version I have. Something may " + ldpp_dout(dpp, -1) << "WARNING: The OSD has the same version I have. Something may " << "have gone squirrelly. An administrator may have forced a " << "change; otherwise there is a problem somewhere." 
<< dendl; } @@ -340,7 +344,8 @@ int RGWSI_Bucket_SObj::do_read_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx, real_time *pmtime, map *pattrs, rgw_cache_entry_info *cache_info, boost::optional refresh_version, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { bufferlist bl; RGWObjVersionTracker ot; @@ -348,7 +353,7 @@ int RGWSI_Bucket_SObj::do_read_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx, auto params = RGWSI_MBSObj_GetParams(&bl, pattrs, pmtime).set_cache_info(cache_info) .set_refresh_version(refresh_version); - int ret = svc.meta_be->get_entry(ctx.get(), key, params, &ot, y); + int ret = svc.meta_be->get_entry(ctx.get(), key, params, &ot, y, dpp); if (ret < 0) { return ret; } @@ -357,7 +362,7 @@ int RGWSI_Bucket_SObj::do_read_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx, try { decode(*info, iter); } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: could not decode buffer info, caught buffer::error" << dendl; + ldpp_dout(dpp, 0) << "ERROR: could not decode buffer info, caught buffer::error" << dendl; return -EIO; } info->objv_tracker = ot; @@ -370,7 +375,8 @@ int RGWSI_Bucket_SObj::read_bucket_info(RGWSI_Bucket_X_Ctx& ctx, real_time *pmtime, map *pattrs, boost::optional refresh_version, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { rgw_cache_entry_info cache_info; @@ -379,6 +385,7 @@ int RGWSI_Bucket_SObj::read_bucket_info(RGWSI_Bucket_X_Ctx& ctx, info, pmtime, pattrs, y, + dpp, &cache_info, refresh_version); } @@ -414,6 +421,7 @@ int RGWSI_Bucket_SObj::read_bucket_info(RGWSI_Bucket_X_Ctx& ctx, int ret = read_bucket_entrypoint_info(ctx.ep, bucket_entry, &entry_point, &ot, &ep_mtime, pattrs, y, + dpp, &entry_cache_info, refresh_version); if (ret < 0) { /* only init these fields */ @@ -424,7 +432,7 @@ int RGWSI_Bucket_SObj::read_bucket_info(RGWSI_Bucket_X_Ctx& ctx, if (entry_point.has_bucket_info) { *info = entry_point.old_bucket_info; info->bucket.tenant = bucket.tenant; - ldout(cct, 20) << "rgw_get_bucket_info: old bucket info, bucket=" << info->bucket << " owner " << info->owner << dendl; + ldpp_dout(dpp, 20) << "rgw_get_bucket_info: old bucket info, bucket=" << info->bucket << " owner " << info->owner << dendl; return 0; } @@ -435,7 +443,7 @@ int RGWSI_Bucket_SObj::read_bucket_info(RGWSI_Bucket_X_Ctx& ctx, pattrs->clear(); } - ldout(cct, 20) << "rgw_get_bucket_info: bucket instance: " << entry_point.bucket << dendl; + ldpp_dout(dpp, 20) << "rgw_get_bucket_info: bucket instance: " << entry_point.bucket << dendl; /* read bucket instance info */ @@ -445,10 +453,11 @@ int RGWSI_Bucket_SObj::read_bucket_info(RGWSI_Bucket_X_Ctx& ctx, ret = read_bucket_instance_info(ctx.bi, get_bi_meta_key(entry_point.bucket), &e.info, &e.mtime, &e.attrs, y, + dpp, &cache_info, refresh_version); *info = e.info; if (ret < 0) { - lderr(cct) << "ERROR: read_bucket_instance_from_oid failed: " << ret << dendl; + ldpp_dout(dpp, -1) << "ERROR: read_bucket_instance_from_oid failed: " << ret << dendl; info->bucket = bucket; // XXX and why return anything in case of an error anyway? 
return ret; @@ -460,13 +469,13 @@ int RGWSI_Bucket_SObj::read_bucket_info(RGWSI_Bucket_X_Ctx& ctx, *pattrs = e.attrs; /* chain to both bucket entry point and bucket instance */ - if (!binfo_cache->put(svc.cache, cache_key, &e, {&entry_cache_info, &cache_info})) { - ldout(cct, 20) << "couldn't put binfo cache entry, might have raced with data changes" << dendl; + if (!binfo_cache->put(dpp, svc.cache, cache_key, &e, {&entry_cache_info, &cache_info})) { + ldpp_dout(dpp, 20) << "couldn't put binfo cache entry, might have raced with data changes" << dendl; } if (refresh_version && refresh_version->compare(&info->objv_tracker.read_version)) { - lderr(cct) << "WARNING: The OSD has the same version I have. Something may " + ldpp_dout(dpp, -1) << "WARNING: The OSD has the same version I have. Something may " << "have gone squirrelly. An administrator may have forced a " << "change; otherwise there is a problem somewhere." << dendl; } @@ -482,7 +491,8 @@ int RGWSI_Bucket_SObj::store_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx, bool exclusive, real_time mtime, map *pattrs, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { bufferlist bl; encode(info, bl); @@ -502,10 +512,11 @@ int RGWSI_Bucket_SObj::store_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx, &shared_bucket_info, nullptr, nullptr, y, + dpp, nullptr, boost::none); if (r < 0) { if (r != -ENOENT) { - ldout(cct, 0) << "ERROR: " << __func__ << "(): read_bucket_instance_info() of key=" << key << " returned r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): read_bucket_instance_info() of key=" << key << " returned r=" << r << dendl; return r; } } else { @@ -514,19 +525,19 @@ int RGWSI_Bucket_SObj::store_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx, } if (orig_info && *orig_info && !exclusive) { - int r = svc.bi->handle_overwrite(info, *(orig_info.value())); + int r = svc.bi->handle_overwrite(dpp, info, *(orig_info.value())); if (r < 0) { - ldout(cct, 0) << "ERROR: " << __func__ << "(): svc.bi->handle_overwrite() of key=" << key << " returned r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): svc.bi->handle_overwrite() of key=" << key << " returned r=" << r << dendl; return r; } } RGWSI_MBSObj_PutParams params(bl, pattrs, mtime, exclusive); - int ret = svc.meta_be->put(ctx.get(), key, params, &info.objv_tracker, y); + int ret = svc.meta_be->put(ctx.get(), key, params, &info.objv_tracker, y, dpp); if (ret >= 0) { - int r = svc.bucket_sync->handle_bi_update(info, + int r = svc.bucket_sync->handle_bi_update(dpp, info, orig_info.value_or(nullptr), y); if (r < 0) { @@ -555,19 +566,20 @@ int RGWSI_Bucket_SObj::remove_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx, const string& key, const RGWBucketInfo& info, RGWObjVersionTracker *objv_tracker, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { RGWSI_MBSObj_RemoveParams params; - int ret = svc.meta_be->remove_entry(ctx.get(), key, params, objv_tracker, y); + int ret = svc.meta_be->remove_entry(dpp, ctx.get(), key, params, objv_tracker, y); if (ret < 0 && ret != -ENOENT) { return ret; } - int r = svc.bucket_sync->handle_bi_removal(info, y); + int r = svc.bucket_sync->handle_bi_removal(dpp, info, y); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to update bucket instance sync index: r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to update bucket instance sync index: r=" << r << dendl; /* returning success as index is just keeping hints, so will keep extra hints, * but bucket removal succeeded */ @@ -578,7 
+590,8 @@ int RGWSI_Bucket_SObj::remove_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx, int RGWSI_Bucket_SObj::read_bucket_stats(const RGWBucketInfo& bucket_info, RGWBucketEnt *ent, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { ent->count = 0; ent->size = 0; @@ -586,9 +599,9 @@ int RGWSI_Bucket_SObj::read_bucket_stats(const RGWBucketInfo& bucket_info, vector headers; - int r = svc.bi->read_stats(bucket_info, ent, y); + int r = svc.bi->read_stats(dpp, bucket_info, ent, y); if (r < 0) { - ldout(cct, 0) << "ERROR: " << __func__ << "(): read_stats returned r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): read_stats returned r=" << r << dendl; return r; } @@ -598,27 +611,29 @@ int RGWSI_Bucket_SObj::read_bucket_stats(const RGWBucketInfo& bucket_info, int RGWSI_Bucket_SObj::read_bucket_stats(RGWSI_Bucket_X_Ctx& ctx, const rgw_bucket& bucket, RGWBucketEnt *ent, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { RGWBucketInfo bucket_info; - int ret = read_bucket_info(ctx, bucket, &bucket_info, nullptr, nullptr, boost::none, y); + int ret = read_bucket_info(ctx, bucket, &bucket_info, nullptr, nullptr, boost::none, y, dpp); if (ret < 0) { return ret; } - return read_bucket_stats(bucket_info, ent, y); + return read_bucket_stats(bucket_info, ent, y, dpp); } int RGWSI_Bucket_SObj::read_buckets_stats(RGWSI_Bucket_X_Ctx& ctx, map& m, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { map::iterator iter; for (iter = m.begin(); iter != m.end(); ++iter) { RGWBucketEnt& ent = iter->second; - int r = read_bucket_stats(ctx, ent.bucket, &ent, y); + int r = read_bucket_stats(ctx, ent.bucket, &ent, y, dpp); if (r < 0) { - ldout(cct, 0) << "ERROR: " << __func__ << "(): read_bucket_stats returned r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): read_bucket_stats returned r=" << r << dendl; return r; } } diff --git a/src/rgw/services/svc_bucket_sobj.h b/src/rgw/services/svc_bucket_sobj.h index 10e134d5b24f2..776367e8aba4f 100644 --- a/src/rgw/services/svc_bucket_sobj.h +++ b/src/rgw/services/svc_bucket_sobj.h @@ -51,7 +51,7 @@ class RGWSI_Bucket_SObj : public RGWSI_Bucket RGWSI_BucketInstance_BE_Handler bi_be_handler; std::unique_ptr bi_be_module; - int do_start(optional_yield) override; + int do_start(optional_yield, const DoutPrefixProvider *dpp) override; int do_read_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx, const string& key, @@ -60,11 +60,13 @@ class RGWSI_Bucket_SObj : public RGWSI_Bucket map *pattrs, rgw_cache_entry_info *cache_info, boost::optional refresh_version, - optional_yield y); + optional_yield y, + const DoutPrefixProvider *dpp); int read_bucket_stats(const RGWBucketInfo& bucket_info, RGWBucketEnt *ent, - optional_yield y); + optional_yield y, + const DoutPrefixProvider *dpp); public: struct Svc { @@ -107,6 +109,7 @@ public: real_time *pmtime, map *pattrs, optional_yield y, + const DoutPrefixProvider *dpp, rgw_cache_entry_info *cache_info = nullptr, boost::optional refresh_version = boost::none) override; @@ -117,12 +120,14 @@ public: real_time mtime, map *pattrs, RGWObjVersionTracker *objv_tracker, - optional_yield y) override; + optional_yield y, + const DoutPrefixProvider *dpp) override; int remove_bucket_entrypoint_info(RGWSI_Bucket_EP_Ctx& ctx, const string& key, RGWObjVersionTracker *objv_tracker, - optional_yield y) override; + optional_yield y, + const DoutPrefixProvider *dpp) override; int read_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx, const string& key, @@ 
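The svc_bucket_sobj.h hunks show the interface side of the same change: each virtual gains a trailing `const DoutPrefixProvider *dpp` after `optional_yield y`, and every override has to change in lockstep. A standalone sketch of that shape, using stand-in types rather than the real RGW classes:

    // stand-in types, not the real RGW classes
    #include <iostream>
    #include <string>

    struct DoutPrefixProvider {                 // minimal stand-in for common/dout.h
      virtual std::ostream& gen_prefix(std::ostream& out) const = 0;
      virtual ~DoutPrefixProvider() = default;
    };
    struct optional_yield {};                   // stand-in for the real optional_yield

    struct BucketSvc {                          // plays the role of RGWSI_Bucket
      virtual int read_bucket_stats(const std::string& bucket, optional_yield y,
                                    const DoutPrefixProvider *dpp) = 0;  // was (bucket, y)
      virtual ~BucketSvc() = default;
    };

    struct BucketSvcSObj : BucketSvc {          // plays the role of RGWSI_Bucket_SObj
      int read_bucket_stats(const std::string& bucket, optional_yield y,
                            const DoutPrefixProvider *dpp) override {   // `override` flags any drift
        dpp->gen_prefix(std::cout) << "reading stats for bucket=" << bucket << "\n";
        return 0;
      }
    };

    struct ReqPrefix : DoutPrefixProvider {     // toy provider standing in for a request context
      std::ostream& gen_prefix(std::ostream& out) const override { return out << "req 123: "; }
    };

    int main() {
      ReqPrefix dpp;
      BucketSvcSObj svc;
      return svc.read_bucket_stats("mybucket", optional_yield{}, &dpp);
    }

Marking every overriding method `override`, as the real headers do, is what makes this kind of signature change safe: a missed update fails to compile instead of silently creating a new virtual.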
-130,6 +135,7 @@ public: real_time *pmtime, map *pattrs, optional_yield y, + const DoutPrefixProvider *dpp, rgw_cache_entry_info *cache_info = nullptr, boost::optional refresh_version = boost::none) override; @@ -139,7 +145,8 @@ public: real_time *pmtime, map *pattrs, boost::optional refresh_version, - optional_yield y) override; + optional_yield y, + const DoutPrefixProvider *dpp) override; int store_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx, const string& key, @@ -149,21 +156,25 @@ public: bool exclusive, real_time mtime, map *pattrs, - optional_yield y) override; + optional_yield y, + const DoutPrefixProvider *dpp) override; int remove_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx, const string& key, const RGWBucketInfo& bucket_info, RGWObjVersionTracker *objv_tracker, - optional_yield y) override; + optional_yield y, + const DoutPrefixProvider *dpp) override; int read_bucket_stats(RGWSI_Bucket_X_Ctx& ctx, const rgw_bucket& bucket, RGWBucketEnt *ent, - optional_yield y) override; + optional_yield y, + const DoutPrefixProvider *dpp) override; int read_buckets_stats(RGWSI_Bucket_X_Ctx& ctx, map& m, - optional_yield y) override; + optional_yield y, + const DoutPrefixProvider *dpp) override; }; diff --git a/src/rgw/services/svc_bucket_sync.h b/src/rgw/services/svc_bucket_sync.h index 4016490f71db8..d90856b7afe8e 100644 --- a/src/rgw/services/svc_bucket_sync.h +++ b/src/rgw/services/svc_bucket_sync.h @@ -34,15 +34,19 @@ public: std::optional zone, std::optional bucket, RGWBucketSyncPolicyHandlerRef *handler, - optional_yield y) = 0; + optional_yield y, + const DoutPrefixProvider *dpp) = 0; - virtual int handle_bi_update(RGWBucketInfo& bucket_info, + virtual int handle_bi_update(const DoutPrefixProvider *dpp, + RGWBucketInfo& bucket_info, RGWBucketInfo *orig_bucket_info, optional_yield y) = 0; - virtual int handle_bi_removal(const RGWBucketInfo& bucket_info, + virtual int handle_bi_removal(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, optional_yield y) = 0; - virtual int get_bucket_sync_hints(const rgw_bucket& bucket, + virtual int get_bucket_sync_hints(const DoutPrefixProvider *dpp, + const rgw_bucket& bucket, std::set *sources, std::set *dests, optional_yield y) = 0; diff --git a/src/rgw/services/svc_bucket_sync_sobj.cc b/src/rgw/services/svc_bucket_sync_sobj.cc index f80b9618bb9a5..88503344215a4 100644 --- a/src/rgw/services/svc_bucket_sync_sobj.cc +++ b/src/rgw/services/svc_bucket_sync_sobj.cc @@ -33,7 +33,8 @@ public: rgw_raw_obj get_dests_obj(const rgw_bucket& bucket) const; template - int update_hints(const RGWBucketInfo& bucket_info, + int update_hints(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, C1& added_dests, C2& removed_dests, C1& added_sources, @@ -59,7 +60,7 @@ void RGWSI_Bucket_Sync_SObj::init(RGWSI_Zone *_zone_svc, hint_index_mgr.reset(new RGWSI_Bucket_Sync_SObj_HintIndexManager(svc.zone, svc.sysobj)); } -int RGWSI_Bucket_Sync_SObj::do_start(optional_yield) +int RGWSI_Bucket_Sync_SObj::do_start(optional_yield, const DoutPrefixProvider *dpp) { sync_policy_cache.reset(new RGWChainedCacheImpl); sync_policy_cache->init(svc.cache); @@ -71,7 +72,7 @@ void RGWSI_Bucket_Sync_SObj::get_hint_entities(RGWSI_Bucket_X_Ctx& ctx, const std::set& zones, const std::set& buckets, std::set *hint_entities, - optional_yield y) + optional_yield y, const DoutPrefixProvider *dpp) { vector hint_buckets; @@ -81,9 +82,9 @@ void RGWSI_Bucket_Sync_SObj::get_hint_entities(RGWSI_Bucket_X_Ctx& ctx, RGWBucketInfo hint_bucket_info; int ret = 
svc.bucket_sobj->read_bucket_info(ctx, b, &hint_bucket_info, nullptr, nullptr, boost::none, - y); + y, dpp); if (ret < 0) { - ldout(cct, 20) << "could not init bucket info for hint bucket=" << b << " ... skipping" << dendl; + ldpp_dout(dpp, 20) << "could not init bucket info for hint bucket=" << b << " ... skipping" << dendl; continue; } @@ -102,7 +103,8 @@ int RGWSI_Bucket_Sync_SObj::resolve_policy_hints(RGWSI_Bucket_X_Ctx& ctx, RGWBucketSyncPolicyHandlerRef& handler, RGWBucketSyncPolicyHandlerRef& zone_policy_handler, std::map& temp_map, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { set source_zones; set target_zones; @@ -115,8 +117,8 @@ int RGWSI_Bucket_Sync_SObj::resolve_policy_hints(RGWSI_Bucket_X_Ctx& ctx, std::set hint_entities; - get_hint_entities(ctx, source_zones, handler->get_source_hints(), &hint_entities, y); - get_hint_entities(ctx, target_zones, handler->get_target_hints(), &hint_entities, y); + get_hint_entities(ctx, source_zones, handler->get_source_hints(), &hint_entities, y, dpp); + get_hint_entities(ctx, target_zones, handler->get_target_hints(), &hint_entities, y, dpp); std::set resolved_sources; std::set resolved_dests; @@ -136,9 +138,9 @@ int RGWSI_Bucket_Sync_SObj::resolve_policy_hints(RGWSI_Bucket_X_Ctx& ctx, if (iter != temp_map.end()) { hint_bucket_handler = iter->second; } else { - int r = do_get_policy_handler(ctx, zid, hint_bucket, temp_map, &hint_bucket_handler, y); + int r = do_get_policy_handler(ctx, zid, hint_bucket, temp_map, &hint_bucket_handler, y, dpp); if (r < 0) { - ldout(cct, 20) << "could not get bucket sync policy handler for hint bucket=" << hint_bucket << " ... skipping" << dendl; + ldpp_dout(dpp, 20) << "could not get bucket sync policy handler for hint bucket=" << hint_bucket << " ... 
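get_hint_entities() above treats a failed bucket-info read as non-fatal: it logs a level-20 line with the caller's prefix and skips the bucket. A standalone sketch of that log-and-skip loop, with invented names and a stand-in prefix provider:

    // stand-in types and invented names; mirrors the skip-on-error loop above
    #include <functional>
    #include <iostream>
    #include <string>
    #include <vector>

    struct DoutPrefixProvider {
      virtual std::ostream& gen_prefix(std::ostream& out) const = 0;
      virtual ~DoutPrefixProvider() = default;
    };

    int collect_hints(const DoutPrefixProvider *dpp,
                      const std::vector<std::string>& buckets,
                      const std::function<int(const std::string&)>& read_info,
                      std::vector<std::string> *hints) {
      for (const auto& b : buckets) {
        int ret = read_info(b);            // e.g. read_bucket_info(..., y, dpp)
        if (ret < 0) {
          // non-fatal: note it with the caller's prefix and move on
          dpp->gen_prefix(std::cerr) << "could not init bucket info for hint bucket="
                                     << b << " ... skipping\n";
          continue;
        }
        hints->push_back(b);
      }
      return 0;
    }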
skipping" << dendl; continue; } } @@ -159,7 +161,8 @@ int RGWSI_Bucket_Sync_SObj::do_get_policy_handler(RGWSI_Bucket_X_Ctx& ctx, std::optional _bucket, std::map& temp_map, RGWBucketSyncPolicyHandlerRef *handler, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { if (!_bucket) { *handler = svc.zone->get_sync_policy_handler(zone); @@ -196,25 +199,26 @@ int RGWSI_Bucket_Sync_SObj::do_get_policy_handler(RGWSI_Bucket_X_Ctx& ctx, nullptr, &attrs, y, + dpp, &cache_info); if (r < 0) { if (r != -ENOENT) { - ldout(cct, 0) << "ERROR: svc.bucket->read_bucket_instance_info(key=" << bucket_key << ") returned r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: svc.bucket->read_bucket_instance_info(key=" << bucket_key << ") returned r=" << r << dendl; } return r; } auto zone_policy_handler = svc.zone->get_sync_policy_handler(zone); if (!zone_policy_handler) { - ldout(cct, 20) << "ERROR: could not find policy handler for zone=" << zone << dendl; + ldpp_dout(dpp, 20) << "ERROR: could not find policy handler for zone=" << zone << dendl; return -ENOENT; } e.handler.reset(zone_policy_handler->alloc_child(bucket_info, std::move(attrs))); - r = e.handler->init(y); + r = e.handler->init(dpp, y); if (r < 0) { - ldout(cct, 20) << "ERROR: failed to init bucket sync policy handler: r=" << r << dendl; + ldpp_dout(dpp, 20) << "ERROR: failed to init bucket sync policy handler: r=" << r << dendl; return r; } @@ -225,14 +229,14 @@ int RGWSI_Bucket_Sync_SObj::do_get_policy_handler(RGWSI_Bucket_X_Ctx& ctx, r = resolve_policy_hints(ctx, self_entity, e.handler, zone_policy_handler, - temp_map, y); + temp_map, y, dpp); if (r < 0) { - ldout(cct, 20) << "ERROR: failed to resolve policy hints: bucket_key=" << bucket_key << ", r=" << r << dendl; + ldpp_dout(dpp, 20) << "ERROR: failed to resolve policy hints: bucket_key=" << bucket_key << ", r=" << r << dendl; return r; } - if (!sync_policy_cache->put(svc.cache, cache_key, &e, {&cache_info})) { - ldout(cct, 20) << "couldn't put bucket_sync_policy cache entry, might have raced with data changes" << dendl; + if (!sync_policy_cache->put(dpp, svc.cache, cache_key, &e, {&cache_info})) { + ldpp_dout(dpp, 20) << "couldn't put bucket_sync_policy cache entry, might have raced with data changes" << dendl; } *handler = e.handler; @@ -244,10 +248,11 @@ int RGWSI_Bucket_Sync_SObj::get_policy_handler(RGWSI_Bucket_X_Ctx& ctx, std::optional zone, std::optional _bucket, RGWBucketSyncPolicyHandlerRef *handler, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { std::map temp_map; - return do_get_policy_handler(ctx, zone, _bucket, temp_map, handler, y); + return do_get_policy_handler(ctx, zone, _bucket, temp_map, handler, y, dpp); } static bool diff_sets(std::set& orig_set, @@ -470,7 +475,8 @@ public: } template - int update(const rgw_bucket& entity, + int update(const DoutPrefixProvider *dpp, + const rgw_bucket& entity, const RGWBucketInfo& info_source, C1 *add, C2 *remove, @@ -484,8 +490,8 @@ private: C2 *remove, single_instance_info *instance); - int read(optional_yield y); - int flush(optional_yield y); + int read(const DoutPrefixProvider *dpp, optional_yield y); + int flush(const DoutPrefixProvider *dpp, optional_yield y); void invalidate() { has_data = false; @@ -502,7 +508,8 @@ WRITE_CLASS_ENCODER(RGWSI_BS_SObj_HintIndexObj::single_instance_info) WRITE_CLASS_ENCODER(RGWSI_BS_SObj_HintIndexObj::info_map) template -int RGWSI_BS_SObj_HintIndexObj::update(const rgw_bucket& entity, +int RGWSI_BS_SObj_HintIndexObj::update(const DoutPrefixProvider *dpp, + 
const rgw_bucket& entity, const RGWBucketInfo& info_source, C1 *add, C2 *remove, @@ -516,9 +523,9 @@ int RGWSI_BS_SObj_HintIndexObj::update(const rgw_bucket& entity, for (int i = 0; i < MAX_RETRIES; ++i) { if (!has_data) { - r = read(y); + r = read(dpp, y); if (r < 0) { - ldout(cct, 0) << "ERROR: cannot update hint index: failed to read: r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: cannot update hint index: failed to read: r=" << r << dendl; return r; } } @@ -534,19 +541,19 @@ int RGWSI_BS_SObj_HintIndexObj::update(const rgw_bucket& entity, info.instances.erase(entity); } - r = flush(y); + r = flush(dpp, y); if (r >= 0) { return 0; } if (r != -ECANCELED) { - ldout(cct, 0) << "ERROR: failed to flush hint index: obj=" << obj << " r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to flush hint index: obj=" << obj << " r=" << r << dendl; return r; } invalidate(); } - ldout(cct, 0) << "ERROR: failed to flush hint index: too many retries (obj=" << obj << "), likely a bug" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to flush hint index: too many retries (obj=" << obj << "), likely a bug" << dendl; return -EIO; } @@ -571,14 +578,14 @@ void RGWSI_BS_SObj_HintIndexObj::update_entries(const rgw_bucket& info_source, } } -int RGWSI_BS_SObj_HintIndexObj::read(optional_yield y) { +int RGWSI_BS_SObj_HintIndexObj::read(const DoutPrefixProvider *dpp, optional_yield y) { RGWObjVersionTracker _ot; bufferlist bl; int r = sysobj.rop() .set_objv_tracker(&_ot) /* forcing read of current version */ - .read(&bl, y); + .read(dpp, &bl, y); if (r < 0 && r != -ENOENT) { - ldout(cct, 0) << "ERROR: failed reading data (obj=" << obj << "), r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed reading data (obj=" << obj << "), r=" << r << dendl; return r; } @@ -590,7 +597,7 @@ int RGWSI_BS_SObj_HintIndexObj::read(optional_yield y) { decode(info, iter); has_data = true; } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: " << __func__ << "(): failed to decode entries, ignoring" << dendl; + ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): failed to decode entries, ignoring" << dendl; info.clear(); } } else { @@ -600,7 +607,7 @@ int RGWSI_BS_SObj_HintIndexObj::read(optional_yield y) { return 0; } -int RGWSI_BS_SObj_HintIndexObj::flush(optional_yield y) { +int RGWSI_BS_SObj_HintIndexObj::flush(const DoutPrefixProvider *dpp, optional_yield y) { int r; if (!info.empty()) { @@ -609,12 +616,12 @@ int RGWSI_BS_SObj_HintIndexObj::flush(optional_yield y) { r = sysobj.wop() .set_objv_tracker(&ot) /* forcing read of current version */ - .write(bl, y); + .write(dpp, bl, y); } else { /* remove */ r = sysobj.wop() .set_objv_tracker(&ot) - .remove(y); + .remove(dpp, y); } if (r < 0) { @@ -641,7 +648,8 @@ rgw_raw_obj RGWSI_Bucket_Sync_SObj_HintIndexManager::get_dests_obj(const rgw_buc } template -int RGWSI_Bucket_Sync_SObj_HintIndexManager::update_hints(const RGWBucketInfo& bucket_info, +int RGWSI_Bucket_Sync_SObj_HintIndexManager::update_hints(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, C1& added_dests, C2& removed_dests, C1& added_sources, @@ -655,13 +663,13 @@ int RGWSI_Bucket_Sync_SObj_HintIndexManager::update_hints(const RGWBucketInfo& b /* update our dests */ RGWSI_BS_SObj_HintIndexObj index(svc.sysobj, get_dests_obj(bucket_info.bucket)); - int r = index.update(bucket_info.bucket, + int r = index.update(dpp, bucket_info.bucket, bucket_info, &added_dests, &removed_dests, y); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to update targets index for bucket=" << bucket_info.bucket << " 
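RGWSI_BS_SObj_HintIndexObj::update() above is a read-modify-flush loop: the flush is a conditional write guarded by the object version, -ECANCELED means a racing writer, so the cached state is invalidated and re-read before retrying, and after too many attempts it gives up with -EIO. A standalone sketch of that control flow; MAX_RETRIES and the callbacks here are stand-ins, not the real members:

    #include <cerrno>
    #include <functional>

    constexpr int MAX_RETRIES = 10;   // the real constant lives in the class

    int update_with_retries(const std::function<int()>& read,      // load current state
                            const std::function<void()>& modify,    // apply add/remove sets
                            const std::function<int()>& flush,      // conditional write
                            const std::function<void()>& invalidate) {
      bool has_data = false;
      for (int i = 0; i < MAX_RETRIES; ++i) {
        if (!has_data) {
          int r = read();
          if (r < 0) {
            return r;                 // cannot even load the index: report and bail
          }
          has_data = true;
        }
        modify();
        int r = flush();
        if (r >= 0) {
          return 0;                   // conditional write succeeded
        }
        if (r != -ECANCELED) {
          return r;                   // hard error
        }
        invalidate();                 // lost a race: drop cached state and retry
        has_data = false;
      }
      return -EIO;                    // too many retries, likely a bug
    }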
r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to update targets index for bucket=" << bucket_info.bucket << " r=" << r << dendl; return r; } @@ -669,13 +677,13 @@ int RGWSI_Bucket_Sync_SObj_HintIndexManager::update_hints(const RGWBucketInfo& b for (auto& dest_bucket : added_dests) { RGWSI_BS_SObj_HintIndexObj dep_index(svc.sysobj, get_sources_obj(dest_bucket)); - int r = dep_index.update(dest_bucket, + int r = dep_index.update(dpp, dest_bucket, bucket_info, &self_entity, static_cast(nullptr), y); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to update targets index for bucket=" << dest_bucket << " r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to update targets index for bucket=" << dest_bucket << " r=" << r << dendl; return r; } } @@ -683,13 +691,13 @@ int RGWSI_Bucket_Sync_SObj_HintIndexManager::update_hints(const RGWBucketInfo& b for (auto& dest_bucket : removed_dests) { RGWSI_BS_SObj_HintIndexObj dep_index(svc.sysobj, get_sources_obj(dest_bucket)); - int r = dep_index.update(dest_bucket, + int r = dep_index.update(dpp, dest_bucket, bucket_info, static_cast(nullptr), &self_entity, y); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to update targets index for bucket=" << dest_bucket << " r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to update targets index for bucket=" << dest_bucket << " r=" << r << dendl; return r; } } @@ -700,13 +708,13 @@ int RGWSI_Bucket_Sync_SObj_HintIndexManager::update_hints(const RGWBucketInfo& b RGWSI_BS_SObj_HintIndexObj index(svc.sysobj, get_sources_obj(bucket_info.bucket)); /* update our sources */ - int r = index.update(bucket_info.bucket, + int r = index.update(dpp, bucket_info.bucket, bucket_info, &added_sources, &removed_sources, y); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to update targets index for bucket=" << bucket_info.bucket << " r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to update targets index for bucket=" << bucket_info.bucket << " r=" << r << dendl; return r; } @@ -714,13 +722,13 @@ int RGWSI_Bucket_Sync_SObj_HintIndexManager::update_hints(const RGWBucketInfo& b for (auto& source_bucket : added_sources) { RGWSI_BS_SObj_HintIndexObj dep_index(svc.sysobj, get_dests_obj(source_bucket)); - int r = dep_index.update(source_bucket, + int r = dep_index.update(dpp, source_bucket, bucket_info, &self_entity, static_cast(nullptr), y); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to update targets index for bucket=" << source_bucket << " r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to update targets index for bucket=" << source_bucket << " r=" << r << dendl; return r; } } @@ -728,13 +736,13 @@ int RGWSI_Bucket_Sync_SObj_HintIndexManager::update_hints(const RGWBucketInfo& b for (auto& source_bucket : removed_sources) { RGWSI_BS_SObj_HintIndexObj dep_index(svc.sysobj, get_dests_obj(source_bucket)); - int r = dep_index.update(source_bucket, + int r = dep_index.update(dpp, source_bucket, bucket_info, static_cast(nullptr), &self_entity, y); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to update targets index for bucket=" << source_bucket << " r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to update targets index for bucket=" << source_bucket << " r=" << r << dendl; return r; } } @@ -743,7 +751,8 @@ int RGWSI_Bucket_Sync_SObj_HintIndexManager::update_hints(const RGWBucketInfo& b return 0; } -int RGWSI_Bucket_Sync_SObj::handle_bi_removal(const RGWBucketInfo& bucket_info, +int RGWSI_Bucket_Sync_SObj::handle_bi_removal(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, 
optional_yield y) { std::set sources_set; @@ -770,7 +779,7 @@ int RGWSI_Bucket_Sync_SObj::handle_bi_removal(const RGWBucketInfo& bucket_info, std::vector added_sources; std::vector added_dests; - return hint_index_mgr->update_hints(bucket_info, + return hint_index_mgr->update_hints(dpp, bucket_info, added_dests, removed_dests, added_sources, @@ -778,7 +787,8 @@ int RGWSI_Bucket_Sync_SObj::handle_bi_removal(const RGWBucketInfo& bucket_info, y); } -int RGWSI_Bucket_Sync_SObj::handle_bi_update(RGWBucketInfo& bucket_info, +int RGWSI_Bucket_Sync_SObj::handle_bi_update(const DoutPrefixProvider *dpp, + RGWBucketInfo& bucket_info, RGWBucketInfo *orig_bucket_info, optional_yield y) { @@ -803,21 +813,21 @@ int RGWSI_Bucket_Sync_SObj::handle_bi_update(RGWBucketInfo& bucket_info, std::vector removed_sources; std::vector added_sources; bool found = diff_sets(orig_sources, sources, &added_sources, &removed_sources); - ldout(cct, 20) << __func__ << "(): bucket=" << bucket_info.bucket << ": orig_sources=" << orig_sources << " new_sources=" << sources << dendl; - ldout(cct, 20) << __func__ << "(): bucket=" << bucket_info.bucket << ": potential sources added=" << added_sources << " removed=" << removed_sources << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): bucket=" << bucket_info.bucket << ": orig_sources=" << orig_sources << " new_sources=" << sources << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): bucket=" << bucket_info.bucket << ": potential sources added=" << added_sources << " removed=" << removed_sources << dendl; std::vector removed_dests; std::vector added_dests; found = found || diff_sets(orig_dests, dests, &added_dests, &removed_dests); - ldout(cct, 20) << __func__ << "(): bucket=" << bucket_info.bucket << ": orig_dests=" << orig_dests << " new_dests=" << dests << dendl; - ldout(cct, 20) << __func__ << "(): bucket=" << bucket_info.bucket << ": potential dests added=" << added_dests << " removed=" << removed_dests << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): bucket=" << bucket_info.bucket << ": orig_dests=" << orig_dests << " new_dests=" << dests << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): bucket=" << bucket_info.bucket << ": potential dests added=" << added_dests << " removed=" << removed_dests << dendl; if (!found) { return 0; } - return hint_index_mgr->update_hints(bucket_info, + return hint_index_mgr->update_hints(dpp, bucket_info, dests, /* set all dests, not just the ones that were added */ removed_dests, sources, /* set all sources, not just that the ones that were added */ @@ -825,7 +835,8 @@ int RGWSI_Bucket_Sync_SObj::handle_bi_update(RGWBucketInfo& bucket_info, y); } -int RGWSI_Bucket_Sync_SObj::get_bucket_sync_hints(const rgw_bucket& bucket, +int RGWSI_Bucket_Sync_SObj::get_bucket_sync_hints(const DoutPrefixProvider *dpp, + const rgw_bucket& bucket, std::set *sources, std::set *dests, optional_yield y) @@ -837,9 +848,9 @@ int RGWSI_Bucket_Sync_SObj::get_bucket_sync_hints(const rgw_bucket& bucket, if (sources) { RGWSI_BS_SObj_HintIndexObj index(svc.sysobj, hint_index_mgr->get_sources_obj(bucket)); - int r = index.read(y); + int r = index.read(dpp, y); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to update sources index for bucket=" << bucket << " r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to update sources index for bucket=" << bucket << " r=" << r << dendl; return r; } @@ -855,9 +866,9 @@ int RGWSI_Bucket_Sync_SObj::get_bucket_sync_hints(const rgw_bucket& bucket, if (dests) { RGWSI_BS_SObj_HintIndexObj index(svc.sysobj, 
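handle_bi_update() above compares the old and new source/dest hint sets with diff_sets() and only rewrites the hint index objects when something actually changed. Roughly, the helper computes the added and removed entries and reports whether there was any difference; a sketch using strings in place of rgw_bucket (the real helper's exact signature differs slightly):

    #include <algorithm>
    #include <iterator>
    #include <set>
    #include <string>
    #include <vector>

    bool diff_sets(const std::set<std::string>& orig,
                   const std::set<std::string>& now,
                   std::vector<std::string> *added,
                   std::vector<std::string> *removed) {
      std::set_difference(now.begin(), now.end(), orig.begin(), orig.end(),
                          std::back_inserter(*added));
      std::set_difference(orig.begin(), orig.end(), now.begin(), now.end(),
                          std::back_inserter(*removed));
      return !added->empty() || !removed->empty();
    }

    // handle_bi_update() then calls update_hints(dpp, ...) only if either the
    // source diff or the dest diff reported a change.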
hint_index_mgr->get_dests_obj(bucket)); - int r = index.read(y); + int r = index.read(dpp, y); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to read targets index for bucket=" << bucket << " r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to read targets index for bucket=" << bucket << " r=" << r << dendl; return r; } diff --git a/src/rgw/services/svc_bucket_sync_sobj.h b/src/rgw/services/svc_bucket_sync_sobj.h index 49f764881ca4c..60786665d1d5a 100644 --- a/src/rgw/services/svc_bucket_sync_sobj.h +++ b/src/rgw/services/svc_bucket_sync_sobj.h @@ -43,7 +43,7 @@ class RGWSI_Bucket_Sync_SObj : public RGWSI_Bucket_Sync std::unique_ptr hint_index_mgr; - int do_start(optional_yield) override; + int do_start(optional_yield, const DoutPrefixProvider *dpp) override; struct optional_zone_bucket { optional zone; @@ -67,19 +67,21 @@ class RGWSI_Bucket_Sync_SObj : public RGWSI_Bucket_Sync const std::set& zone_names, const std::set& buckets, std::set *hint_entities, - optional_yield y); + optional_yield y, const DoutPrefixProvider *); int resolve_policy_hints(RGWSI_Bucket_X_Ctx& ctx, rgw_sync_bucket_entity& self_entity, RGWBucketSyncPolicyHandlerRef& handler, RGWBucketSyncPolicyHandlerRef& zone_policy_handler, std::map& temp_map, - optional_yield y); + optional_yield y, + const DoutPrefixProvider *dpp); int do_get_policy_handler(RGWSI_Bucket_X_Ctx& ctx, std::optional zone, std::optional _bucket, std::map& temp_map, RGWBucketSyncPolicyHandlerRef *handler, - optional_yield y); + optional_yield y, + const DoutPrefixProvider *dpp); public: struct Svc { RGWSI_Zone *zone{nullptr}; @@ -101,15 +103,19 @@ public: std::optional zone, std::optional bucket, RGWBucketSyncPolicyHandlerRef *handler, - optional_yield y); + optional_yield y, + const DoutPrefixProvider *dpp); - int handle_bi_update(RGWBucketInfo& bucket_info, + int handle_bi_update(const DoutPrefixProvider *dpp, + RGWBucketInfo& bucket_info, RGWBucketInfo *orig_bucket_info, optional_yield y) override; - int handle_bi_removal(const RGWBucketInfo& bucket_info, + int handle_bi_removal(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, optional_yield y) override; - int get_bucket_sync_hints(const rgw_bucket& bucket, + int get_bucket_sync_hints(const DoutPrefixProvider *dpp, + const rgw_bucket& bucket, std::set *sources, std::set *dests, optional_yield y) override; diff --git a/src/rgw/services/svc_cls.cc b/src/rgw/services/svc_cls.cc index 984d92a5ee95f..d2aaa6d889d94 100644 --- a/src/rgw/services/svc_cls.cc +++ b/src/rgw/services/svc_cls.cc @@ -17,36 +17,36 @@ static string log_lock_name = "rgw_log_lock"; -int RGWSI_Cls::do_start(optional_yield y) +int RGWSI_Cls::do_start(optional_yield y, const DoutPrefixProvider *dpp) { - int r = mfa.do_start(y); + int r = mfa.do_start(y, dpp); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to start mfa service" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to start mfa service" << dendl; return r; } return 0; } -int RGWSI_Cls::MFA::get_mfa_obj(const rgw_user& user, std::optional *obj) +int RGWSI_Cls::MFA::get_mfa_obj(const DoutPrefixProvider *dpp, const rgw_user& user, std::optional *obj) { string oid = get_mfa_oid(user); rgw_raw_obj o(zone_svc->get_zone_params().otp_pool, oid); obj->emplace(rados_svc->obj(o)); - int r = (*obj)->open(); + int r = (*obj)->open(dpp); if (r < 0) { - ldout(cct, 4) << "failed to open rados context for " << o << dendl; + ldpp_dout(dpp, 4) << "failed to open rados context for " << o << dendl; return r; } return 0; } -int RGWSI_Cls::MFA::get_mfa_ref(const rgw_user& 
user, rgw_rados_ref *ref) +int RGWSI_Cls::MFA::get_mfa_ref(const DoutPrefixProvider *dpp, const rgw_user& user, rgw_rados_ref *ref) { std::optional obj; - int r = get_mfa_obj(user, &obj); + int r = get_mfa_obj(dpp, user, &obj); if (r < 0) { return r; } @@ -54,10 +54,10 @@ int RGWSI_Cls::MFA::get_mfa_ref(const rgw_user& user, rgw_rados_ref *ref) return 0; } -int RGWSI_Cls::MFA::check_mfa(const rgw_user& user, const string& otp_id, const string& pin, optional_yield y) +int RGWSI_Cls::MFA::check_mfa(const DoutPrefixProvider *dpp, const rgw_user& user, const string& otp_id, const string& pin, optional_yield y) { rgw_rados_ref ref; - int r = get_mfa_ref(user, &ref); + int r = get_mfa_ref(dpp, user, &ref); if (r < 0) { return r; } @@ -68,7 +68,7 @@ int RGWSI_Cls::MFA::check_mfa(const rgw_user& user, const string& otp_id, const if (r < 0) return r; - ldout(cct, 20) << "OTP check, otp_id=" << otp_id << " result=" << (int)result.result << dendl; + ldpp_dout(dpp, 20) << "OTP check, otp_id=" << otp_id << " result=" << (int)result.result << dendl; return (result.result == rados::cls::otp::OTP_CHECK_SUCCESS ? 0 : -EACCES); } @@ -97,11 +97,11 @@ void RGWSI_Cls::MFA::prepare_mfa_write(librados::ObjectWriteOperation *op, op->mtime2(&mtime_ts); } -int RGWSI_Cls::MFA::create_mfa(const rgw_user& user, const rados::cls::otp::otp_info_t& config, +int RGWSI_Cls::MFA::create_mfa(const DoutPrefixProvider *dpp, const rgw_user& user, const rados::cls::otp::otp_info_t& config, RGWObjVersionTracker *objv_tracker, const ceph::real_time& mtime, optional_yield y) { std::optional obj; - int r = get_mfa_obj(user, &obj); + int r = get_mfa_obj(dpp, user, &obj); if (r < 0) { return r; } @@ -109,22 +109,23 @@ int RGWSI_Cls::MFA::create_mfa(const rgw_user& user, const rados::cls::otp::otp_ librados::ObjectWriteOperation op; prepare_mfa_write(&op, objv_tracker, mtime); rados::cls::otp::OTP::create(&op, config); - r = obj->operate(&op, y); + r = obj->operate(dpp, &op, y); if (r < 0) { - ldout(cct, 20) << "OTP create, otp_id=" << config.id << " result=" << (int)r << dendl; + ldpp_dout(dpp, 20) << "OTP create, otp_id=" << config.id << " result=" << (int)r << dendl; return r; } return 0; } -int RGWSI_Cls::MFA::remove_mfa(const rgw_user& user, const string& id, +int RGWSI_Cls::MFA::remove_mfa(const DoutPrefixProvider *dpp, + const rgw_user& user, const string& id, RGWObjVersionTracker *objv_tracker, const ceph::real_time& mtime, optional_yield y) { std::optional obj; - int r = get_mfa_obj(user, &obj); + int r = get_mfa_obj(dpp, user, &obj); if (r < 0) { return r; } @@ -132,21 +133,21 @@ int RGWSI_Cls::MFA::remove_mfa(const rgw_user& user, const string& id, librados::ObjectWriteOperation op; prepare_mfa_write(&op, objv_tracker, mtime); rados::cls::otp::OTP::remove(&op, id); - r = obj->operate(&op, y); + r = obj->operate(dpp, &op, y); if (r < 0) { - ldout(cct, 20) << "OTP remove, otp_id=" << id << " result=" << (int)r << dendl; + ldpp_dout(dpp, 20) << "OTP remove, otp_id=" << id << " result=" << (int)r << dendl; return r; } return 0; } -int RGWSI_Cls::MFA::get_mfa(const rgw_user& user, const string& id, rados::cls::otp::otp_info_t *result, +int RGWSI_Cls::MFA::get_mfa(const DoutPrefixProvider *dpp, const rgw_user& user, const string& id, rados::cls::otp::otp_info_t *result, optional_yield y) { rgw_rados_ref ref; - int r = get_mfa_ref(user, &ref); + int r = get_mfa_ref(dpp, user, &ref); if (r < 0) { return r; } @@ -159,12 +160,12 @@ int RGWSI_Cls::MFA::get_mfa(const rgw_user& user, const string& id, rados::cls:: return 0; } -int 
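check_mfa() above now logs its level-20 result line through the caller's dpp and collapses the OTP check outcome to 0 or -EACCES. A standalone sketch of that mapping, with a stand-in result enum and prefix provider in place of the rados OTP types:

    #include <cerrno>
    #include <iostream>
    #include <string>

    struct DoutPrefixProvider {
      virtual std::ostream& gen_prefix(std::ostream& out) const = 0;
      virtual ~DoutPrefixProvider() = default;
    };

    enum class OtpCheckResult { success, failure };   // stand-in for the cls otp result

    int check_mfa(const DoutPrefixProvider *dpp, const std::string& otp_id,
                  OtpCheckResult result) {
      // level-20 style debug line, prefixed with the caller's context
      dpp->gen_prefix(std::cout) << "OTP check, otp_id=" << otp_id
                                 << " result=" << static_cast<int>(result) << "\n";
      return result == OtpCheckResult::success ? 0 : -EACCES;
    }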
RGWSI_Cls::MFA::list_mfa(const rgw_user& user, list *result, +int RGWSI_Cls::MFA::list_mfa(const DoutPrefixProvider *dpp, const rgw_user& user, list *result, optional_yield y) { rgw_rados_ref ref; - int r = get_mfa_ref(user, &ref); + int r = get_mfa_ref(dpp, user, &ref); if (r < 0) { return r; } @@ -177,12 +178,12 @@ int RGWSI_Cls::MFA::list_mfa(const rgw_user& user, list& entries, +int RGWSI_Cls::MFA::set_mfa(const DoutPrefixProvider *dpp, const string& oid, const list& entries, bool reset_obj, RGWObjVersionTracker *objv_tracker, const real_time& mtime, optional_yield y) { rgw_raw_obj o(zone_svc->get_zone_params().otp_pool, oid); auto obj = rados_svc->obj(o); - int r = obj.open(); + int r = obj.open(dpp); if (r < 0) { - ldout(cct, 4) << "failed to open rados context for " << o << dendl; + ldpp_dout(dpp, 4) << "failed to open rados context for " << o << dendl; return r; } librados::ObjectWriteOperation op; @@ -215,24 +216,24 @@ int RGWSI_Cls::MFA::set_mfa(const string& oid, const listget_zone_params().log_pool, oid); obj = rados_svc->obj(o); - return obj.open(); + return obj.open(dpp); } -int RGWSI_Cls::TimeLog::add(const string& oid, +int RGWSI_Cls::TimeLog::add(const DoutPrefixProvider *dpp, + const string& oid, const real_time& ut, const string& section, const string& key, @@ -278,7 +280,7 @@ int RGWSI_Cls::TimeLog::add(const string& oid, { RGWSI_RADOS::Obj obj; - int r = init_obj(oid, obj); + int r = init_obj(dpp, oid, obj); if (r < 0) { return r; } @@ -287,10 +289,11 @@ int RGWSI_Cls::TimeLog::add(const string& oid, utime_t t(ut); cls_log_add(op, t, section, key, bl); - return obj.operate(&op, y); + return obj.operate(dpp, &op, y); } -int RGWSI_Cls::TimeLog::add(const string& oid, +int RGWSI_Cls::TimeLog::add(const DoutPrefixProvider *dpp, + const string& oid, std::list& entries, librados::AioCompletion *completion, bool monotonic_inc, @@ -298,7 +301,7 @@ int RGWSI_Cls::TimeLog::add(const string& oid, { RGWSI_RADOS::Obj obj; - int r = init_obj(oid, obj); + int r = init_obj(dpp, oid, obj); if (r < 0) { return r; } @@ -307,14 +310,15 @@ int RGWSI_Cls::TimeLog::add(const string& oid, cls_log_add(op, entries, monotonic_inc); if (!completion) { - r = obj.operate(&op, y); + r = obj.operate(dpp, &op, y); } else { r = obj.aio_operate(completion, &op); } return r; } -int RGWSI_Cls::TimeLog::list(const string& oid, +int RGWSI_Cls::TimeLog::list(const DoutPrefixProvider *dpp, + const string& oid, const real_time& start_time, const real_time& end_time, int max_entries, std::list& entries, @@ -325,7 +329,7 @@ int RGWSI_Cls::TimeLog::list(const string& oid, { RGWSI_RADOS::Obj obj; - int r = init_obj(oid, obj); + int r = init_obj(dpp, oid, obj); if (r < 0) { return r; } @@ -340,20 +344,21 @@ int RGWSI_Cls::TimeLog::list(const string& oid, bufferlist obl; - int ret = obj.operate(&op, &obl, y); + int ret = obj.operate(dpp, &op, &obl, y); if (ret < 0) return ret; return 0; } -int RGWSI_Cls::TimeLog::info(const string& oid, +int RGWSI_Cls::TimeLog::info(const DoutPrefixProvider *dpp, + const string& oid, cls_log_header *header, optional_yield y) { RGWSI_RADOS::Obj obj; - int r = init_obj(oid, obj); + int r = init_obj(dpp, oid, obj); if (r < 0) { return r; } @@ -364,19 +369,20 @@ int RGWSI_Cls::TimeLog::info(const string& oid, bufferlist obl; - int ret = obj.operate(&op, &obl, y); + int ret = obj.operate(dpp, &op, &obl, y); if (ret < 0) return ret; return 0; } -int RGWSI_Cls::TimeLog::info_async(RGWSI_RADOS::Obj& obj, +int RGWSI_Cls::TimeLog::info_async(const DoutPrefixProvider *dpp, + RGWSI_RADOS::Obj& 
obj, const string& oid, cls_log_header *header, librados::AioCompletion *completion) { - int r = init_obj(oid, obj); + int r = init_obj(dpp, oid, obj); if (r < 0) { return r; } @@ -392,7 +398,8 @@ int RGWSI_Cls::TimeLog::info_async(RGWSI_RADOS::Obj& obj, return 0; } -int RGWSI_Cls::TimeLog::trim(const string& oid, +int RGWSI_Cls::TimeLog::trim(const DoutPrefixProvider *dpp, + const string& oid, const real_time& start_time, const real_time& end_time, const string& from_marker, @@ -402,7 +409,7 @@ int RGWSI_Cls::TimeLog::trim(const string& oid, { RGWSI_RADOS::Obj obj; - int r = init_obj(oid, obj); + int r = init_obj(dpp, oid, obj); if (r < 0) { return r; } @@ -414,14 +421,15 @@ int RGWSI_Cls::TimeLog::trim(const string& oid, cls_log_trim(op, st, et, from_marker, to_marker); if (!completion) { - r = obj.operate(&op, y); + r = obj.operate(dpp, &op, y); } else { r = obj.aio_operate(completion, &op); } return r; } -int RGWSI_Cls::Lock::lock_exclusive(const rgw_pool& pool, +int RGWSI_Cls::Lock::lock_exclusive(const DoutPrefixProvider *dpp, + const rgw_pool& pool, const string& oid, timespan& duration, string& zone_id, @@ -429,7 +437,7 @@ int RGWSI_Cls::Lock::lock_exclusive(const rgw_pool& pool, std::optional lock_name) { auto p = rados_svc->pool(pool); - int r = p.open(); + int r = p.open(dpp); if (r < 0) { return r; } @@ -446,14 +454,15 @@ int RGWSI_Cls::Lock::lock_exclusive(const rgw_pool& pool, return l.lock_exclusive(&p.ioctx(), oid); } -int RGWSI_Cls::Lock::unlock(const rgw_pool& pool, +int RGWSI_Cls::Lock::unlock(const DoutPrefixProvider *dpp, + const rgw_pool& pool, const string& oid, string& zone_id, string& owner_id, std::optional lock_name) { auto p = rados_svc->pool(pool); - int r = p.open(); + int r = p.open(dpp); if (r < 0) { return r; } diff --git a/src/rgw/services/svc_cls.h b/src/rgw/services/svc_cls.h index 80674a286bcf3..61487b2f95487 100644 --- a/src/rgw/services/svc_cls.h +++ b/src/rgw/services/svc_cls.h @@ -48,8 +48,8 @@ class RGWSI_Cls : public RGWServiceInstance public: class MFA : public ClsSubService { - int get_mfa_obj(const rgw_user& user, std::optional *obj); - int get_mfa_ref(const rgw_user& user, rgw_rados_ref *ref); + int get_mfa_obj(const DoutPrefixProvider *dpp, const rgw_user& user, std::optional *obj); + int get_mfa_ref(const DoutPrefixProvider *dpp, const rgw_user& user, rgw_rados_ref *ref); void prepare_mfa_write(librados::ObjectWriteOperation *op, RGWObjVersionTracker *objv_tracker, @@ -62,25 +62,26 @@ public: return string("user:") + user.to_str(); } - int check_mfa(const rgw_user& user, const string& otp_id, const string& pin, optional_yield y); - int create_mfa(const rgw_user& user, const rados::cls::otp::otp_info_t& config, + int check_mfa(const DoutPrefixProvider *dpp, const rgw_user& user, const string& otp_id, const string& pin, optional_yield y); + int create_mfa(const DoutPrefixProvider *dpp, const rgw_user& user, const rados::cls::otp::otp_info_t& config, RGWObjVersionTracker *objv_tracker, const ceph::real_time& mtime, optional_yield y); - int remove_mfa(const rgw_user& user, const string& id, + int remove_mfa(const DoutPrefixProvider *dpp, + const rgw_user& user, const string& id, RGWObjVersionTracker *objv_tracker, const ceph::real_time& mtime, optional_yield y); - int get_mfa(const rgw_user& user, const string& id, rados::cls::otp::otp_info_t *result, optional_yield y); - int list_mfa(const rgw_user& user, list *result, optional_yield y); - int otp_get_current_time(const rgw_user& user, ceph::real_time *result, optional_yield y); - int 
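TimeLog::add() and TimeLog::trim() above keep their dual dispatch: when no completion is supplied the op runs synchronously through obj.operate(dpp, &op, y), otherwise it is queued with obj.aio_operate(completion, &op), and only the synchronous path needs the dpp for logging. A standalone sketch of that branch; the callables and the Completion type are stand-ins:

    #include <functional>

    struct Completion {};   // stands in for librados::AioCompletion

    int submit_op(const std::function<int()>& operate_sync,           // obj.operate(dpp, &op, y)
                  const std::function<int(Completion*)>& operate_async,
                  Completion *completion) {
      if (!completion) {
        return operate_sync();
      }
      return operate_async(completion);   // obj.aio_operate(completion, &op)
    }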
set_mfa(const string& oid, const list& entries, + int get_mfa(const DoutPrefixProvider *dpp, const rgw_user& user, const string& id, rados::cls::otp::otp_info_t *result, optional_yield y); + int list_mfa(const DoutPrefixProvider *dpp, const rgw_user& user, list *result, optional_yield y); + int otp_get_current_time(const DoutPrefixProvider *dpp, const rgw_user& user, ceph::real_time *result, optional_yield y); + int set_mfa(const DoutPrefixProvider *dpp, const string& oid, const list& entries, bool reset_obj, RGWObjVersionTracker *objv_tracker, const real_time& mtime, optional_yield y); - int list_mfa(const string& oid, list *result, + int list_mfa(const DoutPrefixProvider *dpp, const string& oid, list *result, RGWObjVersionTracker *objv_tracker, ceph::real_time *pmtime, optional_yield y); } mfa; class TimeLog : public ClsSubService { - int init_obj(const string& oid, RGWSI_RADOS::Obj& obj); + int init_obj(const DoutPrefixProvider *dpp, const string& oid, RGWSI_RADOS::Obj& obj); public: TimeLog(CephContext *cct): ClsSubService(cct) {} @@ -89,18 +90,21 @@ public: const string& section, const string& key, bufferlist& bl); - int add(const string& oid, + int add(const DoutPrefixProvider *dpp, + const string& oid, const real_time& ut, const string& section, const string& key, bufferlist& bl, optional_yield y); - int add(const string& oid, + int add(const DoutPrefixProvider *dpp, + const string& oid, std::list& entries, librados::AioCompletion *completion, bool monotonic_inc, optional_yield y); - int list(const string& oid, + int list(const DoutPrefixProvider *dpp, + const string& oid, const real_time& start_time, const real_time& end_time, int max_entries, list& entries, @@ -108,14 +112,17 @@ public: string *out_marker, bool *truncated, optional_yield y); - int info(const string& oid, + int info(const DoutPrefixProvider *dpp, + const string& oid, cls_log_header *header, optional_yield y); - int info_async(RGWSI_RADOS::Obj& obj, + int info_async(const DoutPrefixProvider *dpp, + RGWSI_RADOS::Obj& obj, const string& oid, cls_log_header *header, librados::AioCompletion *completion); - int trim(const string& oid, + int trim(const DoutPrefixProvider *dpp, + const string& oid, const real_time& start_time, const real_time& end_time, const string& from_marker, @@ -128,13 +135,15 @@ public: int init_obj(const string& oid, RGWSI_RADOS::Obj& obj); public: Lock(CephContext *cct): ClsSubService(cct) {} - int lock_exclusive(const rgw_pool& pool, + int lock_exclusive(const DoutPrefixProvider *dpp, + const rgw_pool& pool, const string& oid, timespan& duration, string& zone_id, string& owner_id, std::optional lock_name = std::nullopt); - int unlock(const rgw_pool& pool, + int unlock(const DoutPrefixProvider *dpp, + const rgw_pool& pool, const string& oid, string& zone_id, string& owner_id, @@ -152,6 +161,6 @@ public: lock.init(this, zone_svc, rados_svc); } - int do_start(optional_yield) override; + int do_start(optional_yield, const DoutPrefixProvider *dpp) override; }; diff --git a/src/rgw/services/svc_config_key_rados.cc b/src/rgw/services/svc_config_key_rados.cc index 7a16b22f0bb23..9bb0344af03af 100644 --- a/src/rgw/services/svc_config_key_rados.cc +++ b/src/rgw/services/svc_config_key_rados.cc @@ -2,7 +2,7 @@ #include "svc_rados.h" #include "svc_config_key_rados.h" -int RGWSI_ConfigKey_RADOS::do_start(optional_yield) +int RGWSI_ConfigKey_RADOS::do_start(optional_yield, const DoutPrefixProvider *dpp) { maybe_insecure_mon_conn = !svc.rados->check_secure_mon_conn(); diff --git 
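The do_start() changes in svc_cls.cc and svc_config_key_rados.cc follow the same pattern: startup now receives the dpp alongside the optional_yield and hands it down to each sub-service, so early failures carry the caller's prefix. A standalone sketch with stand-in types rather than the real service classes:

    #include <iostream>

    struct DoutPrefixProvider {
      virtual std::ostream& gen_prefix(std::ostream& out) const = 0;
      virtual ~DoutPrefixProvider() = default;
    };
    struct optional_yield {};

    struct SubService {                              // plays the role of the MFA sub-service
      int do_start(optional_yield, const DoutPrefixProvider *) { return 0; }
    };

    struct ClsLikeService {                          // plays the role of RGWSI_Cls
      SubService mfa;
      int do_start(optional_yield y, const DoutPrefixProvider *dpp) {
        int r = mfa.do_start(y, dpp);                // dpp cascades into sub-services
        if (r < 0) {
          dpp->gen_prefix(std::cerr) << "ERROR: failed to start mfa service\n";
          return r;
        }
        return 0;
      }
    };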
a/src/rgw/services/svc_config_key_rados.h b/src/rgw/services/svc_config_key_rados.h index d7997364fb64d..e0de60cac0113 100644 --- a/src/rgw/services/svc_config_key_rados.h +++ b/src/rgw/services/svc_config_key_rados.h @@ -31,7 +31,7 @@ class RGWSI_ConfigKey_RADOS : public RGWSI_ConfigKey bool maybe_insecure_mon_conn{false}; std::atomic_flag warned_insecure = ATOMIC_FLAG_INIT; - int do_start(optional_yield) override; + int do_start(optional_yield, const DoutPrefixProvider *dpp) override; void warn_if_insecure(); diff --git a/src/rgw/services/svc_finisher.cc b/src/rgw/services/svc_finisher.cc index 70938ecac107d..83e1b1514e42f 100644 --- a/src/rgw/services/svc_finisher.cc +++ b/src/rgw/services/svc_finisher.cc @@ -5,7 +5,7 @@ #include "svc_finisher.h" -int RGWSI_Finisher::do_start(optional_yield) +int RGWSI_Finisher::do_start(optional_yield, const DoutPrefixProvider *dpp) { finisher = new Finisher(cct); finisher->start(); diff --git a/src/rgw/services/svc_finisher.h b/src/rgw/services/svc_finisher.h index 9ce22dbce4a55..2f3ae52236aae 100644 --- a/src/rgw/services/svc_finisher.h +++ b/src/rgw/services/svc_finisher.h @@ -25,7 +25,7 @@ private: protected: void init() {} - int do_start(optional_yield y) override; + int do_start(optional_yield y, const DoutPrefixProvider *dpp) override; public: RGWSI_Finisher(CephContext *cct): RGWServiceInstance(cct) {} diff --git a/src/rgw/services/svc_mdlog.cc b/src/rgw/services/svc_mdlog.cc index cbc729ecfb796..f93c44d680e36 100644 --- a/src/rgw/services/svc_mdlog.cc +++ b/src/rgw/services/svc_mdlog.cc @@ -38,7 +38,7 @@ int RGWSI_MDLog::init(RGWSI_RADOS *_rados_svc, RGWSI_Zone *_zone_svc, RGWSI_SysO return 0; } -int RGWSI_MDLog::do_start(optional_yield y) +int RGWSI_MDLog::do_start(optional_yield y, const DoutPrefixProvider *dpp) { auto& current_period = svc.zone->get_current_period(); @@ -51,20 +51,21 @@ int RGWSI_MDLog::do_start(optional_yield y) if (run_sync && svc.zone->need_to_sync()) { // initialize the log period history - svc.mdlog->init_oldest_log_period(y); + svc.mdlog->init_oldest_log_period(y, dpp); } return 0; } int RGWSI_MDLog::read_history(RGWMetadataLogHistory *state, RGWObjVersionTracker *objv_tracker, - optional_yield y) const + optional_yield y, + const DoutPrefixProvider *dpp) const { auto obj_ctx = svc.sysobj->init_obj_ctx(); auto& pool = svc.zone->get_zone_params().log_pool; const auto& oid = RGWMetadataLogHistory::oid; bufferlist bl; - int ret = rgw_get_system_obj(obj_ctx, pool, oid, bl, objv_tracker, nullptr, y); + int ret = rgw_get_system_obj(obj_ctx, pool, oid, bl, objv_tracker, nullptr, y, dpp); if (ret < 0) { return ret; } @@ -72,9 +73,9 @@ int RGWSI_MDLog::read_history(RGWMetadataLogHistory *state, /* bad history object, remove it */ rgw_raw_obj obj(pool, oid); auto sysobj = obj_ctx.get_obj(obj); - ret = sysobj.wop().remove(y); + ret = sysobj.wop().remove(dpp, y); if (ret < 0) { - ldout(cct, 0) << "ERROR: meta history is empty, but cannot remove it (" << cpp_strerror(-ret) << ")" << dendl; + ldpp_dout(dpp, 0) << "ERROR: meta history is empty, but cannot remove it (" << cpp_strerror(-ret) << ")" << dendl; return ret; } return -ENOENT; @@ -83,14 +84,15 @@ int RGWSI_MDLog::read_history(RGWMetadataLogHistory *state, auto p = bl.cbegin(); state->decode(p); } catch (buffer::error& e) { - ldout(cct, 1) << "failed to decode the mdlog history: " + ldpp_dout(dpp, 1) << "failed to decode the mdlog history: " << e.what() << dendl; return -EIO; } return 0; } -int RGWSI_MDLog::write_history(const RGWMetadataLogHistory& state, +int 
RGWSI_MDLog::write_history(const DoutPrefixProvider *dpp, + const RGWMetadataLogHistory& state, RGWObjVersionTracker *objv_tracker, optional_yield y, bool exclusive) { @@ -100,7 +102,7 @@ int RGWSI_MDLog::write_history(const RGWMetadataLogHistory& state, auto& pool = svc.zone->get_zone_params().log_pool; const auto& oid = RGWMetadataLogHistory::oid; auto obj_ctx = svc.sysobj->init_obj_ctx(); - return rgw_put_system_obj(obj_ctx, pool, oid, bl, + return rgw_put_system_obj(dpp, obj_ctx, pool, oid, bl, exclusive, objv_tracker, real_time{}, y); } @@ -110,6 +112,7 @@ using Cursor = RGWPeriodHistory::Cursor; /// read the mdlog history and use it to initialize the given cursor class ReadHistoryCR : public RGWCoroutine { + const DoutPrefixProvider *dpp; Svc svc; Cursor *cursor; RGWObjVersionTracker *objv_tracker; @@ -117,16 +120,17 @@ class ReadHistoryCR : public RGWCoroutine { RGWAsyncRadosProcessor *async_processor; public: - ReadHistoryCR(const Svc& svc, + ReadHistoryCR(const DoutPrefixProvider *dpp, + const Svc& svc, Cursor *cursor, RGWObjVersionTracker *objv_tracker) - : RGWCoroutine(svc.zone->ctx()), svc(svc), + : RGWCoroutine(svc.zone->ctx()), dpp(dpp), svc(svc), cursor(cursor), objv_tracker(objv_tracker), async_processor(svc.rados->get_async_processor()) {} - int operate() { + int operate(const DoutPrefixProvider *dpp) { reenter(this) { yield { rgw_raw_obj obj{svc.zone->get_zone_params().log_pool, @@ -134,11 +138,11 @@ class ReadHistoryCR : public RGWCoroutine { constexpr bool empty_on_enoent = false; using ReadCR = RGWSimpleRadosReadCR; - call(new ReadCR(async_processor, svc.sysobj, obj, + call(new ReadCR(dpp, async_processor, svc.sysobj, obj, &state, empty_on_enoent, objv_tracker)); } if (retcode < 0) { - ldout(cct, 1) << "failed to read mdlog history: " + ldpp_dout(dpp, 1) << "failed to read mdlog history: " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); } @@ -147,7 +151,7 @@ class ReadHistoryCR : public RGWCoroutine { return set_cr_error(cursor->get_error()); } - ldout(cct, 10) << "read mdlog history with oldest period id=" + ldpp_dout(dpp, 10) << "read mdlog history with oldest period id=" << state.oldest_period_id << " realm_epoch=" << state.oldest_realm_epoch << dendl; return set_cr_done(); @@ -158,6 +162,7 @@ class ReadHistoryCR : public RGWCoroutine { /// write the given cursor to the mdlog history class WriteHistoryCR : public RGWCoroutine { + const DoutPrefixProvider *dpp; Svc svc; Cursor cursor; RGWObjVersionTracker *objv; @@ -165,15 +170,16 @@ class WriteHistoryCR : public RGWCoroutine { RGWAsyncRadosProcessor *async_processor; public: - WriteHistoryCR(Svc& svc, + WriteHistoryCR(const DoutPrefixProvider *dpp, + Svc& svc, const Cursor& cursor, RGWObjVersionTracker *objv) - : RGWCoroutine(svc.zone->ctx()), svc(svc), + : RGWCoroutine(svc.zone->ctx()), dpp(dpp), svc(svc), cursor(cursor), objv(objv), async_processor(svc.rados->get_async_processor()) {} - int operate() { + int operate(const DoutPrefixProvider *dpp) { reenter(this) { state.oldest_period_id = cursor.get_period().get_id(); state.oldest_realm_epoch = cursor.get_epoch(); @@ -183,15 +189,15 @@ class WriteHistoryCR : public RGWCoroutine { RGWMetadataLogHistory::oid}; using WriteCR = RGWSimpleRadosWriteCR; - call(new WriteCR(async_processor, svc.sysobj, obj, state, objv)); + call(new WriteCR(dpp, async_processor, svc.sysobj, obj, state, objv)); } if (retcode < 0) { - ldout(cct, 1) << "failed to write mdlog history: " + ldpp_dout(dpp, 1) << "failed to write mdlog history: " << cpp_strerror(retcode) << dendl; 
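ReadHistoryCR and WriteHistoryCR above show the coroutine side of the change: the dpp is accepted in the constructor, kept as a member for nested calls made while composing the coroutine, and operate() itself now receives a dpp so log lines in the body use the caller's prefix. A standalone sketch of that shape, with stand-in types and none of the real coroutine machinery:

    #include <iostream>

    struct DoutPrefixProvider {
      virtual std::ostream& gen_prefix(std::ostream& out) const = 0;
      virtual ~DoutPrefixProvider() = default;
    };

    class ReadSomethingCR {                        // plays the role of an RGWCoroutine
      const DoutPrefixProvider *dpp;               // captured at construction, used by nested calls
     public:
      explicit ReadSomethingCR(const DoutPrefixProvider *dpp) : dpp(dpp) {}

      // operate() changes from operate() to operate(dpp); the parameter
      // intentionally shadows the member, mirroring the real coroutines
      int operate(const DoutPrefixProvider *dpp) {
        dpp->gen_prefix(std::cout) << "read step with caller prefix\n";
        return 0;
      }
    };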
return set_cr_error(retcode); } - ldout(cct, 10) << "wrote mdlog history with oldest period id=" + ldpp_dout(dpp, 10) << "wrote mdlog history with oldest period id=" << state.oldest_period_id << " realm_epoch=" << state.oldest_realm_epoch << dendl; return set_cr_done(); @@ -202,6 +208,7 @@ class WriteHistoryCR : public RGWCoroutine { /// update the mdlog history to reflect trimmed logs class TrimHistoryCR : public RGWCoroutine { + const DoutPrefixProvider *dpp; Svc svc; const Cursor cursor; //< cursor to trimmed period RGWObjVersionTracker *objv; //< to prevent racing updates @@ -209,27 +216,27 @@ class TrimHistoryCR : public RGWCoroutine { Cursor existing; //< existing cursor read from disk public: - TrimHistoryCR(const Svc& svc, Cursor cursor, RGWObjVersionTracker *objv) - : RGWCoroutine(svc.zone->ctx()), svc(svc), + TrimHistoryCR(const DoutPrefixProvider *dpp, const Svc& svc, Cursor cursor, RGWObjVersionTracker *objv) + : RGWCoroutine(svc.zone->ctx()), dpp(dpp), svc(svc), cursor(cursor), objv(objv), next(cursor) { next.next(); // advance past cursor } - int operate() { + int operate(const DoutPrefixProvider *dpp) { reenter(this) { // read an existing history, and write the new history if it's newer - yield call(new ReadHistoryCR(svc, &existing, objv)); + yield call(new ReadHistoryCR(dpp, svc, &existing, objv)); if (retcode < 0) { return set_cr_error(retcode); } // reject older trims with ECANCELED if (cursor.get_epoch() < existing.get_epoch()) { - ldout(cct, 4) << "found oldest log epoch=" << existing.get_epoch() + ldpp_dout(dpp, 4) << "found oldest log epoch=" << existing.get_epoch() << ", rejecting trim at epoch=" << cursor.get_epoch() << dendl; return set_cr_error(-ECANCELED); } // overwrite with updated history - yield call(new WriteHistoryCR(svc, next, objv)); + yield call(new WriteHistoryCR(dpp, svc, next, objv)); if (retcode < 0) { return set_cr_error(retcode); } @@ -243,7 +250,7 @@ class TrimHistoryCR : public RGWCoroutine { // traverse all the way back to the beginning of the period history, and // return a cursor to the first period in a fully attached history -Cursor RGWSI_MDLog::find_oldest_period(optional_yield y) +Cursor RGWSI_MDLog::find_oldest_period(const DoutPrefixProvider *dpp, optional_yield y) { auto cursor = period_history->get_current(); @@ -253,13 +260,13 @@ Cursor RGWSI_MDLog::find_oldest_period(optional_yield y) auto& predecessor = cursor.get_period().get_predecessor(); if (predecessor.empty()) { // this is the first period, so our logs must start here - ldout(cct, 10) << "find_oldest_period returning first " + ldpp_dout(dpp, 10) << "find_oldest_period returning first " "period " << cursor.get_period().get_id() << dendl; return cursor; } // pull the predecessor and add it to our history RGWPeriod period; - int r = period_puller->pull(predecessor, period, y); + int r = period_puller->pull(dpp, predecessor, period, y); if (r < 0) { return cursor; } @@ -267,27 +274,27 @@ Cursor RGWSI_MDLog::find_oldest_period(optional_yield y) if (!prev) { return prev; } - ldout(cct, 20) << "find_oldest_period advancing to " + ldpp_dout(dpp, 20) << "find_oldest_period advancing to " "predecessor period " << predecessor << dendl; ceph_assert(cursor.has_prev()); } cursor.prev(); } - ldout(cct, 10) << "find_oldest_period returning empty cursor" << dendl; + ldpp_dout(dpp, 10) << "find_oldest_period returning empty cursor" << dendl; return cursor; } -Cursor RGWSI_MDLog::init_oldest_log_period(optional_yield y) +Cursor RGWSI_MDLog::init_oldest_log_period(optional_yield y, const 
DoutPrefixProvider *dpp) { // read the mdlog history RGWMetadataLogHistory state; RGWObjVersionTracker objv; - int ret = read_history(&state, &objv, y); + int ret = read_history(&state, &objv, y, dpp); if (ret == -ENOENT) { // initialize the mdlog history and write it - ldout(cct, 10) << "initializing mdlog history" << dendl; - auto cursor = find_oldest_period(y); + ldpp_dout(dpp, 10) << "initializing mdlog history" << dendl; + auto cursor = find_oldest_period(dpp, y); if (!cursor) { return cursor; } @@ -296,15 +303,15 @@ Cursor RGWSI_MDLog::init_oldest_log_period(optional_yield y) state.oldest_period_id = cursor.get_period().get_id(); constexpr bool exclusive = true; // don't overwrite - int ret = write_history(state, &objv, y, exclusive); + int ret = write_history(dpp, state, &objv, y, exclusive); if (ret < 0 && ret != -EEXIST) { - ldout(cct, 1) << "failed to write mdlog history: " + ldpp_dout(dpp, 1) << "failed to write mdlog history: " << cpp_strerror(ret) << dendl; return Cursor{ret}; } return cursor; } else if (ret < 0) { - ldout(cct, 1) << "failed to read mdlog history: " + ldpp_dout(dpp, 1) << "failed to read mdlog history: " << cpp_strerror(ret) << dendl; return Cursor{ret}; } @@ -314,13 +321,13 @@ Cursor RGWSI_MDLog::init_oldest_log_period(optional_yield y) if (cursor) { return cursor; } else { - cursor = find_oldest_period(y); + cursor = find_oldest_period(dpp, y); state.oldest_realm_epoch = cursor.get_epoch(); state.oldest_period_id = cursor.get_period().get_id(); - ldout(cct, 10) << "rewriting mdlog history" << dendl; - ret = write_history(state, &objv, y); + ldpp_dout(dpp, 10) << "rewriting mdlog history" << dendl; + ret = write_history(dpp, state, &objv, y); if (ret < 0 && ret != -ECANCELED) { - ldout(cct, 1) << "failed to write mdlog history: " + ldpp_dout(dpp, 1) << "failed to write mdlog history: " << cpp_strerror(ret) << dendl; return Cursor{ret}; } @@ -329,50 +336,50 @@ Cursor RGWSI_MDLog::init_oldest_log_period(optional_yield y) // pull the oldest period by id RGWPeriod period; - ret = period_puller->pull(state.oldest_period_id, period, y); + ret = period_puller->pull(dpp, state.oldest_period_id, period, y); if (ret < 0) { - ldout(cct, 1) << "failed to read period id=" << state.oldest_period_id + ldpp_dout(dpp, 1) << "failed to read period id=" << state.oldest_period_id << " for mdlog history: " << cpp_strerror(ret) << dendl; return Cursor{ret}; } // verify its realm_epoch if (period.get_realm_epoch() != state.oldest_realm_epoch) { - ldout(cct, 1) << "inconsistent mdlog history: read period id=" + ldpp_dout(dpp, 1) << "inconsistent mdlog history: read period id=" << period.get_id() << " with realm_epoch=" << period.get_realm_epoch() << ", expected realm_epoch=" << state.oldest_realm_epoch << dendl; return Cursor{-EINVAL}; } // attach the period to our history - return period_history->attach(std::move(period), y); + return period_history->attach(dpp, std::move(period), y); } -Cursor RGWSI_MDLog::read_oldest_log_period(optional_yield y) const +Cursor RGWSI_MDLog::read_oldest_log_period(optional_yield y, const DoutPrefixProvider *dpp) const { RGWMetadataLogHistory state; - int ret = read_history(&state, nullptr, y); + int ret = read_history(&state, nullptr, y, dpp); if (ret < 0) { - ldout(cct, 1) << "failed to read mdlog history: " + ldpp_dout(dpp, 1) << "failed to read mdlog history: " << cpp_strerror(ret) << dendl; return Cursor{ret}; } - ldout(cct, 10) << "read mdlog history with oldest period id=" + ldpp_dout(dpp, 10) << "read mdlog history with oldest period id=" << 
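init_oldest_log_period() above is a read-or-initialize sequence: read the history object, and on -ENOENT build the initial state and write it exclusively, treating -EEXIST as another gateway having won the race. A standalone sketch of that flow; the callbacks are stand-ins for read_history()/write_history():

    #include <cerrno>
    #include <functional>

    int read_or_init(const std::function<int()>& read_history,
                     const std::function<int(bool exclusive)>& write_history) {
      int ret = read_history();
      if (ret == -ENOENT) {
        constexpr bool exclusive = true;   // don't overwrite a concurrent writer
        ret = write_history(exclusive);
        if (ret < 0 && ret != -EEXIST) {
          return ret;                      // real failure
        }
        return 0;                          // our init, or someone else's, succeeded
      }
      return ret;                          // 0 on success, other errors propagate
    }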
state.oldest_period_id << " realm_epoch=" << state.oldest_realm_epoch << dendl; return period_history->lookup(state.oldest_realm_epoch); } -RGWCoroutine* RGWSI_MDLog::read_oldest_log_period_cr(Cursor *period, - RGWObjVersionTracker *objv) const +RGWCoroutine* RGWSI_MDLog::read_oldest_log_period_cr(const DoutPrefixProvider *dpp, + Cursor *period, RGWObjVersionTracker *objv) const { - return new mdlog::ReadHistoryCR(svc, period, objv); + return new mdlog::ReadHistoryCR(dpp, svc, period, objv); } -RGWCoroutine* RGWSI_MDLog::trim_log_period_cr(Cursor period, - RGWObjVersionTracker *objv) const +RGWCoroutine* RGWSI_MDLog::trim_log_period_cr(const DoutPrefixProvider *dpp, + Cursor period, RGWObjVersionTracker *objv) const { - return new mdlog::TrimHistoryCR(svc, period, objv); + return new mdlog::TrimHistoryCR(dpp, svc, period, objv); } RGWMetadataLog* RGWSI_MDLog::get_log(const std::string& period) @@ -384,10 +391,10 @@ RGWMetadataLog* RGWSI_MDLog::get_log(const std::string& period) return &insert.first->second; } -int RGWSI_MDLog::add_entry(const string& hash_key, const string& section, const string& key, bufferlist& bl) +int RGWSI_MDLog::add_entry(const DoutPrefixProvider *dpp, const string& hash_key, const string& section, const string& key, bufferlist& bl) { ceph_assert(current_log); // must have called init() - return current_log->add_entry(hash_key, section, key, bl); + return current_log->add_entry(dpp, hash_key, section, key, bl); } int RGWSI_MDLog::get_shard_id(const string& hash_key, int *shard_id) @@ -396,9 +403,9 @@ int RGWSI_MDLog::get_shard_id(const string& hash_key, int *shard_id) return current_log->get_shard_id(hash_key, shard_id); } -int RGWSI_MDLog::pull_period(const std::string& period_id, RGWPeriod& period, +int RGWSI_MDLog::pull_period(const DoutPrefixProvider *dpp, const std::string& period_id, RGWPeriod& period, optional_yield y) { - return period_puller->pull(period_id, period, y); + return period_puller->pull(dpp, period_id, period, y); } diff --git a/src/rgw/services/svc_mdlog.h b/src/rgw/services/svc_mdlog.h index da15e4b987308..57103efb464f4 100644 --- a/src/rgw/services/svc_mdlog.h +++ b/src/rgw/services/svc_mdlog.h @@ -71,35 +71,38 @@ public: RGWSI_SysObj *_sysobj_svc, RGWSI_Cls *_cls_svc); - int do_start(optional_yield y) override; + int do_start(optional_yield y, const DoutPrefixProvider *dpp) override; // traverse all the way back to the beginning of the period history, and // return a cursor to the first period in a fully attached history - RGWPeriodHistory::Cursor find_oldest_period(optional_yield y); + RGWPeriodHistory::Cursor find_oldest_period(const DoutPrefixProvider *dpp, optional_yield y); /// initialize the oldest log period if it doesn't exist, and attach it to /// our current history - RGWPeriodHistory::Cursor init_oldest_log_period(optional_yield y); + RGWPeriodHistory::Cursor init_oldest_log_period(optional_yield y, const DoutPrefixProvider *dpp); /// read the oldest log period, and return a cursor to it in our existing /// period history - RGWPeriodHistory::Cursor read_oldest_log_period(optional_yield y) const; + RGWPeriodHistory::Cursor read_oldest_log_period(optional_yield y, const DoutPrefixProvider *dpp) const; /// read the oldest log period asynchronously and write its result to the /// given cursor pointer - RGWCoroutine* read_oldest_log_period_cr(RGWPeriodHistory::Cursor *period, + RGWCoroutine* read_oldest_log_period_cr(const DoutPrefixProvider *dpp, + RGWPeriodHistory::Cursor *period, RGWObjVersionTracker *objv) const; /// try to advance 
the oldest log period when the given period is trimmed, /// using a rados lock to provide atomicity - RGWCoroutine* trim_log_period_cr(RGWPeriodHistory::Cursor period, + RGWCoroutine* trim_log_period_cr(const DoutPrefixProvider *dpp, + RGWPeriodHistory::Cursor period, RGWObjVersionTracker *objv) const; - int read_history(RGWMetadataLogHistory *state, RGWObjVersionTracker *objv_tracker,optional_yield y) const; - int write_history(const RGWMetadataLogHistory& state, + int read_history(RGWMetadataLogHistory *state, RGWObjVersionTracker *objv_tracker,optional_yield y, const DoutPrefixProvider *dpp) const; + int write_history(const DoutPrefixProvider *dpp, + const RGWMetadataLogHistory& state, RGWObjVersionTracker *objv_tracker, optional_yield y, bool exclusive = false); - int add_entry(const string& hash_key, const string& section, const string& key, bufferlist& bl); + int add_entry(const DoutPrefixProvider *dpp, const string& hash_key, const string& section, const string& key, bufferlist& bl); int get_shard_id(const string& hash_key, int *shard_id); @@ -107,7 +110,7 @@ public: return period_history.get(); } - int pull_period(const std::string& period_id, RGWPeriod& period, optional_yield y); + int pull_period(const DoutPrefixProvider *dpp, const std::string& period_id, RGWPeriod& period, optional_yield y); /// find or create the metadata log for the given period RGWMetadataLog* get_log(const std::string& period); diff --git a/src/rgw/services/svc_meta_be.cc b/src/rgw/services/svc_meta_be.cc index 08bc03d915009..0d4daffe2a41a 100644 --- a/src/rgw/services/svc_meta_be.cc +++ b/src/rgw/services/svc_meta_be.cc @@ -15,7 +15,8 @@ RGWSI_MetaBackend::PutParams::~PutParams() {} // ... RGWSI_MetaBackend::GetParams::~GetParams() {} // ... RGWSI_MetaBackend::RemoveParams::~RemoveParams() {} // ... 
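For reference, the interface being threaded through these signatures is small: a DoutPrefixProvider supplies a CephContext, a logging subsystem, and a per-line prefix, and ldpp_dout() pulls all three from the provider instead of taking a raw cct. The following minimal provider is an illustrative sketch only (MiniLogSource is a made-up name, includes are abbreviated and assume the Ceph source tree); it mirrors what RGWWatcher gains further down in svc_notify.cc:

#include "common/dout.h"

#define dout_subsys ceph_subsys_rgw

class MiniLogSource : public DoutPrefixProvider {
  CephContext *cct;
public:
  explicit MiniLogSource(CephContext *_cct) : cct(_cct) {}

  // The three members every DoutPrefixProvider must supply.
  CephContext *get_cct() const override { return cct; }
  unsigned get_subsys() const override { return dout_subsys; }
  std::ostream& gen_prefix(std::ostream& out) const override {
    return out << "mini log source: ";   // prepended to every log line
  }

  void ping() const {
    // ldpp_dout(dpp, level) replaces ldout(cct, level); the subsystem and
    // prefix come from the provider rather than being repeated per call.
    ldpp_dout(this, 10) << "ping" << dendl;
  }
};
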
-int RGWSI_MetaBackend::pre_modify(RGWSI_MetaBackend::Context *ctx, +int RGWSI_MetaBackend::pre_modify(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const string& key, RGWMetadataLogData& log_data, RGWObjVersionTracker *objv_tracker, @@ -34,7 +35,8 @@ int RGWSI_MetaBackend::pre_modify(RGWSI_MetaBackend::Context *ctx, return 0; } -int RGWSI_MetaBackend::post_modify(RGWSI_MetaBackend::Context *ctx, +int RGWSI_MetaBackend::post_modify(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const string& key, RGWMetadataLogData& log_data, RGWObjVersionTracker *objv_tracker, int ret, @@ -47,12 +49,13 @@ int RGWSI_MetaBackend::prepare_mutate(RGWSI_MetaBackend::Context *ctx, const string& key, const real_time& mtime, RGWObjVersionTracker *objv_tracker, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { real_time orig_mtime; int ret = call_with_get_params(&orig_mtime, [&](GetParams& params) { - return get_entry(ctx, key, params, objv_tracker, y); + return get_entry(ctx, key, params, objv_tracker, y, dpp); }); if (ret < 0 && ret != -ENOENT) { return ret; @@ -76,12 +79,13 @@ int RGWSI_MetaBackend::do_mutate(RGWSI_MetaBackend::Context *ctx, RGWMDLogStatus op_type, optional_yield y, std::function f, - bool generic_prepare) + bool generic_prepare, + const DoutPrefixProvider *dpp) { int ret; if (generic_prepare) { - ret = prepare_mutate(ctx, key, mtime, objv_tracker, y); + ret = prepare_mutate(ctx, key, mtime, objv_tracker, y, dpp); if (ret < 0 || ret == STATUS_NO_APPLY) { return ret; @@ -89,7 +93,7 @@ int RGWSI_MetaBackend::do_mutate(RGWSI_MetaBackend::Context *ctx, } RGWMetadataLogData log_data; - ret = pre_modify(ctx, key, log_data, objv_tracker, op_type, y); + ret = pre_modify(dpp, ctx, key, log_data, objv_tracker, op_type, y); if (ret < 0) { return ret; } @@ -98,7 +102,7 @@ int RGWSI_MetaBackend::do_mutate(RGWSI_MetaBackend::Context *ctx, /* cascading ret into post_modify() */ - ret = post_modify(ctx, key, log_data, objv_tracker, ret, y); + ret = post_modify(dpp, ctx, key, log_data, objv_tracker, ret, y); if (ret < 0) return ret; @@ -109,43 +113,48 @@ int RGWSI_MetaBackend::get(Context *ctx, const string& key, GetParams& params, RGWObjVersionTracker *objv_tracker, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { - return get_entry(ctx, key, params, objv_tracker, y); + return get_entry(ctx, key, params, objv_tracker, y, dpp); } int RGWSI_MetaBackend::put(Context *ctx, const string& key, PutParams& params, RGWObjVersionTracker *objv_tracker, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { std::function f = [&]() { - return put_entry(ctx, key, params, objv_tracker, y); + return put_entry(dpp, ctx, key, params, objv_tracker, y); }; return do_mutate(ctx, key, params.mtime, objv_tracker, MDLOG_STATUS_WRITE, y, f, - false); + false, + dpp); } int RGWSI_MetaBackend::remove(Context *ctx, const string& key, RemoveParams& params, RGWObjVersionTracker *objv_tracker, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { std::function f = [&]() { - return remove_entry(ctx, key, params, objv_tracker, y); + return remove_entry(dpp, ctx, key, params, objv_tracker, y); }; return do_mutate(ctx, key, params.mtime, objv_tracker, MDLOG_STATUS_REMOVE, y, f, - false); + false, + dpp); } int RGWSI_MetaBackend::mutate(Context *ctx, @@ -153,12 +162,14 @@ int RGWSI_MetaBackend::mutate(Context *ctx, MutateParams& params, RGWObjVersionTracker *objv_tracker, optional_yield y, - std::function f) + 
std::function f, + const DoutPrefixProvider *dpp) { return do_mutate(ctx, key, params.mtime, objv_tracker, params.op_type, y, f, - false); + false, + dpp); } int RGWSI_MetaBackend_Handler::call(std::optional bectx_params, diff --git a/src/rgw/services/svc_meta_be.h b/src/rgw/services/svc_meta_be.h index 0308c239e115b..af749d497f34b 100644 --- a/src/rgw/services/svc_meta_be.h +++ b/src/rgw/services/svc_meta_be.h @@ -46,7 +46,8 @@ protected: const std::string& key, const ceph::real_time& mtime, RGWObjVersionTracker *objv_tracker, - optional_yield y); + optional_yield y, + const DoutPrefixProvider *dpp); virtual int do_mutate(Context *ctx, const std::string& key, @@ -54,15 +55,18 @@ protected: RGWMDLogStatus op_type, optional_yield y, std::function f, - bool generic_prepare); + bool generic_prepare, + const DoutPrefixProvider *dpp); - virtual int pre_modify(Context *ctx, + virtual int pre_modify(const DoutPrefixProvider *dpp, + Context *ctx, const std::string& key, RGWMetadataLogData& log_data, RGWObjVersionTracker *objv_tracker, RGWMDLogStatus op_type, optional_yield y); - virtual int post_modify(Context *ctx, + virtual int post_modify(const DoutPrefixProvider *dpp, + Context *ctx, const std::string& key, RGWMetadataLogData& log_data, RGWObjVersionTracker *objv_tracker, int ret, @@ -140,19 +144,22 @@ public: const std::string& key, RGWSI_MetaBackend::GetParams& params, RGWObjVersionTracker *objv_tracker, - optional_yield y) = 0; - virtual int put_entry(RGWSI_MetaBackend::Context *ctx, + optional_yield y, + const DoutPrefixProvider *dpp) = 0; + virtual int put_entry(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const std::string& key, RGWSI_MetaBackend::PutParams& params, RGWObjVersionTracker *objv_tracker, optional_yield y) = 0; - virtual int remove_entry(Context *ctx, + virtual int remove_entry(const DoutPrefixProvider *dpp, + Context *ctx, const std::string& key, RGWSI_MetaBackend::RemoveParams& params, RGWObjVersionTracker *objv_tracker, optional_yield y) = 0; - virtual int list_init(RGWSI_MetaBackend::Context *ctx, const string& marker) = 0; + virtual int list_init(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *ctx, const string& marker) = 0; virtual int list_next(RGWSI_MetaBackend::Context *ctx, int max, list *keys, bool *truncated) = 0; @@ -175,26 +182,30 @@ public: const std::string& key, GetParams ¶ms, RGWObjVersionTracker *objv_tracker, - optional_yield y); + optional_yield y, + const DoutPrefixProvider *dpp); virtual int put(Context *ctx, const std::string& key, PutParams& params, RGWObjVersionTracker *objv_tracker, - optional_yield y); + optional_yield y, + const DoutPrefixProvider *dpp); virtual int remove(Context *ctx, const std::string& key, RemoveParams& params, RGWObjVersionTracker *objv_tracker, - optional_yield y); + optional_yield y, + const DoutPrefixProvider *dpp); virtual int mutate(Context *ctx, const std::string& key, MutateParams& params, RGWObjVersionTracker *objv_tracker, optional_yield y, - std::function f); + std::function f, + const DoutPrefixProvider *dpp); }; class RGWSI_MetaBackend_Handler { @@ -218,34 +229,35 @@ public: int get(const std::string& key, RGWSI_MetaBackend::GetParams ¶ms, RGWObjVersionTracker *objv_tracker, - optional_yield y) { - return be->get(be_ctx, key, params, objv_tracker, y); + optional_yield y, const DoutPrefixProvider *dpp) { + return be->get(be_ctx, key, params, objv_tracker, y, dpp); } int put(const std::string& key, RGWSI_MetaBackend::PutParams& params, RGWObjVersionTracker *objv_tracker, - 
optional_yield y) { - return be->put(be_ctx, key, params, objv_tracker, y); + optional_yield y, const DoutPrefixProvider *dpp) { + return be->put(be_ctx, key, params, objv_tracker, y, dpp); } int remove(const std::string& key, RGWSI_MetaBackend::RemoveParams& params, RGWObjVersionTracker *objv_tracker, - optional_yield y) { - return be->remove(be_ctx, key, params, objv_tracker, y); + optional_yield y, const DoutPrefixProvider *dpp) { + return be->remove(be_ctx, key, params, objv_tracker, y, dpp); } int mutate(const std::string& key, RGWSI_MetaBackend::MutateParams& params, RGWObjVersionTracker *objv_tracker, optional_yield y, - std::function f) { - return be->mutate(be_ctx, key, params, objv_tracker, y, f); + std::function f, + const DoutPrefixProvider *dpp) { + return be->mutate(be_ctx, key, params, objv_tracker, y, f, dpp); } - int list_init(const string& marker) { - return be->list_init(be_ctx, marker); + int list_init(const DoutPrefixProvider *dpp, const string& marker) { + return be->list_init(dpp, be_ctx, marker); } int list_next(int max, list *keys, bool *truncated) { diff --git a/src/rgw/services/svc_meta_be_otp.cc b/src/rgw/services/svc_meta_be_otp.cc index 986296a118bc5..1800f8a3cac52 100644 --- a/src/rgw/services/svc_meta_be_otp.cc +++ b/src/rgw/services/svc_meta_be_otp.cc @@ -44,11 +44,12 @@ int RGWSI_MetaBackend_OTP::get_entry(RGWSI_MetaBackend::Context *_ctx, const string& key, RGWSI_MetaBackend::GetParams& _params, RGWObjVersionTracker *objv_tracker, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { RGWSI_MBOTP_GetParams& params = static_cast(_params); - int r = cls_svc->mfa.list_mfa(key, params.pdevices, objv_tracker, params.pmtime, y); + int r = cls_svc->mfa.list_mfa(dpp, key, params.pdevices, objv_tracker, params.pmtime, y); if (r < 0) { return r; } @@ -56,7 +57,8 @@ int RGWSI_MetaBackend_OTP::get_entry(RGWSI_MetaBackend::Context *_ctx, return 0; } -int RGWSI_MetaBackend_OTP::put_entry(RGWSI_MetaBackend::Context *_ctx, +int RGWSI_MetaBackend_OTP::put_entry(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *_ctx, const string& key, RGWSI_MetaBackend::PutParams& _params, RGWObjVersionTracker *objv_tracker, @@ -64,6 +66,6 @@ int RGWSI_MetaBackend_OTP::put_entry(RGWSI_MetaBackend::Context *_ctx, { RGWSI_MBOTP_PutParams& params = static_cast(_params); - return cls_svc->mfa.set_mfa(key, params.devices, true, objv_tracker, params.mtime, y); + return cls_svc->mfa.set_mfa(dpp, key, params.devices, true, objv_tracker, params.mtime, y); } diff --git a/src/rgw/services/svc_meta_be_otp.h b/src/rgw/services/svc_meta_be_otp.h index 85b9d16b3d323..9da97b024bbb7 100644 --- a/src/rgw/services/svc_meta_be_otp.h +++ b/src/rgw/services/svc_meta_be_otp.h @@ -77,8 +77,10 @@ public: const string& key, RGWSI_MetaBackend::GetParams& _params, RGWObjVersionTracker *objv_tracker, - optional_yield y); - int put_entry(RGWSI_MetaBackend::Context *ctx, + optional_yield y, + const DoutPrefixProvider *dpp); + int put_entry(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const string& key, RGWSI_MetaBackend::PutParams& _params, RGWObjVersionTracker *objv_tracker, diff --git a/src/rgw/services/svc_meta_be_sobj.cc b/src/rgw/services/svc_meta_be_sobj.cc index e325b4ae71d58..253e509ca8e94 100644 --- a/src/rgw/services/svc_meta_be_sobj.cc +++ b/src/rgw/services/svc_meta_be_sobj.cc @@ -28,7 +28,7 @@ RGWSI_MetaBackend::Context *RGWSI_MetaBackend_SObj::alloc_ctx() return new Context_SObj(sysobj_svc); } -int 
RGWSI_MetaBackend_SObj::pre_modify(RGWSI_MetaBackend::Context *_ctx, +int RGWSI_MetaBackend_SObj::pre_modify(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *_ctx, const string& key, RGWMetadataLogData& log_data, RGWObjVersionTracker *objv_tracker, @@ -36,7 +36,7 @@ int RGWSI_MetaBackend_SObj::pre_modify(RGWSI_MetaBackend::Context *_ctx, optional_yield y) { auto ctx = static_cast(_ctx); - int ret = RGWSI_MetaBackend::pre_modify(ctx, key, log_data, + int ret = RGWSI_MetaBackend::pre_modify(dpp, ctx, key, log_data, objv_tracker, op_type, y); if (ret < 0) { @@ -56,14 +56,15 @@ int RGWSI_MetaBackend_SObj::pre_modify(RGWSI_MetaBackend::Context *_ctx, bufferlist logbl; encode(log_data, logbl); - ret = mdlog_svc->add_entry(ctx->module->get_hash_key(key), ctx->module->get_section(), key, logbl); + ret = mdlog_svc->add_entry(dpp, ctx->module->get_hash_key(key), ctx->module->get_section(), key, logbl); if (ret < 0) return ret; return 0; } -int RGWSI_MetaBackend_SObj::post_modify(RGWSI_MetaBackend::Context *_ctx, +int RGWSI_MetaBackend_SObj::post_modify(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *_ctx, const string& key, RGWMetadataLogData& log_data, RGWObjVersionTracker *objv_tracker, int ret, @@ -78,14 +79,14 @@ int RGWSI_MetaBackend_SObj::post_modify(RGWSI_MetaBackend::Context *_ctx, bufferlist logbl; encode(log_data, logbl); - int r = mdlog_svc->add_entry(ctx->module->get_hash_key(key), ctx->module->get_section(), key, logbl); + int r = mdlog_svc->add_entry(dpp, ctx->module->get_hash_key(key), ctx->module->get_section(), key, logbl); if (ret < 0) return ret; if (r < 0) return r; - return RGWSI_MetaBackend::post_modify(ctx, key, log_data, objv_tracker, ret, y); + return RGWSI_MetaBackend::post_modify(dpp, ctx, key, log_data, objv_tracker, ret, y); } int RGWSI_MetaBackend_SObj::get_shard_id(RGWSI_MetaBackend::Context *_ctx, @@ -140,7 +141,8 @@ int RGWSI_MetaBackend_SObj::get_entry(RGWSI_MetaBackend::Context *_ctx, const string& key, GetParams& _params, RGWObjVersionTracker *objv_tracker, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { RGWSI_MetaBackend_SObj::Context_SObj *ctx = static_cast(_ctx); RGWSI_MBSObj_GetParams& params = static_cast(_params); @@ -151,12 +153,13 @@ int RGWSI_MetaBackend_SObj::get_entry(RGWSI_MetaBackend::Context *_ctx, return rgw_get_system_obj(*ctx->obj_ctx, pool, oid, *params.pbl, objv_tracker, params.pmtime, - y, + y, dpp, params.pattrs, params.cache_info, params.refresh_version); } -int RGWSI_MetaBackend_SObj::put_entry(RGWSI_MetaBackend::Context *_ctx, +int RGWSI_MetaBackend_SObj::put_entry(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *_ctx, const string& key, PutParams& _params, RGWObjVersionTracker *objv_tracker, @@ -169,11 +172,12 @@ int RGWSI_MetaBackend_SObj::put_entry(RGWSI_MetaBackend::Context *_ctx, string oid; ctx->module->get_pool_and_oid(key, &pool, &oid); - return rgw_put_system_obj(*ctx->obj_ctx, pool, oid, params.bl, params.exclusive, + return rgw_put_system_obj(dpp, *ctx->obj_ctx, pool, oid, params.bl, params.exclusive, objv_tracker, params.mtime, y, params.pattrs); } -int RGWSI_MetaBackend_SObj::remove_entry(RGWSI_MetaBackend::Context *_ctx, +int RGWSI_MetaBackend_SObj::remove_entry(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *_ctx, const string& key, RemoveParams& params, RGWObjVersionTracker *objv_tracker, @@ -189,10 +193,11 @@ int RGWSI_MetaBackend_SObj::remove_entry(RGWSI_MetaBackend::Context *_ctx, auto sysobj = ctx->obj_ctx->get_obj(k); return sysobj.wop() 
.set_objv_tracker(objv_tracker) - .remove(y); + .remove(dpp, y); } -int RGWSI_MetaBackend_SObj::list_init(RGWSI_MetaBackend::Context *_ctx, +int RGWSI_MetaBackend_SObj::list_init(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *_ctx, const string& marker) { RGWSI_MetaBackend_SObj::Context_SObj *ctx = static_cast(_ctx); @@ -206,7 +211,7 @@ int RGWSI_MetaBackend_SObj::list_init(RGWSI_MetaBackend::Context *_ctx, ctx->list.op.emplace(ctx->list.pool->op()); string prefix = ctx->module->get_oid_prefix(); - ctx->list.op->init(marker, prefix); + ctx->list.op->init(dpp, marker, prefix); return 0; } diff --git a/src/rgw/services/svc_meta_be_sobj.h b/src/rgw/services/svc_meta_be_sobj.h index 145850b974785..8c5660a6d54d9 100644 --- a/src/rgw/services/svc_meta_be_sobj.h +++ b/src/rgw/services/svc_meta_be_sobj.h @@ -132,13 +132,15 @@ public: int call_with_get_params(ceph::real_time *pmtime, std::function cb) override; - int pre_modify(RGWSI_MetaBackend::Context *ctx, + int pre_modify(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const string& key, RGWMetadataLogData& log_data, RGWObjVersionTracker *objv_tracker, RGWMDLogStatus op_type, optional_yield y); - int post_modify(RGWSI_MetaBackend::Context *ctx, + int post_modify(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const string& key, RGWMetadataLogData& log_data, RGWObjVersionTracker *objv_tracker, int ret, @@ -148,19 +150,22 @@ public: const string& key, RGWSI_MetaBackend::GetParams& params, RGWObjVersionTracker *objv_tracker, - optional_yield y) override; - int put_entry(RGWSI_MetaBackend::Context *ctx, + optional_yield y, + const DoutPrefixProvider *dpp) override; + int put_entry(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const string& key, RGWSI_MetaBackend::PutParams& params, RGWObjVersionTracker *objv_tracker, optional_yield y) override; - int remove_entry(RGWSI_MetaBackend::Context *ctx, + int remove_entry(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const string& key, RGWSI_MetaBackend::RemoveParams& params, RGWObjVersionTracker *objv_tracker, optional_yield y) override; - int list_init(RGWSI_MetaBackend::Context *_ctx, const string& marker) override; + int list_init(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *_ctx, const string& marker) override; int list_next(RGWSI_MetaBackend::Context *_ctx, int max, list *keys, bool *truncated) override; diff --git a/src/rgw/services/svc_notify.cc b/src/rgw/services/svc_notify.cc index bb71f3b577a93..25ccfdbfba74b 100644 --- a/src/rgw/services/svc_notify.cc +++ b/src/rgw/services/svc_notify.cc @@ -16,7 +16,7 @@ static string notify_oid_prefix = "notify"; -class RGWWatcher : public librados::WatchCtx2 { +class RGWWatcher : public DoutPrefixProvider , public librados::WatchCtx2 { CephContext *cct; RGWSI_Notify *svc; int index; @@ -33,13 +33,18 @@ class RGWWatcher : public librados::WatchCtx2 { watcher->reinit(); } }; + + CephContext *get_cct() const { return cct; } + unsigned get_subsys() const { return dout_subsys; } + std::ostream& gen_prefix(std::ostream& out) const { return out << "rgw watcher librados: "; } + public: RGWWatcher(CephContext *_cct, RGWSI_Notify *s, int i, RGWSI_RADOS::Obj& o) : cct(_cct), svc(s), index(i), obj(o), watch_handle(0) {} void handle_notify(uint64_t notify_id, uint64_t cookie, uint64_t notifier_id, bufferlist& bl) override { - ldout(cct, 10) << "RGWWatcher::handle_notify() " + ldpp_dout(this, 10) << "RGWWatcher::handle_notify() " << " notify_id " << notify_id << " 
cookie " << cookie << " notifier " << notifier_id @@ -49,14 +54,14 @@ public: (svc->inject_notify_timeout_probability > 0 && (svc->inject_notify_timeout_probability > ceph::util::generate_random_number(0.0, 1.0)))) { - ldout(cct, 0) + ldpp_dout(this, 0) << "RGWWatcher::handle_notify() dropping notification! " << "If this isn't what you want, set " << "rgw_inject_notify_timeout_probability to zero!" << dendl; return; } - svc->watch_cb(notify_id, cookie, notifier_id, bl); + svc->watch_cb(this, notify_id, cookie, notifier_id, bl); bufferlist reply_bl; // empty reply payload obj.notify_ack(notify_id, cookie, reply_bl); @@ -160,7 +165,7 @@ RGWSI_RADOS::Obj RGWSI_Notify::pick_control_obj(const string& key) return notify_objs[i]; } -int RGWSI_Notify::init_watch(optional_yield y) +int RGWSI_Notify::init_watch(const DoutPrefixProvider *dpp, optional_yield y) { num_watchers = cct->_conf->rgw_num_control_oids; @@ -187,17 +192,17 @@ int RGWSI_Notify::init_watch(optional_yield y) notify_objs[i] = rados_svc->handle().obj({control_pool, notify_oid}); auto& notify_obj = notify_objs[i]; - int r = notify_obj.open(); + int r = notify_obj.open(dpp); if (r < 0) { - ldout(cct, 0) << "ERROR: notify_obj.open() returned r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: notify_obj.open() returned r=" << r << dendl; return r; } librados::ObjectWriteOperation op; op.create(false); - r = notify_obj.operate(&op, y); + r = notify_obj.operate(dpp, &op, y); if (r < 0 && r != -EEXIST) { - ldout(cct, 0) << "ERROR: notify_obj.operate() returned r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: notify_obj.operate() returned r=" << r << dendl; return r; } @@ -206,7 +211,7 @@ int RGWSI_Notify::init_watch(optional_yield y) r = watcher->register_watch_async(); if (r < 0) { - ldout(cct, 0) << "WARNING: register_watch_aio() returned " << r << dendl; + ldpp_dout(dpp, 0) << "WARNING: register_watch_aio() returned " << r << dendl; error = r; continue; } @@ -215,7 +220,7 @@ int RGWSI_Notify::init_watch(optional_yield y) for (int i = 0; i < num_watchers; ++i) { int r = watchers[i]->register_watch_finish(); if (r < 0) { - ldout(cct, 0) << "WARNING: async watch returned " << r << dendl; + ldpp_dout(dpp, 0) << "WARNING: async watch returned " << r << dendl; error = r; } } @@ -238,27 +243,27 @@ void RGWSI_Notify::finalize_watch() delete[] watchers; } -int RGWSI_Notify::do_start(optional_yield y) +int RGWSI_Notify::do_start(optional_yield y, const DoutPrefixProvider *dpp) { - int r = zone_svc->start(y); + int r = zone_svc->start(y, dpp); if (r < 0) { return r; } assert(zone_svc->is_started()); /* otherwise there's an ordering problem */ - r = rados_svc->start(y); + r = rados_svc->start(y, dpp); if (r < 0) { return r; } - r = finisher_svc->start(y); + r = finisher_svc->start(y, dpp); if (r < 0) { return r; } control_pool = zone_svc->get_zone_params().control_pool; - int ret = init_watch(y); + int ret = init_watch(dpp, y); if (ret < 0) { lderr(cct) << "ERROR: failed to initialize watch: " << cpp_strerror(-ret) << dendl; return ret; @@ -332,14 +337,15 @@ void RGWSI_Notify::remove_watcher(int i) } } -int RGWSI_Notify::watch_cb(uint64_t notify_id, +int RGWSI_Notify::watch_cb(const DoutPrefixProvider *dpp, + uint64_t notify_id, uint64_t cookie, uint64_t notifier_id, bufferlist& bl) { std::shared_lock l{watchers_lock}; if (cb) { - return cb->watch_cb(notify_id, cookie, notifier_id, bl); + return cb->watch_cb(dpp, notify_id, cookie, notifier_id, bl); } return 0; } @@ -358,7 +364,7 @@ void RGWSI_Notify::_set_enabled(bool status) } } -int 
RGWSI_Notify::distribute(const string& key, bufferlist& bl, +int RGWSI_Notify::distribute(const DoutPrefixProvider *dpp, const string& key, bufferlist& bl, optional_yield y) { /* The RGW uses the control pool to store the watch notify objects. @@ -370,14 +376,15 @@ int RGWSI_Notify::distribute(const string& key, bufferlist& bl, if (num_watchers > 0) { RGWSI_RADOS::Obj notify_obj = pick_control_obj(key); - ldout(cct, 10) << "distributing notification oid=" << notify_obj.get_ref().obj + ldpp_dout(dpp, 10) << "distributing notification oid=" << notify_obj.get_ref().obj << " bl.length()=" << bl.length() << dendl; - return robust_notify(notify_obj, bl, y); + return robust_notify(dpp, notify_obj, bl, y); } return 0; } -int RGWSI_Notify::robust_notify(RGWSI_RADOS::Obj& notify_obj, bufferlist& bl, +int RGWSI_Notify::robust_notify(const DoutPrefixProvider *dpp, + RGWSI_RADOS::Obj& notify_obj, bufferlist& bl, optional_yield y) { // The reply of every machine that acks goes in here. @@ -385,11 +392,11 @@ int RGWSI_Notify::robust_notify(RGWSI_RADOS::Obj& notify_obj, bufferlist& bl, bufferlist rbl; // First, try to send, without being fancy about it. - auto r = notify_obj.notify(bl, 0, &rbl, y); + auto r = notify_obj.notify(dpp, bl, 0, &rbl, y); // If that doesn't work, get serious. if (r < 0) { - ldout(cct, 1) << "robust_notify: If at first you don't succeed: " + ldpp_dout(dpp, 1) << "robust_notify: If at first you don't succeed: " << cpp_strerror(-r) << dendl; @@ -403,13 +410,13 @@ int RGWSI_Notify::robust_notify(RGWSI_RADOS::Obj& notify_obj, bufferlist& bl, std::pair id; decode(id, p); acks.insert(id); - ldout(cct, 20) << "robust_notify: acked by " << id << dendl; + ldpp_dout(dpp, 20) << "robust_notify: acked by " << id << dendl; uint32_t blen; decode(blen, p); p += blen; } } catch (const buffer::error& e) { - ldout(cct, 0) << "robust_notify: notify response parse failed: " + ldpp_dout(dpp, 0) << "robust_notify: notify response parse failed: " << e.what() << dendl; acks.clear(); // Throw away junk on failed parse. } @@ -425,9 +432,9 @@ int RGWSI_Notify::robust_notify(RGWSI_RADOS::Obj& notify_obj, bufferlist& bl, rbl.clear(); // Reset the timeouts, we're only concerned with new ones. timeouts.clear(); - r = notify_obj.notify(bl, 0, &rbl, y); + r = notify_obj.notify(dpp, bl, 0, &rbl, y); if (r < 0) { - ldout(cct, 1) << "robust_notify: retry " << tries << " failed: " + ldpp_dout(dpp, 1) << "robust_notify: retry " << tries << " failed: " << cpp_strerror(-r) << dendl; p = rbl.begin(); try { @@ -441,7 +448,7 @@ int RGWSI_Notify::robust_notify(RGWSI_RADOS::Obj& notify_obj, bufferlist& bl, decode(id, p); auto ir = acks.insert(id); if (ir.second) { - ldout(cct, 20) << "robust_notify: acked by " << id << dendl; + ldpp_dout(dpp, 20) << "robust_notify: acked by " << id << dendl; } uint32_t blen; decode(blen, p); @@ -455,13 +462,13 @@ int RGWSI_Notify::robust_notify(RGWSI_RADOS::Obj& notify_obj, bufferlist& bl, decode(id, p); // Only track timeouts from hosts that haven't acked previously. if (acks.find(id) != acks.cend()) { - ldout(cct, 20) << "robust_notify: " << id << " timed out." + ldpp_dout(dpp, 20) << "robust_notify: " << id << " timed out." 
<< dendl; timeouts.insert(id); } } } catch (const buffer::error& e) { - ldout(cct, 0) << "robust_notify: notify response parse failed: " + ldpp_dout(dpp, 0) << "robust_notify: notify response parse failed: " << e.what() << dendl; continue; } diff --git a/src/rgw/services/svc_notify.h b/src/rgw/services/svc_notify.h index 567b256d064b9..5b01d77b7bfc0 100644 --- a/src/rgw/services/svc_notify.h +++ b/src/rgw/services/svc_notify.h @@ -53,7 +53,7 @@ private: bool finalized{false}; - int init_watch(optional_yield y); + int init_watch(const DoutPrefixProvider *dpp, optional_yield y); void finalize_watch(); void init(RGWSI_Zone *_zone_svc, @@ -63,21 +63,23 @@ private: rados_svc = _rados_svc; finisher_svc = _finisher_svc; } - int do_start(optional_yield) override; + int do_start(optional_yield, const DoutPrefixProvider *dpp) override; void shutdown() override; int unwatch(RGWSI_RADOS::Obj& obj, uint64_t watch_handle); void add_watcher(int i); void remove_watcher(int i); - int watch_cb(uint64_t notify_id, + int watch_cb(const DoutPrefixProvider *dpp, + uint64_t notify_id, uint64_t cookie, uint64_t notifier_id, bufferlist& bl); void _set_enabled(bool status); void set_enabled(bool status); - int robust_notify(RGWSI_RADOS::Obj& notify_obj, bufferlist& bl, + int robust_notify(const DoutPrefixProvider *dpp, + RGWSI_RADOS::Obj& notify_obj, bufferlist& bl, optional_yield y); void schedule_context(Context *c); @@ -88,14 +90,15 @@ public: class CB { public: virtual ~CB() {} - virtual int watch_cb(uint64_t notify_id, + virtual int watch_cb(const DoutPrefixProvider *dpp, + uint64_t notify_id, uint64_t cookie, uint64_t notifier_id, bufferlist& bl) = 0; virtual void set_enabled(bool status) = 0; }; - int distribute(const string& key, bufferlist& bl, optional_yield y); + int distribute(const DoutPrefixProvider *dpp, const string& key, bufferlist& bl, optional_yield y); void register_watch_cb(CB *cb); }; diff --git a/src/rgw/services/svc_otp.cc b/src/rgw/services/svc_otp.cc index 512c542eb2d48..fc386ae72359f 100644 --- a/src/rgw/services/svc_otp.cc +++ b/src/rgw/services/svc_otp.cc @@ -60,7 +60,7 @@ void RGWSI_OTP::init(RGWSI_Zone *_zone_svc, svc.meta_be = _meta_be_svc; } -int RGWSI_OTP::do_start(optional_yield) +int RGWSI_OTP::do_start(optional_yield, const DoutPrefixProvider *dpp) { /* create first backend handler for bucket entrypoints */ @@ -88,13 +88,13 @@ int RGWSI_OTP::read_all(RGWSI_OTP_BE_Ctx& ctx, otp_devices_list_t *devices, real_time *pmtime, RGWObjVersionTracker *objv_tracker, - optional_yield y) + optional_yield y, const DoutPrefixProvider *dpp) { RGWSI_MBOTP_GetParams params; params.pdevices = devices; params.pmtime = pmtime; - int ret = svc.meta_be->get_entry(ctx.get(), key, params, objv_tracker, y); + int ret = svc.meta_be->get_entry(ctx.get(), key, params, objv_tracker, y, dpp); if (ret < 0) { return ret; } @@ -107,17 +107,20 @@ int RGWSI_OTP::read_all(RGWSI_OTP_BE_Ctx& ctx, otp_devices_list_t *devices, real_time *pmtime, RGWObjVersionTracker *objv_tracker, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { return read_all(ctx, uid.to_str(), devices, pmtime, objv_tracker, - y); + y, + dpp); } -int RGWSI_OTP::store_all(RGWSI_OTP_BE_Ctx& ctx, +int RGWSI_OTP::store_all(const DoutPrefixProvider *dpp, + RGWSI_OTP_BE_Ctx& ctx, const string& key, const otp_devices_list_t& devices, real_time mtime, @@ -128,7 +131,7 @@ int RGWSI_OTP::store_all(RGWSI_OTP_BE_Ctx& ctx, params.mtime = mtime; params.devices = devices; - int ret = svc.meta_be->put_entry(ctx.get(), key, params, 
objv_tracker, y); + int ret = svc.meta_be->put_entry(dpp, ctx.get(), key, params, objv_tracker, y); if (ret < 0) { return ret; } @@ -136,14 +139,15 @@ int RGWSI_OTP::store_all(RGWSI_OTP_BE_Ctx& ctx, return 0; } -int RGWSI_OTP::store_all(RGWSI_OTP_BE_Ctx& ctx, +int RGWSI_OTP::store_all(const DoutPrefixProvider *dpp, + RGWSI_OTP_BE_Ctx& ctx, const rgw_user& uid, const otp_devices_list_t& devices, real_time mtime, RGWObjVersionTracker *objv_tracker, optional_yield y) { - return store_all(ctx, + return store_all(dpp, ctx, uid.to_str(), devices, mtime, @@ -151,14 +155,15 @@ int RGWSI_OTP::store_all(RGWSI_OTP_BE_Ctx& ctx, y); } -int RGWSI_OTP::remove_all(RGWSI_OTP_BE_Ctx& ctx, +int RGWSI_OTP::remove_all(const DoutPrefixProvider *dpp, + RGWSI_OTP_BE_Ctx& ctx, const string& key, RGWObjVersionTracker *objv_tracker, optional_yield y) { RGWSI_MBOTP_RemoveParams params; - int ret = svc.meta_be->remove_entry(ctx.get(), key, params, objv_tracker, y); + int ret = svc.meta_be->remove_entry(dpp, ctx.get(), key, params, objv_tracker, y); if (ret < 0) { return ret; } @@ -166,12 +171,13 @@ int RGWSI_OTP::remove_all(RGWSI_OTP_BE_Ctx& ctx, return 0; } -int RGWSI_OTP::remove_all(RGWSI_OTP_BE_Ctx& ctx, +int RGWSI_OTP::remove_all(const DoutPrefixProvider *dpp, + RGWSI_OTP_BE_Ctx& ctx, const rgw_user& uid, RGWObjVersionTracker *objv_tracker, optional_yield y) { - return remove_all(ctx, + return remove_all(dpp,ctx, uid.to_str(), objv_tracker, y); diff --git a/src/rgw/services/svc_otp.h b/src/rgw/services/svc_otp.h index 76824bfdf4326..f4b2e4ed2cc89 100644 --- a/src/rgw/services/svc_otp.h +++ b/src/rgw/services/svc_otp.h @@ -31,7 +31,7 @@ class RGWSI_OTP : public RGWServiceInstance RGWSI_OTP_BE_Handler be_handler; std::unique_ptr be_module; - int do_start(optional_yield) override; + int do_start(optional_yield, const DoutPrefixProvider *dpp) override; public: struct Svc { @@ -57,30 +57,36 @@ public: otp_devices_list_t *devices, real_time *pmtime, RGWObjVersionTracker *objv_tracker, - optional_yield y); + optional_yield y, + const DoutPrefixProvider *dpp); int read_all(RGWSI_OTP_BE_Ctx& ctx, const rgw_user& uid, otp_devices_list_t *devices, real_time *pmtime, RGWObjVersionTracker *objv_tracker, - optional_yield y); - int store_all(RGWSI_OTP_BE_Ctx& ctx, + optional_yield y, + const DoutPrefixProvider *dpp); + int store_all(const DoutPrefixProvider *dpp, + RGWSI_OTP_BE_Ctx& ctx, const string& key, const otp_devices_list_t& devices, real_time mtime, RGWObjVersionTracker *objv_tracker, optional_yield y); - int store_all(RGWSI_OTP_BE_Ctx& ctx, + int store_all(const DoutPrefixProvider *dpp, + RGWSI_OTP_BE_Ctx& ctx, const rgw_user& uid, const otp_devices_list_t& devices, real_time mtime, RGWObjVersionTracker *objv_tracker, optional_yield y); - int remove_all(RGWSI_OTP_BE_Ctx& ctx, + int remove_all(const DoutPrefixProvider *dpp, + RGWSI_OTP_BE_Ctx& ctx, const string& key, RGWObjVersionTracker *objv_tracker, optional_yield y); - int remove_all(RGWSI_OTP_BE_Ctx& ctx, + int remove_all(const DoutPrefixProvider *dpp, + RGWSI_OTP_BE_Ctx& ctx, const rgw_user& uid, RGWObjVersionTracker *objv_tracker, optional_yield y); diff --git a/src/rgw/services/svc_rados.cc b/src/rgw/services/svc_rados.cc index 1e1b12023f622..32a6b3a3e39ad 100644 --- a/src/rgw/services/svc_rados.cc +++ b/src/rgw/services/svc_rados.cc @@ -21,7 +21,7 @@ RGWSI_RADOS::~RGWSI_RADOS() { } -int RGWSI_RADOS::do_start(optional_yield) +int RGWSI_RADOS::do_start(optional_yield, const DoutPrefixProvider *dpp) { int ret = rados.init_with_context(cct); if (ret < 0) { @@ -55,10 
+55,10 @@ uint64_t RGWSI_RADOS::instance_id() return get_rados_handle()->get_instance_id(); } -int RGWSI_RADOS::open_pool_ctx(const rgw_pool& pool, librados::IoCtx& io_ctx, +int RGWSI_RADOS::open_pool_ctx(const DoutPrefixProvider *dpp, const rgw_pool& pool, librados::IoCtx& io_ctx, const OpenParams& params) { - return rgw_init_ioctx(get_rados_handle(), pool, io_ctx, + return rgw_init_ioctx(dpp, get_rados_handle(), pool, io_ctx, params.create, params.mostly_omap); } @@ -106,9 +106,9 @@ void RGWSI_RADOS::Obj::init(const rgw_raw_obj& obj) ref.obj = obj; } -int RGWSI_RADOS::Obj::open() +int RGWSI_RADOS::Obj::open(const DoutPrefixProvider *dpp) { - int r = ref.pool.open(); + int r = ref.pool.open(dpp); if (r < 0) { return r; } @@ -118,16 +118,16 @@ int RGWSI_RADOS::Obj::open() return 0; } -int RGWSI_RADOS::Obj::operate(librados::ObjectWriteOperation *op, +int RGWSI_RADOS::Obj::operate(const DoutPrefixProvider *dpp, librados::ObjectWriteOperation *op, optional_yield y, int flags) { - return rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, op, y, flags); + return rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, op, y, flags); } -int RGWSI_RADOS::Obj::operate(librados::ObjectReadOperation *op, +int RGWSI_RADOS::Obj::operate(const DoutPrefixProvider *dpp, librados::ObjectReadOperation *op, bufferlist *pbl, optional_yield y, int flags) { - return rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, op, pbl, y, flags); + return rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, op, pbl, y, flags); } int RGWSI_RADOS::Obj::aio_operate(librados::AioCompletion *c, librados::ObjectWriteOperation *op) @@ -156,10 +156,10 @@ int RGWSI_RADOS::Obj::unwatch(uint64_t handle) return ref.pool.ioctx().unwatch2(handle); } -int RGWSI_RADOS::Obj::notify(bufferlist& bl, uint64_t timeout_ms, +int RGWSI_RADOS::Obj::notify(const DoutPrefixProvider *dpp, bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl, optional_yield y) { - return rgw_rados_notify(ref.pool.ioctx(), ref.obj.oid, bl, timeout_ms, pbl, y); + return rgw_rados_notify(dpp, ref.pool.ioctx(), ref.obj.oid, bl, timeout_ms, pbl, y); } void RGWSI_RADOS::Obj::notify_ack(uint64_t notify_id, @@ -286,12 +286,12 @@ int RGWSI_RADOS::Pool::lookup() return 0; } -int RGWSI_RADOS::Pool::open(const OpenParams& params) +int RGWSI_RADOS::Pool::open(const DoutPrefixProvider *dpp, const OpenParams& params) { - return rados_svc->open_pool_ctx(pool, state.ioctx, params); + return rados_svc->open_pool_ctx(dpp, pool, state.ioctx, params); } -int RGWSI_RADOS::Pool::List::init(const string& marker, RGWAccessListFilter *filter) +int RGWSI_RADOS::Pool::List::init(const DoutPrefixProvider *dpp, const string& marker, RGWAccessListFilter *filter) { if (ctx.initialized) { return -EINVAL; @@ -301,14 +301,14 @@ int RGWSI_RADOS::Pool::List::init(const string& marker, RGWAccessListFilter *fil return -EINVAL; } - int r = pool->rados_svc->open_pool_ctx(pool->pool, ctx.ioctx); + int r = pool->rados_svc->open_pool_ctx(dpp, pool->pool, ctx.ioctx); if (r < 0) { return r; } librados::ObjectCursor oc; if (!oc.from_str(marker)) { - ldout(pool->rados_svc->cct, 10) << "failed to parse cursor: " << marker << dendl; + ldpp_dout(dpp, 10) << "failed to parse cursor: " << marker << dendl; return -EINVAL; } diff --git a/src/rgw/services/svc_rados.h b/src/rgw/services/svc_rados.h index b09cd6d73a975..d3fa393140b8d 100644 --- a/src/rgw/services/svc_rados.h +++ b/src/rgw/services/svc_rados.h @@ -31,7 +31,7 @@ class RGWSI_RADOS : public RGWServiceInstance librados::Rados rados; std::unique_ptr async_processor; 
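The same pattern applies at the call sites: once a function receives a dpp it forwards it into open()/operate()/notify(), and its own log lines switch from ldout(cct, ...) to ldpp_dout(dpp, ...). A condensed sketch of that convention, modeled on the RGWSI_Notify::init_watch() hunk above (touch_control_obj is a hypothetical helper, not part of the patch):

// Hypothetical helper showing the post-patch RGWSI_RADOS::Obj calls;
// error handling mirrors RGWSI_Notify::init_watch().
static int touch_control_obj(const DoutPrefixProvider *dpp,
                             RGWSI_RADOS::Obj& obj,
                             optional_yield y)
{
  int r = obj.open(dpp);                        // was obj.open()
  if (r < 0) {
    ldpp_dout(dpp, 0) << "ERROR: open() returned r=" << r << dendl;
    return r;
  }

  librados::ObjectWriteOperation op;
  op.create(false);                             // create the object if missing

  r = obj.operate(dpp, &op, y);                 // was obj.operate(&op, y)
  if (r < 0 && r != -EEXIST) {
    ldpp_dout(dpp, 0) << "ERROR: operate() returned r=" << r << dendl;
    return r;
  }
  return 0;
}
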
- int do_start(optional_yield) override; + int do_start(optional_yield, const DoutPrefixProvider *dpp) override; public: struct OpenParams { @@ -51,7 +51,7 @@ public: }; private: - int open_pool_ctx(const rgw_pool& pool, librados::IoCtx& io_ctx, + int open_pool_ctx(const DoutPrefixProvider *dpp, const rgw_pool& pool, librados::IoCtx& io_ctx, const OpenParams& params = {}); int pool_iterate(librados::IoCtx& ioctx, librados::NObjectIterator& iter, @@ -101,7 +101,7 @@ public: int create(); int create(const std::vector& pools, std::vector *retcodes); int lookup(); - int open(const OpenParams& params = {}); + int open(const DoutPrefixProvider *dpp, const OpenParams& params = {}); const rgw_pool& get_pool() { return pool; @@ -124,7 +124,7 @@ public: List() {} List(Pool *_pool) : pool(_pool) {} - int init(const string& marker, RGWAccessListFilter *filter = nullptr); + int init(const DoutPrefixProvider *dpp, const string& marker, RGWAccessListFilter *filter = nullptr); int get_next(int max, std::vector *oids, bool *is_truncated); @@ -164,11 +164,11 @@ public: public: Obj() {} - int open(); + int open(const DoutPrefixProvider *dpp); - int operate(librados::ObjectWriteOperation *op, optional_yield y, + int operate(const DoutPrefixProvider *dpp, librados::ObjectWriteOperation *op, optional_yield y, int flags = 0); - int operate(librados::ObjectReadOperation *op, bufferlist *pbl, + int operate(const DoutPrefixProvider *dpp, librados::ObjectReadOperation *op, bufferlist *pbl, optional_yield y, int flags = 0); int aio_operate(librados::AioCompletion *c, librados::ObjectWriteOperation *op); int aio_operate(librados::AioCompletion *c, librados::ObjectReadOperation *op, @@ -177,7 +177,7 @@ public: int watch(uint64_t *handle, librados::WatchCtx2 *ctx); int aio_watch(librados::AioCompletion *c, uint64_t *handle, librados::WatchCtx2 *ctx); int unwatch(uint64_t handle); - int notify(bufferlist& bl, uint64_t timeout_ms, + int notify(const DoutPrefixProvider *dpp, bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl, optional_yield y); void notify_ack(uint64_t notify_id, uint64_t cookie, diff --git a/src/rgw/services/svc_sync_modules.cc b/src/rgw/services/svc_sync_modules.cc index f232fe5130a8a..b5490660a4f64 100644 --- a/src/rgw/services/svc_sync_modules.cc +++ b/src/rgw/services/svc_sync_modules.cc @@ -16,7 +16,7 @@ void RGWSI_SyncModules::init(RGWSI_Zone *zone_svc) rgw_register_sync_modules(sync_modules_manager); } -int RGWSI_SyncModules::do_start(optional_yield) +int RGWSI_SyncModules::do_start(optional_yield, const DoutPrefixProvider *dpp) { auto& zone_public_config = svc.zone->get_zone(); @@ -32,7 +32,7 @@ int RGWSI_SyncModules::do_start(optional_yield) return ret; } - ldout(cct, 20) << "started sync module instance, tier type = " << zone_public_config.tier_type << dendl; + ldpp_dout(dpp, 20) << "started sync module instance, tier type = " << zone_public_config.tier_type << dendl; return 0; } diff --git a/src/rgw/services/svc_sync_modules.h b/src/rgw/services/svc_sync_modules.h index 39a51f58091d5..0640ced1d57b1 100644 --- a/src/rgw/services/svc_sync_modules.h +++ b/src/rgw/services/svc_sync_modules.h @@ -28,7 +28,7 @@ public: } void init(RGWSI_Zone *zone_svc); - int do_start(optional_yield) override; + int do_start(optional_yield, const DoutPrefixProvider *dpp) override; RGWSyncModuleInstanceRef& get_sync_module() { return sync_module; } }; diff --git a/src/rgw/services/svc_sys_obj.cc b/src/rgw/services/svc_sys_obj.cc index 881a8f955d729..be30e45c56ccf 100644 --- a/src/rgw/services/svc_sys_obj.cc +++ 
b/src/rgw/services/svc_sys_obj.cc @@ -29,7 +29,7 @@ RGWSI_SysObj::Obj::ROp::ROp(Obj& _source) : source(_source) { state.emplace(); } -int RGWSI_SysObj::Obj::ROp::stat(optional_yield y) +int RGWSI_SysObj::Obj::ROp::stat(optional_yield y, const DoutPrefixProvider *dpp) { RGWSI_SysObj_Core *svc = source.core_svc; rgw_raw_obj& obj = source.obj; @@ -37,16 +37,17 @@ int RGWSI_SysObj::Obj::ROp::stat(optional_yield y) return svc->stat(source.get_ctx(), *state, obj, attrs, raw_attrs, lastmod, obj_size, - objv_tracker, y); + objv_tracker, y, dpp); } -int RGWSI_SysObj::Obj::ROp::read(int64_t ofs, int64_t end, bufferlist *bl, +int RGWSI_SysObj::Obj::ROp::read(const DoutPrefixProvider *dpp, + int64_t ofs, int64_t end, bufferlist *bl, optional_yield y) { RGWSI_SysObj_Core *svc = source.core_svc; rgw_raw_obj& obj = source.get_obj(); - return svc->read(source.get_ctx(), *state, + return svc->read(dpp, source.get_ctx(), *state, objv_tracker, obj, bl, ofs, end, attrs, @@ -55,51 +56,52 @@ int RGWSI_SysObj::Obj::ROp::read(int64_t ofs, int64_t end, bufferlist *bl, refresh_version, y); } -int RGWSI_SysObj::Obj::ROp::get_attr(const char *name, bufferlist *dest, +int RGWSI_SysObj::Obj::ROp::get_attr(const DoutPrefixProvider *dpp, + const char *name, bufferlist *dest, optional_yield y) { RGWSI_SysObj_Core *svc = source.core_svc; rgw_raw_obj& obj = source.get_obj(); - return svc->get_attr(obj, name, dest, y); + return svc->get_attr(dpp, obj, name, dest, y); } -int RGWSI_SysObj::Obj::WOp::remove(optional_yield y) +int RGWSI_SysObj::Obj::WOp::remove(const DoutPrefixProvider *dpp, optional_yield y) { RGWSI_SysObj_Core *svc = source.core_svc; rgw_raw_obj& obj = source.get_obj(); - return svc->remove(source.get_ctx(), + return svc->remove(dpp, source.get_ctx(), objv_tracker, obj, y); } -int RGWSI_SysObj::Obj::WOp::write(bufferlist& bl, optional_yield y) +int RGWSI_SysObj::Obj::WOp::write(const DoutPrefixProvider *dpp, bufferlist& bl, optional_yield y) { RGWSI_SysObj_Core *svc = source.core_svc; rgw_raw_obj& obj = source.get_obj(); - return svc->write(obj, pmtime, attrs, exclusive, + return svc->write(dpp, obj, pmtime, attrs, exclusive, bl, objv_tracker, mtime, y); } -int RGWSI_SysObj::Obj::WOp::write_data(bufferlist& bl, optional_yield y) +int RGWSI_SysObj::Obj::WOp::write_data(const DoutPrefixProvider *dpp, bufferlist& bl, optional_yield y) { RGWSI_SysObj_Core *svc = source.core_svc; rgw_raw_obj& obj = source.get_obj(); - return svc->write_data(obj, bl, exclusive, objv_tracker, y); + return svc->write_data(dpp, obj, bl, exclusive, objv_tracker, y); } -int RGWSI_SysObj::Obj::WOp::write_attrs(optional_yield y) +int RGWSI_SysObj::Obj::WOp::write_attrs(const DoutPrefixProvider *dpp, optional_yield y) { RGWSI_SysObj_Core *svc = source.core_svc; rgw_raw_obj& obj = source.get_obj(); - return svc->set_attrs(obj, attrs, nullptr, objv_tracker, y); + return svc->set_attrs(dpp, obj, attrs, nullptr, objv_tracker, y); } -int RGWSI_SysObj::Obj::WOp::write_attr(const char *name, bufferlist& bl, +int RGWSI_SysObj::Obj::WOp::write_attr(const DoutPrefixProvider *dpp, const char *name, bufferlist& bl, optional_yield y) { RGWSI_SysObj_Core *svc = source.core_svc; @@ -108,17 +110,17 @@ int RGWSI_SysObj::Obj::WOp::write_attr(const char *name, bufferlist& bl, map m; m[name] = bl; - return svc->set_attrs(obj, m, nullptr, objv_tracker, y); + return svc->set_attrs(dpp, obj, m, nullptr, objv_tracker, y); } -int RGWSI_SysObj::Pool::list_prefixed_objs(const string& prefix, std::function cb) +int RGWSI_SysObj::Pool::list_prefixed_objs(const 
DoutPrefixProvider *dpp, const string& prefix, std::function cb) { - return core_svc->pool_list_prefixed_objs(pool, prefix, cb); + return core_svc->pool_list_prefixed_objs(dpp, pool, prefix, cb); } -int RGWSI_SysObj::Pool::Op::init(const string& marker, const string& prefix) +int RGWSI_SysObj::Pool::Op::init(const DoutPrefixProvider *dpp, const string& marker, const string& prefix) { - return source.core_svc->pool_list_objects_init(source.pool, marker, prefix, &ctx); + return source.core_svc->pool_list_objects_init(dpp, source.pool, marker, prefix, &ctx); } int RGWSI_SysObj::Pool::Op::get_next(int max, vector *oids, bool *is_truncated) @@ -131,58 +133,59 @@ int RGWSI_SysObj::Pool::Op::get_marker(string *marker) return source.core_svc->pool_list_objects_get_marker(ctx, marker); } -int RGWSI_SysObj::Obj::OmapOp::get_all(std::map *m, +int RGWSI_SysObj::Obj::OmapOp::get_all(const DoutPrefixProvider *dpp, std::map *m, optional_yield y) { RGWSI_SysObj_Core *svc = source.core_svc; rgw_raw_obj& obj = source.obj; - return svc->omap_get_all(obj, m, y); + return svc->omap_get_all(dpp, obj, m, y); } -int RGWSI_SysObj::Obj::OmapOp::get_vals(const string& marker, uint64_t count, +int RGWSI_SysObj::Obj::OmapOp::get_vals(const DoutPrefixProvider *dpp, + const string& marker, uint64_t count, std::map *m, bool *pmore, optional_yield y) { RGWSI_SysObj_Core *svc = source.core_svc; rgw_raw_obj& obj = source.obj; - return svc->omap_get_vals(obj, marker, count, m, pmore, y); + return svc->omap_get_vals(dpp, obj, marker, count, m, pmore, y); } -int RGWSI_SysObj::Obj::OmapOp::set(const std::string& key, bufferlist& bl, +int RGWSI_SysObj::Obj::OmapOp::set(const DoutPrefixProvider *dpp, const std::string& key, bufferlist& bl, optional_yield y) { RGWSI_SysObj_Core *svc = source.core_svc; rgw_raw_obj& obj = source.obj; - return svc->omap_set(obj, key, bl, must_exist, y); + return svc->omap_set(dpp, obj, key, bl, must_exist, y); } -int RGWSI_SysObj::Obj::OmapOp::set(const map& m, +int RGWSI_SysObj::Obj::OmapOp::set(const DoutPrefixProvider *dpp, const map& m, optional_yield y) { RGWSI_SysObj_Core *svc = source.core_svc; rgw_raw_obj& obj = source.obj; - return svc->omap_set(obj, m, must_exist, y); + return svc->omap_set(dpp, obj, m, must_exist, y); } -int RGWSI_SysObj::Obj::OmapOp::del(const std::string& key, optional_yield y) +int RGWSI_SysObj::Obj::OmapOp::del(const DoutPrefixProvider *dpp, const std::string& key, optional_yield y) { RGWSI_SysObj_Core *svc = source.core_svc; rgw_raw_obj& obj = source.obj; - return svc->omap_del(obj, key, y); + return svc->omap_del(dpp, obj, key, y); } -int RGWSI_SysObj::Obj::WNOp::notify(bufferlist& bl, uint64_t timeout_ms, +int RGWSI_SysObj::Obj::WNOp::notify(const DoutPrefixProvider *dpp, bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl, optional_yield y) { RGWSI_SysObj_Core *svc = source.core_svc; rgw_raw_obj& obj = source.obj; - return svc->notify(obj, bl, timeout_ms, pbl, y); + return svc->notify(dpp, obj, bl, timeout_ms, pbl, y); } RGWSI_Zone *RGWSI_SysObj::get_zone_svc() diff --git a/src/rgw/services/svc_sys_obj.h b/src/rgw/services/svc_sys_obj.h index 5fc2ea27dc3db..48ae302408aac 100644 --- a/src/rgw/services/svc_sys_obj.h +++ b/src/rgw/services/svc_sys_obj.h @@ -97,12 +97,12 @@ public: ROp(Obj& _source); - int stat(optional_yield y); - int read(int64_t ofs, int64_t end, bufferlist *pbl, optional_yield y); - int read(bufferlist *pbl, optional_yield y) { - return read(0, -1, pbl, y); + int stat(optional_yield y, const DoutPrefixProvider *dpp); + int read(const 
DoutPrefixProvider *dpp, int64_t ofs, int64_t end, bufferlist *pbl, optional_yield y); + int read(const DoutPrefixProvider *dpp, bufferlist *pbl, optional_yield y) { + return read(dpp, 0, -1, pbl, y); } - int get_attr(const char *name, bufferlist *dest, optional_yield y); + int get_attr(const DoutPrefixProvider *dpp, const char *name, bufferlist *dest, optional_yield y); }; struct WOp { @@ -146,12 +146,12 @@ public: WOp(Obj& _source) : source(_source) {} - int remove(optional_yield y); - int write(bufferlist& bl, optional_yield y); + int remove(const DoutPrefixProvider *dpp, optional_yield y); + int write(const DoutPrefixProvider *dpp, bufferlist& bl, optional_yield y); - int write_data(bufferlist& bl, optional_yield y); /* write data only */ - int write_attrs(optional_yield y); /* write attrs only */ - int write_attr(const char *name, bufferlist& bl, + int write_data(const DoutPrefixProvider *dpp, bufferlist& bl, optional_yield y); /* write data only */ + int write_attrs(const DoutPrefixProvider *dpp, optional_yield y); /* write attrs only */ + int write_attr(const DoutPrefixProvider *dpp, const char *name, bufferlist& bl, optional_yield y); /* write attrs only */ }; @@ -167,13 +167,13 @@ public: OmapOp(Obj& _source) : source(_source) {} - int get_all(std::map *m, optional_yield y); - int get_vals(const string& marker, uint64_t count, + int get_all(const DoutPrefixProvider *dpp, std::map *m, optional_yield y); + int get_vals(const DoutPrefixProvider *dpp, const string& marker, uint64_t count, std::map *m, bool *pmore, optional_yield y); - int set(const std::string& key, bufferlist& bl, optional_yield y); - int set(const map& m, optional_yield y); - int del(const std::string& key, optional_yield y); + int set(const DoutPrefixProvider *dpp, const std::string& key, bufferlist& bl, optional_yield y); + int set(const DoutPrefixProvider *dpp, const map& m, optional_yield y); + int del(const DoutPrefixProvider *dpp, const std::string& key, optional_yield y); }; struct WNOp { @@ -181,7 +181,7 @@ public: WNOp(Obj& _source) : source(_source) {} - int notify(bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl, + int notify(const DoutPrefixProvider *dpp, bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl, optional_yield y); }; ROp rop() { @@ -230,17 +230,17 @@ public: Op(Pool& _source) : source(_source) {} - int init(const std::string& marker, const std::string& prefix); + int init(const DoutPrefixProvider *dpp, const std::string& marker, const std::string& prefix); int get_next(int max, std::vector *oids, bool *is_truncated); int get_marker(string *marker); }; - int list_prefixed_objs(const std::string& prefix, std::function cb); + int list_prefixed_objs(const DoutPrefixProvider *dpp, const std::string& prefix, std::function cb); template - int list_prefixed_objs(const string& prefix, + int list_prefixed_objs(const DoutPrefixProvider *dpp, const string& prefix, Container *result) { - return list_prefixed_objs(prefix, [&](const string& val) { + return list_prefixed_objs(dpp, prefix, [&](const string& val) { result->push_back(val); }); } diff --git a/src/rgw/services/svc_sys_obj_cache.cc b/src/rgw/services/svc_sys_obj_cache.cc index 9be71f706708c..68b90888121d0 100644 --- a/src/rgw/services/svc_sys_obj_cache.cc +++ b/src/rgw/services/svc_sys_obj_cache.cc @@ -18,11 +18,12 @@ class RGWSI_SysObj_Cache_CB : public RGWSI_Notify::CB RGWSI_SysObj_Cache *svc; public: RGWSI_SysObj_Cache_CB(RGWSI_SysObj_Cache *_svc) : svc(_svc) {} - int watch_cb(uint64_t notify_id, + int watch_cb(const DoutPrefixProvider 
*dpp, + uint64_t notify_id, uint64_t cookie, uint64_t notifier_id, bufferlist& bl) { - return svc->watch_cb(notify_id, cookie, notifier_id, bl); + return svc->watch_cb(dpp, notify_id, cookie, notifier_id, bl); } void set_enabled(bool status) { @@ -30,19 +31,19 @@ public: } }; -int RGWSI_SysObj_Cache::do_start(optional_yield y) +int RGWSI_SysObj_Cache::do_start(optional_yield y, const DoutPrefixProvider *dpp) { int r = asocket.start(); if (r < 0) { return r; } - r = RGWSI_SysObj_Core::do_start(y); + r = RGWSI_SysObj_Core::do_start(y, dpp); if (r < 0) { return r; } - r = notify_svc->start(y); + r = notify_svc->start(y, dpp); if (r < 0) { return r; } @@ -81,7 +82,8 @@ void RGWSI_SysObj_Cache::normalize_pool_and_obj(const rgw_pool& src_pool, const } -int RGWSI_SysObj_Cache::remove(RGWSysObjectCtxBase& obj_ctx, +int RGWSI_SysObj_Cache::remove(const DoutPrefixProvider *dpp, + RGWSysObjectCtxBase& obj_ctx, RGWObjVersionTracker *objv_tracker, const rgw_raw_obj& obj, optional_yield y) @@ -92,18 +94,19 @@ int RGWSI_SysObj_Cache::remove(RGWSysObjectCtxBase& obj_ctx, normalize_pool_and_obj(obj.pool, obj.oid, pool, oid); string name = normal_name(pool, oid); - cache.remove(name); + cache.remove(dpp, name); ObjectCacheInfo info; - int r = distribute_cache(name, obj, info, REMOVE_OBJ, y); + int r = distribute_cache(dpp, name, obj, info, REMOVE_OBJ, y); if (r < 0) { - ldout(cct, 0) << "ERROR: " << __func__ << "(): failed to distribute cache: r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): failed to distribute cache: r=" << r << dendl; } - return RGWSI_SysObj_Core::remove(obj_ctx, objv_tracker, obj, y); + return RGWSI_SysObj_Core::remove(dpp, obj_ctx, objv_tracker, obj, y); } -int RGWSI_SysObj_Cache::read(RGWSysObjectCtxBase& obj_ctx, +int RGWSI_SysObj_Cache::read(const DoutPrefixProvider *dpp, + RGWSysObjectCtxBase& obj_ctx, RGWSI_SysObj_Obj_GetObjState& read_state, RGWObjVersionTracker *objv_tracker, const rgw_raw_obj& obj, @@ -117,7 +120,7 @@ int RGWSI_SysObj_Cache::read(RGWSysObjectCtxBase& obj_ctx, rgw_pool pool; string oid; if (ofs != 0) { - return RGWSI_SysObj_Core::read(obj_ctx, read_state, objv_tracker, + return RGWSI_SysObj_Core::read(dpp, obj_ctx, read_state, objv_tracker, obj, obl, ofs, end, attrs, raw_attrs, cache_info, refresh_version, y); } @@ -133,7 +136,7 @@ int RGWSI_SysObj_Cache::read(RGWSysObjectCtxBase& obj_ctx, if (attrs) flags |= CACHE_FLAG_XATTRS; - int r = cache.get(name, info, flags, cache_info); + int r = cache.get(dpp, name, info, flags, cache_info); if (r == 0 && (!refresh_version || !info.version.compare(&(*refresh_version)))) { if (info.status < 0) @@ -161,7 +164,7 @@ int RGWSI_SysObj_Cache::read(RGWSysObjectCtxBase& obj_ctx, return -ENOENT; map unfiltered_attrset; - r = RGWSI_SysObj_Core::read(obj_ctx, read_state, objv_tracker, + r = RGWSI_SysObj_Core::read(dpp, obj_ctx, read_state, objv_tracker, obj, obl, ofs, end, (attrs ? 
&unfiltered_attrset : nullptr), true, /* cache unfiltered attrs */ @@ -170,7 +173,7 @@ int RGWSI_SysObj_Cache::read(RGWSysObjectCtxBase& obj_ctx, if (r < 0) { if (r == -ENOENT) { // only update ENOENT, we'd rather retry other errors info.status = r; - cache.put(name, info, cache_info); + cache.put(dpp, name, info, cache_info); } return r; } @@ -199,11 +202,12 @@ int RGWSI_SysObj_Cache::read(RGWSysObjectCtxBase& obj_ctx, rgw_filter_attrset(info.xattrs, RGW_ATTR_PREFIX, attrs); } } - cache.put(name, info, cache_info); + cache.put(dpp, name, info, cache_info); return r; } -int RGWSI_SysObj_Cache::get_attr(const rgw_raw_obj& obj, +int RGWSI_SysObj_Cache::get_attr(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, const char *attr_name, bufferlist *dest, optional_yield y) @@ -218,7 +222,7 @@ int RGWSI_SysObj_Cache::get_attr(const rgw_raw_obj& obj, uint32_t flags = CACHE_FLAG_XATTRS; - int r = cache.get(name, info, flags, nullptr); + int r = cache.get(dpp, name, info, flags, nullptr); if (r == 0) { if (info.status < 0) return info.status; @@ -234,10 +238,11 @@ int RGWSI_SysObj_Cache::get_attr(const rgw_raw_obj& obj, return -ENOENT; } /* don't try to cache this one */ - return RGWSI_SysObj_Core::get_attr(obj, attr_name, dest, y); + return RGWSI_SysObj_Core::get_attr(dpp, obj, attr_name, dest, y); } -int RGWSI_SysObj_Cache::set_attrs(const rgw_raw_obj& obj, +int RGWSI_SysObj_Cache::set_attrs(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, map& attrs, map *rmattrs, RGWObjVersionTracker *objv_tracker, @@ -253,25 +258,26 @@ int RGWSI_SysObj_Cache::set_attrs(const rgw_raw_obj& obj, } info.status = 0; info.flags = CACHE_FLAG_MODIFY_XATTRS; - int ret = RGWSI_SysObj_Core::set_attrs(obj, attrs, rmattrs, objv_tracker, y); + int ret = RGWSI_SysObj_Core::set_attrs(dpp, obj, attrs, rmattrs, objv_tracker, y); string name = normal_name(pool, oid); if (ret >= 0) { if (objv_tracker && objv_tracker->read_version.ver) { info.version = objv_tracker->read_version; info.flags |= CACHE_FLAG_OBJV; } - cache.put(name, info, NULL); - int r = distribute_cache(name, obj, info, UPDATE_OBJ, y); + cache.put(dpp, name, info, NULL); + int r = distribute_cache(dpp, name, obj, info, UPDATE_OBJ, y); if (r < 0) - ldout(cct, 0) << "ERROR: failed to distribute cache for " << obj << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to distribute cache for " << obj << dendl; } else { - cache.remove(name); + cache.remove(dpp, name); } return ret; } -int RGWSI_SysObj_Cache::write(const rgw_raw_obj& obj, +int RGWSI_SysObj_Cache::write(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, real_time *pmtime, map& attrs, bool exclusive, @@ -289,7 +295,7 @@ int RGWSI_SysObj_Cache::write(const rgw_raw_obj& obj, info.data = data; info.flags = CACHE_FLAG_XATTRS | CACHE_FLAG_DATA | CACHE_FLAG_META; ceph::real_time result_mtime; - int ret = RGWSI_SysObj_Core::write(obj, &result_mtime, attrs, + int ret = RGWSI_SysObj_Core::write(dpp, obj, &result_mtime, attrs, exclusive, data, objv_tracker, set_mtime, y); if (pmtime) { @@ -303,18 +309,19 @@ int RGWSI_SysObj_Cache::write(const rgw_raw_obj& obj, info.meta.size = data.length(); string name = normal_name(pool, oid); if (ret >= 0) { - cache.put(name, info, NULL); - int r = distribute_cache(name, obj, info, UPDATE_OBJ, y); + cache.put(dpp, name, info, NULL); + int r = distribute_cache(dpp, name, obj, info, UPDATE_OBJ, y); if (r < 0) - ldout(cct, 0) << "ERROR: failed to distribute cache for " << obj << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to distribute cache for " << obj << dendl; } 
else { - cache.remove(name); + cache.remove(dpp, name); } return ret; } -int RGWSI_SysObj_Cache::write_data(const rgw_raw_obj& obj, +int RGWSI_SysObj_Cache::write_data(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, const bufferlist& data, bool exclusive, RGWObjVersionTracker *objv_tracker, @@ -330,25 +337,25 @@ int RGWSI_SysObj_Cache::write_data(const rgw_raw_obj& obj, info.status = 0; info.flags = CACHE_FLAG_DATA; - int ret = RGWSI_SysObj_Core::write_data(obj, data, exclusive, objv_tracker, y); + int ret = RGWSI_SysObj_Core::write_data(dpp, obj, data, exclusive, objv_tracker, y); string name = normal_name(pool, oid); if (ret >= 0) { if (objv_tracker && objv_tracker->read_version.ver) { info.version = objv_tracker->read_version; info.flags |= CACHE_FLAG_OBJV; } - cache.put(name, info, NULL); - int r = distribute_cache(name, obj, info, UPDATE_OBJ, y); + cache.put(dpp, name, info, NULL); + int r = distribute_cache(dpp, name, obj, info, UPDATE_OBJ, y); if (r < 0) - ldout(cct, 0) << "ERROR: failed to distribute cache for " << obj << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to distribute cache for " << obj << dendl; } else { - cache.remove(name); + cache.remove(dpp, name); } return ret; } -int RGWSI_SysObj_Cache::raw_stat(const rgw_raw_obj& obj, uint64_t *psize, real_time *pmtime, uint64_t *pepoch, +int RGWSI_SysObj_Cache::raw_stat(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, uint64_t *psize, real_time *pmtime, uint64_t *pepoch, map *attrs, bufferlist *first_chunk, RGWObjVersionTracker *objv_tracker, optional_yield y) @@ -367,7 +374,7 @@ int RGWSI_SysObj_Cache::raw_stat(const rgw_raw_obj& obj, uint64_t *psize, real_t uint32_t flags = CACHE_FLAG_META | CACHE_FLAG_XATTRS; if (objv_tracker) flags |= CACHE_FLAG_OBJV; - int r = cache.get(name, info, flags, NULL); + int r = cache.get(dpp, name, info, flags, NULL); if (r == 0) { if (info.status < 0) return info.status; @@ -382,12 +389,12 @@ int RGWSI_SysObj_Cache::raw_stat(const rgw_raw_obj& obj, uint64_t *psize, real_t if (r == -ENODATA) { return -ENOENT; } - r = RGWSI_SysObj_Core::raw_stat(obj, &size, &mtime, &epoch, &info.xattrs, + r = RGWSI_SysObj_Core::raw_stat(dpp, obj, &size, &mtime, &epoch, &info.xattrs, first_chunk, objv_tracker, y); if (r < 0) { if (r == -ENOENT) { info.status = r; - cache.put(name, info, NULL); + cache.put(dpp, name, info, NULL); } return r; } @@ -400,7 +407,7 @@ int RGWSI_SysObj_Cache::raw_stat(const rgw_raw_obj& obj, uint64_t *psize, real_t info.flags |= CACHE_FLAG_OBJV; info.version = objv_tracker->read_version; } - cache.put(name, info, NULL); + cache.put(dpp, name, info, NULL); done: if (psize) *psize = size; @@ -413,7 +420,8 @@ done: return 0; } -int RGWSI_SysObj_Cache::distribute_cache(const string& normal_name, +int RGWSI_SysObj_Cache::distribute_cache(const DoutPrefixProvider *dpp, + const string& normal_name, const rgw_raw_obj& obj, ObjectCacheInfo& obj_info, int op, optional_yield y) @@ -424,10 +432,11 @@ int RGWSI_SysObj_Cache::distribute_cache(const string& normal_name, info.obj = obj; bufferlist bl; encode(info, bl); - return notify_svc->distribute(normal_name, bl, y); + return notify_svc->distribute(dpp, normal_name, bl, y); } -int RGWSI_SysObj_Cache::watch_cb(uint64_t notify_id, +int RGWSI_SysObj_Cache::watch_cb(const DoutPrefixProvider *dpp, + uint64_t notify_id, uint64_t cookie, uint64_t notifier_id, bufferlist& bl) @@ -452,10 +461,10 @@ int RGWSI_SysObj_Cache::watch_cb(uint64_t notify_id, switch (info.op) { case UPDATE_OBJ: - cache.put(name, info.obj_info, NULL); + cache.put(dpp, 
name, info.obj_info, NULL); break; case REMOVE_OBJ: - cache.remove(name); + cache.remove(dpp, name); break; default: ldout(cct, 0) << "WARNING: got unknown notification op: " << info.op << dendl; @@ -470,10 +479,11 @@ void RGWSI_SysObj_Cache::set_enabled(bool status) cache.set_enabled(status); } -bool RGWSI_SysObj_Cache::chain_cache_entry(std::initializer_list cache_info_entries, +bool RGWSI_SysObj_Cache::chain_cache_entry(const DoutPrefixProvider *dpp, + std::initializer_list cache_info_entries, RGWChainedCache::Entry *chained_entry) { - return cache.chain_cache_entry(cache_info_entries, chained_entry); + return cache.chain_cache_entry(dpp, cache_info_entries, chained_entry); } void RGWSI_SysObj_Cache::register_chained_cache(RGWChainedCache *cc) @@ -580,7 +590,7 @@ int RGWSI_SysObj_Cache_ASocketHook::call( return -ENOSYS; } -RGWSI_SysObj_Cache::ASocketHandler::ASocketHandler(RGWSI_SysObj_Cache *_svc) : svc(_svc) +RGWSI_SysObj_Cache::ASocketHandler::ASocketHandler(const DoutPrefixProvider *_dpp, RGWSI_SysObj_Cache *_svc) : dpp(_dpp), svc(_svc) { hook.reset(new RGWSI_SysObj_Cache_ASocketHook(_svc)); } @@ -612,7 +622,7 @@ void RGWSI_SysObj_Cache::ASocketHandler::call_list(const std::optionalcache.get(target)) { + if (const auto entry = svc->cache.get(dpp, target)) { f->open_object_section("cache_entry"); f->dump_string("name", target.c_str()); entry->dump(f); @@ -625,7 +635,7 @@ int RGWSI_SysObj_Cache::ASocketHandler::call_inspect(const std::string& target, int RGWSI_SysObj_Cache::ASocketHandler::call_erase(const std::string& target) { - return svc->cache.remove(target); + return svc->cache.remove(dpp, target); } int RGWSI_SysObj_Cache::ASocketHandler::call_zap() diff --git a/src/rgw/services/svc_sys_obj_cache.h b/src/rgw/services/svc_sys_obj_cache.h index b2abb2f550566..af95c06f4ff7d 100644 --- a/src/rgw/services/svc_sys_obj_cache.h +++ b/src/rgw/services/svc_sys_obj_cache.h @@ -33,15 +33,16 @@ protected: notify_svc = _notify_svc; } - int do_start(optional_yield) override; + int do_start(optional_yield, const DoutPrefixProvider *dpp) override; void shutdown() override; - int raw_stat(const rgw_raw_obj& obj, uint64_t *psize, real_time *pmtime, uint64_t *epoch, + int raw_stat(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, uint64_t *psize, real_time *pmtime, uint64_t *epoch, map *attrs, bufferlist *first_chunk, RGWObjVersionTracker *objv_tracker, optional_yield y) override; - int read(RGWSysObjectCtxBase& obj_ctx, + int read(const DoutPrefixProvider *dpp, + RGWSysObjectCtxBase& obj_ctx, RGWSI_SysObj_Obj_GetObjState& read_state, RGWObjVersionTracker *objv_tracker, const rgw_raw_obj& obj, @@ -52,21 +53,24 @@ protected: boost::optional, optional_yield y) override; - int get_attr(const rgw_raw_obj& obj, const char *name, bufferlist *dest, + int get_attr(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, const char *name, bufferlist *dest, optional_yield y) override; - int set_attrs(const rgw_raw_obj& obj, + int set_attrs(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, map& attrs, map *rmattrs, RGWObjVersionTracker *objv_tracker, optional_yield y); - int remove(RGWSysObjectCtxBase& obj_ctx, + int remove(const DoutPrefixProvider *dpp, + RGWSysObjectCtxBase& obj_ctx, RGWObjVersionTracker *objv_tracker, const rgw_raw_obj& obj, optional_yield y) override; - int write(const rgw_raw_obj& obj, + int write(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, real_time *pmtime, map& attrs, bool exclusive, @@ -75,17 +79,19 @@ protected: real_time set_mtime, optional_yield y) 
override; - int write_data(const rgw_raw_obj& obj, + int write_data(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, const bufferlist& bl, bool exclusive, RGWObjVersionTracker *objv_tracker, optional_yield y); - int distribute_cache(const string& normal_name, const rgw_raw_obj& obj, + int distribute_cache(const DoutPrefixProvider *dpp, const string& normal_name, const rgw_raw_obj& obj, ObjectCacheInfo& obj_info, int op, optional_yield y); - int watch_cb(uint64_t notify_id, + int watch_cb(const DoutPrefixProvider *dpp, + uint64_t notify_id, uint64_t cookie, uint64_t notifier_id, bufferlist& bl); @@ -93,22 +99,24 @@ protected: void set_enabled(bool status); public: - RGWSI_SysObj_Cache(CephContext *cct) : RGWSI_SysObj_Core(cct), asocket(this) { + RGWSI_SysObj_Cache(const DoutPrefixProvider *dpp, CephContext *cct) : RGWSI_SysObj_Core(cct), asocket(dpp, this) { cache.set_ctx(cct); } - bool chain_cache_entry(std::initializer_list cache_info_entries, + bool chain_cache_entry(const DoutPrefixProvider *dpp, + std::initializer_list cache_info_entries, RGWChainedCache::Entry *chained_entry); void register_chained_cache(RGWChainedCache *cc); void unregister_chained_cache(RGWChainedCache *cc); class ASocketHandler { + const DoutPrefixProvider *dpp; RGWSI_SysObj_Cache *svc; std::unique_ptr hook; public: - ASocketHandler(RGWSI_SysObj_Cache *_svc); + ASocketHandler(const DoutPrefixProvider *dpp, RGWSI_SysObj_Cache *_svc); ~ASocketHandler(); int start(); @@ -180,7 +188,7 @@ public: return iter->second.first; } - bool put(RGWSI_SysObj_Cache *svc, const string& key, T *entry, + bool put(const DoutPrefixProvider *dpp, RGWSI_SysObj_Cache *svc, const string& key, T *entry, std::initializer_list cache_info_entries) { if (!svc) { return false; @@ -189,7 +197,7 @@ public: Entry chain_entry(this, key, entry); /* we need the svc cache to call us under its lock to maintain lock ordering */ - return svc->chain_cache_entry(cache_info_entries, &chain_entry); + return svc->chain_cache_entry(dpp, cache_info_entries, &chain_entry); } void chain_cb(const string& key, void *data) override { diff --git a/src/rgw/services/svc_sys_obj_core.cc b/src/rgw/services/svc_sys_obj_core.cc index 04bff6563ef3b..2e194dfeeb23b 100644 --- a/src/rgw/services/svc_sys_obj_core.cc +++ b/src/rgw/services/svc_sys_obj_core.cc @@ -9,19 +9,20 @@ #define dout_subsys ceph_subsys_rgw -int RGWSI_SysObj_Core_GetObjState::get_rados_obj(RGWSI_RADOS *rados_svc, +int RGWSI_SysObj_Core_GetObjState::get_rados_obj(const DoutPrefixProvider *dpp, + RGWSI_RADOS *rados_svc, RGWSI_Zone *zone_svc, const rgw_raw_obj& obj, RGWSI_RADOS::Obj **pobj) { if (!has_rados_obj) { if (obj.oid.empty()) { - ldout(rados_svc->ctx(), 0) << "ERROR: obj.oid is empty" << dendl; + ldpp_dout(dpp, 0) << "ERROR: obj.oid is empty" << dendl; return -EINVAL; } rados_obj = rados_svc->obj(obj); - int r = rados_obj.open(); + int r = rados_obj.open(dpp); if (r < 0) { return r; } @@ -31,17 +32,18 @@ int RGWSI_SysObj_Core_GetObjState::get_rados_obj(RGWSI_RADOS *rados_svc, return 0; } -int RGWSI_SysObj_Core::get_rados_obj(RGWSI_Zone *zone_svc, +int RGWSI_SysObj_Core::get_rados_obj(const DoutPrefixProvider *dpp, + RGWSI_Zone *zone_svc, const rgw_raw_obj& obj, RGWSI_RADOS::Obj *pobj) { if (obj.oid.empty()) { - ldout(rados_svc->ctx(), 0) << "ERROR: obj.oid is empty" << dendl; + ldpp_dout(dpp, 0) << "ERROR: obj.oid is empty" << dendl; return -EINVAL; } *pobj = rados_svc->obj(obj); - int r = pobj->open(); + int r = pobj->open(dpp); if (r < 0) { return r; } @@ -53,14 +55,15 @@ int 
RGWSI_SysObj_Core::get_system_obj_state_impl(RGWSysObjectCtxBase *rctx, const rgw_raw_obj& obj, RGWSysObjState **state, RGWObjVersionTracker *objv_tracker, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { if (obj.empty()) { return -EINVAL; } RGWSysObjState *s = rctx->get_state(obj); - ldout(cct, 20) << "get_system_obj_state: rctx=" << (void *)rctx << " obj=" << obj << " state=" << (void *)s << " s->prefetch_data=" << s->prefetch_data << dendl; + ldpp_dout(dpp, 20) << "get_system_obj_state: rctx=" << (void *)rctx << " obj=" << obj << " state=" << (void *)s << " s->prefetch_data=" << s->prefetch_data << dendl; *state = s; if (s->has_attrs) { return 0; @@ -68,7 +71,7 @@ int RGWSI_SysObj_Core::get_system_obj_state_impl(RGWSysObjectCtxBase *rctx, s->obj = obj; - int r = raw_stat(obj, &s->size, &s->mtime, &s->epoch, &s->attrset, + int r = raw_stat(dpp, obj, &s->size, &s->mtime, &s->epoch, &s->attrset, (s->prefetch_data ? &s->data : nullptr), objv_tracker, y); if (r == -ENOENT) { s->exists = false; @@ -83,11 +86,11 @@ int RGWSI_SysObj_Core::get_system_obj_state_impl(RGWSysObjectCtxBase *rctx, s->has_attrs = true; s->obj_tag = s->attrset[RGW_ATTR_ID_TAG]; - if (s->obj_tag.length()) - ldout(cct, 20) << "get_system_obj_state: setting s->obj_tag to " - << s->obj_tag.c_str() << dendl; - else - ldout(cct, 20) << "get_system_obj_state: s->obj_tag was set empty" << dendl; + if (s->obj_tag.length()) { + ldpp_dout(dpp, 20) << "get_system_obj_state: setting s->obj_tag to " << s->obj_tag.c_str() << dendl; + } else { + ldpp_dout(dpp, 20) << "get_system_obj_state: s->obj_tag was set empty" << dendl; + } return 0; } @@ -96,24 +99,25 @@ int RGWSI_SysObj_Core::get_system_obj_state(RGWSysObjectCtxBase *rctx, const rgw_raw_obj& obj, RGWSysObjState **state, RGWObjVersionTracker *objv_tracker, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { int ret; do { - ret = get_system_obj_state_impl(rctx, obj, state, objv_tracker, y); + ret = get_system_obj_state_impl(rctx, obj, state, objv_tracker, y, dpp); } while (ret == -EAGAIN); return ret; } -int RGWSI_SysObj_Core::raw_stat(const rgw_raw_obj& obj, uint64_t *psize, real_time *pmtime, uint64_t *epoch, +int RGWSI_SysObj_Core::raw_stat(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, uint64_t *psize, real_time *pmtime, uint64_t *epoch, map *attrs, bufferlist *first_chunk, RGWObjVersionTracker *objv_tracker, optional_yield y) { RGWSI_RADOS::Obj rados_obj; - int r = get_rados_obj(zone_svc, obj, &rados_obj); + int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj); if (r < 0) { return r; } @@ -133,7 +137,7 @@ int RGWSI_SysObj_Core::raw_stat(const rgw_raw_obj& obj, uint64_t *psize, real_ti op.read(0, cct->_conf->rgw_max_chunk_size, first_chunk, nullptr); } bufferlist outbl; - r = rados_obj.operate(&op, &outbl, y); + r = rados_obj.operate(dpp, &op, &outbl, y); if (epoch) { *epoch = rados_obj.get_last_version(); @@ -158,11 +162,12 @@ int RGWSI_SysObj_Core::stat(RGWSysObjectCtxBase& obj_ctx, real_time *lastmod, uint64_t *obj_size, RGWObjVersionTracker *objv_tracker, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { RGWSysObjState *astate = nullptr; - int r = get_system_obj_state(&obj_ctx, obj, &astate, objv_tracker, y); + int r = get_system_obj_state(&obj_ctx, obj, &astate, objv_tracker, y, dpp); if (r < 0) return r; @@ -179,7 +184,7 @@ int RGWSI_SysObj_Core::stat(RGWSysObjectCtxBase& obj_ctx, if (cct->_conf->subsys.should_gather()) { map::iterator iter; for (iter = attrs->begin(); iter != 
attrs->end(); ++iter) { - ldout(cct, 20) << "Read xattr: " << iter->first << dendl; + ldpp_dout(dpp, 20) << "Read xattr: " << iter->first << dendl; } } } @@ -192,7 +197,8 @@ int RGWSI_SysObj_Core::stat(RGWSysObjectCtxBase& obj_ctx, return 0; } -int RGWSI_SysObj_Core::read(RGWSysObjectCtxBase& obj_ctx, +int RGWSI_SysObj_Core::read(const DoutPrefixProvider *dpp, + RGWSysObjectCtxBase& obj_ctx, RGWSI_SysObj_Obj_GetObjState& _read_state, RGWObjVersionTracker *objv_tracker, const rgw_raw_obj& obj, @@ -217,7 +223,7 @@ int RGWSI_SysObj_Core::read(RGWSysObjectCtxBase& obj_ctx, objv_tracker->prepare_op_for_read(&op); } - ldout(cct, 20) << "rados->read ofs=" << ofs << " len=" << len << dendl; + ldpp_dout(dpp, 20) << "rados->read ofs=" << ofs << " len=" << len << dendl; op.read(ofs, len, bl, nullptr); map unfiltered_attrset; @@ -231,23 +237,23 @@ int RGWSI_SysObj_Core::read(RGWSysObjectCtxBase& obj_ctx, } RGWSI_RADOS::Obj rados_obj; - int r = get_rados_obj(zone_svc, obj, &rados_obj); + int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj); if (r < 0) { - ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; + ldpp_dout(dpp, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; return r; } - r = rados_obj.operate(&op, nullptr, y); + r = rados_obj.operate(dpp, &op, nullptr, y); if (r < 0) { - ldout(cct, 20) << "rados_obj.operate() r=" << r << " bl.length=" << bl->length() << dendl; + ldpp_dout(dpp, 20) << "rados_obj.operate() r=" << r << " bl.length=" << bl->length() << dendl; return r; } - ldout(cct, 20) << "rados_obj.operate() r=" << r << " bl.length=" << bl->length() << dendl; + ldpp_dout(dpp, 20) << "rados_obj.operate() r=" << r << " bl.length=" << bl->length() << dendl; uint64_t op_ver = rados_obj.get_last_version(); if (read_state.last_ver > 0 && read_state.last_ver != op_ver) { - ldout(cct, 5) << "raced with an object write, abort" << dendl; + ldpp_dout(dpp, 5) << "raced with an object write, abort" << dendl; return -ECANCELED; } @@ -267,15 +273,16 @@ int RGWSI_SysObj_Core::read(RGWSysObjectCtxBase& obj_ctx, * dest: bufferlist to store the result in * Returns: 0 on success, -ERR# otherwise. 
*/ -int RGWSI_SysObj_Core::get_attr(const rgw_raw_obj& obj, +int RGWSI_SysObj_Core::get_attr(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, const char *name, bufferlist *dest, optional_yield y) { RGWSI_RADOS::Obj rados_obj; - int r = get_rados_obj(zone_svc, obj, &rados_obj); + int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj); if (r < 0) { - ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; + ldpp_dout(dpp, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; return r; } @@ -284,23 +291,24 @@ int RGWSI_SysObj_Core::get_attr(const rgw_raw_obj& obj, int rval; op.getxattr(name, dest, &rval); - r = rados_obj.operate(&op, nullptr, y); + r = rados_obj.operate(dpp, &op, nullptr, y); if (r < 0) return r; return 0; } -int RGWSI_SysObj_Core::set_attrs(const rgw_raw_obj& obj, +int RGWSI_SysObj_Core::set_attrs(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, map& attrs, map *rmattrs, RGWObjVersionTracker *objv_tracker, optional_yield y) { RGWSI_RADOS::Obj rados_obj; - int r = get_rados_obj(zone_svc, obj, &rados_obj); + int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj); if (r < 0) { - ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; + ldpp_dout(dpp, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; return r; } @@ -333,7 +341,7 @@ int RGWSI_SysObj_Core::set_attrs(const rgw_raw_obj& obj, bufferlist bl; - r = rados_obj.operate(&op, y); + r = rados_obj.operate(dpp, &op, y); if (r < 0) return r; @@ -343,7 +351,8 @@ int RGWSI_SysObj_Core::set_attrs(const rgw_raw_obj& obj, return 0; } -int RGWSI_SysObj_Core::omap_get_vals(const rgw_raw_obj& obj, +int RGWSI_SysObj_Core::omap_get_vals(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, const string& marker, uint64_t count, std::map *m, @@ -351,9 +360,9 @@ int RGWSI_SysObj_Core::omap_get_vals(const rgw_raw_obj& obj, optional_yield y) { RGWSI_RADOS::Obj rados_obj; - int r = get_rados_obj(zone_svc, obj, &rados_obj); + int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj); if (r < 0) { - ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; + ldpp_dout(dpp, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; return r; } @@ -367,7 +376,7 @@ int RGWSI_SysObj_Core::omap_get_vals(const rgw_raw_obj& obj, int rval; op.omap_get_vals2(start_after, count, &t, &more, &rval); - r = rados_obj.operate(&op, nullptr, y); + r = rados_obj.operate(dpp, &op, nullptr, y); if (r < 0) { return r; } @@ -385,14 +394,15 @@ int RGWSI_SysObj_Core::omap_get_vals(const rgw_raw_obj& obj, return 0; } -int RGWSI_SysObj_Core::omap_get_all(const rgw_raw_obj& obj, +int RGWSI_SysObj_Core::omap_get_all(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, std::map *m, optional_yield y) { RGWSI_RADOS::Obj rados_obj; - int r = get_rados_obj(zone_svc, obj, &rados_obj); + int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj); if (r < 0) { - ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; + ldpp_dout(dpp, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; return r; } @@ -408,7 +418,7 @@ int RGWSI_SysObj_Core::omap_get_all(const rgw_raw_obj& obj, int rval; op.omap_get_vals2(start_after, count, &t, &more, &rval); - r = rados_obj.operate(&op, nullptr, y); + r = rados_obj.operate(dpp, &op, nullptr, y); if (r < 0) { return r; } @@ -421,18 +431,18 @@ int RGWSI_SysObj_Core::omap_get_all(const rgw_raw_obj& obj, return 0; } -int RGWSI_SysObj_Core::omap_set(const rgw_raw_obj& obj, 
const std::string& key, +int RGWSI_SysObj_Core::omap_set(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, const std::string& key, bufferlist& bl, bool must_exist, optional_yield y) { RGWSI_RADOS::Obj rados_obj; - int r = get_rados_obj(zone_svc, obj, &rados_obj); + int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj); if (r < 0) { - ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; + ldpp_dout(dpp, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; return r; } - ldout(cct, 15) << "omap_set obj=" << obj << " key=" << key << dendl; + ldpp_dout(dpp, 15) << "omap_set obj=" << obj << " key=" << key << dendl; map m; m[key] = bl; @@ -440,18 +450,18 @@ int RGWSI_SysObj_Core::omap_set(const rgw_raw_obj& obj, const std::string& key, if (must_exist) op.assert_exists(); op.omap_set(m); - r = rados_obj.operate(&op, y); + r = rados_obj.operate(dpp, &op, y); return r; } -int RGWSI_SysObj_Core::omap_set(const rgw_raw_obj& obj, +int RGWSI_SysObj_Core::omap_set(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, const std::map& m, bool must_exist, optional_yield y) { RGWSI_RADOS::Obj rados_obj; - int r = get_rados_obj(zone_svc, obj, &rados_obj); + int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj); if (r < 0) { - ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; + ldpp_dout(dpp, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; return r; } @@ -459,17 +469,17 @@ int RGWSI_SysObj_Core::omap_set(const rgw_raw_obj& obj, if (must_exist) op.assert_exists(); op.omap_set(m); - r = rados_obj.operate(&op, y); + r = rados_obj.operate(dpp, &op, y); return r; } -int RGWSI_SysObj_Core::omap_del(const rgw_raw_obj& obj, const std::string& key, +int RGWSI_SysObj_Core::omap_del(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, const std::string& key, optional_yield y) { RGWSI_RADOS::Obj rados_obj; - int r = get_rados_obj(zone_svc, obj, &rados_obj); + int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj); if (r < 0) { - ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; + ldpp_dout(dpp, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; return r; } @@ -480,34 +490,35 @@ int RGWSI_SysObj_Core::omap_del(const rgw_raw_obj& obj, const std::string& key, op.omap_rm_keys(k); - r = rados_obj.operate(&op, y); + r = rados_obj.operate(dpp, &op, y); return r; } -int RGWSI_SysObj_Core::notify(const rgw_raw_obj& obj, bufferlist& bl, +int RGWSI_SysObj_Core::notify(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl, optional_yield y) { RGWSI_RADOS::Obj rados_obj; - int r = get_rados_obj(zone_svc, obj, &rados_obj); + int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj); if (r < 0) { - ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; + ldpp_dout(dpp, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; return r; } - r = rados_obj.notify(bl, timeout_ms, pbl, y); + r = rados_obj.notify(dpp, bl, timeout_ms, pbl, y); return r; } -int RGWSI_SysObj_Core::remove(RGWSysObjectCtxBase& obj_ctx, +int RGWSI_SysObj_Core::remove(const DoutPrefixProvider *dpp, + RGWSysObjectCtxBase& obj_ctx, RGWObjVersionTracker *objv_tracker, const rgw_raw_obj& obj, optional_yield y) { RGWSI_RADOS::Obj rados_obj; - int r = get_rados_obj(zone_svc, obj, &rados_obj); + int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj); if (r < 0) { - ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " 
returned " << r << dendl; + ldpp_dout(dpp, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; return r; } @@ -518,14 +529,15 @@ int RGWSI_SysObj_Core::remove(RGWSysObjectCtxBase& obj_ctx, } op.remove(); - r = rados_obj.operate(&op, y); + r = rados_obj.operate(dpp, &op, y); if (r < 0) return r; return 0; } -int RGWSI_SysObj_Core::write(const rgw_raw_obj& obj, +int RGWSI_SysObj_Core::write(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, real_time *pmtime, map& attrs, bool exclusive, @@ -535,9 +547,9 @@ int RGWSI_SysObj_Core::write(const rgw_raw_obj& obj, optional_yield y) { RGWSI_RADOS::Obj rados_obj; - int r = get_rados_obj(zone_svc, obj, &rados_obj); + int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj); if (r < 0) { - ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; + ldpp_dout(dpp, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; return r; } @@ -575,7 +587,7 @@ int RGWSI_SysObj_Core::write(const rgw_raw_obj& obj, op.setxattr(name.c_str(), bl); } - r = rados_obj.operate(&op, y); + r = rados_obj.operate(dpp, &op, y); if (r < 0) { return r; } @@ -592,16 +604,17 @@ int RGWSI_SysObj_Core::write(const rgw_raw_obj& obj, } -int RGWSI_SysObj_Core::write_data(const rgw_raw_obj& obj, +int RGWSI_SysObj_Core::write_data(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, const bufferlist& bl, bool exclusive, RGWObjVersionTracker *objv_tracker, optional_yield y) { RGWSI_RADOS::Obj rados_obj; - int r = get_rados_obj(zone_svc, obj, &rados_obj); + int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj); if (r < 0) { - ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; + ldpp_dout(dpp, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; return r; } @@ -615,7 +628,7 @@ int RGWSI_SysObj_Core::write_data(const rgw_raw_obj& obj, objv_tracker->prepare_op_for_write(&op); } op.write_full(bl); - r = rados_obj.operate(&op, y); + r = rados_obj.operate(dpp, &op, y); if (r < 0) return r; @@ -625,7 +638,8 @@ int RGWSI_SysObj_Core::write_data(const rgw_raw_obj& obj, return 0; } -int RGWSI_SysObj_Core::pool_list_prefixed_objs(const rgw_pool& pool, const string& prefix, +int RGWSI_SysObj_Core::pool_list_prefixed_objs(const DoutPrefixProvider *dpp, + const rgw_pool& pool, const string& prefix, std::function cb) { bool is_truncated; @@ -636,7 +650,7 @@ int RGWSI_SysObj_Core::pool_list_prefixed_objs(const rgw_pool& pool, const strin RGWAccessListFilterPrefix filter(prefix); - int r = op.init(string(), &filter); + int r = op.init(dpp, string(), &filter); if (r < 0) { return r; } @@ -658,7 +672,8 @@ int RGWSI_SysObj_Core::pool_list_prefixed_objs(const rgw_pool& pool, const strin return 0; } -int RGWSI_SysObj_Core::pool_list_objects_init(const rgw_pool& pool, +int RGWSI_SysObj_Core::pool_list_objects_init(const DoutPrefixProvider *dpp, + const rgw_pool& pool, const string& marker, const string& prefix, RGWSI_SysObj::Pool::ListCtx *_ctx) @@ -670,9 +685,9 @@ int RGWSI_SysObj_Core::pool_list_objects_init(const rgw_pool& pool, ctx.pool = rados_svc->pool(pool); ctx.op = ctx.pool.op(); - int r = ctx.op.init(marker, &ctx.filter); + int r = ctx.op.init(dpp, marker, &ctx.filter); if (r < 0) { - ldout(cct, 10) << "failed to list objects pool_iterate_begin() returned r=" << r << dendl; + ldpp_dout(dpp, 10) << "failed to list objects pool_iterate_begin() returned r=" << r << dendl; return r; } return 0; diff --git a/src/rgw/services/svc_sys_obj_core.h b/src/rgw/services/svc_sys_obj_core.h index 
24659ae209d68..52c94051c7188 100644 --- a/src/rgw/services/svc_sys_obj_core.h +++ b/src/rgw/services/svc_sys_obj_core.h @@ -31,14 +31,16 @@ protected: rados_svc = _rados_svc; zone_svc = _zone_svc; } - int get_rados_obj(RGWSI_Zone *zone_svc, const rgw_raw_obj& obj, RGWSI_RADOS::Obj *pobj); + int get_rados_obj(const DoutPrefixProvider *dpp, RGWSI_Zone *zone_svc, const rgw_raw_obj& obj, RGWSI_RADOS::Obj *pobj); - virtual int raw_stat(const rgw_raw_obj& obj, uint64_t *psize, real_time *pmtime, uint64_t *epoch, + virtual int raw_stat(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, uint64_t *psize, + real_time *pmtime, uint64_t *epoch, map *attrs, bufferlist *first_chunk, RGWObjVersionTracker *objv_tracker, optional_yield y); - virtual int read(RGWSysObjectCtxBase& obj_ctx, + virtual int read(const DoutPrefixProvider *dpp, + RGWSysObjectCtxBase& obj_ctx, RGWSI_SysObj_Obj_GetObjState& read_state, RGWObjVersionTracker *objv_tracker, const rgw_raw_obj& obj, @@ -49,12 +51,14 @@ protected: boost::optional, optional_yield y); - virtual int remove(RGWSysObjectCtxBase& obj_ctx, + virtual int remove(const DoutPrefixProvider *dpp, + RGWSysObjectCtxBase& obj_ctx, RGWObjVersionTracker *objv_tracker, const rgw_raw_obj& obj, optional_yield y); - virtual int write(const rgw_raw_obj& obj, + virtual int write(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, real_time *pmtime, map& attrs, bool exclusive, @@ -63,47 +67,54 @@ protected: real_time set_mtime, optional_yield y); - virtual int write_data(const rgw_raw_obj& obj, + virtual int write_data(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, const bufferlist& bl, bool exclusive, RGWObjVersionTracker *objv_tracker, optional_yield y); - virtual int get_attr(const rgw_raw_obj& obj, const char *name, bufferlist *dest, + virtual int get_attr(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, + const char *name, bufferlist *dest, optional_yield y); - virtual int set_attrs(const rgw_raw_obj& obj, + virtual int set_attrs(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, map& attrs, map *rmattrs, RGWObjVersionTracker *objv_tracker, optional_yield y); - virtual int omap_get_all(const rgw_raw_obj& obj, std::map *m, + virtual int omap_get_all(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, std::map *m, optional_yield y); - virtual int omap_get_vals(const rgw_raw_obj& obj, + virtual int omap_get_vals(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, const string& marker, uint64_t count, std::map *m, bool *pmore, optional_yield y); - virtual int omap_set(const rgw_raw_obj& obj, const std::string& key, + virtual int omap_set(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, const std::string& key, bufferlist& bl, bool must_exist, optional_yield y); - virtual int omap_set(const rgw_raw_obj& obj, + virtual int omap_set(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, const map& m, bool must_exist, optional_yield y); - virtual int omap_del(const rgw_raw_obj& obj, const std::string& key, + virtual int omap_del(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, const std::string& key, optional_yield y); - virtual int notify(const rgw_raw_obj& obj, bufferlist& bl, + virtual int notify(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl, optional_yield y); - virtual int pool_list_prefixed_objs(const rgw_pool& pool, + virtual int pool_list_prefixed_objs(const DoutPrefixProvider *dpp, + const rgw_pool& pool, const string& prefix, std::function cb); - virtual int 
pool_list_objects_init(const rgw_pool& pool, + virtual int pool_list_objects_init(const DoutPrefixProvider *dpp, + const rgw_pool& pool, const std::string& marker, const std::string& prefix, RGWSI_SysObj::Pool::ListCtx *ctx); @@ -119,11 +130,13 @@ protected: int get_system_obj_state_impl(RGWSysObjectCtxBase *rctx, const rgw_raw_obj& obj, RGWSysObjState **state, RGWObjVersionTracker *objv_tracker, - optional_yield y); + optional_yield y, + const DoutPrefixProvider *dpp); int get_system_obj_state(RGWSysObjectCtxBase *rctx, const rgw_raw_obj& obj, RGWSysObjState **state, RGWObjVersionTracker *objv_tracker, - optional_yield y); + optional_yield y, + const DoutPrefixProvider *dpp); int stat(RGWSysObjectCtxBase& obj_ctx, RGWSI_SysObj_Obj_GetObjState& state, @@ -133,7 +146,8 @@ protected: real_time *lastmod, uint64_t *obj_size, RGWObjVersionTracker *objv_tracker, - optional_yield y); + optional_yield y, + const DoutPrefixProvider *dpp); public: RGWSI_SysObj_Core(CephContext *cct): RGWServiceInstance(cct) {} diff --git a/src/rgw/services/svc_sys_obj_core_types.h b/src/rgw/services/svc_sys_obj_core_types.h index 002f2763a909b..f45fe77f5b4a3 100644 --- a/src/rgw/services/svc_sys_obj_core_types.h +++ b/src/rgw/services/svc_sys_obj_core_types.h @@ -18,7 +18,8 @@ struct RGWSI_SysObj_Core_GetObjState : public RGWSI_SysObj_Obj_GetObjState { RGWSI_SysObj_Core_GetObjState() {} - int get_rados_obj(RGWSI_RADOS *rados_svc, + int get_rados_obj(const DoutPrefixProvider *dpp, + RGWSI_RADOS *rados_svc, RGWSI_Zone *zone_svc, const rgw_raw_obj& obj, RGWSI_RADOS::Obj **pobj); diff --git a/src/rgw/services/svc_user.h b/src/rgw/services/svc_user.h index 5c27e587c512e..37e533d6dfc4f 100644 --- a/src/rgw/services/svc_user.h +++ b/src/rgw/services/svc_user.h @@ -49,7 +49,8 @@ public: real_time * const pmtime, rgw_cache_entry_info * const cache_info, map * const pattrs, - optional_yield y) = 0; + optional_yield y, + const DoutPrefixProvider *dpp) = 0; virtual int store_user_info(RGWSI_MetaBackend::Context *ctx, const RGWUserInfo& info, @@ -58,40 +59,48 @@ public: const real_time& mtime, bool exclusive, map *attrs, - optional_yield y) = 0; + optional_yield y, + const DoutPrefixProvider *dpp) = 0; virtual int remove_user_info(RGWSI_MetaBackend::Context *ctx, const RGWUserInfo& info, RGWObjVersionTracker *objv_tracker, - optional_yield y) = 0; + optional_yield y, + const DoutPrefixProvider *dpp) = 0; virtual int get_user_info_by_email(RGWSI_MetaBackend::Context *ctx, const string& email, RGWUserInfo *info, RGWObjVersionTracker *objv_tracker, real_time *pmtime, - optional_yield y) = 0; + optional_yield y, + const DoutPrefixProvider *dpp) = 0; virtual int get_user_info_by_swift(RGWSI_MetaBackend::Context *ctx, const string& swift_name, RGWUserInfo *info, /* out */ RGWObjVersionTracker * const objv_tracker, real_time * const pmtime, - optional_yield y) = 0; + optional_yield y, + const DoutPrefixProvider *dpp) = 0; virtual int get_user_info_by_access_key(RGWSI_MetaBackend::Context *ctx, const std::string& access_key, RGWUserInfo *info, RGWObjVersionTracker* objv_tracker, real_time *pmtime, - optional_yield y) = 0; + optional_yield y, + const DoutPrefixProvider *dpp) = 0; - virtual int add_bucket(RGWSI_MetaBackend::Context *ctx, + virtual int add_bucket(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, const rgw_bucket& bucket, ceph::real_time creation_time, optional_yield y) = 0; - virtual int remove_bucket(RGWSI_MetaBackend::Context *ctx, + virtual int remove_bucket(const DoutPrefixProvider 
*dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, const rgw_bucket& _bucket, optional_yield) = 0; - virtual int list_buckets(RGWSI_MetaBackend::Context *ctx, + virtual int list_buckets(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, const string& marker, const string& end_marker, @@ -100,21 +109,24 @@ public: bool *is_truncated, optional_yield y) = 0; - virtual int flush_bucket_stats(RGWSI_MetaBackend::Context *ctx, + virtual int flush_bucket_stats(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, const RGWBucketEnt& ent, optional_yield y) = 0; - virtual int complete_flush_stats(RGWSI_MetaBackend::Context *ctx, + virtual int complete_flush_stats(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *ctx, const rgw_user& user, optional_yield y) = 0; - virtual int reset_bucket_stats(RGWSI_MetaBackend::Context *ctx, + virtual int reset_bucket_stats(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, optional_yield y) = 0; - virtual int read_stats(RGWSI_MetaBackend::Context *ctx, + virtual int read_stats(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, RGWStorageStats *stats, ceph::real_time *last_stats_sync, /* last time a full stats sync completed */ ceph::real_time *last_stats_update, optional_yield y) = 0; /* last time a stats update was done */ - virtual int read_stats_async(RGWSI_MetaBackend::Context *ctx, + virtual int read_stats_async(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *ctx, const rgw_user& user, RGWGetUserStats_CB *cb) = 0; }; diff --git a/src/rgw/services/svc_user_rados.cc b/src/rgw/services/svc_user_rados.cc index 14dcb6367f287..eb30f3e663ee2 100644 --- a/src/rgw/services/svc_user_rados.cc +++ b/src/rgw/services/svc_user_rados.cc @@ -81,14 +81,14 @@ void RGWSI_User_RADOS::init(RGWSI_RADOS *_rados_svc, svc.sync_modules = _sync_modules_svc; } -int RGWSI_User_RADOS::do_start(optional_yield) +int RGWSI_User_RADOS::do_start(optional_yield, const DoutPrefixProvider *dpp) { uinfo_cache.reset(new RGWChainedCacheImpl); uinfo_cache->init(svc.cache); int r = svc.meta->create_be_handler(RGWSI_MetaBackend::Type::MDBE_SOBJ, &be_handler); if (r < 0) { - ldout(ctx(), 0) << "ERROR: failed to create be handler: r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to create be handler: r=" << r << dendl; return r; } @@ -113,10 +113,11 @@ int RGWSI_User_RADOS::read_user_info(RGWSI_MetaBackend::Context *ctx, real_time * const pmtime, rgw_cache_entry_info * const cache_info, map * const pattrs, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { if(user.id == RGW_USER_ANON_ID) { - ldout(svc.meta_be->ctx(), 20) << "RGWSI_User_RADOS::read_user_info(): anonymous user" << dendl; + ldpp_dout(dpp, 20) << "RGWSI_User_RADOS::read_user_info(): anonymous user" << dendl; return -ENOENT; } bufferlist bl; @@ -125,7 +126,7 @@ int RGWSI_User_RADOS::read_user_info(RGWSI_MetaBackend::Context *ctx, RGWSI_MBSObj_GetParams params(&bl, pattrs, pmtime); params.set_cache_info(cache_info); - int ret = svc.meta_be->get_entry(ctx, get_meta_key(user), params, objv_tracker, y); + int ret = svc.meta_be->get_entry(ctx, get_meta_key(user), params, objv_tracker, y, dpp); if (ret < 0) { return ret; } @@ -134,14 +135,14 @@ int RGWSI_User_RADOS::read_user_info(RGWSI_MetaBackend::Context *ctx, try { decode(user_id, iter); if (user_id.user_id != user) { - lderr(svc.meta_be->ctx()) << "ERROR: rgw_get_user_info_by_uid(): user 
id mismatch: " << user_id.user_id << " != " << user << dendl; + ldpp_dout(dpp, -1) << "ERROR: rgw_get_user_info_by_uid(): user id mismatch: " << user_id.user_id << " != " << user << dendl; return -EIO; } if (!iter.end()) { decode(*info, iter); } } catch (buffer::error& err) { - ldout(svc.meta_be->ctx(), 0) << "ERROR: failed to decode user info, caught buffer::error" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to decode user info, caught buffer::error" << dendl; return -EIO; } @@ -186,7 +187,7 @@ public: ui.user_id = info.user_id; } - int prepare() { + int prepare(const DoutPrefixProvider *dpp) { if (objv_tracker) { ot = *objv_tracker; } @@ -206,10 +207,10 @@ public: auto& k = iter->second; /* check if swift mapping exists */ RGWUserInfo inf; - int r = svc.user->get_user_info_by_swift(ctx, k.id, &inf, nullptr, nullptr, y); + int r = svc.user->get_user_info_by_swift(ctx, k.id, &inf, nullptr, nullptr, y, dpp); if (r >= 0 && inf.user_id != info.user_id && (!old_info || inf.user_id != old_info->user_id)) { - ldout(svc.meta_be->ctx(), 0) << "WARNING: can't store user info, swift id (" << k.id + ldpp_dout(dpp, 0) << "WARNING: can't store user info, swift id (" << k.id << ") already mapped to another user (" << info.user_id << ")" << dendl; return -EEXIST; } @@ -221,10 +222,10 @@ public: continue; auto& k = iter->second; RGWUserInfo inf; - int r = svc.user->get_user_info_by_access_key(ctx, k.id, &inf, nullptr, nullptr, y); + int r = svc.user->get_user_info_by_access_key(ctx, k.id, &inf, nullptr, nullptr, y, dpp); if (r >= 0 && inf.user_id != info.user_id && (!old_info || inf.user_id != old_info->user_id)) { - ldout(svc.meta_be->ctx(), 0) << "WARNING: can't store user info, access key already mapped to another user" << dendl; + ldpp_dout(dpp, 0) << "WARNING: can't store user info, access key already mapped to another user" << dendl; return -EEXIST; } } @@ -232,21 +233,21 @@ public: return 0; } - int put() { + int put(const DoutPrefixProvider *dpp) { bufferlist data_bl; encode(ui, data_bl); encode(info, data_bl); RGWSI_MBSObj_PutParams params(data_bl, pattrs, mtime, exclusive); - int ret = svc.meta_be->put(ctx, RGWSI_User::get_meta_key(info.user_id), params, &ot, y); + int ret = svc.meta_be->put(ctx, RGWSI_User::get_meta_key(info.user_id), params, &ot, y, dpp); if (ret < 0) return ret; return 0; } - int complete() { + int complete(const DoutPrefixProvider *dpp) { int ret; bufferlist link_bl; @@ -257,7 +258,7 @@ public: if (!info.user_email.empty()) { if (!old_info || old_info->user_email.compare(info.user_email) != 0) { /* only if new index changed */ - ret = rgw_put_system_obj(obj_ctx, svc.zone->get_zone_params().user_email_pool, info.user_email, + ret = rgw_put_system_obj(dpp, obj_ctx, svc.zone->get_zone_params().user_email_pool, info.user_email, link_bl, exclusive, NULL, real_time(), y); if (ret < 0) return ret; @@ -270,7 +271,7 @@ public: if (old_info && old_info->access_keys.count(iter->first) != 0 && !renamed) continue; - ret = rgw_put_system_obj(obj_ctx, svc.zone->get_zone_params().user_keys_pool, k.id, + ret = rgw_put_system_obj(dpp, obj_ctx, svc.zone->get_zone_params().user_keys_pool, k.id, link_bl, exclusive, NULL, real_time(), y); if (ret < 0) return ret; @@ -281,14 +282,14 @@ public: if (old_info && old_info->swift_keys.count(siter->first) != 0 && !renamed) continue; - ret = rgw_put_system_obj(obj_ctx, svc.zone->get_zone_params().user_swift_pool, k.id, + ret = rgw_put_system_obj(dpp, obj_ctx, svc.zone->get_zone_params().user_swift_pool, k.id, link_bl, exclusive, NULL, real_time(), 
y); if (ret < 0) return ret; } if (old_info) { - ret = remove_old_indexes(*old_info, info, y); + ret = remove_old_indexes(*old_info, info, y, dpp); if (ret < 0) { return ret; } @@ -297,16 +298,16 @@ public: return 0; } - int remove_old_indexes(const RGWUserInfo& old_info, const RGWUserInfo& new_info, optional_yield y) { + int remove_old_indexes(const RGWUserInfo& old_info, const RGWUserInfo& new_info, optional_yield y, const DoutPrefixProvider *dpp) { int ret; if (!old_info.user_id.empty() && old_info.user_id != new_info.user_id) { if (old_info.user_id.tenant != new_info.user_id.tenant) { - ldout(svc.user->ctx(), 0) << "ERROR: tenant mismatch: " << old_info.user_id.tenant << " != " << new_info.user_id.tenant << dendl; + ldpp_dout(dpp, 0) << "ERROR: tenant mismatch: " << old_info.user_id.tenant << " != " << new_info.user_id.tenant << dendl; return -EINVAL; } - ret = svc.user->remove_uid_index(ctx, old_info, nullptr, y); + ret = svc.user->remove_uid_index(ctx, old_info, nullptr, y, dpp); if (ret < 0 && ret != -ENOENT) { set_err_msg("ERROR: could not remove index for uid " + old_info.user_id.to_str()); return ret; @@ -315,7 +316,7 @@ public: if (!old_info.user_email.empty() && old_info.user_email != new_info.user_email) { - ret = svc.user->remove_email_index(ctx, old_info.user_email, y); + ret = svc.user->remove_email_index(dpp, ctx, old_info.user_email, y); if (ret < 0 && ret != -ENOENT) { set_err_msg("ERROR: could not remove index for email " + old_info.user_email); return ret; @@ -324,7 +325,7 @@ public: for ([[maybe_unused]] const auto& [name, access_key] : old_info.access_keys) { if (!new_info.access_keys.count(access_key.id)) { - ret = svc.user->remove_key_index(ctx, access_key, y); + ret = svc.user->remove_key_index(dpp, ctx, access_key, y); if (ret < 0 && ret != -ENOENT) { set_err_msg("ERROR: could not remove index for key " + access_key.id); return ret; @@ -336,7 +337,7 @@ public: const auto& swift_key = old_iter->second; auto new_iter = new_info.swift_keys.find(swift_key.id); if (new_iter == new_info.swift_keys.end()) { - ret = svc.user->remove_swift_name_index(ctx, swift_key.id, y); + ret = svc.user->remove_swift_name_index(dpp, ctx, swift_key.id, y); if (ret < 0 && ret != -ENOENT) { set_err_msg("ERROR: could not remove index for swift_name " + swift_key.id); return ret; @@ -359,7 +360,8 @@ int RGWSI_User_RADOS::store_user_info(RGWSI_MetaBackend::Context *ctx, const real_time& mtime, bool exclusive, map *attrs, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { PutOperation op(svc, ctx, info, old_info, @@ -368,17 +370,17 @@ int RGWSI_User_RADOS::store_user_info(RGWSI_MetaBackend::Context *ctx, attrs, y); - int r = op.prepare(); + int r = op.prepare(dpp); if (r < 0) { return r; } - r = op.put(); + r = op.put(dpp); if (r < 0) { return r; } - r = op.complete(); + r = op.complete(dpp); if (r < 0) { return r; } @@ -386,17 +388,19 @@ int RGWSI_User_RADOS::store_user_info(RGWSI_MetaBackend::Context *ctx, return 0; } -int RGWSI_User_RADOS::remove_key_index(RGWSI_MetaBackend::Context *_ctx, +int RGWSI_User_RADOS::remove_key_index(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *_ctx, const RGWAccessKey& access_key, optional_yield y) { RGWSI_MetaBackend_SObj::Context_SObj *ctx = static_cast(_ctx); rgw_raw_obj obj(svc.zone->get_zone_params().user_keys_pool, access_key.id); auto sysobj = ctx->obj_ctx->get_obj(obj); - return sysobj.wop().remove(y); + return sysobj.wop().remove(dpp, y); } -int RGWSI_User_RADOS::remove_email_index(RGWSI_MetaBackend::Context 
*_ctx, +int RGWSI_User_RADOS::remove_email_index(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *_ctx, const string& email, optional_yield y) { @@ -406,16 +410,16 @@ int RGWSI_User_RADOS::remove_email_index(RGWSI_MetaBackend::Context *_ctx, RGWSI_MetaBackend_SObj::Context_SObj *ctx = static_cast(_ctx); rgw_raw_obj obj(svc.zone->get_zone_params().user_email_pool, email); auto sysobj = ctx->obj_ctx->get_obj(obj); - return sysobj.wop().remove(y); + return sysobj.wop().remove(dpp, y); } -int RGWSI_User_RADOS::remove_swift_name_index(RGWSI_MetaBackend::Context *_ctx, const string& swift_name, +int RGWSI_User_RADOS::remove_swift_name_index(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *_ctx, const string& swift_name, optional_yield y) { RGWSI_MetaBackend_SObj::Context_SObj *ctx = static_cast(_ctx); rgw_raw_obj obj(svc.zone->get_zone_params().user_swift_pool, swift_name); auto sysobj = ctx->obj_ctx->get_obj(obj); - return sysobj.wop().remove(y); + return sysobj.wop().remove(dpp, y); } /** @@ -427,18 +431,17 @@ int RGWSI_User_RADOS::remove_swift_name_index(RGWSI_MetaBackend::Context *_ctx, int RGWSI_User_RADOS::remove_user_info(RGWSI_MetaBackend::Context *_ctx, const RGWUserInfo& info, RGWObjVersionTracker *objv_tracker, - optional_yield y) + optional_yield y, + const DoutPrefixProvider *dpp) { int ret; - auto cct = svc.meta_be->ctx(); - auto kiter = info.access_keys.begin(); for (; kiter != info.access_keys.end(); ++kiter) { - ldout(cct, 10) << "removing key index: " << kiter->first << dendl; - ret = remove_key_index(_ctx, kiter->second, y); + ldpp_dout(dpp, 10) << "removing key index: " << kiter->first << dendl; + ret = remove_key_index(dpp, _ctx, kiter->second, y); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << "ERROR: could not remove " << kiter->first << " (access key object), should be fixed (err=" << ret << ")" << dendl; + ldpp_dout(dpp, 0) << "ERROR: could not remove " << kiter->first << " (access key object), should be fixed (err=" << ret << ")" << dendl; return ret; } } @@ -446,34 +449,34 @@ int RGWSI_User_RADOS::remove_user_info(RGWSI_MetaBackend::Context *_ctx, auto siter = info.swift_keys.begin(); for (; siter != info.swift_keys.end(); ++siter) { auto& k = siter->second; - ldout(cct, 10) << "removing swift subuser index: " << k.id << dendl; + ldpp_dout(dpp, 10) << "removing swift subuser index: " << k.id << dendl; /* check if swift mapping exists */ - ret = remove_swift_name_index(_ctx, k.id, y); + ret = remove_swift_name_index(dpp, _ctx, k.id, y); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << "ERROR: could not remove " << k.id << " (swift name object), should be fixed (err=" << ret << ")" << dendl; + ldpp_dout(dpp, 0) << "ERROR: could not remove " << k.id << " (swift name object), should be fixed (err=" << ret << ")" << dendl; return ret; } } - ldout(cct, 10) << "removing email index: " << info.user_email << dendl; - ret = remove_email_index(_ctx, info.user_email, y); + ldpp_dout(dpp, 10) << "removing email index: " << info.user_email << dendl; + ret = remove_email_index(dpp, _ctx, info.user_email, y); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << "ERROR: could not remove email index object for " + ldpp_dout(dpp, 0) << "ERROR: could not remove email index object for " << info.user_email << ", should be fixed (err=" << ret << ")" << dendl; return ret; } rgw_raw_obj uid_bucks = get_buckets_obj(info.user_id); - ldout(cct, 10) << "removing user buckets index" << dendl; + ldpp_dout(dpp, 10) << "removing user buckets index" << dendl; 
RGWSI_MetaBackend_SObj::Context_SObj *ctx = static_cast(_ctx); auto sysobj = ctx->obj_ctx->get_obj(uid_bucks); - ret = sysobj.wop().remove(y); + ret = sysobj.wop().remove(dpp, y); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << "ERROR: could not remove " << info.user_id << ":" << uid_bucks << ", should be fixed (err=" << ret << ")" << dendl; + ldpp_dout(dpp, 0) << "ERROR: could not remove " << info.user_id << ":" << uid_bucks << ", should be fixed (err=" << ret << ")" << dendl; return ret; } - ret = remove_uid_index(ctx, info, objv_tracker, y); + ret = remove_uid_index(ctx, info, objv_tracker, y, dpp); if (ret < 0 && ret != -ENOENT) { return ret; } @@ -482,17 +485,17 @@ int RGWSI_User_RADOS::remove_user_info(RGWSI_MetaBackend::Context *_ctx, } int RGWSI_User_RADOS::remove_uid_index(RGWSI_MetaBackend::Context *ctx, const RGWUserInfo& user_info, RGWObjVersionTracker *objv_tracker, - optional_yield y) + optional_yield y, const DoutPrefixProvider *dpp) { - ldout(cct, 10) << "removing user index: " << user_info.user_id << dendl; + ldpp_dout(dpp, 10) << "removing user index: " << user_info.user_id << dendl; RGWSI_MBSObj_RemoveParams params; - int ret = svc.meta_be->remove(ctx, get_meta_key(user_info.user_id), params, objv_tracker, y); + int ret = svc.meta_be->remove(ctx, get_meta_key(user_info.user_id), params, objv_tracker, y, dpp); if (ret < 0 && ret != -ENOENT && ret != -ECANCELED) { string key; user_info.user_id.to_str(key); rgw_raw_obj uid_obj(svc.zone->get_zone_params().user_uid_pool, key); - ldout(cct, 0) << "ERROR: could not remove " << user_info.user_id << ":" << uid_obj << ", should be fixed (err=" << ret << ")" << dendl; + ldpp_dout(dpp, 0) << "ERROR: could not remove " << user_info.user_id << ":" << uid_obj << ", should be fixed (err=" << ret << ")" << dendl; return ret; } @@ -504,7 +507,7 @@ int RGWSI_User_RADOS::get_user_info_from_index(RGWSI_MetaBackend::Context *_ctx, const rgw_pool& pool, RGWUserInfo *info, RGWObjVersionTracker * const objv_tracker, - real_time * const pmtime, optional_yield y) + real_time * const pmtime, optional_yield y, const DoutPrefixProvider *dpp) { RGWSI_MetaBackend_SObj::Context_SObj *ctx = static_cast(_ctx); @@ -523,7 +526,7 @@ int RGWSI_User_RADOS::get_user_info_from_index(RGWSI_MetaBackend::Context *_ctx, bufferlist bl; RGWUID uid; - int ret = rgw_get_system_obj(*ctx->obj_ctx, pool, key, bl, nullptr, &e.mtime, y); + int ret = rgw_get_system_obj(*ctx->obj_ctx, pool, key, bl, nullptr, &e.mtime, y, dpp); if (ret < 0) return ret; @@ -535,16 +538,16 @@ int RGWSI_User_RADOS::get_user_info_from_index(RGWSI_MetaBackend::Context *_ctx, int ret = read_user_info(ctx, uid.user_id, &e.info, &e.objv_tracker, nullptr, &cache_info, nullptr, - y); + y, dpp); if (ret < 0) { return ret; } } catch (buffer::error& err) { - ldout(svc.meta_be->ctx(), 0) << "ERROR: failed to decode user info, caught buffer::error" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to decode user info, caught buffer::error" << dendl; return -EIO; } - uinfo_cache->put(svc.cache, cache_key, &e, { &cache_info }); + uinfo_cache->put(dpp, svc.cache, cache_key, &e, { &cache_info }); *info = e.info; if (objv_tracker) @@ -562,10 +565,11 @@ int RGWSI_User_RADOS::get_user_info_from_index(RGWSI_MetaBackend::Context *_ctx, int RGWSI_User_RADOS::get_user_info_by_email(RGWSI_MetaBackend::Context *ctx, const string& email, RGWUserInfo *info, RGWObjVersionTracker *objv_tracker, - real_time *pmtime, optional_yield y) + real_time *pmtime, optional_yield y, + const DoutPrefixProvider *dpp) { return 
get_user_info_from_index(ctx, email, svc.zone->get_zone_params().user_email_pool, - info, objv_tracker, pmtime, y); + info, objv_tracker, pmtime, y, dpp); } /** @@ -576,12 +580,13 @@ int RGWSI_User_RADOS::get_user_info_by_swift(RGWSI_MetaBackend::Context *ctx, const string& swift_name, RGWUserInfo *info, /* out */ RGWObjVersionTracker * const objv_tracker, - real_time * const pmtime, optional_yield y) + real_time * const pmtime, optional_yield y, + const DoutPrefixProvider *dpp) { return get_user_info_from_index(ctx, swift_name, svc.zone->get_zone_params().user_swift_pool, - info, objv_tracker, pmtime, y); + info, objv_tracker, pmtime, y, dpp); } /** @@ -592,25 +597,26 @@ int RGWSI_User_RADOS::get_user_info_by_access_key(RGWSI_MetaBackend::Context *ct const std::string& access_key, RGWUserInfo *info, RGWObjVersionTracker* objv_tracker, - real_time *pmtime, optional_yield y) + real_time *pmtime, optional_yield y, + const DoutPrefixProvider *dpp) { return get_user_info_from_index(ctx, access_key, svc.zone->get_zone_params().user_keys_pool, - info, objv_tracker, pmtime, y); + info, objv_tracker, pmtime, y, dpp); } -int RGWSI_User_RADOS::cls_user_update_buckets(rgw_raw_obj& obj, list& entries, bool add, optional_yield y) +int RGWSI_User_RADOS::cls_user_update_buckets(const DoutPrefixProvider *dpp, rgw_raw_obj& obj, list& entries, bool add, optional_yield y) { auto rados_obj = svc.rados->obj(obj); - int r = rados_obj.open(); + int r = rados_obj.open(dpp); if (r < 0) { return r; } librados::ObjectWriteOperation op; cls_user_set_buckets(op, entries, add); - r = rados_obj.operate(&op, y); + r = rados_obj.operate(dpp, &op, y); if (r < 0) { return r; } @@ -618,32 +624,33 @@ int RGWSI_User_RADOS::cls_user_update_buckets(rgw_raw_obj& obj, list l; l.push_back(entry); - return cls_user_update_buckets(obj, l, true, y); + return cls_user_update_buckets(dpp, obj, l, true, y); } -int RGWSI_User_RADOS::cls_user_remove_bucket(rgw_raw_obj& obj, const cls_user_bucket& bucket, optional_yield y) +int RGWSI_User_RADOS::cls_user_remove_bucket(const DoutPrefixProvider *dpp, rgw_raw_obj& obj, const cls_user_bucket& bucket, optional_yield y) { auto rados_obj = svc.rados->obj(obj); - int r = rados_obj.open(); + int r = rados_obj.open(dpp); if (r < 0) { return r; } librados::ObjectWriteOperation op; ::cls_user_remove_bucket(op, bucket); - r = rados_obj.operate(&op, y); + r = rados_obj.operate(dpp, &op, y); if (r < 0) return r; return 0; } -int RGWSI_User_RADOS::add_bucket(RGWSI_MetaBackend::Context *ctx, +int RGWSI_User_RADOS::add_bucket(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, const rgw_bucket& bucket, ceph::real_time creation_time, @@ -661,9 +668,9 @@ int RGWSI_User_RADOS::add_bucket(RGWSI_MetaBackend::Context *ctx, new_bucket.creation_time = creation_time; rgw_raw_obj obj = get_buckets_obj(user); - ret = cls_user_add_bucket(obj, new_bucket, y); + ret = cls_user_add_bucket(dpp, obj, new_bucket, y); if (ret < 0) { - ldout(cct, 0) << "ERROR: error adding bucket to user: ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: error adding bucket to user: ret=" << ret << dendl; return ret; } @@ -671,7 +678,8 @@ int RGWSI_User_RADOS::add_bucket(RGWSI_MetaBackend::Context *ctx, } -int RGWSI_User_RADOS::remove_bucket(RGWSI_MetaBackend::Context *ctx, +int RGWSI_User_RADOS::remove_bucket(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, const rgw_bucket& _bucket, optional_yield y) @@ -679,15 +687,16 @@ int 
RGWSI_User_RADOS::remove_bucket(RGWSI_MetaBackend::Context *ctx, cls_user_bucket bucket; bucket.name = _bucket.name; rgw_raw_obj obj = get_buckets_obj(user); - int ret = cls_user_remove_bucket(obj, bucket, y); + int ret = cls_user_remove_bucket(dpp, obj, bucket, y); if (ret < 0) { - ldout(cct, 0) << "ERROR: error removing bucket from user: ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: error removing bucket from user: ret=" << ret << dendl; } return 0; } -int RGWSI_User_RADOS::cls_user_flush_bucket_stats(rgw_raw_obj& user_obj, +int RGWSI_User_RADOS::cls_user_flush_bucket_stats(const DoutPrefixProvider *dpp, + rgw_raw_obj& user_obj, const RGWBucketEnt& ent, optional_yield y) { cls_user_bucket_entry entry; @@ -696,16 +705,17 @@ int RGWSI_User_RADOS::cls_user_flush_bucket_stats(rgw_raw_obj& user_obj, list entries; entries.push_back(entry); - int r = cls_user_update_buckets(user_obj, entries, false, y); + int r = cls_user_update_buckets(dpp, user_obj, entries, false, y); if (r < 0) { - ldout(cct, 20) << "cls_user_update_buckets() returned " << r << dendl; + ldpp_dout(dpp, 20) << "cls_user_update_buckets() returned " << r << dendl; return r; } return 0; } -int RGWSI_User_RADOS::cls_user_list_buckets(rgw_raw_obj& obj, +int RGWSI_User_RADOS::cls_user_list_buckets(const DoutPrefixProvider *dpp, + rgw_raw_obj& obj, const string& in_marker, const string& end_marker, const int max_entries, @@ -715,7 +725,7 @@ int RGWSI_User_RADOS::cls_user_list_buckets(rgw_raw_obj& obj, optional_yield y) { auto rados_obj = svc.rados->obj(obj); - int r = rados_obj.open(); + int r = rados_obj.open(dpp); if (r < 0) { return r; } @@ -725,7 +735,7 @@ int RGWSI_User_RADOS::cls_user_list_buckets(rgw_raw_obj& obj, cls_user_bucket_list(op, in_marker, end_marker, max_entries, entries, out_marker, truncated, &rc); bufferlist ibl; - r = rados_obj.operate(&op, &ibl, y); + r = rados_obj.operate(dpp, &op, &ibl, y); if (r < 0) return r; if (rc < 0) @@ -734,7 +744,8 @@ int RGWSI_User_RADOS::cls_user_list_buckets(rgw_raw_obj& obj, return 0; } -int RGWSI_User_RADOS::list_buckets(RGWSI_MetaBackend::Context *ctx, +int RGWSI_User_RADOS::list_buckets(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, const string& marker, const string& end_marker, @@ -746,7 +757,7 @@ int RGWSI_User_RADOS::list_buckets(RGWSI_MetaBackend::Context *ctx, buckets->clear(); if (user.id == RGW_USER_ANON_ID) { - ldout(cct, 20) << "RGWSI_User_RADOS::list_buckets(): anonymous user" << dendl; + ldpp_dout(dpp, 20) << "RGWSI_User_RADOS::list_buckets(): anonymous user" << dendl; *is_truncated = false; return 0; } @@ -759,7 +770,7 @@ int RGWSI_User_RADOS::list_buckets(RGWSI_MetaBackend::Context *ctx, do { std::list entries; - ret = cls_user_list_buckets(obj, m, end_marker, max - total, entries, &m, &truncated, y); + ret = cls_user_list_buckets(dpp, obj, m, end_marker, max - total, entries, &m, &truncated, y); if (ret == -ENOENT) { ret = 0; } @@ -782,28 +793,30 @@ int RGWSI_User_RADOS::list_buckets(RGWSI_MetaBackend::Context *ctx, return 0; } -int RGWSI_User_RADOS::flush_bucket_stats(RGWSI_MetaBackend::Context *ctx, +int RGWSI_User_RADOS::flush_bucket_stats(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, const RGWBucketEnt& ent, optional_yield y) { rgw_raw_obj obj = get_buckets_obj(user); - return cls_user_flush_bucket_stats(obj, ent, y); + return cls_user_flush_bucket_stats(dpp, obj, ent, y); } -int RGWSI_User_RADOS::reset_bucket_stats(RGWSI_MetaBackend::Context *ctx, +int 
RGWSI_User_RADOS::reset_bucket_stats(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, optional_yield y) { - return cls_user_reset_stats(user, y); + return cls_user_reset_stats(dpp, user, y); } -int RGWSI_User_RADOS::cls_user_reset_stats(const rgw_user& user, optional_yield y) +int RGWSI_User_RADOS::cls_user_reset_stats(const DoutPrefixProvider *dpp, const rgw_user& user, optional_yield y) { rgw_raw_obj obj = get_buckets_obj(user); auto rados_obj = svc.rados->obj(obj); - int rval, r = rados_obj.open(); + int rval, r = rados_obj.open(dpp); if (r < 0) { return r; } @@ -820,7 +833,7 @@ int RGWSI_User_RADOS::cls_user_reset_stats(const rgw_user& user, optional_yield encode(call, in); op.exec("user", "reset_user_stats2", in, &out, &rval); - r = rados_obj.operate(&op, y, librados::OPERATION_RETURNVEC); + r = rados_obj.operate(dpp, &op, y, librados::OPERATION_RETURNVEC); if (r < 0) { return r; } @@ -835,26 +848,28 @@ int RGWSI_User_RADOS::cls_user_reset_stats(const rgw_user& user, optional_yield return rval; } -int RGWSI_User_RADOS::complete_flush_stats(RGWSI_MetaBackend::Context *ctx, +int RGWSI_User_RADOS::complete_flush_stats(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, optional_yield y) { rgw_raw_obj obj = get_buckets_obj(user); auto rados_obj = svc.rados->obj(obj); - int r = rados_obj.open(); + int r = rados_obj.open(dpp); if (r < 0) { return r; } librados::ObjectWriteOperation op; ::cls_user_complete_stats_sync(op); - return rados_obj.operate(&op, y); + return rados_obj.operate(dpp, &op, y); } -int RGWSI_User_RADOS::cls_user_get_header(const rgw_user& user, cls_user_header *header, +int RGWSI_User_RADOS::cls_user_get_header(const DoutPrefixProvider *dpp, + const rgw_user& user, cls_user_header *header, optional_yield y) { rgw_raw_obj obj = get_buckets_obj(user); auto rados_obj = svc.rados->obj(obj); - int r = rados_obj.open(); + int r = rados_obj.open(dpp); if (r < 0) { return r; } @@ -862,14 +877,14 @@ int RGWSI_User_RADOS::cls_user_get_header(const rgw_user& user, cls_user_header bufferlist ibl; librados::ObjectReadOperation op; ::cls_user_get_header(op, header, &rc); - return rados_obj.operate(&op, &ibl, y); + return rados_obj.operate(dpp, &op, &ibl, y); } -int RGWSI_User_RADOS::cls_user_get_header_async(const string& user_str, RGWGetUserHeader_CB *cb) +int RGWSI_User_RADOS::cls_user_get_header_async(const DoutPrefixProvider *dpp, const string& user_str, RGWGetUserHeader_CB *cb) { rgw_raw_obj obj = get_buckets_obj(rgw_user(user_str)); auto rados_obj = svc.rados->obj(obj); - int r = rados_obj.open(); + int r = rados_obj.open(dpp); if (r < 0) { return r; } @@ -884,7 +899,8 @@ int RGWSI_User_RADOS::cls_user_get_header_async(const string& user_str, RGWGetUs return 0; } -int RGWSI_User_RADOS::read_stats(RGWSI_MetaBackend::Context *ctx, +int RGWSI_User_RADOS::read_stats(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, RGWStorageStats *stats, ceph::real_time *last_stats_sync, ceph::real_time *last_stats_update, @@ -893,7 +909,7 @@ int RGWSI_User_RADOS::read_stats(RGWSI_MetaBackend::Context *ctx, string user_str = user.to_str(); cls_user_header header; - int r = cls_user_get_header(rgw_user(user_str), &header, y); + int r = cls_user_get_header(dpp, rgw_user(user_str), &header, y); if (r < 0) return r; @@ -939,13 +955,13 @@ public: } }; -int RGWSI_User_RADOS::read_stats_async(RGWSI_MetaBackend::Context *ctx, +int RGWSI_User_RADOS::read_stats_async(const DoutPrefixProvider *dpp, 
RGWSI_MetaBackend::Context *ctx, const rgw_user& user, RGWGetUserStats_CB *_cb) { string user_str = user.to_str(); RGWGetUserStatsContext *cb = new RGWGetUserStatsContext(_cb); - int r = cls_user_get_header_async(user_str, cb); + int r = cls_user_get_header_async(dpp, user_str, cb); if (r < 0) { _cb->put(); delete cb; diff --git a/src/rgw/services/svc_user_rados.h b/src/rgw/services/svc_user_rados.h index 7014e3430ab30..f0b025e9db04c 100644 --- a/src/rgw/services/svc_user_rados.h +++ b/src/rgw/services/svc_user_rados.h @@ -63,24 +63,26 @@ class RGWSI_User_RADOS : public RGWSI_User RGWUserInfo *info, RGWObjVersionTracker * const objv_tracker, real_time * const pmtime, - optional_yield y); + optional_yield y, + const DoutPrefixProvider *dpp); int remove_uid_index(RGWSI_MetaBackend::Context *ctx, const RGWUserInfo& user_info, RGWObjVersionTracker *objv_tracker, - optional_yield y); + optional_yield y, const DoutPrefixProvider *dpp); - int remove_key_index(RGWSI_MetaBackend::Context *ctx, const RGWAccessKey& access_key, optional_yield y); - int remove_email_index(RGWSI_MetaBackend::Context *ctx, const string& email, optional_yield y); - int remove_swift_name_index(RGWSI_MetaBackend::Context *ctx, const string& swift_name, optional_yield y); + int remove_key_index(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *ctx, const RGWAccessKey& access_key, optional_yield y); + int remove_email_index(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *ctx, const string& email, optional_yield y); + int remove_swift_name_index(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *ctx, const string& swift_name, optional_yield y); /* admin management */ - int cls_user_update_buckets(rgw_raw_obj& obj, list& entries, bool add, optional_yield y); - int cls_user_add_bucket(rgw_raw_obj& obj, const cls_user_bucket_entry& entry, optional_yield y); - int cls_user_remove_bucket(rgw_raw_obj& obj, const cls_user_bucket& bucket, optional_yield y); + int cls_user_update_buckets(const DoutPrefixProvider *dpp, rgw_raw_obj& obj, list& entries, bool add, optional_yield y); + int cls_user_add_bucket(const DoutPrefixProvider *dpp, rgw_raw_obj& obj, const cls_user_bucket_entry& entry, optional_yield y); + int cls_user_remove_bucket(const DoutPrefixProvider *dpp, rgw_raw_obj& obj, const cls_user_bucket& bucket, optional_yield y); /* quota stats */ - int cls_user_flush_bucket_stats(rgw_raw_obj& user_obj, + int cls_user_flush_bucket_stats(const DoutPrefixProvider *dpp, rgw_raw_obj& user_obj, const RGWBucketEnt& ent, optional_yield y); - int cls_user_list_buckets(rgw_raw_obj& obj, + int cls_user_list_buckets(const DoutPrefixProvider *dpp, + rgw_raw_obj& obj, const string& in_marker, const string& end_marker, const int max_entries, @@ -89,11 +91,11 @@ class RGWSI_User_RADOS : public RGWSI_User bool * const truncated, optional_yield y); - int cls_user_reset_stats(const rgw_user& user, optional_yield y); - int cls_user_get_header(const rgw_user& user, cls_user_header *header, optional_yield y); - int cls_user_get_header_async(const string& user, RGWGetUserHeader_CB *cb); + int cls_user_reset_stats(const DoutPrefixProvider *dpp, const rgw_user& user, optional_yield y); + int cls_user_get_header(const DoutPrefixProvider *dpp, const rgw_user& user, cls_user_header *header, optional_yield y); + int cls_user_get_header_async(const DoutPrefixProvider *dpp, const string& user, RGWGetUserHeader_CB *cb); - int do_start(optional_yield) override; + int do_start(optional_yield, const DoutPrefixProvider *dpp) override; 
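// --- illustrative aside, not part of the patch ------------------------------
// A minimal sketch of the convention the hunks above converge on: the caller's
// DoutPrefixProvider is threaded down alongside optional_yield, and
// ldpp_dout(dpp, level) takes the place of ldout(cct, level) so the output
// keeps the caller's per-request prefix.  The helper name log_user_op_result
// and its body are invented for illustration only; ldpp_dout, dendl and
// DoutPrefixProvider are the real identifiers used throughout this diff.
#include "common/dout.h"   // DoutPrefixProvider, ldpp_dout, dendl

static int log_user_op_result(const DoutPrefixProvider *dpp, int ret)
{
  if (ret < 0) {
    // same verbosity levels as before; only the source of the prefix changes
    ldpp_dout(dpp, 0) << "ERROR: user op failed: ret=" << ret << dendl;
    return ret;
  }
  ldpp_dout(dpp, 20) << "user op completed" << dendl;
  return 0;
}
// -----------------------------------------------------------------------------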
public: struct Svc { RGWSI_User_RADOS *user{nullptr}; @@ -126,7 +128,8 @@ public: real_time * const pmtime, rgw_cache_entry_info * const cache_info, map * const pattrs, - optional_yield y) override; + optional_yield y, + const DoutPrefixProvider *dpp) override; int store_user_info(RGWSI_MetaBackend::Context *ctx, const RGWUserInfo& info, @@ -135,43 +138,51 @@ public: const real_time& mtime, bool exclusive, map *attrs, - optional_yield y) override; + optional_yield y, + const DoutPrefixProvider *dpp) override; int remove_user_info(RGWSI_MetaBackend::Context *ctx, const RGWUserInfo& info, RGWObjVersionTracker *objv_tracker, - optional_yield y) override; + optional_yield y, + const DoutPrefixProvider *dpp) override; int get_user_info_by_email(RGWSI_MetaBackend::Context *ctx, const string& email, RGWUserInfo *info, RGWObjVersionTracker *objv_tracker, real_time *pmtime, - optional_yield y) override; + optional_yield y, + const DoutPrefixProvider *dpp) override; int get_user_info_by_swift(RGWSI_MetaBackend::Context *ctx, const string& swift_name, RGWUserInfo *info, /* out */ RGWObjVersionTracker * const objv_tracker, real_time * const pmtime, - optional_yield y) override; + optional_yield y, + const DoutPrefixProvider *dpp) override; int get_user_info_by_access_key(RGWSI_MetaBackend::Context *ctx, const std::string& access_key, RGWUserInfo *info, RGWObjVersionTracker* objv_tracker, real_time *pmtime, - optional_yield y) override; + optional_yield y, + const DoutPrefixProvider *dpp) override; /* user buckets directory */ - int add_bucket(RGWSI_MetaBackend::Context *ctx, + int add_bucket(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, const rgw_bucket& bucket, ceph::real_time creation_time, optional_yield y) override; - int remove_bucket(RGWSI_MetaBackend::Context *ctx, + int remove_bucket(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, const rgw_bucket& _bucket, optional_yield y) override; - int list_buckets(RGWSI_MetaBackend::Context *ctx, + int list_buckets(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, const string& marker, const string& end_marker, @@ -181,23 +192,27 @@ public: optional_yield y) override; /* quota related */ - int flush_bucket_stats(RGWSI_MetaBackend::Context *ctx, + int flush_bucket_stats(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, const RGWBucketEnt& ent, optional_yield y) override; - int complete_flush_stats(RGWSI_MetaBackend::Context *ctx, + int complete_flush_stats(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, optional_yield y) override; - int reset_bucket_stats(RGWSI_MetaBackend::Context *ctx, + int reset_bucket_stats(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, optional_yield y) override; - int read_stats(RGWSI_MetaBackend::Context *ctx, + int read_stats(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, RGWStorageStats *stats, ceph::real_time *last_stats_sync, /* last time a full stats sync completed */ ceph::real_time *last_stats_update, optional_yield y) override; /* last time a stats update was done */ - int read_stats_async(RGWSI_MetaBackend::Context *ctx, + int read_stats_async(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *ctx, const rgw_user& user, RGWGetUserStats_CB *cb) override; }; diff --git a/src/rgw/services/svc_zone.cc b/src/rgw/services/svc_zone.cc index 
1e67fe89da667..f98be61af2c65 100644 --- a/src/rgw/services/svc_zone.cc +++ b/src/rgw/services/svc_zone.cc @@ -64,51 +64,51 @@ bool RGWSI_Zone::zone_syncs_from(const RGWZone& target_zone, const RGWZone& sour sync_modules_svc->get_manager()->supports_data_export(source_zone.tier_type); } -int RGWSI_Zone::do_start(optional_yield y) +int RGWSI_Zone::do_start(optional_yield y, const DoutPrefixProvider *dpp) { - int ret = sysobj_svc->start(y); + int ret = sysobj_svc->start(y, dpp); if (ret < 0) { return ret; } assert(sysobj_svc->is_started()); /* if not then there's ordering issue */ - ret = rados_svc->start(y); + ret = rados_svc->start(y, dpp); if (ret < 0) { return ret; } - ret = realm->init(cct, sysobj_svc, y); + ret = realm->init(dpp, cct, sysobj_svc, y); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << "failed reading realm info: ret "<< ret << " " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "failed reading realm info: ret "<< ret << " " << cpp_strerror(-ret) << dendl; return ret; } else if (ret != -ENOENT) { - ldout(cct, 20) << "realm " << realm->get_name() << " " << realm->get_id() << dendl; - ret = current_period->init(cct, sysobj_svc, realm->get_id(), y, + ldpp_dout(dpp, 20) << "realm " << realm->get_name() << " " << realm->get_id() << dendl; + ret = current_period->init(dpp, cct, sysobj_svc, realm->get_id(), y, realm->get_name()); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << "failed reading current period info: " << " " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "failed reading current period info: " << " " << cpp_strerror(-ret) << dendl; return ret; } - ldout(cct, 20) << "current period " << current_period->get_id() << dendl; + ldpp_dout(dpp, 20) << "current period " << current_period->get_id() << dendl; } - ret = replace_region_with_zonegroup(y); + ret = replace_region_with_zonegroup(dpp, y); if (ret < 0) { - lderr(cct) << "failed converting region to zonegroup : ret "<< ret << " " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, -1) << "failed converting region to zonegroup : ret "<< ret << " " << cpp_strerror(-ret) << dendl; return ret; } - ret = convert_regionmap(y); + ret = convert_regionmap(dpp, y); if (ret < 0) { - lderr(cct) << "failed converting regionmap: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, -1) << "failed converting regionmap: " << cpp_strerror(-ret) << dendl; return ret; } bool zg_initialized = false; if (!current_period->get_id().empty()) { - ret = init_zg_from_period(&zg_initialized, y); + ret = init_zg_from_period(dpp, &zg_initialized, y); if (ret < 0) { return ret; } @@ -117,30 +117,30 @@ int RGWSI_Zone::do_start(optional_yield y) bool creating_defaults = false; bool using_local = (!zg_initialized); if (using_local) { - ldout(cct, 10) << " cannot find current period zonegroup using local zonegroup" << dendl; - ret = init_zg_from_local(&creating_defaults, y); + ldpp_dout(dpp, 10) << " cannot find current period zonegroup using local zonegroup" << dendl; + ret = init_zg_from_local(dpp, &creating_defaults, y); if (ret < 0) { return ret; } // read period_config into current_period auto& period_config = current_period->get_config(); - ret = period_config.read(sysobj_svc, zonegroup->realm_id, y); + ret = period_config.read(dpp, sysobj_svc, zonegroup->realm_id, y); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << "ERROR: failed to read period config: " + ldpp_dout(dpp, 0) << "ERROR: failed to read period config: " << cpp_strerror(ret) << dendl; return ret; } } - ldout(cct, 10) << "Cannot find current period zone using local 
zone" << dendl; + ldpp_dout(dpp, 10) << "Cannot find current period zone using local zone" << dendl; if (creating_defaults && cct->_conf->rgw_zone.empty()) { - ldout(cct, 10) << " Using default name "<< default_zone_name << dendl; + ldpp_dout(dpp, 10) << " Using default name "<< default_zone_name << dendl; zone_params->set_name(default_zone_name); } - ret = zone_params->init(cct, sysobj_svc, y); + ret = zone_params->init(dpp, cct, sysobj_svc, y); if (ret < 0 && ret != -ENOENT) { - lderr(cct) << "failed reading zone info: ret "<< ret << " " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, -1) << "failed reading zone info: ret "<< ret << " " << cpp_strerror(-ret) << dendl; return ret; } @@ -149,11 +149,11 @@ int RGWSI_Zone::do_start(optional_yield y) auto zone_iter = zonegroup->zones.find(zone_params->get_id()); if (zone_iter == zonegroup->zones.end()) { if (using_local) { - lderr(cct) << "Cannot find zone id=" << zone_params->get_id() << " (name=" << zone_params->get_name() << ")" << dendl; + ldpp_dout(dpp, -1) << "Cannot find zone id=" << zone_params->get_id() << " (name=" << zone_params->get_name() << ")" << dendl; return -EINVAL; } - ldout(cct, 1) << "Cannot find zone id=" << zone_params->get_id() << " (name=" << zone_params->get_name() << "), switching to local zonegroup configuration" << dendl; - ret = init_zg_from_local(&creating_defaults, y); + ldpp_dout(dpp, 1) << "Cannot find zone id=" << zone_params->get_id() << " (name=" << zone_params->get_name() << "), switching to local zonegroup configuration" << dendl; + ret = init_zg_from_local(dpp, &creating_defaults, y); if (ret < 0) { return ret; } @@ -161,9 +161,9 @@ int RGWSI_Zone::do_start(optional_yield y) } if (zone_iter != zonegroup->zones.end()) { *zone_public_config = zone_iter->second; - ldout(cct, 20) << "zone " << zone_params->get_name() << " found" << dendl; + ldpp_dout(dpp, 20) << "zone " << zone_params->get_name() << " found" << dendl; } else { - lderr(cct) << "Cannot find zone id=" << zone_params->get_id() << " (name=" << zone_params->get_name() << ")" << dendl; + ldpp_dout(dpp, -1) << "Cannot find zone id=" << zone_params->get_id() << " (name=" << zone_params->get_name() << ")" << dendl; return -EINVAL; } @@ -171,9 +171,9 @@ int RGWSI_Zone::do_start(optional_yield y) for (auto ziter : zonegroup->zones) { auto zone_handler = std::make_shared(this, sync_modules_svc, bucket_sync_svc, ziter.second.id); - ret = zone_handler->init(y); + ret = zone_handler->init(dpp, y); if (ret < 0) { - lderr(cct) << "ERROR: could not initialize zone policy handler for zone=" << ziter.second.name << dendl; + ldpp_dout(dpp, -1) << "ERROR: could not initialize zone policy handler for zone=" << ziter.second.name << dendl; return ret; } sync_policy_handlers[ziter.second.id] = zone_handler; @@ -190,7 +190,7 @@ int RGWSI_Zone::do_start(optional_yield y) &target_zones, false); /* relaxed: also get all zones that we allow to sync to/from */ - ret = sync_modules_svc->start(y); + ret = sync_modules_svc->start(y, dpp); if (ret < 0) { return ret; } @@ -198,7 +198,7 @@ int RGWSI_Zone::do_start(optional_yield y) auto sync_modules = sync_modules_svc->get_manager(); RGWSyncModuleRef sm; if (!sync_modules->get_module(zone_public_config->tier_type, &sm)) { - lderr(cct) << "ERROR: tier type not found: " << zone_public_config->tier_type << dendl; + ldpp_dout(dpp, -1) << "ERROR: tier type not found: " << zone_public_config->tier_type << dendl; return -EINVAL; } @@ -214,7 +214,7 @@ int RGWSI_Zone::do_start(optional_yield y) } if (zone_by_id.find(zone_id()) == 
zone_by_id.end()) { - ldout(cct, 0) << "WARNING: could not find zone config in zonegroup for local zone (" << zone_id() << "), will use defaults" << dendl; + ldpp_dout(dpp, 0) << "WARNING: could not find zone config in zonegroup for local zone (" << zone_id() << "), will use defaults" << dendl; } for (const auto& ziter : zonegroup->zones) { @@ -224,10 +224,10 @@ int RGWSI_Zone::do_start(optional_yield y) continue; } if (z.endpoints.empty()) { - ldout(cct, 0) << "WARNING: can't generate connection for zone " << z.id << " id " << z.name << ": no endpoints defined" << dendl; + ldpp_dout(dpp, 0) << "WARNING: can't generate connection for zone " << z.id << " id " << z.name << ": no endpoints defined" << dendl; continue; } - ldout(cct, 20) << "generating connection object for zone " << z.name << " id " << z.id << dendl; + ldpp_dout(dpp, 20) << "generating connection object for zone " << z.name << " id " << z.id << dendl; RGWRESTConn *conn = new RGWRESTConn(cct, this, z.id, z.endpoints); zone_conn_map[id] = conn; @@ -242,11 +242,11 @@ int RGWSI_Zone::do_start(optional_yield y) zone_data_notify_to_map[id] = conn; } } else { - ldout(cct, 20) << "NOTICE: not syncing to/from zone " << z.name << " id " << z.id << dendl; + ldpp_dout(dpp, 20) << "NOTICE: not syncing to/from zone " << z.name << " id " << z.id << dendl; } } - ldout(cct, 20) << "started zone id=" << zone_params->get_id() << " (name=" << zone_params->get_name() << + ldpp_dout(dpp, 20) << "started zone id=" << zone_params->get_id() << " (name=" << zone_params->get_name() << ") with tier type = " << zone_public_config->tier_type << dendl; return 0; @@ -267,44 +267,44 @@ void RGWSI_Zone::shutdown() } } -int RGWSI_Zone::list_regions(list& regions) +int RGWSI_Zone::list_regions(const DoutPrefixProvider *dpp, list& regions) { RGWZoneGroup zonegroup; RGWSI_SysObj::Pool syspool = sysobj_svc->get_pool(zonegroup.get_pool(cct)); - return syspool.list_prefixed_objs(region_info_oid_prefix, ®ions); + return syspool.list_prefixed_objs(dpp, region_info_oid_prefix, ®ions); } -int RGWSI_Zone::list_zonegroups(list& zonegroups) +int RGWSI_Zone::list_zonegroups(const DoutPrefixProvider *dpp, list& zonegroups) { RGWZoneGroup zonegroup; RGWSI_SysObj::Pool syspool = sysobj_svc->get_pool(zonegroup.get_pool(cct)); - return syspool.list_prefixed_objs(zonegroup_names_oid_prefix, &zonegroups); + return syspool.list_prefixed_objs(dpp, zonegroup_names_oid_prefix, &zonegroups); } -int RGWSI_Zone::list_zones(list& zones) +int RGWSI_Zone::list_zones(const DoutPrefixProvider *dpp, list& zones) { RGWZoneParams zoneparams; RGWSI_SysObj::Pool syspool = sysobj_svc->get_pool(zoneparams.get_pool(cct)); - return syspool.list_prefixed_objs(zone_names_oid_prefix, &zones); + return syspool.list_prefixed_objs(dpp, zone_names_oid_prefix, &zones); } -int RGWSI_Zone::list_realms(list& realms) +int RGWSI_Zone::list_realms(const DoutPrefixProvider *dpp, list& realms) { RGWRealm realm(cct, sysobj_svc); RGWSI_SysObj::Pool syspool = sysobj_svc->get_pool(realm.get_pool(cct)); - return syspool.list_prefixed_objs(realm_names_oid_prefix, &realms); + return syspool.list_prefixed_objs(dpp, realm_names_oid_prefix, &realms); } -int RGWSI_Zone::list_periods(list& periods) +int RGWSI_Zone::list_periods(const DoutPrefixProvider *dpp, list& periods) { RGWPeriod period; list raw_periods; RGWSI_SysObj::Pool syspool = sysobj_svc->get_pool(period.get_pool(cct)); - int ret = syspool.list_prefixed_objs(period.get_info_oid_prefix(), &raw_periods); + int ret = syspool.list_prefixed_objs(dpp, 
period.get_info_oid_prefix(), &raw_periods); if (ret < 0) { return ret; } @@ -322,13 +322,13 @@ int RGWSI_Zone::list_periods(list& periods) } -int RGWSI_Zone::list_periods(const string& current_period, list& periods, optional_yield y) +int RGWSI_Zone::list_periods(const DoutPrefixProvider *dpp, const string& current_period, list& periods, optional_yield y) { int ret = 0; string period_id = current_period; while(!period_id.empty()) { RGWPeriod period(period_id); - ret = period.init(cct, sysobj_svc, y); + ret = period.init(dpp, cct, sysobj_svc, y); if (ret < 0) { return ret; } @@ -344,7 +344,7 @@ int RGWSI_Zone::list_periods(const string& current_period, list& periods * backward compatability * Returns 0 on success, -ERR# on failure. */ -int RGWSI_Zone::replace_region_with_zonegroup(optional_yield y) +int RGWSI_Zone::replace_region_with_zonegroup(const DoutPrefixProvider *dpp, optional_yield y) { /* copy default region */ /* convert default region to default zonegroup */ @@ -361,51 +361,51 @@ int RGWSI_Zone::replace_region_with_zonegroup(optional_yield y) RGWSysObjectCtx obj_ctx = sysobj_svc->init_obj_ctx(); RGWSysObj sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj(pool, oid)); - int ret = sysobj.rop().read(&bl, y); + int ret = sysobj.rop().read(dpp, &bl, y); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << __func__ << " failed to read converted: ret "<< ret << " " << cpp_strerror(-ret) + ldpp_dout(dpp, 0) << __func__ << " failed to read converted: ret "<< ret << " " << cpp_strerror(-ret) << dendl; return ret; } else if (ret != -ENOENT) { - ldout(cct, 20) << "System already converted " << dendl; + ldpp_dout(dpp, 20) << "System already converted " << dendl; return 0; } string default_region; - ret = default_zonegroup.init(cct, sysobj_svc, y, false, true); + ret = default_zonegroup.init(dpp, cct, sysobj_svc, y, false, true); if (ret < 0) { - ldout(cct, 0) << __func__ << " failed init default region: ret "<< ret << " " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " failed init default region: ret "<< ret << " " << cpp_strerror(-ret) << dendl; return ret; } - ret = default_zonegroup.read_default_id(default_region, y, true); + ret = default_zonegroup.read_default_id(dpp, default_region, y, true); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << __func__ << " failed reading old default region: ret "<< ret << " " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " failed reading old default region: ret "<< ret << " " << cpp_strerror(-ret) << dendl; return ret; } /* convert regions to zonegroups */ list regions; - ret = list_regions(regions); + ret = list_regions(dpp, regions); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << __func__ << " failed to list regions: ret "<< ret << " " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " failed to list regions: ret "<< ret << " " << cpp_strerror(-ret) << dendl; return ret; } else if (ret == -ENOENT || regions.empty()) { RGWZoneParams zoneparams(default_zone_name); - int ret = zoneparams.init(cct, sysobj_svc, y); + int ret = zoneparams.init(dpp, cct, sysobj_svc, y); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << __func__ << ": error initializing default zone params: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << ": error initializing default zone params: " << cpp_strerror(-ret) << dendl; return ret; } /* update master zone */ RGWZoneGroup default_zg(default_zonegroup_name); - ret = default_zg.init(cct, sysobj_svc, y); + ret = default_zg.init(dpp, cct, sysobj_svc, 
y); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << __func__ << ": error in initializing default zonegroup: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << ": error in initializing default zonegroup: " << cpp_strerror(-ret) << dendl; return ret; } if (ret != -ENOENT && default_zg.master_zone.empty()) { default_zg.master_zone = zoneparams.get_id(); - return default_zg.update(y); + return default_zg.update(dpp, y); } return 0; } @@ -415,9 +415,9 @@ int RGWSI_Zone::replace_region_with_zonegroup(optional_yield y) for (list::iterator iter = regions.begin(); iter != regions.end(); ++iter) { if (*iter != default_zonegroup_name){ RGWZoneGroup region(*iter); - int ret = region.init(cct, sysobj_svc, y, true, true); + int ret = region.init(dpp, cct, sysobj_svc, y, true, true); if (ret < 0) { - ldout(cct, 0) << __func__ << " failed init region "<< *iter << ": " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " failed init region "<< *iter << ": " << cpp_strerror(-ret) << dendl; return ret; } if (region.is_master_zonegroup()) { @@ -440,30 +440,30 @@ int RGWSI_Zone::replace_region_with_zonegroup(optional_yield y) buf_to_hex(md5, CEPH_CRYPTO_MD5_DIGESTSIZE, md5_str); string new_realm_id(md5_str); RGWRealm new_realm(new_realm_id,new_realm_name); - ret = new_realm.init(cct, sysobj_svc, y, false); + ret = new_realm.init(dpp, cct, sysobj_svc, y, false); if (ret < 0) { - ldout(cct, 0) << __func__ << " Error initing new realm: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " Error initing new realm: " << cpp_strerror(-ret) << dendl; return ret; } - ret = new_realm.create(y); + ret = new_realm.create(dpp, y); if (ret < 0 && ret != -EEXIST) { - ldout(cct, 0) << __func__ << " Error creating new realm: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " Error creating new realm: " << cpp_strerror(-ret) << dendl; return ret; } - ret = new_realm.set_as_default(y); + ret = new_realm.set_as_default(dpp, y); if (ret < 0) { - ldout(cct, 0) << __func__ << " Error setting realm as default: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " Error setting realm as default: " << cpp_strerror(-ret) << dendl; return ret; } - ret = realm->init(cct, sysobj_svc, y); + ret = realm->init(dpp, cct, sysobj_svc, y); if (ret < 0) { - ldout(cct, 0) << __func__ << " Error initing realm: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " Error initing realm: " << cpp_strerror(-ret) << dendl; return ret; } - ret = current_period->init(cct, sysobj_svc, realm->get_id(), y, + ret = current_period->init(dpp, cct, sysobj_svc, realm->get_id(), y, realm->get_name()); if (ret < 0) { - ldout(cct, 0) << __func__ << " Error initing current period: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " Error initing current period: " << cpp_strerror(-ret) << dendl; return ret; } } @@ -472,112 +472,112 @@ int RGWSI_Zone::replace_region_with_zonegroup(optional_yield y) /* create zonegroups */ for (iter = regions.begin(); iter != regions.end(); ++iter) { - ldout(cct, 0) << __func__ << " Converting " << *iter << dendl; + ldpp_dout(dpp, 0) << __func__ << " Converting " << *iter << dendl; /* check to see if we don't have already a zonegroup with this name */ RGWZoneGroup new_zonegroup(*iter); - ret = new_zonegroup.init(cct , sysobj_svc, y); + ret = new_zonegroup.init(dpp, cct , sysobj_svc, y); if (ret == 0 && new_zonegroup.get_id() != *iter) { - ldout(cct, 0) << __func__ << " zonegroup "<< *iter << " already exists id 
" << new_zonegroup.get_id () << + ldpp_dout(dpp, 0) << __func__ << " zonegroup "<< *iter << " already exists id " << new_zonegroup.get_id () << " skipping conversion " << dendl; continue; } RGWZoneGroup zonegroup(*iter); zonegroup.set_id(*iter); - int ret = zonegroup.init(cct, sysobj_svc, y, true, true); + int ret = zonegroup.init(dpp, cct, sysobj_svc, y, true, true); if (ret < 0) { - ldout(cct, 0) << __func__ << " failed init zonegroup: ret "<< ret << " " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " failed init zonegroup: ret "<< ret << " " << cpp_strerror(-ret) << dendl; return ret; } zonegroup.realm_id = realm->get_id(); /* fix default region master zone */ if (*iter == default_zonegroup_name && zonegroup.master_zone.empty()) { - ldout(cct, 0) << __func__ << " Setting default zone as master for default region" << dendl; + ldpp_dout(dpp, 0) << __func__ << " Setting default zone as master for default region" << dendl; zonegroup.master_zone = default_zone_name; } - ret = zonegroup.update(y); + ret = zonegroup.update(dpp, y); if (ret < 0 && ret != -EEXIST) { - ldout(cct, 0) << __func__ << " failed to update zonegroup " << *iter << ": ret "<< ret << " " << cpp_strerror(-ret) + ldpp_dout(dpp, 0) << __func__ << " failed to update zonegroup " << *iter << ": ret "<< ret << " " << cpp_strerror(-ret) << dendl; return ret; } - ret = zonegroup.update_name(y); + ret = zonegroup.update_name(dpp, y); if (ret < 0 && ret != -EEXIST) { - ldout(cct, 0) << __func__ << " failed to update_name for zonegroup " << *iter << ": ret "<< ret << " " << cpp_strerror(-ret) + ldpp_dout(dpp, 0) << __func__ << " failed to update_name for zonegroup " << *iter << ": ret "<< ret << " " << cpp_strerror(-ret) << dendl; return ret; } if (zonegroup.get_name() == default_region) { - ret = zonegroup.set_as_default(y); + ret = zonegroup.set_as_default(dpp, y); if (ret < 0) { - ldout(cct, 0) << __func__ << " failed to set_as_default " << *iter << ": ret "<< ret << " " << cpp_strerror(-ret) + ldpp_dout(dpp, 0) << __func__ << " failed to set_as_default " << *iter << ": ret "<< ret << " " << cpp_strerror(-ret) << dendl; return ret; } } for (auto iter = zonegroup.zones.begin(); iter != zonegroup.zones.end(); ++iter) { - ldout(cct, 0) << __func__ << " Converting zone" << iter->first << dendl; + ldpp_dout(dpp, 0) << __func__ << " Converting zone" << iter->first << dendl; RGWZoneParams zoneparams(iter->first, iter->second.name); zoneparams.set_id(iter->first.id); zoneparams.realm_id = realm->get_id(); - ret = zoneparams.init(cct, sysobj_svc, y); + ret = zoneparams.init(dpp, cct, sysobj_svc, y); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << __func__ << " failed to init zoneparams " << iter->first << ": " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " failed to init zoneparams " << iter->first << ": " << cpp_strerror(-ret) << dendl; return ret; } else if (ret == -ENOENT) { - ldout(cct, 0) << __func__ << " zone is part of another cluster " << iter->first << " skipping " << dendl; + ldpp_dout(dpp, 0) << __func__ << " zone is part of another cluster " << iter->first << " skipping " << dendl; continue; } zonegroup.realm_id = realm->get_id(); - ret = zoneparams.update(y); + ret = zoneparams.update(dpp, y); if (ret < 0 && ret != -EEXIST) { - ldout(cct, 0) << __func__ << " failed to update zoneparams " << iter->first << ": " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " failed to update zoneparams " << iter->first << ": " << cpp_strerror(-ret) << dendl; return ret; } - ret 
= zoneparams.update_name(y); + ret = zoneparams.update_name(dpp, y); if (ret < 0 && ret != -EEXIST) { - ldout(cct, 0) << __func__ << " failed to init zoneparams " << iter->first << ": " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " failed to init zoneparams " << iter->first << ": " << cpp_strerror(-ret) << dendl; return ret; } } if (!current_period->get_id().empty()) { - ret = current_period->add_zonegroup(zonegroup, y); + ret = current_period->add_zonegroup(dpp, zonegroup, y); if (ret < 0) { - ldout(cct, 0) << __func__ << " failed to add zonegroup to current_period: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " failed to add zonegroup to current_period: " << cpp_strerror(-ret) << dendl; return ret; } } } if (!current_period->get_id().empty()) { - ret = current_period->update(y); + ret = current_period->update(dpp, y); if (ret < 0) { - ldout(cct, 0) << __func__ << " failed to update new period: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " failed to update new period: " << cpp_strerror(-ret) << dendl; return ret; } - ret = current_period->store_info(false, y); + ret = current_period->store_info(dpp, false, y); if (ret < 0) { - ldout(cct, 0) << __func__ << " failed to store new period: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " failed to store new period: " << cpp_strerror(-ret) << dendl; return ret; } - ret = current_period->reflect(y); + ret = current_period->reflect(dpp, y); if (ret < 0) { - ldout(cct, 0) << __func__ << " failed to update local objects: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " failed to update local objects: " << cpp_strerror(-ret) << dendl; return ret; } } for (auto const& iter : regions) { RGWZoneGroup zonegroup(iter); - int ret = zonegroup.init(cct, sysobj_svc, y, true, true); + int ret = zonegroup.init(dpp, cct, sysobj_svc, y, true, true); if (ret < 0) { - ldout(cct, 0) << __func__ << " failed init zonegroup" << iter << ": ret "<< ret << " " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " failed init zonegroup" << iter << ": ret "<< ret << " " << cpp_strerror(-ret) << dendl; return ret; } - ret = zonegroup.delete_obj(y, true); + ret = zonegroup.delete_obj(dpp, y, true); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << __func__ << " failed to delete region " << iter << ": ret "<< ret << " " << cpp_strerror(-ret) + ldpp_dout(dpp, 0) << __func__ << " failed to delete region " << iter << ": ret "<< ret << " " << cpp_strerror(-ret) << dendl; return ret; } @@ -586,9 +586,9 @@ int RGWSI_Zone::replace_region_with_zonegroup(optional_yield y) /* mark as converted */ ret = sysobj.wop() .set_exclusive(true) - .write(bl, y); + .write(dpp, bl, y); if (ret < 0 ) { - ldout(cct, 0) << __func__ << " failed to mark cluster as converted: ret "<< ret << " " << cpp_strerror(-ret) + ldpp_dout(dpp, 0) << __func__ << " failed to mark cluster as converted: ret "<< ret << " " << cpp_strerror(-ret) << dendl; return ret; } @@ -615,7 +615,7 @@ static void add_new_connection_to_map(map &zonegroup_conn zonegroup_conn_map[zonegroup.get_id()] = new_connection; } -int RGWSI_Zone::init_zg_from_period(bool *initialized, optional_yield y) +int RGWSI_Zone::init_zg_from_period(const DoutPrefixProvider *dpp, bool *initialized, optional_yield y) { *initialized = false; @@ -623,38 +623,38 @@ int RGWSI_Zone::init_zg_from_period(bool *initialized, optional_yield y) return 0; } - int ret = zonegroup->init(cct, sysobj_svc, y); - ldout(cct, 20) << "period 
zonegroup init ret " << ret << dendl; + int ret = zonegroup->init(dpp, cct, sysobj_svc, y); + ldpp_dout(dpp, 20) << "period zonegroup init ret " << ret << dendl; if (ret == -ENOENT) { return 0; } if (ret < 0) { - ldout(cct, 0) << "failed reading zonegroup info: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "failed reading zonegroup info: " << cpp_strerror(-ret) << dendl; return ret; } - ldout(cct, 20) << "period zonegroup name " << zonegroup->get_name() << dendl; + ldpp_dout(dpp, 20) << "period zonegroup name " << zonegroup->get_name() << dendl; map::const_iterator iter = current_period->get_map().zonegroups.find(zonegroup->get_id()); if (iter != current_period->get_map().zonegroups.end()) { - ldout(cct, 20) << "using current period zonegroup " << zonegroup->get_name() << dendl; + ldpp_dout(dpp, 20) << "using current period zonegroup " << zonegroup->get_name() << dendl; *zonegroup = iter->second; - ret = zonegroup->init(cct, sysobj_svc, y, false); + ret = zonegroup->init(dpp, cct, sysobj_svc, y, false); if (ret < 0) { - ldout(cct, 0) << "failed init zonegroup: " << " " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "failed init zonegroup: " << " " << cpp_strerror(-ret) << dendl; return ret; } - ret = zone_params->init(cct, sysobj_svc, y); + ret = zone_params->init(dpp, cct, sysobj_svc, y); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << "failed reading zone params info: " << " " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "failed reading zone params info: " << " " << cpp_strerror(-ret) << dendl; return ret; } if (ret ==-ENOENT && zonegroup->get_name() == default_zonegroup_name) { - ldout(cct, 10) << " Using default name "<< default_zone_name << dendl; + ldpp_dout(dpp, 10) << " Using default name "<< default_zone_name << dendl; zone_params->set_name(default_zone_name); - ret = zone_params->init(cct, sysobj_svc, y); + ret = zone_params->init(dpp, cct, sysobj_svc, y); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << "failed reading zone params info: " << " " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "failed reading zone params info: " << " " << cpp_strerror(-ret) << dendl; return ret; } } @@ -671,31 +671,31 @@ int RGWSI_Zone::init_zg_from_period(bool *initialized, optional_yield y) // fix missing master zone for a single zone zonegroup if (zg.master_zone.empty() && zg.zones.size() == 1) { master = zg.zones.begin(); - ldout(cct, 0) << "zonegroup " << zg.get_name() << " missing master_zone, setting zone " << + ldpp_dout(dpp, 0) << "zonegroup " << zg.get_name() << " missing master_zone, setting zone " << master->second.name << " id:" << master->second.id << " as master" << dendl; if (zonegroup->get_id() == zg.get_id()) { zonegroup->master_zone = master->second.id; - ret = zonegroup->update(y); + ret = zonegroup->update(dpp, y); if (ret < 0) { - ldout(cct, 0) << "error updating zonegroup : " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "error updating zonegroup : " << cpp_strerror(-ret) << dendl; return ret; } } else { RGWZoneGroup fixed_zg(zg.get_id(),zg.get_name()); - ret = fixed_zg.init(cct, sysobj_svc, y); + ret = fixed_zg.init(dpp, cct, sysobj_svc, y); if (ret < 0) { - ldout(cct, 0) << "error initializing zonegroup : " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "error initializing zonegroup : " << cpp_strerror(-ret) << dendl; return ret; } fixed_zg.master_zone = master->second.id; - ret = fixed_zg.update(y); + ret = fixed_zg.update(dpp, y); if (ret < 0) { - ldout(cct, 0) << "error initializing zonegroup : " << 
cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "error initializing zonegroup : " << cpp_strerror(-ret) << dendl; return ret; } } } else { - ldout(cct, 0) << "zonegroup " << zg.get_name() << " missing zone for master_zone=" << + ldpp_dout(dpp, 0) << "zonegroup " << zg.get_name() << " missing zone for master_zone=" << zg.master_zone << dendl; return -EINVAL; } @@ -713,29 +713,29 @@ int RGWSI_Zone::init_zg_from_period(bool *initialized, optional_yield y) return 0; } -int RGWSI_Zone::init_zg_from_local(bool *creating_defaults, optional_yield y) +int RGWSI_Zone::init_zg_from_local(const DoutPrefixProvider *dpp, bool *creating_defaults, optional_yield y) { - int ret = zonegroup->init(cct, sysobj_svc, y); + int ret = zonegroup->init(dpp, cct, sysobj_svc, y); if ( (ret < 0 && ret != -ENOENT) || (ret == -ENOENT && !cct->_conf->rgw_zonegroup.empty())) { - ldout(cct, 0) << "failed reading zonegroup info: ret "<< ret << " " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "failed reading zonegroup info: ret "<< ret << " " << cpp_strerror(-ret) << dendl; return ret; } else if (ret == -ENOENT) { *creating_defaults = true; - ldout(cct, 10) << "Creating default zonegroup " << dendl; - ret = zonegroup->create_default(y); + ldpp_dout(dpp, 10) << "Creating default zonegroup " << dendl; + ret = zonegroup->create_default(dpp, y); if (ret < 0) { - ldout(cct, 0) << "failure in zonegroup create_default: ret "<< ret << " " << cpp_strerror(-ret) + ldpp_dout(dpp, 0) << "failure in zonegroup create_default: ret "<< ret << " " << cpp_strerror(-ret) << dendl; return ret; } - ret = zonegroup->init(cct, sysobj_svc, y); + ret = zonegroup->init(dpp, cct, sysobj_svc, y); if (ret < 0) { - ldout(cct, 0) << "failure in zonegroup create_default: ret "<< ret << " " << cpp_strerror(-ret) + ldpp_dout(dpp, 0) << "failure in zonegroup create_default: ret "<< ret << " " << cpp_strerror(-ret) << dendl; return ret; } } - ldout(cct, 20) << "zonegroup " << zonegroup->get_name() << dendl; + ldpp_dout(dpp, 20) << "zonegroup " << zonegroup->get_name() << dendl; if (zonegroup->is_master_zonegroup()) { // use endpoints from the zonegroup's master zone auto master = zonegroup->zones.find(zonegroup->master_zone); @@ -743,16 +743,16 @@ int RGWSI_Zone::init_zg_from_local(bool *creating_defaults, optional_yield y) // fix missing master zone for a single zone zonegroup if (zonegroup->master_zone.empty() && zonegroup->zones.size() == 1) { master = zonegroup->zones.begin(); - ldout(cct, 0) << "zonegroup " << zonegroup->get_name() << " missing master_zone, setting zone " << + ldpp_dout(dpp, 0) << "zonegroup " << zonegroup->get_name() << " missing master_zone, setting zone " << master->second.name << " id:" << master->second.id << " as master" << dendl; zonegroup->master_zone = master->second.id; - ret = zonegroup->update(y); + ret = zonegroup->update(dpp, y); if (ret < 0) { - ldout(cct, 0) << "error initializing zonegroup : " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "error initializing zonegroup : " << cpp_strerror(-ret) << dendl; return ret; } } else { - ldout(cct, 0) << "zonegroup " << zonegroup->get_name() << " missing zone for " + ldpp_dout(dpp, 0) << "zonegroup " << zonegroup->get_name() << " missing zone for " "master_zone=" << zonegroup->master_zone << dendl; return -EINVAL; } @@ -764,7 +764,7 @@ int RGWSI_Zone::init_zg_from_local(bool *creating_defaults, optional_yield y) return 0; } -int RGWSI_Zone::convert_regionmap(optional_yield y) +int RGWSI_Zone::convert_regionmap(const DoutPrefixProvider *dpp, optional_yield y) 
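// --- illustrative aside, not part of the patch ------------------------------
// Where no req_state or sync environment supplies a DoutPrefixProvider (tests,
// startup and conversion paths like the ones in this file), a DoutPrefix can be
// constructed directly and passed by address, as the test changes near the end
// of this patch do.  The function name, prefix string and subsys value below
// are placeholders for illustration.
#include "common/ceph_context.h"
#include "common/dout.h"

static void example_standalone_caller(CephContext* cct)
{
  const DoutPrefix dp(cct, 1, "svc zone example: ");  // hypothetical prefix
  ldpp_dout(&dp, 20) << "standalone DoutPrefixProvider in use" << dendl;
}
// -----------------------------------------------------------------------------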
{ RGWZoneGroupMap zonegroupmap; @@ -780,7 +780,7 @@ int RGWSI_Zone::convert_regionmap(optional_yield y) RGWSysObjectCtx obj_ctx = sysobj_svc->init_obj_ctx(); RGWSysObj sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj(pool, oid)); - int ret = sysobj.rop().read(&bl, y); + int ret = sysobj.rop().read(dpp, &bl, y); if (ret < 0 && ret != -ENOENT) { return ret; } else if (ret == -ENOENT) { @@ -791,23 +791,23 @@ int RGWSI_Zone::convert_regionmap(optional_yield y) auto iter = bl.cbegin(); decode(zonegroupmap, iter); } catch (buffer::error& err) { - ldout(cct, 0) << "error decoding regionmap from " << pool << ":" << oid << dendl; + ldpp_dout(dpp, 0) << "error decoding regionmap from " << pool << ":" << oid << dendl; return -EIO; } for (map::iterator iter = zonegroupmap.zonegroups.begin(); iter != zonegroupmap.zonegroups.end(); ++iter) { RGWZoneGroup& zonegroup = iter->second; - ret = zonegroup.init(cct, sysobj_svc, y, false); - ret = zonegroup.update(y); + ret = zonegroup.init(dpp, cct, sysobj_svc, y, false); + ret = zonegroup.update(dpp, y); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << "Error could not update zonegroup " << zonegroup.get_name() << ": " << + ldpp_dout(dpp, 0) << "Error could not update zonegroup " << zonegroup.get_name() << ": " << cpp_strerror(-ret) << dendl; return ret; } else if (ret == -ENOENT) { - ret = zonegroup.create(y); + ret = zonegroup.create(dpp, y); if (ret < 0) { - ldout(cct, 0) << "Error could not create " << zonegroup.get_name() << ": " << + ldpp_dout(dpp, 0) << "Error could not create " << zonegroup.get_name() << ": " << cpp_strerror(-ret) << dendl; return ret; } @@ -818,9 +818,9 @@ int RGWSI_Zone::convert_regionmap(optional_yield y) current_period->set_bucket_quota(zonegroupmap.bucket_quota); // remove the region_map so we don't try to convert again - ret = sysobj.wop().remove(y); + ret = sysobj.wop().remove(dpp, y); if (ret < 0) { - ldout(cct, 0) << "Error could not remove " << sysobj.get_obj() + ldpp_dout(dpp, 0) << "Error could not remove " << sysobj.get_obj() << " after upgrading to zonegroup map: " << cpp_strerror(ret) << dendl; return ret; } @@ -998,7 +998,7 @@ bool RGWSI_Zone::is_syncing_bucket_meta(const rgw_bucket& bucket) } -int RGWSI_Zone::select_new_bucket_location(const RGWUserInfo& user_info, const string& zonegroup_id, +int RGWSI_Zone::select_new_bucket_location(const DoutPrefixProvider *dpp, const RGWUserInfo& user_info, const string& zonegroup_id, const rgw_placement_rule& request_rule, rgw_placement_rule *pselected_rule_name, RGWZonePlacementInfo *rule_info, optional_yield y) @@ -1007,7 +1007,7 @@ int RGWSI_Zone::select_new_bucket_location(const RGWUserInfo& user_info, const s RGWZoneGroup zonegroup; int ret = get_zonegroup(zonegroup_id, zonegroup); if (ret < 0) { - ldout(cct, 0) << "could not find zonegroup " << zonegroup_id << " in current period" << dendl; + ldpp_dout(dpp, 0) << "could not find zonegroup " << zonegroup_id << " in current period" << dendl; return ret; } @@ -1020,7 +1020,7 @@ int RGWSI_Zone::select_new_bucket_location(const RGWUserInfo& user_info, const s used_rule = &request_rule; titer = zonegroup.placement_targets.find(request_rule.name); if (titer == zonegroup.placement_targets.end()) { - ldout(cct, 0) << "could not find requested placement id " << request_rule + ldpp_dout(dpp, 0) << "could not find requested placement id " << request_rule << " within zonegroup " << dendl; return -ERR_INVALID_LOCATION_CONSTRAINT; } @@ -1028,19 +1028,19 @@ int RGWSI_Zone::select_new_bucket_location(const RGWUserInfo& user_info, const 
s used_rule = &user_info.default_placement; titer = zonegroup.placement_targets.find(user_info.default_placement.name); if (titer == zonegroup.placement_targets.end()) { - ldout(cct, 0) << "could not find user default placement id " << user_info.default_placement + ldpp_dout(dpp, 0) << "could not find user default placement id " << user_info.default_placement << " within zonegroup " << dendl; return -ERR_INVALID_LOCATION_CONSTRAINT; } } else { if (zonegroup.default_placement.name.empty()) { // zonegroup default rule as fallback, it should not be empty. - ldout(cct, 0) << "misconfiguration, zonegroup default placement id should not be empty." << dendl; + ldpp_dout(dpp, 0) << "misconfiguration, zonegroup default placement id should not be empty." << dendl; return -ERR_ZONEGROUP_DEFAULT_PLACEMENT_MISCONFIGURATION; } else { used_rule = &zonegroup.default_placement; titer = zonegroup.placement_targets.find(zonegroup.default_placement.name); if (titer == zonegroup.placement_targets.end()) { - ldout(cct, 0) << "could not find zonegroup default placement id " << zonegroup.default_placement + ldpp_dout(dpp, 0) << "could not find zonegroup default placement id " << zonegroup.default_placement << " within zonegroup " << dendl; return -ERR_INVALID_LOCATION_CONSTRAINT; } @@ -1050,7 +1050,7 @@ int RGWSI_Zone::select_new_bucket_location(const RGWUserInfo& user_info, const s /* now check tag for the rule, whether user is permitted to use rule */ const auto& target_rule = titer->second; if (!target_rule.user_permitted(user_info.placement_tags)) { - ldout(cct, 0) << "user not permitted to use placement rule " << titer->first << dendl; + ldpp_dout(dpp, 0) << "user not permitted to use placement rule " << titer->first << dendl; return -EPERM; } @@ -1066,17 +1066,17 @@ int RGWSI_Zone::select_new_bucket_location(const RGWUserInfo& user_info, const s *pselected_rule_name = rule; } - return select_bucket_location_by_rule(rule, rule_info, y); + return select_bucket_location_by_rule(dpp, rule, rule_info, y); } -int RGWSI_Zone::select_bucket_location_by_rule(const rgw_placement_rule& location_rule, RGWZonePlacementInfo *rule_info, optional_yield y) +int RGWSI_Zone::select_bucket_location_by_rule(const DoutPrefixProvider *dpp, const rgw_placement_rule& location_rule, RGWZonePlacementInfo *rule_info, optional_yield y) { if (location_rule.name.empty()) { /* we can only reach here if we're trying to set a bucket location from a bucket * created on a different zone, using a legacy / default pool configuration */ if (rule_info) { - return select_legacy_bucket_placement(rule_info, y); + return select_legacy_bucket_placement(dpp, rule_info, y); } return 0; @@ -1090,14 +1090,14 @@ int RGWSI_Zone::select_bucket_location_by_rule(const rgw_placement_rule& locatio auto piter = zone_params->placement_pools.find(location_rule.name); if (piter == zone_params->placement_pools.end()) { /* couldn't find, means we cannot really place data for this bucket in this zone */ - ldout(cct, 0) << "ERROR: This zone does not contain placement rule " + ldpp_dout(dpp, 0) << "ERROR: This zone does not contain placement rule " << location_rule << " present in the zonegroup!" 
<< dendl; return -EINVAL; } auto storage_class = location_rule.get_storage_class(); if (!piter->second.storage_class_exists(storage_class)) { - ldout(cct, 5) << "requested storage class does not exist: " << storage_class << dendl; + ldpp_dout(dpp, 5) << "requested storage class does not exist: " << storage_class << dendl; return -EINVAL; } @@ -1111,13 +1111,13 @@ int RGWSI_Zone::select_bucket_location_by_rule(const rgw_placement_rule& locatio return 0; } -int RGWSI_Zone::select_bucket_placement(const RGWUserInfo& user_info, const string& zonegroup_id, +int RGWSI_Zone::select_bucket_placement(const DoutPrefixProvider *dpp, const RGWUserInfo& user_info, const string& zonegroup_id, const rgw_placement_rule& placement_rule, rgw_placement_rule *pselected_rule, RGWZonePlacementInfo *rule_info, optional_yield y) { if (!zone_params->placement_pools.empty()) { - return select_new_bucket_location(user_info, zonegroup_id, placement_rule, + return select_new_bucket_location(dpp, user_info, zonegroup_id, placement_rule, pselected_rule, rule_info, y); } @@ -1126,13 +1126,13 @@ int RGWSI_Zone::select_bucket_placement(const RGWUserInfo& user_info, const stri } if (rule_info) { - return select_legacy_bucket_placement(rule_info, y); + return select_legacy_bucket_placement(dpp, rule_info, y); } return 0; } -int RGWSI_Zone::select_legacy_bucket_placement(RGWZonePlacementInfo *rule_info, +int RGWSI_Zone::select_legacy_bucket_placement(const DoutPrefixProvider *dpp, RGWZonePlacementInfo *rule_info, optional_yield y) { bufferlist map_bl; @@ -1145,7 +1145,7 @@ int RGWSI_Zone::select_legacy_bucket_placement(RGWZonePlacementInfo *rule_info, auto obj_ctx = sysobj_svc->init_obj_ctx(); auto sysobj = obj_ctx.get_obj(obj); - int ret = sysobj.rop().read(&map_bl, y); + int ret = sysobj.rop().read(dpp, &map_bl, y); if (ret < 0) { goto read_omap; } @@ -1154,12 +1154,12 @@ int RGWSI_Zone::select_legacy_bucket_placement(RGWZonePlacementInfo *rule_info, auto iter = map_bl.cbegin(); decode(m, iter); } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: couldn't decode avail_pools" << dendl; + ldpp_dout(dpp, 0) << "ERROR: couldn't decode avail_pools" << dendl; } read_omap: if (m.empty()) { - ret = sysobj.omap().get_all(&m, y); + ret = sysobj.omap().get_all(dpp, &m, y); write_map = true; } @@ -1173,7 +1173,7 @@ read_omap: ret = rados_svc->pool().create(pools, &retcodes); if (ret < 0) return ret; - ret = sysobj.omap().set(s, bl, y); + ret = sysobj.omap().set(dpp, s, bl, y); if (ret < 0) return ret; m[s] = bl; @@ -1182,9 +1182,9 @@ read_omap: if (write_map) { bufferlist new_bl; encode(m, new_bl); - ret = sysobj.wop().write(new_bl, y); + ret = sysobj.wop().write(dpp, new_bl, y); if (ret < 0) { - ldout(cct, 0) << "WARNING: could not save avail pools map info ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "WARNING: could not save avail pools map info ret=" << ret << dendl; } } @@ -1206,7 +1206,7 @@ read_omap: return 0; } -int RGWSI_Zone::update_placement_map(optional_yield y) +int RGWSI_Zone::update_placement_map(const DoutPrefixProvider *dpp, optional_yield y) { bufferlist header; map m; @@ -1215,21 +1215,21 @@ int RGWSI_Zone::update_placement_map(optional_yield y) auto obj_ctx = sysobj_svc->init_obj_ctx(); auto sysobj = obj_ctx.get_obj(obj); - int ret = sysobj.omap().get_all(&m, y); + int ret = sysobj.omap().get_all(dpp, &m, y); if (ret < 0) return ret; bufferlist new_bl; encode(m, new_bl); - ret = sysobj.wop().write(new_bl, y); + ret = sysobj.wop().write(dpp, new_bl, y); if (ret < 0) { - ldout(cct, 0) << "WARNING: could not save 
avail pools map info ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "WARNING: could not save avail pools map info ret=" << ret << dendl; } return ret; } -int RGWSI_Zone::add_bucket_placement(const rgw_pool& new_pool, optional_yield y) +int RGWSI_Zone::add_bucket_placement(const DoutPrefixProvider *dpp, const rgw_pool& new_pool, optional_yield y) { int ret = rados_svc->pool(new_pool).lookup(); if (ret < 0) { // DNE, or something @@ -1241,29 +1241,29 @@ int RGWSI_Zone::add_bucket_placement(const rgw_pool& new_pool, optional_yield y) auto sysobj = obj_ctx.get_obj(obj); bufferlist empty_bl; - ret = sysobj.omap().set(new_pool.to_str(), empty_bl, y); + ret = sysobj.omap().set(dpp, new_pool.to_str(), empty_bl, y); // don't care about return value - update_placement_map(y); + update_placement_map(dpp, y); return ret; } -int RGWSI_Zone::remove_bucket_placement(const rgw_pool& old_pool, optional_yield y) +int RGWSI_Zone::remove_bucket_placement(const DoutPrefixProvider *dpp, const rgw_pool& old_pool, optional_yield y) { rgw_raw_obj obj(zone_params->domain_root, avail_pools); auto obj_ctx = sysobj_svc->init_obj_ctx(); auto sysobj = obj_ctx.get_obj(obj); - int ret = sysobj.omap().del(old_pool.to_str(), y); + int ret = sysobj.omap().del(dpp, old_pool.to_str(), y); // don't care about return value - update_placement_map(y); + update_placement_map(dpp, y); return ret; } -int RGWSI_Zone::list_placement_set(set& names, optional_yield y) +int RGWSI_Zone::list_placement_set(const DoutPrefixProvider *dpp, set& names, optional_yield y) { bufferlist header; map m; @@ -1271,7 +1271,7 @@ int RGWSI_Zone::list_placement_set(set& names, optional_yield y) rgw_raw_obj obj(zone_params->domain_root, avail_pools); auto obj_ctx = sysobj_svc->init_obj_ctx(); auto sysobj = obj_ctx.get_obj(obj); - int ret = sysobj.omap().get_all(&m, y); + int ret = sysobj.omap().get_all(dpp, &m, y); if (ret < 0) return ret; diff --git a/src/rgw/services/svc_zone.h b/src/rgw/services/svc_zone.h index 539d56d57e06a..9f503df5e4146 100644 --- a/src/rgw/services/svc_zone.h +++ b/src/rgw/services/svc_zone.h @@ -61,15 +61,15 @@ class RGWSI_Zone : public RGWServiceInstance RGWSI_RADOS *_rados_svc, RGWSI_SyncModules *_sync_modules_svc, RGWSI_Bucket_Sync *_bucket_sync_svc); - int do_start(optional_yield y) override; + int do_start(optional_yield y, const DoutPrefixProvider *dpp) override; void shutdown() override; - int replace_region_with_zonegroup(optional_yield y); - int init_zg_from_period(bool *initialized, optional_yield y); - int init_zg_from_local(bool *creating_defaults, optional_yield y); - int convert_regionmap(optional_yield y); + int replace_region_with_zonegroup(const DoutPrefixProvider *dpp, optional_yield y); + int init_zg_from_period(const DoutPrefixProvider *dpp, bool *initialized, optional_yield y); + int init_zg_from_local(const DoutPrefixProvider *dpp, bool *creating_defaults, optional_yield y); + int convert_regionmap(const DoutPrefixProvider *dpp, optional_yield y); - int update_placement_map(optional_yield y); + int update_placement_map(const DoutPrefixProvider *dpp, optional_yield y); public: RGWSI_Zone(CephContext *cct); ~RGWSI_Zone(); @@ -124,19 +124,19 @@ public: RGWRESTConn *get_zone_conn_by_name(const string& name); bool find_zone_id_by_name(const string& name, rgw_zone_id *id); - int select_bucket_placement(const RGWUserInfo& user_info, const string& zonegroup_id, + int select_bucket_placement(const DoutPrefixProvider *dpp, const RGWUserInfo& user_info, const string& zonegroup_id, const rgw_placement_rule& rule, 
rgw_placement_rule *pselected_rule, RGWZonePlacementInfo *rule_info, optional_yield y); - int select_legacy_bucket_placement(RGWZonePlacementInfo *rule_info, optional_yield y); - int select_new_bucket_location(const RGWUserInfo& user_info, const string& zonegroup_id, + int select_legacy_bucket_placement(const DoutPrefixProvider *dpp, RGWZonePlacementInfo *rule_info, optional_yield y); + int select_new_bucket_location(const DoutPrefixProvider *dpp, const RGWUserInfo& user_info, const string& zonegroup_id, const rgw_placement_rule& rule, rgw_placement_rule *pselected_rule_name, RGWZonePlacementInfo *rule_info, optional_yield y); - int select_bucket_location_by_rule(const rgw_placement_rule& location_rule, RGWZonePlacementInfo *rule_info, optional_yield y); + int select_bucket_location_by_rule(const DoutPrefixProvider *dpp, const rgw_placement_rule& location_rule, RGWZonePlacementInfo *rule_info, optional_yield y); - int add_bucket_placement(const rgw_pool& new_pool, optional_yield y); - int remove_bucket_placement(const rgw_pool& old_pool, optional_yield y); - int list_placement_set(set& names, optional_yield y); + int add_bucket_placement(const DoutPrefixProvider *dpp, const rgw_pool& new_pool, optional_yield y); + int remove_bucket_placement(const DoutPrefixProvider *dpp, const rgw_pool& old_pool, optional_yield y); + int list_placement_set(const DoutPrefixProvider *dpp, set& names, optional_yield y); bool is_meta_master() const; @@ -146,10 +146,10 @@ public: bool can_reshard() const; bool is_syncing_bucket_meta(const rgw_bucket& bucket); - int list_zonegroups(list& zonegroups); - int list_regions(list& regions); - int list_zones(list& zones); - int list_realms(list& realms); - int list_periods(list& periods); - int list_periods(const string& current_period, list& periods, optional_yield y); + int list_zonegroups(const DoutPrefixProvider *dpp, list& zonegroups); + int list_regions(const DoutPrefixProvider *dpp, list& regions); + int list_zones(const DoutPrefixProvider *dpp, list& zones); + int list_realms(const DoutPrefixProvider *dpp, list& realms); + int list_periods(const DoutPrefixProvider *dpp, list& periods); + int list_periods(const DoutPrefixProvider *dpp, const string& current_period, list& periods, optional_yield y); }; diff --git a/src/rgw/services/svc_zone_utils.cc b/src/rgw/services/svc_zone_utils.cc index 55e02e061005a..b292d7b992f07 100644 --- a/src/rgw/services/svc_zone_utils.cc +++ b/src/rgw/services/svc_zone_utils.cc @@ -7,7 +7,7 @@ #include "rgw/rgw_zone.h" -int RGWSI_ZoneUtils::do_start(optional_yield) +int RGWSI_ZoneUtils::do_start(optional_yield, const DoutPrefixProvider *dpp) { init_unique_trans_id_deps(); diff --git a/src/rgw/services/svc_zone_utils.h b/src/rgw/services/svc_zone_utils.h index f596ec64754d9..24abe528e53b3 100644 --- a/src/rgw/services/svc_zone_utils.h +++ b/src/rgw/services/svc_zone_utils.h @@ -24,7 +24,7 @@ class RGWSI_ZoneUtils : public RGWServiceInstance zone_svc = _zone_svc; } - int do_start(optional_yield) override; + int do_start(optional_yield, const DoutPrefixProvider *dpp) override; void init_unique_trans_id_deps(); diff --git a/src/test/rgw/test_cls_fifo_legacy.cc b/src/test/rgw/test_cls_fifo_legacy.cc index 26d9e9a9253e4..02678c5221275 100644 --- a/src/test/rgw/test_cls_fifo_legacy.cc +++ b/src/test/rgw/test_cls_fifo_legacy.cc @@ -19,6 +19,7 @@ #include "include/scope_guard.h" #include "include/types.h" #include "include/rados/librados.hpp" +#include "common/ceph_context.h" #include "cls/fifo/cls_fifo_ops.h" #include 
"test/librados/test_cxx.h" @@ -34,8 +35,11 @@ namespace cb = ceph::buffer; namespace fifo = rados::cls::fifo; namespace RCf = rgw::cls::fifo; +auto cct = new CephContext(CEPH_ENTITY_TYPE_CLIENT); +const DoutPrefix dp(cct, 1, "test legacy cls fifo: "); + namespace { -int fifo_create(R::IoCtx& ioctx, +int fifo_create(const DoutPrefixProvider *dpp, R::IoCtx& ioctx, const std::string& oid, std::string_view id, optional_yield y, @@ -48,7 +52,7 @@ int fifo_create(R::IoCtx& ioctx, R::ObjectWriteOperation op; RCf::create_meta(&op, id, objv, oid_prefix, exclusive, max_part_size, max_entry_size); - return rgw_rados_operate(ioctx, oid, &op, y); + return rgw_rados_operate(dpp, ioctx, oid, &op, y); } } @@ -74,54 +78,54 @@ using AioLegacyFIFO = LegacyFIFO; TEST_F(LegacyClsFIFO, TestCreate) { - auto r = fifo_create(ioctx, fifo_id, ""s, null_yield); + auto r = fifo_create(&dp, ioctx, fifo_id, ""s, null_yield); EXPECT_EQ(-EINVAL, r); - r = fifo_create(ioctx, fifo_id, fifo_id, null_yield, std::nullopt, + r = fifo_create(&dp, ioctx, fifo_id, fifo_id, null_yield, std::nullopt, std::nullopt, false, 0); EXPECT_EQ(-EINVAL, r); - r = fifo_create(ioctx, fifo_id, {}, null_yield, + r = fifo_create(&dp, ioctx, fifo_id, {}, null_yield, std::nullopt, std::nullopt, false, RCf::default_max_part_size, 0); EXPECT_EQ(-EINVAL, r); - r = fifo_create(ioctx, fifo_id, fifo_id, null_yield); + r = fifo_create(&dp, ioctx, fifo_id, fifo_id, null_yield); EXPECT_EQ(0, r); std::uint64_t size; ioctx.stat(fifo_id, &size, nullptr); EXPECT_GT(size, 0); /* test idempotency */ - r = fifo_create(ioctx, fifo_id, fifo_id, null_yield); + r = fifo_create(&dp, ioctx, fifo_id, fifo_id, null_yield); EXPECT_EQ(0, r); - r = fifo_create(ioctx, fifo_id, {}, null_yield, std::nullopt, + r = fifo_create(&dp, ioctx, fifo_id, {}, null_yield, std::nullopt, std::nullopt, false); EXPECT_EQ(-EINVAL, r); - r = fifo_create(ioctx, fifo_id, {}, null_yield, std::nullopt, + r = fifo_create(&dp, ioctx, fifo_id, {}, null_yield, std::nullopt, "myprefix"sv, false); EXPECT_EQ(-EINVAL, r); - r = fifo_create(ioctx, fifo_id, "foo"sv, null_yield, + r = fifo_create(&dp, ioctx, fifo_id, "foo"sv, null_yield, std::nullopt, std::nullopt, false); EXPECT_EQ(-EEXIST, r); } TEST_F(LegacyClsFIFO, TestGetInfo) { - auto r = fifo_create(ioctx, fifo_id, fifo_id, null_yield); + auto r = fifo_create(&dp, ioctx, fifo_id, fifo_id, null_yield); fifo::info info; std::uint32_t part_header_size; std::uint32_t part_entry_overhead; - r = RCf::get_meta(ioctx, fifo_id, std::nullopt, &info, &part_header_size, + r = RCf::get_meta(&dp, ioctx, fifo_id, std::nullopt, &info, &part_header_size, &part_entry_overhead, 0, null_yield); EXPECT_EQ(0, r); EXPECT_GT(part_header_size, 0); EXPECT_GT(part_entry_overhead, 0); EXPECT_FALSE(info.version.instance.empty()); - r = RCf::get_meta(ioctx, fifo_id, info.version, &info, &part_header_size, + r = RCf::get_meta(&dp, ioctx, fifo_id, info.version, &info, &part_header_size, &part_entry_overhead, 0, null_yield); EXPECT_EQ(0, r); fifo::objv objv; objv.instance = "foo"; objv.ver = 12; - r = RCf::get_meta(ioctx, fifo_id, objv, &info, &part_header_size, + r = RCf::get_meta(&dp, ioctx, fifo_id, objv, &info, &part_header_size, &part_entry_overhead, 0, null_yield); EXPECT_EQ(-ECANCELED, r); } @@ -129,10 +133,10 @@ TEST_F(LegacyClsFIFO, TestGetInfo) TEST_F(LegacyFIFO, TestOpenDefault) { std::unique_ptr fifo; - auto r = RCf::FIFO::create(ioctx, fifo_id, &fifo, null_yield); + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &fifo, null_yield); ASSERT_EQ(0, r); // force reading from 
backend - r = fifo->read_meta(null_yield); + r = fifo->read_meta(&dp, null_yield); EXPECT_EQ(0, r); auto info = fifo->meta(); EXPECT_EQ(info.id, fifo_id); @@ -149,12 +153,12 @@ TEST_F(LegacyFIFO, TestOpenParams) /* first successful create */ std::unique_ptr f; - auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield, objv, oid_prefix, + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, objv, oid_prefix, false, max_part_size, max_entry_size); ASSERT_EQ(0, r); /* force reading from backend */ - r = f->read_meta(null_yield); + r = f->read_meta(&dp, null_yield); auto info = f->meta(); EXPECT_EQ(info.id, fifo_id); EXPECT_EQ(info.params.max_part_size, max_part_size); @@ -177,13 +181,13 @@ std::pair decode_entry(const RCf::list_entry& entry) TEST_F(LegacyFIFO, TestPushListTrim) { std::unique_ptr f; - auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield); + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield); ASSERT_EQ(0, r); static constexpr auto max_entries = 10u; for (uint32_t i = 0; i < max_entries; ++i) { cb::list bl; encode(i, bl); - r = f->push(bl, null_yield); + r = f->push(&dp, bl, null_yield); ASSERT_EQ(0, r); } @@ -193,7 +197,7 @@ TEST_F(LegacyFIFO, TestPushListTrim) bool more = false; for (auto i = 0u; i < max_entries; ++i) { - r = f->list(1, marker, &result, &more, null_yield); + r = f->list(&dp, 1, marker, &result, &more, null_yield); ASSERT_EQ(0, r); bool expected_more = (i != (max_entries - 1)); @@ -210,7 +214,7 @@ TEST_F(LegacyFIFO, TestPushListTrim) /* get all entries at once */ std::string markers[max_entries]; std::uint32_t min_entry = 0; - r = f->list(max_entries * 10, std::nullopt, &result, &more, null_yield); + r = f->list(&dp, max_entries * 10, std::nullopt, &result, &more, null_yield); ASSERT_EQ(0, r); ASSERT_FALSE(more); @@ -222,11 +226,11 @@ TEST_F(LegacyFIFO, TestPushListTrim) } /* trim one entry */ - r = f->trim(markers[min_entry], false, null_yield); + r = f->trim(&dp, markers[min_entry], false, null_yield); ASSERT_EQ(0, r); ++min_entry; - r = f->list(max_entries * 10, std::nullopt, &result, &more, null_yield); + r = f->list(&dp, max_entries * 10, std::nullopt, &result, &more, null_yield); ASSERT_EQ(0, r); ASSERT_FALSE(more); ASSERT_EQ(max_entries - min_entry, result.size()); @@ -246,7 +250,7 @@ TEST_F(LegacyFIFO, TestPushTooBig) static constexpr auto max_entry_size = 128ull; std::unique_ptr f; - auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield, std::nullopt, + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt, std::nullopt, false, max_part_size, max_entry_size); ASSERT_EQ(0, r); @@ -256,7 +260,7 @@ TEST_F(LegacyFIFO, TestPushTooBig) cb::list bl; bl.append(buf, sizeof(buf)); - r = f->push(bl, null_yield); + r = f->push(&dp, bl, null_yield); EXPECT_EQ(-E2BIG, r); } @@ -266,7 +270,7 @@ TEST_F(LegacyFIFO, TestMultipleParts) static constexpr auto max_part_size = 2048ull; static constexpr auto max_entry_size = 128ull; std::unique_ptr f; - auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield, std::nullopt, + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt, std::nullopt, false, max_part_size, max_entry_size); ASSERT_EQ(0, r); @@ -283,7 +287,7 @@ TEST_F(LegacyFIFO, TestMultipleParts) cb::list bl; *(int *)buf = i; bl.append(buf, sizeof(buf)); - r = f->push(bl, null_yield); + r = f->push(&dp, bl, null_yield); ASSERT_EQ(0, r); } @@ -295,7 +299,7 @@ TEST_F(LegacyFIFO, TestMultipleParts) /* list all at once */ std::vector result; bool more = false; - r = f->list(max_entries, 
std::nullopt, &result, &more, null_yield); + r = f->list(&dp, max_entries, std::nullopt, &result, &more, null_yield); ASSERT_EQ(0, r); EXPECT_EQ(false, more); ASSERT_EQ(max_entries, result.size()); @@ -309,7 +313,7 @@ TEST_F(LegacyFIFO, TestMultipleParts) /* get entries one by one */ for (auto i = 0u; i < max_entries; ++i) { - r = f->list(1, marker, &result, &more, null_yield); + r = f->list(&dp, 1, marker, &result, &more, null_yield); ASSERT_EQ(0, r); ASSERT_EQ(result.size(), 1); const bool expected_more = (i != (max_entries - 1)); @@ -328,14 +332,14 @@ TEST_F(LegacyFIFO, TestMultipleParts) marker.reset(); for (auto i = 0u; i < max_entries; ++i) { /* read single entry */ - r = f->list(1, marker, &result, &more, null_yield); + r = f->list(&dp, 1, marker, &result, &more, null_yield); ASSERT_EQ(0, r); ASSERT_EQ(result.size(), 1); const bool expected_more = (i != (max_entries - 1)); ASSERT_EQ(expected_more, more); marker = result.front().marker; - r = f->trim(*marker, false, null_yield); + r = f->trim(&dp, *marker, false, null_yield); ASSERT_EQ(0, r); /* check tail */ @@ -343,7 +347,7 @@ TEST_F(LegacyFIFO, TestMultipleParts) ASSERT_EQ(info.tail_part_num, i / entries_per_part); /* try to read all again, see how many entries left */ - r = f->list(max_entries, marker, &result, &more, null_yield); + r = f->list(&dp, max_entries, marker, &result, &more, null_yield); ASSERT_EQ(max_entries - i - 1, result.size()); ASSERT_EQ(false, more); } @@ -355,11 +359,11 @@ TEST_F(LegacyFIFO, TestMultipleParts) RCf::part_info partinfo; /* check old tails are removed */ for (auto i = 0; i < info.tail_part_num; ++i) { - r = f->get_part_info(i, &partinfo, null_yield); + r = f->get_part_info(&dp, i, &partinfo, null_yield); ASSERT_EQ(-ENOENT, r); } /* check current tail exists */ - r = f->get_part_info(info.tail_part_num, &partinfo, null_yield); + r = f->get_part_info(&dp, info.tail_part_num, &partinfo, null_yield); ASSERT_EQ(0, r); } @@ -369,7 +373,7 @@ TEST_F(LegacyFIFO, TestTwoPushers) static constexpr auto max_entry_size = 128ull; std::unique_ptr f; - auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield, std::nullopt, + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt, std::nullopt, false, max_part_size, max_entry_size); ASSERT_EQ(0, r); @@ -381,7 +385,7 @@ TEST_F(LegacyFIFO, TestTwoPushers) (max_entry_size + part_entry_overhead)); const auto max_entries = entries_per_part * 4 + 1; std::unique_ptr f2; - r = RCf::FIFO::open(ioctx, fifo_id, &f2, null_yield); + r = RCf::FIFO::open(&dp, ioctx, fifo_id, &f2, null_yield); std::vector fifos{&f, &f2}; for (auto i = 0u; i < max_entries; ++i) { @@ -389,19 +393,19 @@ TEST_F(LegacyFIFO, TestTwoPushers) *(int *)buf = i; bl.append(buf, sizeof(buf)); auto& f = *fifos[i % fifos.size()]; - r = f->push(bl, null_yield); + r = f->push(&dp, bl, null_yield); ASSERT_EQ(0, r); } /* list all by both */ std::vector result; bool more = false; - r = f2->list(max_entries, std::nullopt, &result, &more, null_yield); + r = f2->list(&dp, max_entries, std::nullopt, &result, &more, null_yield); ASSERT_EQ(0, r); ASSERT_EQ(false, more); ASSERT_EQ(max_entries, result.size()); - r = f2->list(max_entries, std::nullopt, &result, &more, null_yield); + r = f2->list(&dp, max_entries, std::nullopt, &result, &more, null_yield); ASSERT_EQ(0, r); ASSERT_EQ(false, more); ASSERT_EQ(max_entries, result.size()); @@ -417,7 +421,7 @@ TEST_F(LegacyFIFO, TestTwoPushersTrim) static constexpr auto max_part_size = 2048ull; static constexpr auto max_entry_size = 128ull; std::unique_ptr f1; 
- auto r = RCf::FIFO::create(ioctx, fifo_id, &f1, null_yield, std::nullopt, + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f1, null_yield, std::nullopt, std::nullopt, false, max_part_size, max_entry_size); ASSERT_EQ(0, r); @@ -431,7 +435,7 @@ TEST_F(LegacyFIFO, TestTwoPushersTrim) const auto max_entries = entries_per_part * 4 + 1; std::unique_ptr f2; - r = RCf::FIFO::open(ioctx, fifo_id, &f2, null_yield); + r = RCf::FIFO::open(&dp, ioctx, fifo_id, &f2, null_yield); ASSERT_EQ(0, r); /* push one entry to f2 and the rest to f1 */ @@ -440,7 +444,7 @@ TEST_F(LegacyFIFO, TestTwoPushersTrim) *(int *)buf = i; bl.append(buf, sizeof(buf)); auto& f = (i < 1 ? f2 : f1); - r = f->push(bl, null_yield); + r = f->push(&dp, bl, null_yield); ASSERT_EQ(0, r); } @@ -449,7 +453,7 @@ TEST_F(LegacyFIFO, TestTwoPushersTrim) std::string marker; std::vector result; bool more = false; - r = f1->list(num, std::nullopt, &result, &more, null_yield); + r = f1->list(&dp, num, std::nullopt, &result, &more, null_yield); ASSERT_EQ(0, r); ASSERT_EQ(true, more); ASSERT_EQ(num, result.size()); @@ -461,11 +465,11 @@ TEST_F(LegacyFIFO, TestTwoPushersTrim) auto& entry = result[num - 1]; marker = entry.marker; - r = f1->trim(marker, false, null_yield); + r = f1->trim(&dp, marker, false, null_yield); /* list what's left by fifo2 */ const auto left = max_entries - num; - f2->list(left, marker, &result, &more, null_yield); + f2->list(&dp, left, marker, &result, &more, null_yield); ASSERT_EQ(left, result.size()); ASSERT_EQ(false, more); @@ -481,7 +485,7 @@ TEST_F(LegacyFIFO, TestPushBatch) static constexpr auto max_entry_size = 128ull; std::unique_ptr f; - auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield, std::nullopt, + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt, std::nullopt, false, max_part_size, max_entry_size); ASSERT_EQ(0, r); @@ -501,14 +505,14 @@ TEST_F(LegacyFIFO, TestPushBatch) } ASSERT_EQ(max_entries, bufs.size()); - r = f->push(bufs, null_yield); + r = f->push(&dp, bufs, null_yield); ASSERT_EQ(0, r); /* list all */ std::vector result; bool more = false; - r = f->list(max_entries, std::nullopt, &result, &more, null_yield); + r = f->list(&dp, max_entries, std::nullopt, &result, &more, null_yield); ASSERT_EQ(0, r); ASSERT_EQ(false, more); ASSERT_EQ(max_entries, result.size()); @@ -525,7 +529,7 @@ TEST_F(LegacyFIFO, TestAioTrim) static constexpr auto max_part_size = 2048ull; static constexpr auto max_entry_size = 128ull; std::unique_ptr f; - auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield, std::nullopt, + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt, std::nullopt, false, max_part_size, max_entry_size); ASSERT_EQ(0, r); @@ -547,7 +551,7 @@ TEST_F(LegacyFIFO, TestAioTrim) } ASSERT_EQ(max_entries, bufs.size()); - r = f->push(bufs, null_yield); + r = f->push(&dp, bufs, null_yield); ASSERT_EQ(0, r); auto info = f->meta(); @@ -558,7 +562,7 @@ TEST_F(LegacyFIFO, TestAioTrim) /* list all at once */ std::vector result; bool more = false; - r = f->list(max_entries, std::nullopt, &result, &more, null_yield); + r = f->list(&dp, max_entries, std::nullopt, &result, &more, null_yield); ASSERT_EQ(0, r); ASSERT_EQ(false, more); ASSERT_EQ(max_entries, result.size()); @@ -570,7 +574,7 @@ TEST_F(LegacyFIFO, TestAioTrim) marker.reset(); for (auto i = 0u; i < max_entries; ++i) { /* read single entry */ - r = f->list(1, marker, &result, &more, null_yield); + r = f->list(&dp, 1, marker, &result, &more, null_yield); ASSERT_EQ(0, r); ASSERT_EQ(result.size(), 1); 
const bool expected_more = (i != (max_entries - 1)); @@ -579,7 +583,7 @@ TEST_F(LegacyFIFO, TestAioTrim) marker = result.front().marker; std::unique_ptr c(rados.aio_create_completion(nullptr, nullptr)); - f->trim(*marker, false, c.get()); + f->trim(&dp, *marker, false, c.get()); c->wait_for_complete(); r = c->get_return_value(); ASSERT_EQ(0, r); @@ -589,7 +593,7 @@ TEST_F(LegacyFIFO, TestAioTrim) ASSERT_EQ(info.tail_part_num, i / entries_per_part); /* try to read all again, see how many entries left */ - r = f->list(max_entries, marker, &result, &more, null_yield); + r = f->list(&dp, max_entries, marker, &result, &more, null_yield); ASSERT_EQ(max_entries - i - 1, result.size()); ASSERT_EQ(false, more); } @@ -601,17 +605,17 @@ TEST_F(LegacyFIFO, TestAioTrim) RCf::part_info partinfo; /* check old tails are removed */ for (auto i = 0; i < info.tail_part_num; ++i) { - r = f->get_part_info(i, &partinfo, null_yield); + r = f->get_part_info(&dp, i, &partinfo, null_yield); ASSERT_EQ(-ENOENT, r); } /* check current tail exists */ - r = f->get_part_info(info.tail_part_num, &partinfo, null_yield); + r = f->get_part_info(&dp, info.tail_part_num, &partinfo, null_yield); ASSERT_EQ(0, r); } TEST_F(LegacyFIFO, TestTrimExclusive) { std::unique_ptr f; - auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield); + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield); ASSERT_EQ(0, r); std::vector result; bool more = false; @@ -620,28 +624,28 @@ TEST_F(LegacyFIFO, TestTrimExclusive) { for (uint32_t i = 0; i < max_entries; ++i) { cb::list bl; encode(i, bl); - f->push(bl, null_yield); + f->push(&dp, bl, null_yield); } - f->list(1, std::nullopt, &result, &more, null_yield); + f->list(&dp, 1, std::nullopt, &result, &more, null_yield); auto [val, marker] = decode_entry(result.front()); ASSERT_EQ(0, val); - f->trim(marker, true, null_yield); + f->trim(&dp, marker, true, null_yield); result.clear(); - f->list(max_entries, std::nullopt, &result, &more, null_yield); + f->list(&dp, max_entries, std::nullopt, &result, &more, null_yield); std::tie(val, marker) = decode_entry(result.front()); ASSERT_EQ(0, val); - f->trim(result[4].marker, true, null_yield); + f->trim(&dp, result[4].marker, true, null_yield); result.clear(); - f->list(max_entries, std::nullopt, &result, &more, null_yield); + f->list(&dp, max_entries, std::nullopt, &result, &more, null_yield); std::tie(val, marker) = decode_entry(result.front()); ASSERT_EQ(4, val); - f->trim(result.back().marker, true, null_yield); + f->trim(&dp, result.back().marker, true, null_yield); result.clear(); - f->list(max_entries, std::nullopt, &result, &more, null_yield); + f->list(&dp, max_entries, std::nullopt, &result, &more, null_yield); std::tie(val, marker) = decode_entry(result.front()); ASSERT_EQ(result.size(), 1); ASSERT_EQ(max_entries - 1, val); @@ -650,14 +654,14 @@ TEST_F(LegacyFIFO, TestTrimExclusive) { TEST_F(AioLegacyFIFO, TestPushListTrim) { std::unique_ptr f; - auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield); + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield); ASSERT_EQ(0, r); static constexpr auto max_entries = 10u; for (uint32_t i = 0; i < max_entries; ++i) { cb::list bl; encode(i, bl); auto c = R::Rados::aio_create_completion(); - f->push(bl, c); + f->push(&dp, bl, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -670,7 +674,7 @@ TEST_F(AioLegacyFIFO, TestPushListTrim) bool more = false; for (auto i = 0u; i < max_entries; ++i) { auto c = R::Rados::aio_create_completion(); - f->list(1, marker, 
&result, &more, c); + f->list(&dp, 1, marker, &result, &more, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -691,7 +695,7 @@ TEST_F(AioLegacyFIFO, TestPushListTrim) std::string markers[max_entries]; std::uint32_t min_entry = 0; auto c = R::Rados::aio_create_completion(); - f->list(max_entries * 10, std::nullopt, &result, &more, c); + f->list(&dp, max_entries * 10, std::nullopt, &result, &more, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -707,7 +711,7 @@ TEST_F(AioLegacyFIFO, TestPushListTrim) /* trim one entry */ c = R::Rados::aio_create_completion(); - f->trim(markers[min_entry], false, c); + f->trim(&dp, markers[min_entry], false, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -715,7 +719,7 @@ TEST_F(AioLegacyFIFO, TestPushListTrim) ++min_entry; c = R::Rados::aio_create_completion(); - f->list(max_entries * 10, std::nullopt, &result, &more, c); + f->list(&dp, max_entries * 10, std::nullopt, &result, &more, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -738,7 +742,7 @@ TEST_F(AioLegacyFIFO, TestPushTooBig) static constexpr auto max_entry_size = 128ull; std::unique_ptr f; - auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield, std::nullopt, + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt, std::nullopt, false, max_part_size, max_entry_size); ASSERT_EQ(0, r); @@ -749,14 +753,14 @@ TEST_F(AioLegacyFIFO, TestPushTooBig) bl.append(buf, sizeof(buf)); auto c = R::Rados::aio_create_completion(); - f->push(bl, c); + f->push(&dp, bl, c); c->wait_for_complete(); r = c->get_return_value(); ASSERT_EQ(-E2BIG, r); c->release(); c = R::Rados::aio_create_completion(); - f->push(std::vector{}, c); + f->push(&dp, std::vector{}, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -769,14 +773,14 @@ TEST_F(AioLegacyFIFO, TestMultipleParts) static constexpr auto max_part_size = 2048ull; static constexpr auto max_entry_size = 128ull; std::unique_ptr f; - auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield, std::nullopt, + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt, std::nullopt, false, max_part_size, max_entry_size); ASSERT_EQ(0, r); { auto c = R::Rados::aio_create_completion(); - f->get_head_info([&](int r, RCf::part_info&& p) { + f->get_head_info(&dp, [&](int r, RCf::part_info&& p) { ASSERT_TRUE(p.tag.empty()); ASSERT_EQ(0, p.magic); ASSERT_EQ(0, p.min_ofs); @@ -804,7 +808,7 @@ TEST_F(AioLegacyFIFO, TestMultipleParts) *(int *)buf = i; bl.append(buf, sizeof(buf)); auto c = R::Rados::aio_create_completion(); - f->push(bl, c); + f->push(&dp, bl, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -820,7 +824,7 @@ TEST_F(AioLegacyFIFO, TestMultipleParts) std::vector result; bool more = false; auto c = R::Rados::aio_create_completion(); - f->list(max_entries, std::nullopt, &result, &more, c); + f->list(&dp, max_entries, std::nullopt, &result, &more, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -838,7 +842,7 @@ TEST_F(AioLegacyFIFO, TestMultipleParts) for (auto i = 0u; i < max_entries; ++i) { c = R::Rados::aio_create_completion(); - f->list(1, marker, &result, &more, c); + f->list(&dp, 1, marker, &result, &more, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -861,7 +865,7 @@ TEST_F(AioLegacyFIFO, TestMultipleParts) for (auto i = 0u; i < max_entries; ++i) { /* read single entry */ c = R::Rados::aio_create_completion(); - f->list(1, marker, &result, 
&more, c); + f->list(&dp, 1, marker, &result, &more, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -872,7 +876,7 @@ TEST_F(AioLegacyFIFO, TestMultipleParts) marker = result.front().marker; c = R::Rados::aio_create_completion(); - f->trim(*marker, false, c); + f->trim(&dp, *marker, false, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -885,7 +889,7 @@ TEST_F(AioLegacyFIFO, TestMultipleParts) /* try to read all again, see how many entries left */ c = R::Rados::aio_create_completion(); - f->list(max_entries, marker, &result, &more, c); + f->list(&dp, max_entries, marker, &result, &more, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -922,7 +926,7 @@ TEST_F(AioLegacyFIFO, TestMultipleParts) ASSERT_EQ(0, r); c = R::Rados::aio_create_completion(); - f->get_head_info([&](int r, RCf::part_info&& p) { + f->get_head_info(&dp, [&](int r, RCf::part_info&& p) { ASSERT_EQ(next_ofs, p.next_ofs); }, c); c->wait_for_complete(); @@ -937,7 +941,7 @@ TEST_F(AioLegacyFIFO, TestTwoPushers) static constexpr auto max_entry_size = 128ull; std::unique_ptr f; - auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield, std::nullopt, + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt, std::nullopt, false, max_part_size, max_entry_size); ASSERT_EQ(0, r); @@ -949,7 +953,7 @@ TEST_F(AioLegacyFIFO, TestTwoPushers) (max_entry_size + part_entry_overhead)); const auto max_entries = entries_per_part * 4 + 1; std::unique_ptr f2; - r = RCf::FIFO::open(ioctx, fifo_id, &f2, null_yield); + r = RCf::FIFO::open(&dp, ioctx, fifo_id, &f2, null_yield); std::vector fifos{&f, &f2}; for (auto i = 0u; i < max_entries; ++i) { @@ -958,7 +962,7 @@ TEST_F(AioLegacyFIFO, TestTwoPushers) bl.append(buf, sizeof(buf)); auto& f = *fifos[i % fifos.size()]; auto c = R::Rados::aio_create_completion(); - f->push(bl, c); + f->push(&dp, bl, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -969,7 +973,7 @@ TEST_F(AioLegacyFIFO, TestTwoPushers) std::vector result; bool more = false; auto c = R::Rados::aio_create_completion(); - f2->list(max_entries, std::nullopt, &result, &more, c); + f2->list(&dp, max_entries, std::nullopt, &result, &more, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -978,7 +982,7 @@ TEST_F(AioLegacyFIFO, TestTwoPushers) ASSERT_EQ(max_entries, result.size()); c = R::Rados::aio_create_completion(); - f2->list(max_entries, std::nullopt, &result, &more, c); + f2->list(&dp, max_entries, std::nullopt, &result, &more, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -997,7 +1001,7 @@ TEST_F(AioLegacyFIFO, TestTwoPushersTrim) static constexpr auto max_part_size = 2048ull; static constexpr auto max_entry_size = 128ull; std::unique_ptr f1; - auto r = RCf::FIFO::create(ioctx, fifo_id, &f1, null_yield, std::nullopt, + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f1, null_yield, std::nullopt, std::nullopt, false, max_part_size, max_entry_size); ASSERT_EQ(0, r); @@ -1011,7 +1015,7 @@ TEST_F(AioLegacyFIFO, TestTwoPushersTrim) const auto max_entries = entries_per_part * 4 + 1; std::unique_ptr f2; - r = RCf::FIFO::open(ioctx, fifo_id, &f2, null_yield); + r = RCf::FIFO::open(&dp, ioctx, fifo_id, &f2, null_yield); ASSERT_EQ(0, r); /* push one entry to f2 and the rest to f1 */ @@ -1021,7 +1025,7 @@ TEST_F(AioLegacyFIFO, TestTwoPushersTrim) bl.append(buf, sizeof(buf)); auto& f = (i < 1 ? 
f2 : f1); auto c = R::Rados::aio_create_completion(); - f->push(bl, c); + f->push(&dp, bl, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -1034,7 +1038,7 @@ TEST_F(AioLegacyFIFO, TestTwoPushersTrim) std::vector result; bool more = false; auto c = R::Rados::aio_create_completion(); - f1->list(num, std::nullopt, &result, &more, c); + f1->list(&dp, num, std::nullopt, &result, &more, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -1050,7 +1054,7 @@ TEST_F(AioLegacyFIFO, TestTwoPushersTrim) auto& entry = result[num - 1]; marker = entry.marker; c = R::Rados::aio_create_completion(); - f1->trim(marker, false, c); + f1->trim(&dp, marker, false, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -1059,7 +1063,7 @@ TEST_F(AioLegacyFIFO, TestTwoPushersTrim) const auto left = max_entries - num; c = R::Rados::aio_create_completion(); - f2->list(left, marker, &result, &more, c); + f2->list(&dp, left, marker, &result, &more, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -1079,7 +1083,7 @@ TEST_F(AioLegacyFIFO, TestPushBatch) static constexpr auto max_entry_size = 128ull; std::unique_ptr f; - auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield, std::nullopt, + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt, std::nullopt, false, max_part_size, max_entry_size); ASSERT_EQ(0, r); @@ -1100,7 +1104,7 @@ TEST_F(AioLegacyFIFO, TestPushBatch) ASSERT_EQ(max_entries, bufs.size()); auto c = R::Rados::aio_create_completion(); - f->push(bufs, c); + f->push(&dp, bufs, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -1111,7 +1115,7 @@ TEST_F(AioLegacyFIFO, TestPushBatch) std::vector result; bool more = false; c = R::Rados::aio_create_completion(); - f->list(max_entries, std::nullopt, &result, &more, c); + f->list(&dp, max_entries, std::nullopt, &result, &more, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -1129,23 +1133,23 @@ TEST_F(AioLegacyFIFO, TestPushBatch) TEST_F(LegacyFIFO, TrimAll) { std::unique_ptr f; - auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield); + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield); ASSERT_EQ(0, r); static constexpr auto max_entries = 10u; for (uint32_t i = 0; i < max_entries; ++i) { cb::list bl; encode(i, bl); - r = f->push(bl, null_yield); + r = f->push(&dp, bl, null_yield); ASSERT_EQ(0, r); } /* trim one entry */ - r = f->trim(RCf::marker::max().to_string(), false, null_yield); + r = f->trim(&dp, RCf::marker::max().to_string(), false, null_yield); ASSERT_EQ(-ENODATA, r); std::vector result; bool more; - r = f->list(1, std::nullopt, &result, &more, null_yield); + r = f->list(&dp, 1, std::nullopt, &result, &more, null_yield); ASSERT_EQ(0, r); ASSERT_TRUE(result.empty()); } @@ -1153,18 +1157,18 @@ TEST_F(LegacyFIFO, TrimAll) TEST_F(LegacyFIFO, AioTrimAll) { std::unique_ptr f; - auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield); + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield); ASSERT_EQ(0, r); static constexpr auto max_entries = 10u; for (uint32_t i = 0; i < max_entries; ++i) { cb::list bl; encode(i, bl); - r = f->push(bl, null_yield); + r = f->push(&dp, bl, null_yield); ASSERT_EQ(0, r); } auto c = R::Rados::aio_create_completion(); - f->trim(RCf::marker::max().to_string(), false, c); + f->trim(&dp, RCf::marker::max().to_string(), false, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -1172,7 +1176,7 @@ TEST_F(LegacyFIFO, AioTrimAll) 
std::vector result; bool more; - r = f->list(1, std::nullopt, &result, &more, null_yield); + r = f->list(&dp, 1, std::nullopt, &result, &more, null_yield); ASSERT_EQ(0, r); ASSERT_TRUE(result.empty()); } diff --git a/src/test/rgw/test_log_backing.cc b/src/test/rgw/test_log_backing.cc index 95f1e613936b0..f1bc30c762abb 100644 --- a/src/test/rgw/test_log_backing.cc +++ b/src/test/rgw/test_log_backing.cc @@ -40,6 +40,9 @@ namespace cb = ceph::buffer; namespace fifo = rados::cls::fifo; namespace RCf = rgw::cls::fifo; +auto cct = new CephContext(CEPH_ENTITY_TYPE_CLIENT); +const DoutPrefix dp(cct, 1, "test log backing: "); + class LogBacking : public testing::Test { protected: static constexpr int SHARDS = 3; @@ -72,7 +75,7 @@ protected: cb::list bl; encode(i, bl); cls_log_add(op, ceph_clock_now(), {}, "meow", bl); - auto r = rgw_rados_operate(ioctx, get_oid(0, i), &op, null_yield); + auto r = rgw_rados_operate(&dp, ioctx, get_oid(0, i), &op, null_yield); ASSERT_GE(r, 0); } } @@ -83,7 +86,7 @@ protected: cb::list bl; encode(i, bl); cls_log_add(op, ceph_clock_now(), {}, "meow", bl); - auto r = rgw_rados_operate(ioctx, get_oid(0, i), &op, null_yield); + auto r = rgw_rados_operate(&dp, ioctx, get_oid(0, i), &op, null_yield); ASSERT_GE(r, 0); } @@ -96,14 +99,14 @@ protected: std::list entries; bool truncated = false; cls_log_list(op, {}, {}, {}, 1, entries, &to_marker, &truncated); - auto r = rgw_rados_operate(ioctx, oid, &op, nullptr, null_yield); + auto r = rgw_rados_operate(&dp, ioctx, oid, &op, nullptr, null_yield); ASSERT_GE(r, 0); ASSERT_FALSE(entries.empty()); } { lr::ObjectWriteOperation op; cls_log_trim(op, {}, {}, {}, to_marker); - auto r = rgw_rados_operate(ioctx, oid, &op, null_yield); + auto r = rgw_rados_operate(&dp, ioctx, oid, &op, null_yield); ASSERT_GE(r, 0); } { @@ -111,7 +114,7 @@ protected: std::list entries; bool truncated = false; cls_log_list(op, {}, {}, {}, 1, entries, &to_marker, &truncated); - auto r = rgw_rados_operate(ioctx, oid, &op, nullptr, null_yield); + auto r = rgw_rados_operate(&dp, ioctx, oid, &op, nullptr, null_yield); ASSERT_GE(r, 0); ASSERT_TRUE(entries.empty()); } @@ -122,7 +125,7 @@ protected: { for (int i = 0; i < SHARDS; ++i) { std::unique_ptr fifo; - auto r = RCf::FIFO::create(ioctx, get_oid(0, i), &fifo, null_yield); + auto r = RCf::FIFO::create(&dp, ioctx, get_oid(0, i), &fifo, null_yield); ASSERT_EQ(0, r); ASSERT_TRUE(fifo); } @@ -132,12 +135,12 @@ protected: { using ceph::encode; std::unique_ptr fifo; - auto r = RCf::FIFO::open(ioctx, get_oid(0, i), &fifo, null_yield); + auto r = RCf::FIFO::open(&dp, ioctx, get_oid(0, i), &fifo, null_yield); ASSERT_GE(0, r); ASSERT_TRUE(fifo); cb::list bl; encode(i, bl); - r = fifo->push(bl, null_yield); + r = fifo->push(&dp, bl, null_yield); ASSERT_GE(0, r); } @@ -154,7 +157,7 @@ protected: TEST_F(LogBacking, TestOmap) { make_omap(); - auto stat = log_backing_type(ioctx, log_type::fifo, SHARDS, + auto stat = log_backing_type(&dp, ioctx, log_type::fifo, SHARDS, [this](int shard){ return get_oid(0, shard); }, null_yield); ASSERT_EQ(log_type::omap, *stat); @@ -162,7 +165,7 @@ TEST_F(LogBacking, TestOmap) TEST_F(LogBacking, TestOmapEmpty) { - auto stat = log_backing_type(ioctx, log_type::omap, SHARDS, + auto stat = log_backing_type(&dp, ioctx, log_type::omap, SHARDS, [this](int shard){ return get_oid(0, shard); }, null_yield); ASSERT_EQ(log_type::omap, *stat); @@ -171,7 +174,7 @@ TEST_F(LogBacking, TestOmapEmpty) TEST_F(LogBacking, TestFIFO) { make_fifo(); - auto stat = log_backing_type(ioctx, log_type::fifo, SHARDS, + 
auto stat = log_backing_type(&dp, ioctx, log_type::fifo, SHARDS, [this](int shard){ return get_oid(0, shard); }, null_yield); ASSERT_EQ(log_type::fifo, *stat); @@ -179,7 +182,7 @@ TEST_F(LogBacking, TestFIFO) TEST_F(LogBacking, TestFIFOEmpty) { - auto stat = log_backing_type(ioctx, log_type::fifo, SHARDS, + auto stat = log_backing_type(&dp, ioctx, log_type::fifo, SHARDS, [this](int shard){ return get_oid(0, shard); }, null_yield); ASSERT_EQ(log_type::fifo, *stat); @@ -230,7 +233,7 @@ public: TEST_F(LogBacking, GenerationSingle) { auto lgr = logback_generations::init( - ioctx, "foobar", [this](uint64_t gen_id, int shard) { + &dp, ioctx, "foobar", [this](uint64_t gen_id, int shard) { return get_oid(gen_id, shard); }, SHARDS, log_type::fifo, null_yield); ASSERT_TRUE(lgr); @@ -243,14 +246,14 @@ TEST_F(LogBacking, GenerationSingle) ASSERT_EQ(log_type::fifo, lg->got_entries[0].type); ASSERT_FALSE(lg->got_entries[0].pruned); - auto ec = lg->empty_to(0, null_yield); + auto ec = lg->empty_to(&dp, 0, null_yield); ASSERT_TRUE(ec); lg.reset(); lg = *logback_generations::init( - ioctx, "foobar", [this](uint64_t gen_id, int shard) { + &dp, ioctx, "foobar", [this](uint64_t gen_id, int shard) { return get_oid(gen_id, shard); }, SHARDS, log_type::fifo, null_yield); @@ -262,7 +265,7 @@ TEST_F(LogBacking, GenerationSingle) lg->got_entries.clear(); - ec = lg->new_backing(log_type::omap, null_yield); + ec = lg->new_backing(&dp, log_type::omap, null_yield); ASSERT_FALSE(ec); ASSERT_EQ(1, lg->got_entries.size()); @@ -273,7 +276,7 @@ TEST_F(LogBacking, GenerationSingle) lg.reset(); lg = *logback_generations::init( - ioctx, "foobar", [this](uint64_t gen_id, int shard) { + &dp, ioctx, "foobar", [this](uint64_t gen_id, int shard) { return get_oid(gen_id, shard); }, SHARDS, log_type::fifo, null_yield); @@ -286,7 +289,7 @@ TEST_F(LogBacking, GenerationSingle) ASSERT_EQ(log_type::omap, lg->got_entries[1].type); ASSERT_FALSE(lg->got_entries[1].pruned); - ec = lg->empty_to(0, null_yield); + ec = lg->empty_to(&dp, 0, null_yield); ASSERT_FALSE(ec); ASSERT_EQ(0, *lg->tail); @@ -294,7 +297,7 @@ TEST_F(LogBacking, GenerationSingle) lg.reset(); lg = *logback_generations::init( - ioctx, "foobar", [this](uint64_t gen_id, int shard) { + &dp, ioctx, "foobar", [this](uint64_t gen_id, int shard) { return get_oid(gen_id, shard); }, SHARDS, log_type::fifo, null_yield); @@ -303,7 +306,7 @@ TEST_F(LogBacking, GenerationSingle) ASSERT_EQ(log_type::omap, lg->got_entries[1].type); ASSERT_FALSE(lg->got_entries[1].pruned); - ec = lg->remove_empty(null_yield); + ec = lg->remove_empty(&dp, null_yield); ASSERT_FALSE(ec); auto entries = lg->entries(); @@ -319,11 +322,11 @@ TEST_F(LogBacking, GenerationSingle) TEST_F(LogBacking, GenerationWN) { auto lg1 = *logback_generations::init( - ioctx, "foobar", [this](uint64_t gen_id, int shard) { + &dp, ioctx, "foobar", [this](uint64_t gen_id, int shard) { return get_oid(gen_id, shard); }, SHARDS, log_type::fifo, null_yield); - auto ec = lg1->new_backing(log_type::omap, null_yield); + auto ec = lg1->new_backing(&dp, log_type::omap, null_yield); ASSERT_FALSE(ec); ASSERT_EQ(1, lg1->got_entries.size()); @@ -334,7 +337,7 @@ TEST_F(LogBacking, GenerationWN) lg1->got_entries.clear(); auto lg2 = *logback_generations::init( - ioctx2, "foobar", [this](uint64_t gen_id, int shard) { + &dp, ioctx2, "foobar", [this](uint64_t gen_id, int shard) { return get_oid(gen_id, shard); }, SHARDS, log_type::fifo, null_yield); @@ -350,7 +353,7 @@ TEST_F(LogBacking, GenerationWN) lg2->got_entries.clear(); - ec = 
lg1->new_backing(log_type::fifo, null_yield); + ec = lg1->new_backing(&dp, log_type::fifo, null_yield); ASSERT_FALSE(ec); ASSERT_EQ(1, lg1->got_entries.size()); @@ -366,7 +369,7 @@ TEST_F(LogBacking, GenerationWN) lg1->got_entries.clear(); lg2->got_entries.clear(); - ec = lg2->empty_to(1, null_yield); + ec = lg2->empty_to(&dp, 1, null_yield); ASSERT_FALSE(ec); ASSERT_EQ(1, *lg1->tail); diff --git a/src/test/rgw/test_rgw_lua.cc b/src/test/rgw/test_rgw_lua.cc index bdfe37ef2db25..7d7af6d786f3d 100644 --- a/src/test/rgw/test_rgw_lua.cc +++ b/src/test/rgw/test_rgw_lua.cc @@ -22,7 +22,7 @@ public: class TestRGWUser : public sal::RGWUser { public: - virtual int list_buckets(const string&, const string&, uint64_t, bool, sal::RGWBucketList&, optional_yield y) override { + virtual int list_buckets(const DoutPrefixProvider *dpp, const string&, const string&, uint64_t, bool, sal::RGWBucketList&, optional_yield y) override { return 0; } @@ -30,7 +30,7 @@ public: return nullptr; } - virtual int load_by_id(optional_yield y) override { + virtual int load_by_id(const DoutPrefixProvider *dpp, optional_yield y) override { return 0; } diff --git a/src/test/rgw/test_rgw_manifest.cc b/src/test/rgw/test_rgw_manifest.cc index 0c22b908aff13..8ccf17c94713d 100644 --- a/src/test/rgw/test_rgw_manifest.cc +++ b/src/test/rgw/test_rgw_manifest.cc @@ -21,6 +21,9 @@ using namespace std; +auto cct = new CephContext(CEPH_ENTITY_TYPE_CLIENT); +const DoutPrefix dp(cct, 1, "test rgw manifest: "); + struct OldObjManifestPart { old_rgw_obj loc; /* the object where the data is located */ uint64_t loc_ofs; /* the offset at that object where the data is located */ @@ -222,18 +225,18 @@ TEST(TestRGWManifest, head_only_obj) { list::iterator liter; RGWObjManifest::obj_iterator iter; - for (iter = manifest.obj_begin(), liter = objs.begin(); - iter != manifest.obj_end() && liter != objs.end(); + for (iter = manifest.obj_begin(&dp), liter = objs.begin(); + iter != manifest.obj_end(&dp) && liter != objs.end(); ++iter, ++liter) { ASSERT_TRUE(env.get_raw(*liter) == env.get_raw(iter.get_location())); } - ASSERT_TRUE(iter == manifest.obj_end()); + ASSERT_TRUE(iter == manifest.obj_end(&dp)); ASSERT_TRUE(liter == objs.end()); rgw_raw_obj raw_head; - iter = manifest.obj_find(100 * 1024); + iter = manifest.obj_find(&dp, 100 * 1024); ASSERT_TRUE(env.get_raw(iter.get_location()) == env.get_raw(head)); ASSERT_EQ((int)iter.get_stripe_size(), obj_size); } @@ -258,8 +261,8 @@ TEST(TestRGWManifest, obj_with_head_and_tail) { rgw_obj_select last_obj; RGWObjManifest::obj_iterator iter; - for (iter = manifest.obj_begin(), liter = objs.begin(); - iter != manifest.obj_end() && liter != objs.end(); + for (iter = manifest.obj_begin(&dp), liter = objs.begin(); + iter != manifest.obj_end(&dp) && liter != objs.end(); ++iter, ++liter) { cout << "*liter=" << *liter << " iter.get_location()=" << env.get_raw(iter.get_location()) << std::endl; ASSERT_TRUE(env.get_raw(*liter) == env.get_raw(iter.get_location())); @@ -267,15 +270,15 @@ TEST(TestRGWManifest, obj_with_head_and_tail) { last_obj = iter.get_location(); } - ASSERT_TRUE(iter == manifest.obj_end()); + ASSERT_TRUE(iter == manifest.obj_end(&dp)); ASSERT_TRUE(liter == objs.end()); - iter = manifest.obj_find(100 * 1024); + iter = manifest.obj_find(&dp, 100 * 1024); ASSERT_TRUE(env.get_raw(iter.get_location()) == env.get_raw(head)); ASSERT_EQ((int)iter.get_stripe_size(), head_size); uint64_t ofs = 20 * 1024 * 1024 + head_size; - iter = manifest.obj_find(ofs + 100); + iter = manifest.obj_find(&dp, ofs + 100); 
ASSERT_TRUE(env.get_raw(iter.get_location()) == env.get_raw(last_obj)); ASSERT_EQ(iter.get_stripe_ofs(), ofs); @@ -319,11 +322,11 @@ TEST(TestRGWManifest, multipart) { RGWObjManifest m; for (int i = 0; i < num_parts; i++) { - m.append(pm[i], env.zonegroup, env.zone_params); + m.append(&dp, pm[i], env.zonegroup, env.zone_params); } RGWObjManifest::obj_iterator iter; - for (iter = m.obj_begin(); iter != m.obj_end(); ++iter) { - RGWObjManifest::obj_iterator fiter = m.obj_find(iter.get_ofs()); + for (iter = m.obj_begin(&dp); iter != m.obj_end(&dp); ++iter) { + RGWObjManifest::obj_iterator fiter = m.obj_find(&dp, iter.get_ofs()); ASSERT_TRUE(env.get_raw(fiter.get_location()) == env.get_raw(iter.get_location())); } @@ -363,8 +366,8 @@ TEST(TestRGWManifest, old_obj_manifest) { RGWObjManifest::obj_iterator iter; auto liter = old_objs.begin(); - for (iter = manifest.obj_begin(); - iter != manifest.obj_end() && liter != old_objs.end(); + for (iter = manifest.obj_begin(&dp); + iter != manifest.obj_end(&dp) && liter != old_objs.end(); ++iter, ++liter) { rgw_pool old_pool(liter->bucket.data_pool); string old_oid; @@ -377,7 +380,7 @@ TEST(TestRGWManifest, old_obj_manifest) { } ASSERT_TRUE(liter == old_objs.end()); - ASSERT_TRUE(iter == manifest.obj_end()); + ASSERT_TRUE(iter == manifest.obj_end(&dp)); } diff --git a/src/test/rgw/test_rgw_period_history.cc b/src/test/rgw/test_rgw_period_history.cc index 2bac99b0bb922..a4854ea778f14 100644 --- a/src/test/rgw/test_rgw_period_history.cc +++ b/src/test/rgw/test_rgw_period_history.cc @@ -35,7 +35,7 @@ const auto current_period = make_period("5", 5, "4"); // mock puller that throws an exception if it's called struct ErrorPuller : public RGWPeriodHistory::Puller { - int pull(const std::string& id, RGWPeriod& period, optional_yield) override { + int pull(const DoutPrefixProvider *dpp, const std::string& id, RGWPeriod& period, optional_yield) override { throw std::runtime_error("unexpected call to pull"); } }; @@ -48,7 +48,7 @@ class RecordingPuller : public RGWPeriodHistory::Puller { public: explicit RecordingPuller(int error) : error(error) {} Ids ids; - int pull(const std::string& id, RGWPeriod& period, optional_yield) override { + int pull(const DoutPrefixProvider *dpp, const std::string& id, RGWPeriod& period, optional_yield) override { ids.push_back(id); return error; } @@ -56,7 +56,7 @@ class RecordingPuller : public RGWPeriodHistory::Puller { // mock puller that returns a fake period by parsing the period id struct NumericPuller : public RGWPeriodHistory::Puller { - int pull(const std::string& id, RGWPeriod& period, optional_yield) override { + int pull(const DoutPrefixProvider *dpp, const std::string& id, RGWPeriod& period, optional_yield) override { // relies on numeric period ids to divine the realm_epoch auto realm_epoch = boost::lexical_cast(id); auto predecessor = boost::lexical_cast(realm_epoch-1); @@ -130,10 +130,11 @@ TEST(PeriodHistory, PullPredecessorsBeforeCurrent) { RecordingPuller puller{-EFAULT}; RGWPeriodHistory history(g_ceph_context, &puller, current_period); + const DoutPrefix dp(g_ceph_context, 1, "test rgw period history: "); // create a disjoint history at 1 and verify that periods are requested // backwards from current_period - auto c1 = history.attach(make_period("1", 1, ""), null_yield); + auto c1 = history.attach(&dp, make_period("1", 1, ""), null_yield); ASSERT_FALSE(c1); ASSERT_EQ(-EFAULT, c1.get_error()); ASSERT_EQ(Ids{"4"}, puller.ids); @@ -141,7 +142,7 @@ TEST(PeriodHistory, PullPredecessorsBeforeCurrent) auto c4 = 
history.insert(make_period("4", 4, "3")); ASSERT_TRUE(c4); - c1 = history.attach(make_period("1", 1, ""), null_yield); + c1 = history.attach(&dp, make_period("1", 1, ""), null_yield); ASSERT_FALSE(c1); ASSERT_EQ(-EFAULT, c1.get_error()); ASSERT_EQ(Ids({"4", "3"}), puller.ids); @@ -149,7 +150,7 @@ TEST(PeriodHistory, PullPredecessorsBeforeCurrent) auto c3 = history.insert(make_period("3", 3, "2")); ASSERT_TRUE(c3); - c1 = history.attach(make_period("1", 1, ""), null_yield); + c1 = history.attach(&dp, make_period("1", 1, ""), null_yield); ASSERT_FALSE(c1); ASSERT_EQ(-EFAULT, c1.get_error()); ASSERT_EQ(Ids({"4", "3", "2"}), puller.ids); @@ -157,7 +158,7 @@ TEST(PeriodHistory, PullPredecessorsBeforeCurrent) auto c2 = history.insert(make_period("2", 2, "1")); ASSERT_TRUE(c2); - c1 = history.attach(make_period("1", 1, ""), null_yield); + c1 = history.attach(&dp, make_period("1", 1, ""), null_yield); ASSERT_TRUE(c1); ASSERT_EQ(Ids({"4", "3", "2"}), puller.ids); } @@ -166,25 +167,26 @@ TEST(PeriodHistory, PullPredecessorsAfterCurrent) { RecordingPuller puller{-EFAULT}; RGWPeriodHistory history(g_ceph_context, &puller, current_period); + const DoutPrefix dp(g_ceph_context, 1, "test rgw period history: "); // create a disjoint history at 9 and verify that periods are requested // backwards down to current_period - auto c9 = history.attach(make_period("9", 9, "8"), null_yield); + auto c9 = history.attach(&dp, make_period("9", 9, "8"), null_yield); ASSERT_FALSE(c9); ASSERT_EQ(-EFAULT, c9.get_error()); ASSERT_EQ(Ids{"8"}, puller.ids); - auto c8 = history.attach(make_period("8", 8, "7"), null_yield); + auto c8 = history.attach(&dp, make_period("8", 8, "7"), null_yield); ASSERT_FALSE(c8); ASSERT_EQ(-EFAULT, c8.get_error()); ASSERT_EQ(Ids({"8", "7"}), puller.ids); - auto c7 = history.attach(make_period("7", 7, "6"), null_yield); + auto c7 = history.attach(&dp, make_period("7", 7, "6"), null_yield); ASSERT_FALSE(c7); ASSERT_EQ(-EFAULT, c7.get_error()); ASSERT_EQ(Ids({"8", "7", "6"}), puller.ids); - auto c6 = history.attach(make_period("6", 6, "5"), null_yield); + auto c6 = history.attach(&dp, make_period("6", 6, "5"), null_yield); ASSERT_TRUE(c6); ASSERT_EQ(Ids({"8", "7", "6"}), puller.ids); } @@ -270,8 +272,9 @@ TEST(PeriodHistory, AttachBefore) { NumericPuller puller; RGWPeriodHistory history(g_ceph_context, &puller, current_period); + const DoutPrefix dp(g_ceph_context, 1, "test rgw period history: "); - auto c1 = history.attach(make_period("1", 1, ""), null_yield); + auto c1 = history.attach(&dp, make_period("1", 1, ""), null_yield); ASSERT_TRUE(c1); // verify that we pulled and merged all periods from 1-5 @@ -296,8 +299,9 @@ TEST(PeriodHistory, AttachAfter) { NumericPuller puller; RGWPeriodHistory history(g_ceph_context, &puller, current_period); + const DoutPrefix dp(g_ceph_context, 1, "test rgw period history: "); - auto c9 = history.attach(make_period("9", 9, "8"), null_yield); + auto c9 = history.attach(&dp, make_period("9", 9, "8"), null_yield); ASSERT_TRUE(c9); // verify that we pulled and merged all periods from 5-9 diff --git a/src/test/rgw/test_rgw_throttle.cc b/src/test/rgw/test_rgw_throttle.cc index a762a8c7a06ca..109dd10061954 100644 --- a/src/test/rgw/test_rgw_throttle.cc +++ b/src/test/rgw/test_rgw_throttle.cc @@ -29,7 +29,8 @@ struct RadosEnv : public ::testing::Environment { void SetUp() override { rados.emplace(g_ceph_context); - ASSERT_EQ(0, rados->start(null_yield)); + const NoDoutPrefix no_dpp(g_ceph_context, 1); + ASSERT_EQ(0, rados->start(null_yield, &no_dpp)); int r = 
rados->pool({poolname}).create(); if (r == -EEXIST) r = 0; @@ -49,7 +50,8 @@ class RadosFixture : public ::testing::Test { protected: RGWSI_RADOS::Obj make_obj(const std::string& oid) { auto obj = RadosEnv::rados->obj({{RadosEnv::poolname}, oid}); - ceph_assert_always(0 == obj.open()); + const NoDoutPrefix no_dpp(g_ceph_context, 1); + ceph_assert_always(0 == obj.open(&no_dpp)); return obj; } }; diff --git a/src/test/test_cors.cc b/src/test/test_cors.cc index ebb55c5b270a5..9cb735b5c1a78 100644 --- a/src/test/test_cors.cc +++ b/src/test/test_cors.cc @@ -307,7 +307,8 @@ static int delete_bucket(void){ RGWCORSRule *xml_to_cors_rule(string s){ RGWCORSConfiguration_S3 *cors_config; - RGWCORSXMLParser_S3 parser(g_ceph_context); + const DoutPrefix dp(g_ceph_context, 1, "test cors: "); + RGWCORSXMLParser_S3 parser(&dp, g_ceph_context); const string *data = g_test->get_response_data(); if (!parser.init()) { return NULL; -- 2.39.5
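
Note on the pattern above (a hedged sketch, not part of the patch itself): the test-side hunks all do the same thing, constructing a DoutPrefix from a CephContext and passing its address wherever a new DoutPrefixProvider* parameter appears. The sketch below illustrates that pattern in isolation. It assumes a Ceph build tree for the headers, mirrors the includes and the literal subsystem number 1 used by the updated tests, and the helper do_op() is hypothetical, standing in for any function that gained a dpp argument in this change.

    // A minimal sketch of the dpp-threading pattern used by the updated tests.
    // Assumes a Ceph build tree for these headers; do_op() is a hypothetical
    // helper standing in for any function that gained a DoutPrefixProvider.
    #include "include/types.h"
    #include "common/ceph_context.h"
    #include "common/dout.h"

    // Callees receive the prefix provider explicitly and log through
    // ldpp_dout, so every line carries the caller's prefix.
    static int do_op(const DoutPrefixProvider *dpp)
    {
      ldpp_dout(dpp, 20) << "do_op: starting" << dendl;
      int ret = 0;  // placeholder for the real operation
      if (ret < 0) {
        ldpp_dout(dpp, 0) << "WARNING: do_op failed ret=" << ret << dendl;
      }
      return ret;
    }

    int main()
    {
      // Same construction as the updated unit tests: a bare client
      // CephContext plus a DoutPrefix that stamps every message
      // (the tests pass subsystem 1 and a short textual prefix).
      auto cct = new CephContext(CEPH_ENTITY_TYPE_CLIENT);
      const DoutPrefix dp(cct, 1, "dpp example: ");
      int r = do_op(&dp);
      cct->put();
      return r == 0 ? 0 : 1;
    }

Where no meaningful prefix is wanted, the same shape applies with NoDoutPrefix instead, as the throttle test does with NoDoutPrefix(g_ceph_context, 1) before calling rados->start() and obj.open().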