From 6cfc2ce2ce669e139acb682898c53c5602075e31 Mon Sep 17 00:00:00 2001 From: Ali Maredia Date: Sat, 30 Jan 2021 00:47:55 -0500 Subject: [PATCH] rgw: add DPP's to logging for most ops This commit contains the following major changes: - Most log output lines for major ops now have DoutPrefixProviders. - Create new macro ldpp_subdout. This macro is meant as a replacement for lsubdout for headerfiles that do not define the rgw subsys. - Changes to RGWObjManifest begin and end iterators. - Make it so that rgw_dencoder.cc should only contain the logic related to encoding. Also add dpp to ldouts and lderr's already using req_state and replace sync_env/env->dpp's with dpp's Signed-off-by: Ali Maredia Signed-off-by: Kalpesh Pandya Signed-off-by: Casey Bodley --- src/common/dout.h | 5 + src/rgw/cls_fifo_legacy.cc | 558 +++++++-------- src/rgw/cls_fifo_legacy.h | 68 +- src/rgw/librgw.cc | 15 +- src/rgw/rgw_acl.cc | 19 +- src/rgw/rgw_acl.h | 4 +- src/rgw/rgw_acl_s3.cc | 10 +- src/rgw/rgw_admin.cc | 434 +++++------ src/rgw/rgw_auth.cc | 2 +- src/rgw/rgw_auth_s3.cc | 20 +- src/rgw/rgw_auth_s3.h | 8 +- src/rgw/rgw_bucket.cc | 130 ++-- src/rgw/rgw_bucket.h | 8 +- src/rgw/rgw_bucket_sync.cc | 6 +- src/rgw/rgw_bucket_sync.h | 2 +- src/rgw/rgw_cache.cc | 49 +- src/rgw/rgw_cache.h | 15 +- src/rgw/rgw_coroutine.cc | 28 +- src/rgw/rgw_coroutine.h | 18 +- src/rgw/rgw_cors_s3.cc | 24 +- src/rgw/rgw_cors_s3.h | 10 +- src/rgw/rgw_cr_rados.cc | 146 ++-- src/rgw/rgw_cr_rados.h | 167 +++-- src/rgw/rgw_cr_rest.cc | 10 +- src/rgw/rgw_cr_rest.h | 26 +- src/rgw/rgw_cr_tools.cc | 20 +- src/rgw/rgw_crypt.cc | 70 +- src/rgw/rgw_crypt.h | 2 +- src/rgw/rgw_data_sync.cc | 240 ++++--- src/rgw/rgw_data_sync.h | 57 +- src/rgw/rgw_datalog.cc | 180 ++--- src/rgw/rgw_datalog.h | 40 +- src/rgw/rgw_dencoder.cc | 159 ---- src/rgw/rgw_etag_verifier.cc | 15 +- src/rgw/rgw_etag_verifier.h | 3 +- src/rgw/rgw_file.cc | 2 +- src/rgw/rgw_gc.cc | 10 +- src/rgw/rgw_json_enc.cc | 7 +- src/rgw/rgw_lc.cc | 32 +- 
src/rgw/rgw_lc.h | 3 +- src/rgw/rgw_lib.h | 8 +- src/rgw/rgw_lib_frontend.h | 2 +- src/rgw/rgw_loadgen.cc | 5 +- src/rgw/rgw_loadgen.h | 2 +- src/rgw/rgw_loadgen_process.cc | 4 +- src/rgw/rgw_log.cc | 20 +- src/rgw/rgw_log_backing.cc | 160 ++--- src/rgw/rgw_log_backing.h | 109 +-- src/rgw/rgw_lua.cc | 18 +- src/rgw/rgw_lua.h | 8 +- src/rgw/rgw_lua_request.cc | 6 +- src/rgw/rgw_main.cc | 6 +- src/rgw/rgw_mdlog.h | 17 +- src/rgw/rgw_metadata.cc | 48 +- src/rgw/rgw_metadata.h | 10 +- src/rgw/rgw_multi.cc | 24 +- src/rgw/rgw_multi.h | 7 +- src/rgw/rgw_notify.cc | 42 +- src/rgw/rgw_notify.h | 10 +- src/rgw/rgw_obj_manifest.cc | 215 ++++-- src/rgw/rgw_obj_manifest.h | 88 +-- src/rgw/rgw_object_expirer_core.cc | 45 +- src/rgw/rgw_object_expirer_core.h | 17 +- src/rgw/rgw_op.cc | 136 ++-- src/rgw/rgw_op.h | 13 +- src/rgw/rgw_orphan.cc | 190 ++--- src/rgw/rgw_orphan.h | 25 +- src/rgw/rgw_otp.cc | 14 +- src/rgw/rgw_otp.h | 6 +- src/rgw/rgw_period_history.cc | 12 +- src/rgw/rgw_period_history.h | 5 +- src/rgw/rgw_period_puller.cc | 30 +- src/rgw/rgw_period_puller.h | 2 +- src/rgw/rgw_period_pusher.cc | 33 +- src/rgw/rgw_period_pusher.h | 2 +- src/rgw/rgw_process.cc | 2 +- src/rgw/rgw_process.h | 13 +- src/rgw/rgw_pubsub.cc | 128 ++-- src/rgw/rgw_pubsub.h | 34 +- src/rgw/rgw_pubsub_push.cc | 10 +- src/rgw/rgw_putobj_processor.cc | 4 +- src/rgw/rgw_quota.cc | 69 +- src/rgw/rgw_quota.h | 2 +- src/rgw/rgw_rados.cc | 837 +++++++++++----------- src/rgw/rgw_rados.h | 146 ++-- src/rgw/rgw_realm_reloader.cc | 24 +- src/rgw/rgw_realm_watcher.cc | 22 +- src/rgw/rgw_realm_watcher.h | 4 +- src/rgw/rgw_reshard.cc | 83 +-- src/rgw/rgw_reshard.h | 30 +- src/rgw/rgw_rest.cc | 2 +- src/rgw/rgw_rest_bucket.cc | 4 +- src/rgw/rgw_rest_client.cc | 62 +- src/rgw/rgw_rest_client.h | 24 +- src/rgw/rgw_rest_config.cc | 4 +- src/rgw/rgw_rest_conn.cc | 47 +- src/rgw/rgw_rest_conn.h | 43 +- src/rgw/rgw_rest_iam.cc | 4 +- src/rgw/rgw_rest_log.cc | 146 ++-- src/rgw/rgw_rest_metadata.cc | 14 +- 
src/rgw/rgw_rest_oidc_provider.cc | 6 +- src/rgw/rgw_rest_pubsub.cc | 98 +-- src/rgw/rgw_rest_pubsub_common.cc | 52 +- src/rgw/rgw_rest_realm.cc | 68 +- src/rgw/rgw_rest_role.cc | 22 +- src/rgw/rgw_rest_s3.cc | 52 +- src/rgw/rgw_rest_s3.h | 2 +- src/rgw/rgw_rest_s3website.h | 2 +- src/rgw/rgw_rest_sts.cc | 26 +- src/rgw/rgw_rest_swift.cc | 4 +- src/rgw/rgw_rest_user.cc | 36 +- src/rgw/rgw_rest_user_policy.cc | 18 +- src/rgw/rgw_sal.h | 67 +- src/rgw/rgw_sal_rados.cc | 197 ++--- src/rgw/rgw_sal_rados.h | 75 +- src/rgw/rgw_service.cc | 6 +- src/rgw/rgw_sync.cc | 227 +++--- src/rgw/rgw_sync.h | 46 +- src/rgw/rgw_sync_checkpoint.cc | 7 +- src/rgw/rgw_sync_error_repo.cc | 8 +- src/rgw/rgw_sync_module.cc | 8 +- src/rgw/rgw_sync_module.h | 2 +- src/rgw/rgw_sync_module_aws.cc | 60 +- src/rgw/rgw_sync_module_es.cc | 30 +- src/rgw/rgw_sync_module_es_rest.cc | 16 +- src/rgw/rgw_sync_module_log.cc | 4 +- src/rgw/rgw_sync_module_pubsub.cc | 128 ++-- src/rgw/rgw_sync_module_pubsub_rest.cc | 28 +- src/rgw/rgw_sync_trace.cc | 5 +- src/rgw/rgw_tools.cc | 35 +- src/rgw/rgw_tools.h | 14 +- src/rgw/rgw_torrent.cc | 16 +- src/rgw/rgw_trim_bilog.cc | 82 +-- src/rgw/rgw_trim_datalog.cc | 53 +- src/rgw/rgw_trim_datalog.h | 6 +- src/rgw/rgw_trim_mdlog.cc | 87 +-- src/rgw/rgw_usage.cc | 16 +- src/rgw/rgw_usage.h | 3 +- src/rgw/rgw_user.cc | 48 +- src/rgw/rgw_user.h | 22 +- src/rgw/rgw_worker.h | 13 +- src/rgw/rgw_zone.cc | 420 +++++------ src/rgw/rgw_zone.h | 105 +-- src/rgw/services/svc_bi.h | 7 +- src/rgw/services/svc_bi_rados.cc | 83 ++- src/rgw/services/svc_bi_rados.h | 33 +- src/rgw/services/svc_bilog_rados.cc | 23 +- src/rgw/services/svc_bilog_rados.h | 13 +- src/rgw/services/svc_bucket_sobj.cc | 12 +- src/rgw/services/svc_bucket_sync.h | 9 +- src/rgw/services/svc_bucket_sync_sobj.cc | 97 +-- src/rgw/services/svc_bucket_sync_sobj.h | 9 +- src/rgw/services/svc_cls.cc | 115 +-- src/rgw/services/svc_cls.h | 47 +- src/rgw/services/svc_mdlog.cc | 98 +-- src/rgw/services/svc_mdlog.h | 15 +- 
src/rgw/services/svc_meta_be.cc | 14 +- src/rgw/services/svc_meta_be.h | 18 +- src/rgw/services/svc_meta_be_otp.cc | 7 +- src/rgw/services/svc_meta_be_otp.h | 3 +- src/rgw/services/svc_meta_be_sobj.cc | 28 +- src/rgw/services/svc_meta_be_sobj.h | 14 +- src/rgw/services/svc_notify.cc | 61 +- src/rgw/services/svc_notify.h | 13 +- src/rgw/services/svc_otp.cc | 20 +- src/rgw/services/svc_otp.h | 12 +- src/rgw/services/svc_rados.cc | 30 +- src/rgw/services/svc_rados.h | 14 +- src/rgw/services/svc_sys_obj.cc | 63 +- src/rgw/services/svc_sys_obj.h | 38 +- src/rgw/services/svc_sys_obj_cache.cc | 108 +-- src/rgw/services/svc_sys_obj_cache.h | 36 +- src/rgw/services/svc_sys_obj_core.cc | 148 ++-- src/rgw/services/svc_sys_obj_core.h | 43 +- src/rgw/services/svc_sys_obj_core_types.h | 3 +- src/rgw/services/svc_user.h | 22 +- src/rgw/services/svc_user_rados.cc | 140 ++-- src/rgw/services/svc_user_rados.h | 46 +- src/rgw/services/svc_zone.cc | 326 ++++----- src/rgw/services/svc_zone.h | 30 +- src/test/rgw/test_cls_fifo_legacy.cc | 216 +++--- src/test/rgw/test_log_backing.cc | 53 +- src/test/rgw/test_rgw_lua.cc | 10 +- src/test/rgw/test_rgw_manifest.cc | 33 +- src/test/rgw/test_rgw_period_history.cc | 30 +- src/test/rgw/test_rgw_throttle.cc | 3 +- src/test/test_cors.cc | 3 +- 187 files changed, 5206 insertions(+), 4879 deletions(-) diff --git a/src/common/dout.h b/src/common/dout.h index b8f762991db..421222d535f 100644 --- a/src/common/dout.h +++ b/src/common/dout.h @@ -175,6 +175,11 @@ struct is_dynamic> : public std::true_type {}; #define ldout(cct, v) dout_impl(cct, dout_subsys, v) dout_prefix #define lderr(cct) dout_impl(cct, ceph_subsys_, -1) dout_prefix +#define ldpp_subdout(dpp, sub, v) \ + if (decltype(auto) pdpp = (dpp); pdpp) /* workaround -Wnonnull-compare for 'this' */ \ + dout_impl(pdpp->get_cct(), ceph_subsys_##sub, v) \ + pdpp->gen_prefix(*_dout) + #define ldpp_dout(dpp, v) \ if (decltype(auto) pdpp = (dpp); pdpp) /* workaround -Wnonnull-compare for 'this' */ \ 
dout_impl(pdpp->get_cct(), ceph::dout::need_dynamic(pdpp->get_subsys()), v) \ diff --git a/src/rgw/cls_fifo_legacy.cc b/src/rgw/cls_fifo_legacy.cc index 45a3ad50514..80af9005525 100644 --- a/src/rgw/cls_fifo_legacy.cc +++ b/src/rgw/cls_fifo_legacy.cc @@ -65,7 +65,7 @@ void create_meta(lr::ObjectWriteOperation* op, op->exec(fifo::op::CLASS, fifo::op::CREATE_META, in); } -int get_meta(lr::IoCtx& ioctx, const std::string& oid, +int get_meta(const DoutPrefixProvider *dpp, lr::IoCtx& ioctx, const std::string& oid, std::optional objv, fifo::info* info, std::uint32_t* part_header_size, std::uint32_t* part_entry_overhead, @@ -81,7 +81,7 @@ int get_meta(lr::IoCtx& ioctx, const std::string& oid, op.exec(fifo::op::CLASS, fifo::op::GET_META, in, &bl, nullptr); - auto r = rgw_rados_operate(ioctx, oid, &op, nullptr, y); + auto r = rgw_rados_operate(dpp, ioctx, oid, &op, nullptr, y); if (r >= 0) try { fifo::op::get_meta_reply reply; auto iter = bl.cbegin(); @@ -91,13 +91,13 @@ int get_meta(lr::IoCtx& ioctx, const std::string& oid, if (part_entry_overhead) *part_entry_overhead = reply.part_entry_overhead; } catch (const cb::error& err) { - lderr(static_cast(ioctx.cct())) + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " decode failed: " << err.what() << " tid=" << tid << dendl; r = from_error_code(err.code()); } else if (!(probe && (r == -ENOENT || r == -ENODATA))) { - lderr(static_cast(ioctx.cct())) + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " fifo::op::GET_META failed r=" << r << " tid=" << tid << dendl; @@ -137,7 +137,7 @@ void part_init(lr::ObjectWriteOperation* op, std::string_view tag, op->exec(fifo::op::CLASS, fifo::op::INIT_PART, in); } -int push_part(lr::IoCtx& ioctx, const std::string& oid, std::string_view tag, +int push_part(const DoutPrefixProvider *dpp, lr::IoCtx& ioctx, const std::string& oid, std::string_view tag, std::deque data_bufs, std::uint64_t tid, optional_yield y) { @@ -155,16 +155,16 @@ int push_part(lr::IoCtx& 
ioctx, const std::string& oid, std::string_view tag, encode(pp, in); auto retval = 0; op.exec(fifo::op::CLASS, fifo::op::PUSH_PART, in, nullptr, &retval); - auto r = rgw_rados_operate(ioctx, oid, &op, y, lr::OPERATION_RETURNVEC); + auto r = rgw_rados_operate(dpp, ioctx, oid, &op, y, lr::OPERATION_RETURNVEC); if (r < 0) { - lderr(static_cast(ioctx.cct())) + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " fifo::op::PUSH_PART failed r=" << r << " tid=" << tid << dendl; return r; } if (retval < 0) { - lderr(static_cast(ioctx.cct())) + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " error handling response retval=" << retval << " tid=" << tid << dendl; @@ -208,7 +208,7 @@ void trim_part(lr::ObjectWriteOperation* op, op->exec(fifo::op::CLASS, fifo::op::TRIM_PART, in); } -int list_part(lr::IoCtx& ioctx, const std::string& oid, +int list_part(const DoutPrefixProvider *dpp, lr::IoCtx& ioctx, const std::string& oid, std::optional tag, std::uint64_t ofs, std::uint64_t max_entries, std::vector* entries, @@ -226,7 +226,7 @@ int list_part(lr::IoCtx& ioctx, const std::string& oid, encode(lp, in); cb::list bl; op.exec(fifo::op::CLASS, fifo::op::LIST_PART, in, &bl, nullptr); - auto r = rgw_rados_operate(ioctx, oid, &op, nullptr, y); + auto r = rgw_rados_operate(dpp, ioctx, oid, &op, nullptr, y); if (r >= 0) try { fifo::op::list_part_reply reply; auto iter = bl.cbegin(); @@ -236,13 +236,13 @@ int list_part(lr::IoCtx& ioctx, const std::string& oid, if (full_part) *full_part = reply.full_part; if (ptag) *ptag = reply.tag; } catch (const cb::error& err) { - lderr(static_cast(ioctx.cct())) + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " decode failed: " << err.what() << " tid=" << tid << dendl; r = from_error_code(err.code()); } else if (r != -ENOENT) { - lderr(static_cast(ioctx.cct())) + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " fifo::op::LIST_PART failed r=" << r << " tid=" << tid << dendl; @@ -314,7 
+314,7 @@ lr::ObjectReadOperation list_part(CephContext* cct, return op; } -int get_part_info(lr::IoCtx& ioctx, const std::string& oid, +int get_part_info(const DoutPrefixProvider *dpp, lr::IoCtx& ioctx, const std::string& oid, fifo::part_header* header, std::uint64_t tid, optional_yield y) { @@ -325,20 +325,20 @@ int get_part_info(lr::IoCtx& ioctx, const std::string& oid, cb::list bl; encode(gpi, in); op.exec(fifo::op::CLASS, fifo::op::GET_PART_INFO, in, &bl, nullptr); - auto r = rgw_rados_operate(ioctx, oid, &op, nullptr, y); + auto r = rgw_rados_operate(dpp, ioctx, oid, &op, nullptr, y); if (r >= 0) try { fifo::op::get_part_info_reply reply; auto iter = bl.cbegin(); decode(reply, iter); if (header) *header = std::move(reply.header); } catch (const cb::error& err) { - lderr(static_cast(ioctx.cct())) + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " decode failed: " << err.what() << " tid=" << tid << dendl; r = from_error_code(err.code()); } else { - lderr(static_cast(ioctx.cct())) + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " fifo::op::GET_PART_INFO failed r=" << r << " tid=" << tid << dendl; @@ -457,16 +457,16 @@ int FIFO::apply_update(fifo::info* info, return {}; } -int FIFO::_update_meta(const fifo::update& update, +int FIFO::_update_meta(const DoutPrefixProvider *dpp, const fifo::update& update, fifo::objv version, bool* pcanceled, std::uint64_t tid, optional_yield y) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; lr::ObjectWriteOperation op; bool canceled = false; update_meta(&op, info.version, update); - auto r = rgw_rados_operate(ioctx, oid, &op, y); + auto r = rgw_rados_operate(dpp, ioctx, oid, &op, y); if (r >= 0 || r == -ECANCELED) { canceled = (r == -ECANCELED); if (!canceled) { @@ -474,17 +474,17 @@ int FIFO::_update_meta(const fifo::update& update, if (r < 0) canceled = true; } if (canceled) { - r 
= read_meta(tid, y); + r = read_meta(dpp, tid, y); canceled = r < 0 ? false : true; } } if (pcanceled) *pcanceled = canceled; if (canceled) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " canceled: tid=" << tid << dendl; } if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " returning error: r=" << r << " tid=" << tid << dendl; } return r; @@ -497,27 +497,27 @@ struct Updater : public Completion { bool reread = false; bool* pcanceled = nullptr; std::uint64_t tid; - Updater(FIFO* fifo, lr::AioCompletion* super, + Updater(const DoutPrefixProvider *dpp, FIFO* fifo, lr::AioCompletion* super, const fifo::update& update, fifo::objv version, bool* pcanceled, std::uint64_t tid) - : Completion(super), fifo(fifo), update(update), version(version), + : Completion(dpp, super), fifo(fifo), update(update), version(version), pcanceled(pcanceled) {} - void handle(Ptr&& p, int r) { - ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) { + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; if (reread) handle_reread(std::move(p), r); else - handle_update(std::move(p), r); + handle_update(dpp, std::move(p), r); } - void handle_update(Ptr&& p, int r) { - ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + void handle_update(const DoutPrefixProvider *dpp, Ptr&& p, int r) { + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " handling async update_meta: tid=" << tid << dendl; if (r < 0 && r != -ECANCELED) { - lderr(fifo->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " update failed: r=" << r << " tid=" << tid << dendl; complete(std::move(p), r); return; @@ -526,7 +526,7 @@ struct Updater : public Completion { if 
(!canceled) { int r = fifo->apply_update(&fifo->info, version, update, tid); if (r < 0) { - ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " update failed, marking canceled: r=" << r << " tid=" << tid << dendl; canceled = true; @@ -534,12 +534,12 @@ struct Updater : public Completion { } if (canceled) { reread = true; - fifo->read_meta(tid, call(std::move(p))); + fifo->read_meta(dpp, tid, call(std::move(p))); return; } if (pcanceled) *pcanceled = false; - ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " completing: tid=" << tid << dendl; complete(std::move(p), 0); } @@ -565,24 +565,24 @@ struct Updater : public Completion { } }; -void FIFO::_update_meta(const fifo::update& update, +void FIFO::_update_meta(const DoutPrefixProvider *dpp, const fifo::update& update, fifo::objv version, bool* pcanceled, std::uint64_t tid, lr::AioCompletion* c) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; lr::ObjectWriteOperation op; update_meta(&op, info.version, update); - auto updater = std::make_unique(this, c, update, version, pcanceled, + auto updater = std::make_unique(dpp, this, c, update, version, pcanceled, tid); auto r = ioctx.aio_operate(oid, Updater::call(std::move(updater)), &op); assert(r >= 0); } -int FIFO::create_part(int64_t part_num, std::string_view tag, std::uint64_t tid, +int FIFO::create_part(const DoutPrefixProvider *dpp, int64_t part_num, std::string_view tag, std::uint64_t tid, optional_yield y) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; lr::ObjectWriteOperation op; op.create(false); /* We don't need exclusivity, part_init ensures @@ -591,37 +591,37 @@ int 
FIFO::create_part(int64_t part_num, std::string_view tag, std::uint64_t tid, part_init(&op, tag, info.params); auto oid = info.part_oid(part_num); l.unlock(); - auto r = rgw_rados_operate(ioctx, oid, &op, y); + auto r = rgw_rados_operate(dpp, ioctx, oid, &op, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " part_init failed: r=" << r << " tid=" << tid << dendl; } return r; } -int FIFO::remove_part(int64_t part_num, std::string_view tag, std::uint64_t tid, +int FIFO::remove_part(const DoutPrefixProvider *dpp, int64_t part_num, std::string_view tag, std::uint64_t tid, optional_yield y) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; lr::ObjectWriteOperation op; op.remove(); std::unique_lock l(m); auto oid = info.part_oid(part_num); l.unlock(); - auto r = rgw_rados_operate(ioctx, oid, &op, y); + auto r = rgw_rados_operate(dpp, ioctx, oid, &op, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " remove failed: r=" << r << " tid=" << tid << dendl; } return r; } -int FIFO::process_journal(std::uint64_t tid, optional_yield y) +int FIFO::process_journal(const DoutPrefixProvider *dpp, std::uint64_t tid, optional_yield y) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; std::vector processed; @@ -634,12 +634,12 @@ int FIFO::process_journal(std::uint64_t tid, optional_yield y) int r = 0; for (auto& [n, entry] : tmpjournal) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " processing entry: entry=" << entry << " tid=" << tid << dendl; switch (entry.op) { case fifo::journal_entry::Op::create: - r = 
create_part(entry.part_num, entry.part_tag, tid, y); + r = create_part(dpp, entry.part_num, entry.part_tag, tid, y); if (entry.part_num > new_max) { new_max = entry.part_num; } @@ -651,21 +651,21 @@ int FIFO::process_journal(std::uint64_t tid, optional_yield y) } break; case fifo::journal_entry::Op::remove: - r = remove_part(entry.part_num, entry.part_tag, tid, y); + r = remove_part(dpp, entry.part_num, entry.part_tag, tid, y); if (r == -ENOENT) r = 0; if (entry.part_num >= new_tail) { new_tail = entry.part_num + 1; } break; default: - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " unknown journaled op: entry=" << entry << " tid=" << tid << dendl; return -EIO; } if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " processing entry failed: entry=" << entry << " r=" << r << " tid=" << tid << dendl; return -r; @@ -678,7 +678,7 @@ int FIFO::process_journal(std::uint64_t tid, optional_yield y) bool canceled = true; for (auto i = 0; canceled && i < MAX_RACE_RETRIES; ++i) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " postprocessing: i=" << i << " tid=" << tid << dendl; std::optional tail_part_num; @@ -695,7 +695,7 @@ int FIFO::process_journal(std::uint64_t tid, optional_yield y) if (processed.empty() && !tail_part_num && !max_part_num) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " nothing to update any more: i=" << i << " tid=" << tid << dendl; canceled = false; @@ -704,9 +704,9 @@ int FIFO::process_journal(std::uint64_t tid, optional_yield y) auto u = fifo::update().tail_part_num(tail_part_num) .head_part_num(head_part_num).max_push_part_num(max_part_num) .journal_entries_rm(processed); - r = _update_meta(u, objv, &canceled, tid, y); + r = 
_update_meta(dpp, u, objv, &canceled, tid, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " _update_meta failed: update=" << u << " r=" << r << " tid=" << tid << dendl; break; @@ -715,7 +715,7 @@ int FIFO::process_journal(std::uint64_t tid, optional_yield y) if (canceled) { std::vector new_processed; std::unique_lock l(m); - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " update canceled, retrying: i=" << i << " tid=" << tid << dendl; for (auto& e : processed) { @@ -731,31 +731,31 @@ int FIFO::process_journal(std::uint64_t tid, optional_yield y) } } if (r == 0 && canceled) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " canceled too many times, giving up: tid=" << tid << dendl; r = -ECANCELED; } if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " failed, r=: " << r << " tid=" << tid << dendl; } return r; } -int FIFO::_prepare_new_part(bool is_head, std::uint64_t tid, optional_yield y) +int FIFO::_prepare_new_part(const DoutPrefixProvider *dpp, bool is_head, std::uint64_t tid, optional_yield y) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; std::unique_lock l(m); std::vector jentries = { info.next_journal_entry(generate_tag()) }; if (info.journal.find(jentries.front().part_num) != info.journal.end()) { l.unlock(); - ldout(cct, 5) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 5) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " new part journaled, but not processed: tid=" << tid << dendl; - auto r = process_journal(tid, y); + auto r = process_journal(dpp, tid, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << 
__LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " process_journal failed: r=" << r << " tid=" << tid << dendl; } return r; @@ -764,7 +764,7 @@ int FIFO::_prepare_new_part(bool is_head, std::uint64_t tid, optional_yield y) auto version = info.version; if (is_head) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " needs new head: tid=" << tid << dendl; auto new_head_jentry = jentries.front(); new_head_jentry.op = fifo::journal_entry::Op::set_head; @@ -777,23 +777,23 @@ int FIFO::_prepare_new_part(bool is_head, std::uint64_t tid, optional_yield y) bool canceled = true; for (auto i = 0; canceled && i < MAX_RACE_RETRIES; ++i) { canceled = false; - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " updating metadata: i=" << i << " tid=" << tid << dendl; auto u = fifo::update{}.journal_entries_add(jentries); - r = _update_meta(u, version, &canceled, tid, y); + r = _update_meta(dpp, u, version, &canceled, tid, y); if (r >= 0 && canceled) { std::unique_lock l(m); auto found = (info.journal.find(jentries.front().part_num) != info.journal.end()); if ((info.max_push_part_num >= jentries.front().part_num && info.head_part_num >= new_head_part_num)) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " raced, but journaled and processed: i=" << i << " tid=" << tid << dendl; return 0; } if (found) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " raced, journaled but not processed: i=" << i << " tid=" << tid << dendl; canceled = false; @@ -801,28 +801,28 @@ int FIFO::_prepare_new_part(bool is_head, std::uint64_t tid, optional_yield y) l.unlock(); } if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << 
__PRETTY_FUNCTION__ << ":" << __LINE__ << " _update_meta failed: update=" << u << " r=" << r << " tid=" << tid << dendl; return r; } } if (canceled) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " canceled too many times, giving up: tid=" << tid << dendl; return -ECANCELED; } - r = process_journal(tid, y); + r = process_journal(dpp, tid, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " process_journal failed: r=" << r << " tid=" << tid << dendl; } return r; } -int FIFO::_prepare_new_head(std::uint64_t tid, optional_yield y) +int FIFO::_prepare_new_head(const DoutPrefixProvider *dpp, std::uint64_t tid, optional_yield y) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; std::unique_lock l(m); std::int64_t new_head_num = info.head_part_num + 1; @@ -832,18 +832,18 @@ int FIFO::_prepare_new_head(std::uint64_t tid, optional_yield y) int r = 0; if (max_push_part_num < new_head_num) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " need new part: tid=" << tid << dendl; - r = _prepare_new_part(true, tid, y); + r = _prepare_new_part(dpp, true, tid, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " _prepare_new_part failed: r=" << r << " tid=" << tid << dendl; return r; } std::unique_lock l(m); if (info.max_push_part_num < new_head_num) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " inconsistency, push part less than head part: " << " tid=" << tid << dendl; return -EIO; @@ -854,12 +854,12 @@ int FIFO::_prepare_new_head(std::uint64_t tid, optional_yield y) 
bool canceled = true; for (auto i = 0; canceled && i < MAX_RACE_RETRIES; ++i) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " updating head: i=" << i << " tid=" << tid << dendl; auto u = fifo::update{}.head_part_num(new_head_num); - r = _update_meta(u, version, &canceled, tid, y); + r = _update_meta(dpp, u, version, &canceled, tid, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " _update_meta failed: update=" << u << " r=" << r << " tid=" << tid << dendl; return r; @@ -869,14 +869,14 @@ int FIFO::_prepare_new_head(std::uint64_t tid, optional_yield y) version = info.version; l.unlock(); if (canceled && (head_part_num >= new_head_num)) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " raced, but completed by the other caller: i=" << i << " tid=" << tid << dendl; canceled = false; } } if (canceled) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " canceled too many times, giving up: tid=" << tid << dendl; return -ECANCELED; } @@ -891,18 +891,18 @@ struct NewPartPreparer : public Completion { bool canceled = false; uint64_t tid; - NewPartPreparer(FIFO* f, lr::AioCompletion* super, + NewPartPreparer(const DoutPrefixProvider *dpp, FIFO* f, lr::AioCompletion* super, std::vector jentries, std::int64_t new_head_part_num, std::uint64_t tid) - : Completion(super), f(f), jentries(std::move(jentries)), + : Completion(dpp, super), f(f), jentries(std::move(jentries)), new_head_part_num(new_head_part_num), tid(tid) {} - void handle(Ptr&& p, int r) { - ldout(f->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) { + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid 
<< dendl; if (r < 0) { - lderr(f->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " _update_meta failed: r=" << r << " tid=" << tid << dendl; complete(std::move(p), r); @@ -919,7 +919,7 @@ struct NewPartPreparer : public Completion { l.unlock(); if ((max_push_part_num >= jentries.front().part_num && head_part_num >= new_head_part_num)) { - ldout(f->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " raced, but journaled and processed: i=" << i << " tid=" << tid << dendl; complete(std::move(p), 0); @@ -931,34 +931,34 @@ struct NewPartPreparer : public Completion { } if (!found) { ++i; - f->_update_meta(fifo::update{} + f->_update_meta(dpp, fifo::update{} .journal_entries_add(jentries), version, &canceled, tid, call(std::move(p))); return; } else { - ldout(f->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " raced, journaled but not processed: i=" << i << " tid=" << tid << dendl; canceled = false; } // Fall through. We still need to process the journal. 
} - f->process_journal(tid, super()); + f->process_journal(dpp, tid, super()); return; } }; -void FIFO::_prepare_new_part(bool is_head, std::uint64_t tid, +void FIFO::_prepare_new_part(const DoutPrefixProvider *dpp, bool is_head, std::uint64_t tid, lr::AioCompletion* c) { std::unique_lock l(m); std::vector jentries = { info.next_journal_entry(generate_tag()) }; if (info.journal.find(jentries.front().part_num) != info.journal.end()) { l.unlock(); - ldout(cct, 5) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 5) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " new part journaled, but not processed: tid=" << tid << dendl; - process_journal(tid, c); + process_journal(dpp, tid, c); return; } std::int64_t new_head_part_num = info.head_part_num; @@ -972,10 +972,10 @@ void FIFO::_prepare_new_part(bool is_head, std::uint64_t tid, } l.unlock(); - auto n = std::make_unique(this, c, jentries, + auto n = std::make_unique(dpp, this, c, jentries, new_head_part_num, tid); auto np = n.get(); - _update_meta(fifo::update{}.journal_entries_add(jentries), version, + _update_meta(dpp, fifo::update{}.journal_entries_add(jentries), version, &np->canceled, tid, NewPartPreparer::call(std::move(n))); } @@ -987,16 +987,16 @@ struct NewHeadPreparer : public Completion { bool canceled = false; std::uint64_t tid; - NewHeadPreparer(FIFO* f, lr::AioCompletion* super, + NewHeadPreparer(const DoutPrefixProvider *dpp, FIFO* f, lr::AioCompletion* super, bool newpart, std::int64_t new_head_num, std::uint64_t tid) - : Completion(super), f(f), newpart(newpart), new_head_num(new_head_num), + : Completion(dpp, super), f(f), newpart(newpart), new_head_num(new_head_num), tid(tid) {} - void handle(Ptr&& p, int r) { + void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) { if (newpart) handle_newpart(std::move(p), r); else - handle_update(std::move(p), r); + handle_update(dpp, std::move(p), r); } void handle_newpart(Ptr&& p, int r) { @@ -1020,14 +1020,14 @@ struct NewHeadPreparer : public 
Completion { } } - void handle_update(Ptr&& p, int r) { + void handle_update(const DoutPrefixProvider *dpp, Ptr&& p, int r) { std::unique_lock l(f->m); auto head_part_num = f->info.head_part_num; auto version = f->info.version; l.unlock(); if (r < 0) { - lderr(f->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " _update_meta failed: r=" << r << " tid=" << tid << dendl; complete(std::move(p), r); @@ -1035,7 +1035,7 @@ struct NewHeadPreparer : public Completion { } if (canceled) { if (i >= MAX_RACE_RETRIES) { - lderr(f->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " canceled too many times, giving up: tid=" << tid << dendl; complete(std::move(p), -ECANCELED); return; @@ -1045,23 +1045,23 @@ struct NewHeadPreparer : public Completion { if (head_part_num < new_head_num) { canceled = false; ++i; - ldout(f->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " updating head: i=" << i << " tid=" << tid << dendl; - f->_update_meta(fifo::update{}.head_part_num(new_head_num), + f->_update_meta(dpp, fifo::update{}.head_part_num(new_head_num), version, &this->canceled, tid, call(std::move(p))); return; } } - ldout(f->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " succeeded : i=" << i << " tid=" << tid << dendl; complete(std::move(p), 0); return; } }; -void FIFO::_prepare_new_head(std::uint64_t tid, lr::AioCompletion* c) +void FIFO::_prepare_new_head(const DoutPrefixProvider *dpp, std::uint64_t tid, lr::AioCompletion* c) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; std::unique_lock l(m); int64_t new_head_num = info.head_part_num + 1; @@ -1070,26 +1070,26 @@ void 
FIFO::_prepare_new_head(std::uint64_t tid, lr::AioCompletion* c) l.unlock(); if (max_push_part_num < new_head_num) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " need new part: tid=" << tid << dendl; - auto n = std::make_unique(this, c, true, new_head_num, + auto n = std::make_unique(dpp, this, c, true, new_head_num, tid); - _prepare_new_part(true, tid, NewHeadPreparer::call(std::move(n))); + _prepare_new_part(dpp, true, tid, NewHeadPreparer::call(std::move(n))); } else { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " updating head: tid=" << tid << dendl; - auto n = std::make_unique(this, c, false, new_head_num, + auto n = std::make_unique(dpp, this, c, false, new_head_num, tid); auto np = n.get(); - _update_meta(fifo::update{}.head_part_num(new_head_num), version, + _update_meta(dpp, fifo::update{}.head_part_num(new_head_num), version, &np->canceled, tid, NewHeadPreparer::call(std::move(n))); } } -int FIFO::push_entries(const std::deque& data_bufs, +int FIFO::push_entries(const DoutPrefixProvider *dpp, const std::deque& data_bufs, std::uint64_t tid, optional_yield y) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; std::unique_lock l(m); auto head_part_num = info.head_part_num; @@ -1097,9 +1097,9 @@ int FIFO::push_entries(const std::deque& data_bufs, const auto part_oid = info.part_oid(head_part_num); l.unlock(); - auto r = push_part(ioctx, part_oid, tag, data_bufs, tid, y); + auto r = push_part(dpp, ioctx, part_oid, tag, data_bufs, tid, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " push_part failed: r=" << r << " tid=" << tid << dendl; } return r; @@ -1117,21 +1117,21 @@ void FIFO::push_entries(const 
std::deque& data_bufs, push_part(ioctx, part_oid, tag, data_bufs, tid, c); } -int FIFO::trim_part(int64_t part_num, uint64_t ofs, +int FIFO::trim_part(const DoutPrefixProvider *dpp, int64_t part_num, uint64_t ofs, std::optional tag, bool exclusive, std::uint64_t tid, optional_yield y) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; lr::ObjectWriteOperation op; std::unique_lock l(m); const auto part_oid = info.part_oid(part_num); l.unlock(); rgw::cls::fifo::trim_part(&op, tag, ofs, exclusive); - auto r = rgw_rados_operate(ioctx, part_oid, &op, y); + auto r = rgw_rados_operate(dpp, ioctx, part_oid, &op, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " trim_part failed: r=" << r << " tid=" << tid << dendl; } return 0; @@ -1153,22 +1153,21 @@ void FIFO::trim_part(int64_t part_num, uint64_t ofs, ceph_assert(r >= 0); } -int FIFO::open(lr::IoCtx ioctx, std::string oid, std::unique_ptr* fifo, +int FIFO::open(const DoutPrefixProvider *dpp, lr::IoCtx ioctx, std::string oid, std::unique_ptr* fifo, optional_yield y, std::optional objv, bool probe) { - auto cct = static_cast(ioctx.cct()); - ldout(cct, 20) + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering" << dendl; fifo::info info; std::uint32_t size; std::uint32_t over; - int r = get_meta(ioctx, std::move(oid), objv, &info, &size, &over, 0, y, + int r = get_meta(dpp, ioctx, std::move(oid), objv, &info, &size, &over, 0, y, probe); if (r < 0) { if (!(probe && (r == -ENOENT || r == -ENODATA))) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " get_meta failed: r=" << r << dendl; } return r; @@ -1180,12 +1179,12 @@ int FIFO::open(lr::IoCtx ioctx, std::string oid, std::unique_ptr* fifo, // If there are journal entries, process 
them, in case // someone crashed mid-transaction. if (!info.journal.empty()) { - ldout(cct, 20) + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " processing leftover journal" << dendl; - r = f->process_journal(0, y); + r = f->process_journal(dpp, 0, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " process_journal failed: r=" << r << dendl; return r; } @@ -1194,39 +1193,38 @@ int FIFO::open(lr::IoCtx ioctx, std::string oid, std::unique_ptr* fifo, return 0; } -int FIFO::create(lr::IoCtx ioctx, std::string oid, std::unique_ptr* fifo, +int FIFO::create(const DoutPrefixProvider *dpp, lr::IoCtx ioctx, std::string oid, std::unique_ptr* fifo, optional_yield y, std::optional objv, std::optional oid_prefix, bool exclusive, std::uint64_t max_part_size, std::uint64_t max_entry_size) { - auto cct = static_cast(ioctx.cct()); - ldout(cct, 20) + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering" << dendl; lr::ObjectWriteOperation op; create_meta(&op, oid, objv, oid_prefix, exclusive, max_part_size, max_entry_size); - auto r = rgw_rados_operate(ioctx, oid, &op, y); + auto r = rgw_rados_operate(dpp, ioctx, oid, &op, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " create_meta failed: r=" << r << dendl; return r; } - r = open(std::move(ioctx), std::move(oid), fifo, y, objv); + r = open(dpp, std::move(ioctx), std::move(oid), fifo, y, objv); return r; } -int FIFO::read_meta(std::uint64_t tid, optional_yield y) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ +int FIFO::read_meta(const DoutPrefixProvider *dpp, std::uint64_t tid, optional_yield y) { + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; fifo::info _info; std::uint32_t _phs; std::uint32_t _peo; - auto r = get_meta(ioctx, oid, nullopt, &_info, 
&_phs, &_peo, tid, y); + auto r = get_meta(dpp, ioctx, oid, nullopt, &_info, &_phs, &_peo, tid, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " get_meta failed: r=" << r << " tid=" << tid << dendl; return r; } @@ -1240,23 +1238,22 @@ int FIFO::read_meta(std::uint64_t tid, optional_yield y) { return 0; } -int FIFO::read_meta(optional_yield y) { +int FIFO::read_meta(const DoutPrefixProvider *dpp, optional_yield y) { std::unique_lock l(m); auto tid = ++next_tid; l.unlock(); - return read_meta(tid, y); + return read_meta(dpp, tid, y); } struct Reader : public Completion { FIFO* fifo; cb::list bl; std::uint64_t tid; - Reader(FIFO* fifo, lr::AioCompletion* super, std::uint64_t tid) - : Completion(super), fifo(fifo), tid(tid) {} + Reader(const DoutPrefixProvider *dpp, FIFO* fifo, lr::AioCompletion* super, std::uint64_t tid) + : Completion(dpp, super), fifo(fifo), tid(tid) {} - void handle(Ptr&& p, int r) { - auto cct = fifo->cct; - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) { + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; if (r >= 0) try { fifo::op::get_meta_reply reply; @@ -1269,12 +1266,12 @@ struct Reader : public Completion { fifo->part_entry_overhead = reply.part_entry_overhead; } } catch (const cb::error& err) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " failed to decode response err=" << err.what() << " tid=" << tid << dendl; r = from_error_code(err.code()); } else { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " read_meta failed r=" << r << " tid=" << tid << dendl; } @@ -1282,15 +1279,15 @@ struct Reader : public Completion { } }; -void FIFO::read_meta(std::uint64_t tid, lr::AioCompletion* c) 
+void FIFO::read_meta(const DoutPrefixProvider *dpp, std::uint64_t tid, lr::AioCompletion* c) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; lr::ObjectReadOperation op; fifo::op::get_meta gm; cb::list in; encode(gm, in); - auto reader = std::make_unique(this, c, tid); + auto reader = std::make_unique(dpp, this, c, tid); auto rp = reader.get(); auto r = ioctx.aio_exec(oid, Reader::call(std::move(reader)), fifo::op::CLASS, fifo::op::GET_META, in, &rp->bl); @@ -1305,25 +1302,25 @@ std::pair FIFO::get_part_layout_info() const { return {part_header_size, part_entry_overhead}; } -int FIFO::push(const cb::list& bl, optional_yield y) { - return push(std::vector{ bl }, y); +int FIFO::push(const DoutPrefixProvider *dpp, const cb::list& bl, optional_yield y) { + return push(dpp, std::vector{ bl }, y); } -void FIFO::push(const cb::list& bl, lr::AioCompletion* c) { - push(std::vector{ bl }, c); +void FIFO::push(const DoutPrefixProvider *dpp, const cb::list& bl, lr::AioCompletion* c) { + push(dpp, std::vector{ bl }, c); } -int FIFO::push(const std::vector& data_bufs, optional_yield y) +int FIFO::push(const DoutPrefixProvider *dpp, const std::vector& data_bufs, optional_yield y) { std::unique_lock l(m); auto tid = ++next_tid; auto max_entry_size = info.params.max_entry_size; auto need_new_head = info.need_new_head(); l.unlock(); - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; if (data_bufs.empty()) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " empty push, returning success tid=" << tid << dendl; return 0; } @@ -1331,7 +1328,7 @@ int FIFO::push(const std::vector& data_bufs, optional_yield y) // Validate sizes for (const auto& bl : data_bufs) { if (bl.length() > max_entry_size) 
{ - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entry bigger than max_entry_size tid=" << tid << dendl; return -E2BIG; } @@ -1339,11 +1336,11 @@ int FIFO::push(const std::vector& data_bufs, optional_yield y) int r = 0; if (need_new_head) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " need new head tid=" << tid << dendl; - r = _prepare_new_head(tid, y); + r = _prepare_new_head(dpp, tid, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " _prepare_new_head failed: r=" << r << " tid=" << tid << dendl; return r; @@ -1358,7 +1355,7 @@ int FIFO::push(const std::vector& data_bufs, optional_yield y) bool canceled = true; while ((!remaining.empty() || !batch.empty()) && (retries <= MAX_RACE_RETRIES)) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " preparing push: remaining=" << remaining.size() << " batch=" << batch.size() << " retries=" << retries << " tid=" << tid << dendl; @@ -1377,21 +1374,21 @@ int FIFO::push(const std::vector& data_bufs, optional_yield y) batch.push_back(std::move(remaining.front())); remaining.pop_front(); } - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " prepared push: remaining=" << remaining.size() << " batch=" << batch.size() << " retries=" << retries << " batch_len=" << batch_len << " tid=" << tid << dendl; - auto r = push_entries(batch, tid, y); + auto r = push_entries(dpp, batch, tid, y); if (r == -ERANGE) { canceled = true; ++retries; - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " need new head tid=" << tid << dendl; - r = _prepare_new_head(tid, y); + r = 
_prepare_new_head(dpp, tid, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " prepare_new_head failed: r=" << r << " tid=" << tid << dendl; return r; @@ -1400,7 +1397,7 @@ int FIFO::push(const std::vector& data_bufs, optional_yield y) continue; } if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " push_entries failed: r=" << r << " tid=" << tid << dendl; return r; @@ -1419,7 +1416,7 @@ int FIFO::push(const std::vector& data_bufs, optional_yield y) } } if (canceled) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " canceled too many times, giving up: tid=" << tid << dendl; return -ECANCELED; } @@ -1485,21 +1482,21 @@ struct Pusher : public Completion { f->push_entries(batch, tid, call(std::move(p))); } - void new_head(Ptr&& p) { + void new_head(const DoutPrefixProvider *dpp, Ptr&& p) { new_heading = true; - f->_prepare_new_head(tid, call(std::move(p))); + f->_prepare_new_head(dpp, tid, call(std::move(p))); } - void handle(Ptr&& p, int r) { + void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) { if (!new_heading) { if (r == -ERANGE) { - ldout(f->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " need new head tid=" << tid << dendl; - new_head(std::move(p)); + new_head(dpp, std::move(p)); return; } if (r < 0) { - lderr(f->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " push_entries failed: r=" << r << " tid=" << tid << dendl; complete(std::move(p), r); @@ -1509,7 +1506,7 @@ struct Pusher : public Completion { prep_then_push(std::move(p), r); } else { if (r < 0) { - lderr(f->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << 
" prepare_new_head failed: r=" << r << " tid=" << tid << dendl; complete(std::move(p), r); @@ -1543,13 +1540,13 @@ struct Pusher : public Completion { } } - Pusher(FIFO* f, std::deque&& remaining, + Pusher(const DoutPrefixProvider *dpp, FIFO* f, std::deque&& remaining, std::uint64_t tid, lr::AioCompletion* super) - : Completion(super), f(f), remaining(std::move(remaining)), + : Completion(dpp, super), f(f), remaining(std::move(remaining)), tid(tid) {} }; -void FIFO::push(const std::vector& data_bufs, +void FIFO::push(const DoutPrefixProvider *dpp, const std::vector& data_bufs, lr::AioCompletion* c) { std::unique_lock l(m); @@ -1557,14 +1554,14 @@ void FIFO::push(const std::vector& data_bufs, auto max_entry_size = info.params.max_entry_size; auto need_new_head = info.need_new_head(); l.unlock(); - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; - auto p = std::make_unique(this, std::deque(data_bufs.begin(), data_bufs.end()), + auto p = std::make_unique(dpp, this, std::deque(data_bufs.begin(), data_bufs.end()), tid, c); // Validate sizes for (const auto& bl : data_bufs) { if (bl.length() > max_entry_size) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entry bigger than max_entry_size tid=" << tid << dendl; Pusher::complete(std::move(p), -E2BIG); return; @@ -1572,22 +1569,22 @@ void FIFO::push(const std::vector& data_bufs, } if (data_bufs.empty() ) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " empty push, returning success tid=" << tid << dendl; Pusher::complete(std::move(p), 0); return; } if (need_new_head) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " need new head tid=" << tid << dendl; - p->new_head(std::move(p)); + 
p->new_head(dpp, std::move(p)); } else { p->prep_then_push(std::move(p), 0); } } -int FIFO::list(int max_entries, +int FIFO::list(const DoutPrefixProvider *dpp, int max_entries, std::optional markstr, std::vector* presult, bool* pmore, optional_yield y) @@ -1596,13 +1593,13 @@ int FIFO::list(int max_entries, auto tid = ++next_tid; std::int64_t part_num = info.tail_part_num; l.unlock(); - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; std::uint64_t ofs = 0; if (markstr) { auto marker = to_marker(*markstr); if (!marker) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " invalid marker string: " << markstr << " tid= "<< tid << dendl; return -EINVAL; @@ -1618,7 +1615,7 @@ int FIFO::list(int max_entries, std::vector entries; int r = 0; while (max_entries > 0) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " max_entries=" << max_entries << " tid=" << tid << dendl; bool part_more = false; bool part_full = false; @@ -1627,22 +1624,22 @@ int FIFO::list(int max_entries, auto part_oid = info.part_oid(part_num); l.unlock(); - r = list_part(ioctx, part_oid, {}, ofs, max_entries, &entries, + r = list_part(dpp, ioctx, part_oid, {}, ofs, max_entries, &entries, &part_more, &part_full, nullptr, tid, y); if (r == -ENOENT) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " missing part, rereading metadata" << " tid= "<< tid << dendl; - r = read_meta(tid, y); + r = read_meta(dpp, tid, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " read_meta failed: r=" << r << " tid= "<< tid << dendl; return r; } if (part_num < info.tail_part_num) { /* raced with trim? 
restart */ - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " raced with trim, restarting: tid=" << tid << dendl; max_entries += result.size(); result.clear(); @@ -1652,7 +1649,7 @@ int FIFO::list(int max_entries, ofs = 0; continue; } - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " assuming part was not written yet, so end of data: " << "tid=" << tid << dendl; more = false; @@ -1660,7 +1657,7 @@ int FIFO::list(int max_entries, break; } if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " list_entries failed: r=" << r << " tid= "<< tid << dendl; return r; @@ -1682,7 +1679,7 @@ int FIFO::list(int max_entries, } if (!part_full) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " head part is not full, so we can assume we're done: " << "tid=" << tid << dendl; break; @@ -1699,7 +1696,7 @@ int FIFO::list(int max_entries, return 0; } -int FIFO::trim(std::string_view markstr, bool exclusive, optional_yield y) +int FIFO::trim(const DoutPrefixProvider *dpp, std::string_view markstr, bool exclusive, optional_yield y) { bool overshoot = false; auto marker = to_marker(markstr); @@ -1714,7 +1711,7 @@ int FIFO::trim(std::string_view markstr, bool exclusive, optional_yield y) const auto max_part_size = info.params.max_part_size; if (part_num > hn) { l.unlock(); - auto r = read_meta(tid, y); + auto r = read_meta(dpp, tid, y); if (r < 0) { return r; } @@ -1731,27 +1728,27 @@ int FIFO::trim(std::string_view markstr, bool exclusive, optional_yield y) } auto pn = info.tail_part_num; l.unlock(); - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; int r = 0; while (pn < 
part_num) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " pn=" << pn << " tid=" << tid << dendl; std::unique_lock l(m); l.unlock(); - r = trim_part(pn, max_part_size, std::nullopt, false, tid, y); + r = trim_part(dpp, pn, max_part_size, std::nullopt, false, tid, y); if (r < 0 && r == -ENOENT) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " trim_part failed: r=" << r << " tid= "<< tid << dendl; return r; } ++pn; } - r = trim_part(part_num, ofs, std::nullopt, exclusive, tid, y); + r = trim_part(dpp, part_num, ofs, std::nullopt, exclusive, tid, y); if (r < 0 && r != -ENOENT) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " trim_part failed: r=" << r << " tid= "<< tid << dendl; return r; @@ -1766,16 +1763,16 @@ int FIFO::trim(std::string_view markstr, bool exclusive, optional_yield y) while ((tail_part_num < part_num) && canceled && (retries <= MAX_RACE_RETRIES)) { - r = _update_meta(fifo::update{}.tail_part_num(part_num), objv, &canceled, + r = _update_meta(dpp, fifo::update{}.tail_part_num(part_num), objv, &canceled, tid, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " _update_meta failed: r=" << r << " tid= "<< tid << dendl; return r; } if (canceled) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " canceled: retries=" << retries << " tid=" << tid << dendl; l.lock(); @@ -1786,7 +1783,7 @@ int FIFO::trim(std::string_view markstr, bool exclusive, optional_yield y) } } if (canceled) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " canceled too many times, giving up: tid=" << tid << dendl; return 
-EIO; } @@ -1806,20 +1803,19 @@ struct Trimmer : public Completion { bool overshoot = false; int retries = 0; - Trimmer(FIFO* fifo, std::int64_t part_num, std::uint64_t ofs, std::int64_t pn, + Trimmer(const DoutPrefixProvider *dpp, FIFO* fifo, std::int64_t part_num, std::uint64_t ofs, std::int64_t pn, bool exclusive, lr::AioCompletion* super, std::uint64_t tid) - : Completion(super), fifo(fifo), part_num(part_num), ofs(ofs), pn(pn), + : Completion(dpp, super), fifo(fifo), part_num(part_num), ofs(ofs), pn(pn), exclusive(exclusive), tid(tid) {} - void handle(Ptr&& p, int r) { - auto cct = fifo->cct; - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) { + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; if (reread) { reread = false; if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " read_meta failed: r=" << r << " tid=" << tid << dendl; complete(std::move(p), r); @@ -1841,7 +1837,7 @@ struct Trimmer : public Completion { } pn = tail_part_num; if (pn < part_num) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " pn=" << pn << " tid=" << tid << dendl; fifo->trim_part(pn++, max_part_size, std::nullopt, false, tid, call(std::move(p))); @@ -1859,7 +1855,7 @@ struct Trimmer : public Completion { } if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << (update ? 
" update_meta " : " trim ") << "failed: r=" << r << " tid=" << tid << dendl; complete(std::move(p), r); @@ -1867,11 +1863,11 @@ struct Trimmer : public Completion { } if (!update) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " handling preceding trim callback: tid=" << tid << dendl; retries = 0; if (pn < part_num) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " pn=" << pn << " tid=" << tid << dendl; std::unique_lock l(fifo->m); const auto max_part_size = fifo->info.params.max_part_size; @@ -1891,7 +1887,7 @@ struct Trimmer : public Completion { return; } - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " handling update-needed callback: tid=" << tid << dendl; std::unique_lock l(fifo->m); auto tail_part_num = fifo->info.tail_part_num; @@ -1900,13 +1896,13 @@ struct Trimmer : public Completion { if ((tail_part_num < part_num) && canceled) { if (retries > MAX_RACE_RETRIES) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " canceled too many times, giving up: tid=" << tid << dendl; complete(std::move(p), -EIO); return; } ++retries; - fifo->_update_meta(fifo::update{} + fifo->_update_meta(dpp, fifo::update{} .tail_part_num(part_num), objv, &canceled, tid, call(std::move(p))); } else { @@ -1915,7 +1911,7 @@ struct Trimmer : public Completion { } }; -void FIFO::trim(std::string_view markstr, bool exclusive, +void FIFO::trim(const DoutPrefixProvider *dpp, std::string_view markstr, bool exclusive, lr::AioCompletion* c) { auto marker = to_marker(markstr); auto realmark = marker.value_or(::rgw::cls::fifo::marker{}); @@ -1926,9 +1922,9 @@ void FIFO::trim(std::string_view markstr, bool exclusive, const auto part_oid = info.part_oid(pn); auto tid = ++next_tid; l.unlock(); 
- ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; - auto trimmer = std::make_unique(this, realmark.num, realmark.ofs, + auto trimmer = std::make_unique(dpp, this, realmark.num, realmark.ofs, pn, exclusive, c, tid); if (!marker) { Trimmer::complete(std::move(trimmer), -EINVAL); @@ -1938,11 +1934,11 @@ void FIFO::trim(std::string_view markstr, bool exclusive, auto ofs = marker->ofs; if (marker->num > hn) { trimmer->reread = true; - read_meta(tid, Trimmer::call(std::move(trimmer))); + read_meta(dpp, tid, Trimmer::call(std::move(trimmer))); return; } if (pn < marker->num) { - ldout(cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " pn=" << pn << " tid=" << tid << dendl; ofs = max_part_size; } else { @@ -1952,7 +1948,7 @@ void FIFO::trim(std::string_view markstr, bool exclusive, tid, Trimmer::call(std::move(trimmer))); } -int FIFO::get_part_info(int64_t part_num, +int FIFO::get_part_info(const DoutPrefixProvider *dpp, int64_t part_num, fifo::part_header* header, optional_yield y) { @@ -1960,9 +1956,9 @@ int FIFO::get_part_info(int64_t part_num, const auto part_oid = info.part_oid(part_num); auto tid = ++next_tid; l.unlock(); - auto r = rgw::cls::fifo::get_part_info(ioctx, part_oid, header, tid, y); + auto r = rgw::cls::fifo::get_part_info(dpp, ioctx, part_oid, header, tid, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " get_part_info failed: r=" << r << " tid=" << tid << dendl; } @@ -1989,13 +1985,13 @@ struct InfoGetter : Completion { std::uint64_t tid; bool headerread = false; - InfoGetter(FIFO* fifo, fu2::function f, + InfoGetter(const DoutPrefixProvider *dpp, FIFO* fifo, fu2::function f, std::uint64_t tid, lr::AioCompletion* super) - : Completion(super), fifo(fifo), f(std::move(f)), tid(tid) {} - void 
handle(Ptr&& p, int r) { + : Completion(dpp, super), fifo(fifo), f(std::move(f)), tid(tid) {} + void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) { if (!headerread) { if (r < 0) { - lderr(fifo->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " read_meta failed: r=" << r << " tid=" << tid << dendl; if (f) @@ -2007,7 +2003,7 @@ struct InfoGetter : Completion { auto info = fifo->meta(); auto hpn = info.head_part_num; if (hpn < 0) { - ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " no head, returning empty partinfo r=" << r << " tid=" << tid << dendl; if (f) @@ -2027,7 +2023,7 @@ struct InfoGetter : Completion { } if (r < 0) { - lderr(fifo->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " get_part_info failed: r=" << r << " tid=" << tid << dendl; } @@ -2039,15 +2035,15 @@ struct InfoGetter : Completion { } }; -void FIFO::get_head_info(fu2::unique_function f, lr::AioCompletion* c) { std::unique_lock l(m); auto tid = ++next_tid; l.unlock(); - auto ig = std::make_unique(this, std::move(f), tid, c); - read_meta(tid, InfoGetter::call(std::move(ig))); + auto ig = std::make_unique(dpp, this, std::move(f), tid, c); + read_meta(dpp, tid, InfoGetter::call(std::move(ig))); } struct JournalProcessor : public Completion { @@ -2102,12 +2098,12 @@ private: return; } - void finish_je(Ptr&& p, int r, + void finish_je(const DoutPrefixProvider *dpp, Ptr&& p, int r, const fifo::journal_entry& entry) { - ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; - ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " finishing entry: entry=" << entry << " tid=" << tid << dendl; @@ -2115,7 
+2111,7 @@ private: r = 0; if (r < 0) { - lderr(fifo->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " processing entry failed: entry=" << entry << " r=" << r << " tid=" << tid << dendl; complete(std::move(p), r); @@ -2142,26 +2138,26 @@ private: processed.push_back(entry); } ++iter; - process(std::move(p)); + process(dpp, std::move(p)); } - void postprocess(Ptr&& p) { - ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + void postprocess(const DoutPrefixProvider *dpp, Ptr&& p) { + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; if (processed.empty()) { - ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " nothing to update any more: race_retries=" << race_retries << " tid=" << tid << dendl; complete(std::move(p), 0); return; } - pp_run(std::move(p), 0, false); + pp_run(dpp, std::move(p), 0, false); } public: - JournalProcessor(FIFO* fifo, std::uint64_t tid, lr::AioCompletion* super) - : Completion(super), fifo(fifo), tid(tid) { + JournalProcessor(const DoutPrefixProvider *dpp, FIFO* fifo, std::uint64_t tid, lr::AioCompletion* super) + : Completion(dpp, super), fifo(fifo), tid(tid) { std::unique_lock l(fifo->m); journal = fifo->info.journal; iter = journal.begin(); @@ -2170,26 +2166,26 @@ public: new_max = fifo->info.max_push_part_num; } - void pp_run(Ptr&& p, int r, bool canceled) { - ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + void pp_run(const DoutPrefixProvider *dpp, Ptr&& p, int r, bool canceled) { + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; std::optional tail_part_num; std::optional head_part_num; std::optional max_part_num; if (r < 0) { - lderr(fifo->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " failed, r=: " 
<< r << " tid=" << tid << dendl; complete(std::move(p), r); } - ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " postprocessing: race_retries=" << race_retries << " tid=" << tid << dendl; if (!first_pp && r == 0 && !canceled) { - ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " nothing to update any more: race_retries=" << race_retries << " tid=" << tid << dendl; complete(std::move(p), 0); @@ -2200,13 +2196,13 @@ public: if (canceled) { if (race_retries >= MAX_RACE_RETRIES) { - lderr(fifo->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " canceled too many times, giving up: tid=" << tid << dendl; complete(std::move(p), -ECANCELED); return; } - ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " update canceled, retrying: race_retries=" << race_retries << " tid=" << tid << dendl; @@ -2245,14 +2241,14 @@ public: !tail_part_num && !max_part_num) { /* nothing to update anymore */ - ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " nothing to update any more: race_retries=" << race_retries << " tid=" << tid << dendl; complete(std::move(p), 0); return; } state = pp_callback; - fifo->_update_meta(fifo::update{} + fifo->_update_meta(dpp, fifo::update{} .tail_part_num(tail_part_num) .head_part_num(head_part_num) .max_push_part_num(max_part_num) @@ -2266,11 +2262,11 @@ public: JournalProcessor(JournalProcessor&&) = delete; JournalProcessor& operator =(JournalProcessor&&) = delete; - void process(Ptr&& p) { - ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + void process(const DoutPrefixProvider *dpp, Ptr&& p) { + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " 
entering: tid=" << tid << dendl; while (iter != journal.end()) { - ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " processing entry: entry=" << *iter << " tid=" << tid << dendl; const auto entry = iter->second; @@ -2296,21 +2292,21 @@ public: return; } } - postprocess(std::move(p)); + postprocess(dpp, std::move(p)); return; } - void handle(Ptr&& p, int r) { - ldout(fifo->cct, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ + void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) { + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; switch (state) { case entry_callback: - finish_je(std::move(p), r, iter->second); + finish_je(dpp, std::move(p), r, iter->second); return; case pp_callback: auto c = canceled; canceled = false; - pp_run(std::move(p), r, c); + pp_run(dpp, std::move(p), r, c); return; } @@ -2319,9 +2315,9 @@ public: }; -void FIFO::process_journal(std::uint64_t tid, lr::AioCompletion* c) { - auto p = std::make_unique(this, tid, c); - p->process(std::move(p)); +void FIFO::process_journal(const DoutPrefixProvider *dpp, std::uint64_t tid, lr::AioCompletion* c) { + auto p = std::make_unique(dpp, this, tid, c); + p->process(dpp, std::move(p)); } struct Lister : Completion { @@ -2350,10 +2346,10 @@ struct Lister : Completion { } public: - Lister(FIFO* f, std::int64_t part_num, std::uint64_t ofs, int max_entries, + Lister(const DoutPrefixProvider *dpp, FIFO* f, std::int64_t part_num, std::uint64_t ofs, int max_entries, std::vector* entries_out, bool* more_out, std::uint64_t tid, lr::AioCompletion* super) - : Completion(super), f(f), part_num(part_num), ofs(ofs), max_entries(max_entries), + : Completion(dpp, super), f(f), part_num(part_num), ofs(ofs), max_entries(max_entries), entries_out(entries_out), more_out(more_out), tid(tid) { result.reserve(max_entries); } @@ -2363,11 +2359,11 @@ public: Lister(Lister&&) = delete; Lister& 
operator =(Lister&&) = delete; - void handle(Ptr&& p, int r) { + void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) { if (read) handle_read(std::move(p), r); else - handle_list(std::move(p), r); + handle_list(dpp, std::move(p), r); } void list(Ptr&& p) { @@ -2415,7 +2411,7 @@ public: return; } - void handle_list(Ptr&& p, int r) { + void handle_list(const DoutPrefixProvider *dpp, Ptr&& p, int r) { if (r >= 0) r = r_out; r_out = 0; std::unique_lock l(f->m); @@ -2423,7 +2419,7 @@ public: l.unlock(); if (r == -ENOENT) { read = true; - f->read_meta(tid, call(std::move(p))); + f->read_meta(dpp, tid, call(std::move(p))); return; } if (r < 0) { @@ -2456,7 +2452,7 @@ public: } }; -void FIFO::list(int max_entries, +void FIFO::list(const DoutPrefixProvider *dpp, int max_entries, std::optional markstr, std::vector* out, bool* more, @@ -2476,7 +2472,7 @@ void FIFO::list(int max_entries, } } - auto ls = std::make_unique(this, part_num, ofs, max_entries, out, + auto ls = std::make_unique(dpp, this, part_num, ofs, max_entries, out, more, tid, c); if (markstr && !marker) { auto l = ls.get(); diff --git a/src/rgw/cls_fifo_legacy.h b/src/rgw/cls_fifo_legacy.h index 307abbb1989..21d4b72bb17 100644 --- a/src/rgw/cls_fifo_legacy.h +++ b/src/rgw/cls_fifo_legacy.h @@ -56,13 +56,12 @@ void create_meta(lr::ObjectWriteOperation* op, std::string_view id, bool exclusive = false, std::uint64_t max_part_size = default_max_part_size, std::uint64_t max_entry_size = default_max_entry_size); -int get_meta(lr::IoCtx& ioctx, const std::string& oid, +int get_meta(const DoutPrefixProvider *dpp, lr::IoCtx& ioctx, const std::string& oid, std::optional objv, fifo::info* info, std::uint32_t* part_header_size, std::uint32_t* part_entry_overhead, std::uint64_t tid, optional_yield y, bool probe = false); - struct marker { std::int64_t num = 0; std::uint64_t ofs = 0; @@ -134,27 +133,27 @@ class FIFO { const fifo::objv& objv, const fifo::update& update, std::uint64_t tid); - int _update_meta(const 
fifo::update& update, + int _update_meta(const DoutPrefixProvider *dpp, const fifo::update& update, fifo::objv version, bool* pcanceled, std::uint64_t tid, optional_yield y); - void _update_meta(const fifo::update& update, + void _update_meta(const DoutPrefixProvider *dpp, const fifo::update& update, fifo::objv version, bool* pcanceled, std::uint64_t tid, lr::AioCompletion* c); - int create_part(int64_t part_num, std::string_view tag, std::uint64_t tid, + int create_part(const DoutPrefixProvider *dpp, int64_t part_num, std::string_view tag, std::uint64_t tid, optional_yield y); - int remove_part(int64_t part_num, std::string_view tag, std::uint64_t tid, + int remove_part(const DoutPrefixProvider *dpp, int64_t part_num, std::string_view tag, std::uint64_t tid, optional_yield y); - int process_journal(std::uint64_t tid, optional_yield y); - void process_journal(std::uint64_t tid, lr::AioCompletion* c); - int _prepare_new_part(bool is_head, std::uint64_t tid, optional_yield y); - void _prepare_new_part(bool is_head, std::uint64_t tid, lr::AioCompletion* c); - int _prepare_new_head(std::uint64_t tid, optional_yield y); - void _prepare_new_head(std::uint64_t tid, lr::AioCompletion* c); - int push_entries(const std::deque& data_bufs, + int process_journal(const DoutPrefixProvider *dpp, std::uint64_t tid, optional_yield y); + void process_journal(const DoutPrefixProvider *dpp, std::uint64_t tid, lr::AioCompletion* c); + int _prepare_new_part(const DoutPrefixProvider *dpp, bool is_head, std::uint64_t tid, optional_yield y); + void _prepare_new_part(const DoutPrefixProvider *dpp, bool is_head, std::uint64_t tid, lr::AioCompletion* c); + int _prepare_new_head(const DoutPrefixProvider *dpp, std::uint64_t tid, optional_yield y); + void _prepare_new_head(const DoutPrefixProvider *dpp, std::uint64_t tid, lr::AioCompletion* c); + int push_entries(const DoutPrefixProvider *dpp, const std::deque& data_bufs, std::uint64_t tid, optional_yield y); void push_entries(const std::deque& 
data_bufs, std::uint64_t tid, lr::AioCompletion* c); - int trim_part(int64_t part_num, uint64_t ofs, + int trim_part(const DoutPrefixProvider *dpp, int64_t part_num, uint64_t ofs, std::optional tag, bool exclusive, std::uint64_t tid, optional_yield y); void trim_part(int64_t part_num, uint64_t ofs, @@ -162,9 +161,9 @@ class FIFO { std::uint64_t tid, lr::AioCompletion* c); /// Force refresh of metadata, yielding/blocking style - int read_meta(std::uint64_t tid, optional_yield y); + int read_meta(const DoutPrefixProvider *dpp, std::uint64_t tid, optional_yield y); /// Force refresh of metadata, with a librados Completion - void read_meta(std::uint64_t tid, lr::AioCompletion* c); + void read_meta(const DoutPrefixProvider *dpp, std::uint64_t tid, lr::AioCompletion* c); public: @@ -174,7 +173,7 @@ public: FIFO& operator =(FIFO&&) = delete; /// Open an existing FIFO. - static int open(lr::IoCtx ioctx, //< IO Context + static int open(const DoutPrefixProvider *dpp, lr::IoCtx ioctx, //< IO Context std::string oid, //< OID for metadata object std::unique_ptr* fifo, //< OUT: Pointer to FIFO object optional_yield y, //< Optional yield context @@ -184,7 +183,7 @@ public: /// can't find it. bool probe = false); /// Create a new or open an existing FIFO. 
- static int create(lr::IoCtx ioctx, //< IO Context + static int create(const DoutPrefixProvider *dpp, lr::IoCtx ioctx, //< IO Context std::string oid, //< OID for metadata object std::unique_ptr* fifo, //< OUT: Pointer to FIFO object optional_yield y, //< Optional yield context @@ -201,29 +200,32 @@ public: std::uint64_t max_entry_size = default_max_entry_size); /// Force refresh of metadata, yielding/blocking style - int read_meta(optional_yield y); + int read_meta(const DoutPrefixProvider *dpp, optional_yield y); /// Get currently known metadata const fifo::info& meta() const; /// Get partition header and entry overhead size std::pair get_part_layout_info() const; /// Push an entry to the FIFO - int push(const cb::list& bl, //< Entry to push + int push(const DoutPrefixProvider *dpp, + const cb::list& bl, //< Entry to push optional_yield y //< Optional yield ); /// Push an entry to the FIFO - void push(const cb::list& bl, //< Entry to push + void push(const DoutPrefixProvider *dpp, const cb::list& bl, //< Entry to push lr::AioCompletion* c //< Async Completion ); /// Push entries to the FIFO - int push(const std::vector& data_bufs, //< Entries to push + int push(const DoutPrefixProvider *dpp, + const std::vector& data_bufs, //< Entries to push optional_yield y //< Optional yield ); /// Push entries to the FIFO - void push(const std::vector& data_bufs, //< Entries to push + void push(const DoutPrefixProvider *dpp, const std::vector& data_bufs, //< Entries to push lr::AioCompletion* c //< Async Completion ); /// List entries - int list(int max_entries, //< Maximum entries to list + int list(const DoutPrefixProvider *dpp, + int max_entries, //< Maximum entries to list /// Point after which to begin listing. 
Start at tail if null std::optional markstr, std::vector* out, //< OUT: entries @@ -231,7 +233,8 @@ public: bool* more, optional_yield y //< Optional yield ); - void list(int max_entries, //< Maximum entries to list + void list(const DoutPrefixProvider *dpp, + int max_entries, //< Maximum entries to list /// Point after which to begin listing. Start at tail if null std::optional markstr, std::vector* out, //< OUT: entries @@ -240,19 +243,21 @@ public: lr::AioCompletion* c //< Async Completion ); /// Trim entries, coroutine/block style - int trim(std::string_view markstr, //< Position to which to trim, inclusive + int trim(const DoutPrefixProvider *dpp, + std::string_view markstr, //< Position to which to trim, inclusive bool exclusive, //< If true, do not trim the target entry //< itself, just all those before it. optional_yield y //< Optional yield ); /// Trim entries, librados AioCompletion style - void trim(std::string_view markstr, //< Position to which to trim, inclusive + void trim(const DoutPrefixProvider *dpp, + std::string_view markstr, //< Position to which to trim, inclusive bool exclusive, //< If true, do not trim the target entry //< itself, just all those before it. lr::AioCompletion* c //< librados AIO Completion ); /// Get part info - int get_part_info(int64_t part_num, /// Part number + int get_part_info(const DoutPrefixProvider *dpp, int64_t part_num, /// Part number fifo::part_header* header, //< OUT: Information optional_yield y //< Optional yield ); @@ -264,7 +269,7 @@ public: /// A convenience method to fetch the part information for the FIFO /// head, using librados::AioCompletion, since /// libradio::AioCompletions compose lousily. 
- void get_head_info(fu2::unique_function< //< Function to receive info + void get_head_info(const DoutPrefixProvider *dpp, fu2::unique_function< //< Function to receive info void(int r, fifo::part_header&&)>, lr::AioCompletion* c //< AIO Completion ); @@ -273,6 +278,7 @@ public: template struct Completion { private: + const DoutPrefixProvider *_dpp; lr::AioCompletion* _cur = nullptr; lr::AioCompletion* _super; public: @@ -286,7 +292,7 @@ public: return _super; } - Completion(lr::AioCompletion* super) : _super(super) { + Completion(const DoutPrefixProvider *dpp, lr::AioCompletion* super) : _dpp(dpp), _super(super) { super->pc->get(); } @@ -326,7 +332,7 @@ public: auto r = t->_cur->get_return_value(); t->_cur->release(); t->_cur = nullptr; - t->handle(Ptr(t), r); + t->handle(t->_dpp, Ptr(t), r); } }; diff --git a/src/rgw/librgw.cc b/src/rgw/librgw.cc index 94281b272c1..7d43662abdd 100644 --- a/src/rgw/librgw.cc +++ b/src/rgw/librgw.cc @@ -67,7 +67,6 @@ #include #include - #define dout_subsys ceph_subsys_rgw bool global_stop = false; @@ -141,7 +140,7 @@ namespace rgw { } } - void RGWLibProcess::handle_request(RGWRequest* r) + void RGWLibProcess::handle_request(const DoutPrefixProvider *dpp, RGWRequest* r) { /* * invariant: valid requests are derived from RGWLibRequst @@ -541,8 +540,7 @@ namespace rgw { g_conf()->rgw_run_sync_thread && g_conf()->rgw_nfs_run_sync_thread; - const DoutPrefix dp(cct.get(), dout_subsys, "librgw: "); - store = StoreManager::get_storage(&dp, g_ceph_context, + store = StoreManager::get_storage(this, g_ceph_context, "rados", run_gc, run_lc, @@ -693,11 +691,10 @@ namespace rgw { int RGWLibRequest::read_permissions(RGWOp* op, optional_yield y) { /* bucket and object ops */ - const DoutPrefix dp(store->ctx(), dout_subsys, "librgw: "); int ret = - rgw_build_bucket_policies(&dp, rgwlib.get_store(), get_state(), y); + rgw_build_bucket_policies(op, rgwlib.get_store(), get_state(), y); if (ret < 0) { - ldpp_dout(&dp, 10) << "read_permissions (bucket 
policy) on " + ldpp_dout(op, 10) << "read_permissions (bucket policy) on " << get_state()->bucket << ":" << get_state()->object << " only_bucket=" << only_bucket() @@ -706,10 +703,10 @@ namespace rgw { ret = -EACCES; } else if (! only_bucket()) { /* object ops */ - ret = rgw_build_object_policies(&dp, rgwlib.get_store(), get_state(), + ret = rgw_build_object_policies(op, rgwlib.get_store(), get_state(), op->prefetch_data(), y); if (ret < 0) { - ldpp_dout(&dp, 10) << "read_permissions (object policy) on" + ldpp_dout(op, 10) << "read_permissions (object policy) on" << get_state()->bucket << ":" << get_state()->object << " ret=" << ret << dendl; diff --git a/src/rgw/rgw_acl.cc b/src/rgw/rgw_acl.cc index caee6d329d9..ec5de88cecb 100644 --- a/src/rgw/rgw_acl.cc +++ b/src/rgw/rgw_acl.cc @@ -134,18 +134,19 @@ uint32_t RGWAccessControlList::get_perm(const DoutPrefixProvider* dpp, return perm_mask & auth_identity.get_perms_from_aclspec(dpp, acl_user_map); } -uint32_t RGWAccessControlList::get_group_perm(ACLGroupTypeEnum group, +uint32_t RGWAccessControlList::get_group_perm(const DoutPrefixProvider *dpp, + ACLGroupTypeEnum group, const uint32_t perm_mask) const { - ldout(cct, 5) << "Searching permissions for group=" << (int)group + ldpp_dout(dpp, 5) << "Searching permissions for group=" << (int)group << " mask=" << perm_mask << dendl; const auto iter = acl_group_map.find((uint32_t)group); if (iter != acl_group_map.end()) { - ldout(cct, 5) << "Found permission: " << iter->second << dendl; + ldpp_dout(dpp, 5) << "Found permission: " << iter->second << dendl; return iter->second & perm_mask; } - ldout(cct, 5) << "Permissions for group not found" << dendl; + ldpp_dout(dpp, 5) << "Permissions for group not found" << dendl; return 0; } @@ -192,11 +193,11 @@ uint32_t RGWAccessControlPolicy::get_perm(const DoutPrefixProvider* dpp, /* should we continue looking up? 
*/ if (!ignore_public_acls && ((perm & perm_mask) != perm_mask)) { - perm |= acl.get_group_perm(ACL_GROUP_ALL_USERS, perm_mask); + perm |= acl.get_group_perm(dpp, ACL_GROUP_ALL_USERS, perm_mask); if (false == auth_identity.is_owner_of(rgw_user(RGW_USER_ANON_ID))) { /* this is not the anonymous user */ - perm |= acl.get_group_perm(ACL_GROUP_AUTHENTICATED_USERS, perm_mask); + perm |= acl.get_group_perm(dpp, ACL_GROUP_AUTHENTICATED_USERS, perm_mask); } } @@ -246,14 +247,14 @@ bool RGWAccessControlPolicy::verify_permission(const DoutPrefixProvider* dpp, } -bool RGWAccessControlPolicy::is_public() const +bool RGWAccessControlPolicy::is_public(const DoutPrefixProvider *dpp) const { static constexpr auto public_groups = {ACL_GROUP_ALL_USERS, ACL_GROUP_AUTHENTICATED_USERS}; return std::any_of(public_groups.begin(), public_groups.end(), - [&](ACLGroupTypeEnum g) { - auto p = acl.get_group_perm(g, RGW_PERM_FULL_CONTROL); + [&, dpp](ACLGroupTypeEnum g) { + auto p = acl.get_group_perm(dpp, g, RGW_PERM_FULL_CONTROL); return (p != RGW_PERM_NONE) && (p != RGW_PERM_INVALID); } ); diff --git a/src/rgw/rgw_acl.h b/src/rgw/rgw_acl.h index 06c79e744ce..dd6db08fbe0 100644 --- a/src/rgw/rgw_acl.h +++ b/src/rgw/rgw_acl.h @@ -336,7 +336,7 @@ public: uint32_t get_perm(const DoutPrefixProvider* dpp, const rgw::auth::Identity& auth_identity, uint32_t perm_mask); - uint32_t get_group_perm(ACLGroupTypeEnum group, uint32_t perm_mask) const; + uint32_t get_group_perm(const DoutPrefixProvider *dpp, ACLGroupTypeEnum group, uint32_t perm_mask) const; uint32_t get_referer_perm(uint32_t current_perm, std::string http_referer, uint32_t perm_mask); @@ -502,7 +502,7 @@ public: } virtual bool compare_group_name(string& id, ACLGroupTypeEnum group) { return false; } - bool is_public() const; + bool is_public(const DoutPrefixProvider *dpp) const; friend bool operator==(const RGWAccessControlPolicy& lhs, const RGWAccessControlPolicy& rhs); friend bool operator!=(const RGWAccessControlPolicy& lhs, const 
RGWAccessControlPolicy& rhs); diff --git a/src/rgw/rgw_acl_s3.cc b/src/rgw/rgw_acl_s3.cc index da8a31ead3c..316125935fc 100644 --- a/src/rgw/rgw_acl_s3.cc +++ b/src/rgw/rgw_acl_s3.cc @@ -493,7 +493,7 @@ int RGWAccessControlPolicy_S3::rebuild(const DoutPrefixProvider *dpp, std::unique_ptr user = store->get_user(owner->get_id()); if (user->load_user(dpp, null_yield) < 0) { - ldout(cct, 10) << "owner info does not exist" << dendl; + ldpp_dout(dpp, 10) << "owner info does not exist" << dendl; err_msg = "Invalid id"; return -EINVAL; } @@ -525,9 +525,9 @@ int RGWAccessControlPolicy_S3::rebuild(const DoutPrefixProvider *dpp, return -EINVAL; } email = u.id; - ldout(cct, 10) << "grant user email=" << email << dendl; + ldpp_dout(dpp, 10) << "grant user email=" << email << dendl; if (store->get_user_by_email(dpp, email, null_yield, &user) < 0) { - ldout(cct, 10) << "grant user email not found or other error" << dendl; + ldpp_dout(dpp, 10) << "grant user email not found or other error" << dendl; err_msg = "The e-mail address you provided does not match any account on record."; return -ERR_UNRESOLVABLE_EMAIL; } @@ -547,7 +547,7 @@ int RGWAccessControlPolicy_S3::rebuild(const DoutPrefixProvider *dpp, if (grant_user.user_id.empty()) { user = store->get_user(uid); if (user->load_user(dpp, null_yield) < 0) { - ldout(cct, 10) << "grant user does not exist:" << uid << dendl; + ldpp_dout(dpp, 10) << "grant user does not exist:" << uid << dendl; err_msg = "Invalid id"; return -EINVAL; } else { @@ -559,7 +559,7 @@ int RGWAccessControlPolicy_S3::rebuild(const DoutPrefixProvider *dpp, grant_ok = true; rgw_user new_id; new_grant.get_id(new_id); - ldout(cct, 10) << "new grant: " << new_id << ":" << grant_user.display_name << dendl; + ldpp_dout(dpp, 10) << "new grant: " << new_id << ":" << grant_user.display_name << dendl; } break; case ACL_TYPE_GROUP: diff --git a/src/rgw/rgw_admin.cc b/src/rgw/rgw_admin.cc index 204df3aae0e..c27f8b0d84e 100644 --- a/src/rgw/rgw_admin.cc +++ 
b/src/rgw/rgw_admin.cc @@ -1360,7 +1360,7 @@ int check_min_obj_stripe_size(rgw::sal::Store* store, rgw::sal::Object* obj, uin RGWObjectCtx obj_ctx(store); int ret = obj->get_obj_attrs(&obj_ctx, null_yield, dpp()); if (ret < 0) { - lderr(store->ctx()) << "ERROR: failed to stat object, returned error: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp(), -1) << "ERROR: failed to stat object, returned error: " << cpp_strerror(-ret) << dendl; return ret; } @@ -1425,7 +1425,7 @@ int check_obj_locator_underscore(rgw::sal::Object* obj, bool fix, bool remove_ba string status = (needs_fixing ? "needs_fixing" : "ok"); if ((needs_fixing || remove_bad) && fix) { - ret = static_cast(store)->getRados()->fix_head_obj_locator(obj->get_bucket()->get_info(), needs_fixing, remove_bad, obj->get_key()); + ret = static_cast(store)->getRados()->fix_head_obj_locator(dpp(), obj->get_bucket()->get_info(), needs_fixing, remove_bad, obj->get_key()); if (ret < 0) { cerr << "ERROR: fix_head_object_locator() returned ret=" << ret << std::endl; goto done; @@ -1588,7 +1588,7 @@ static int send_to_remote_gateway(RGWRESTConn* conn, req_info& info, ceph::bufferlist response; rgw_user user; - int ret = conn->forward(user, info, nullptr, MAX_REST_RESPONSE, &in_data, &response, null_yield); + int ret = conn->forward(dpp(), user, info, nullptr, MAX_REST_RESPONSE, &in_data, &response, null_yield); int parse_ret = parser.parse(response.c_str(), response.length()); if (parse_ret < 0) { @@ -1616,7 +1616,7 @@ static int send_to_url(const string& url, RGWRESTSimpleRequest req(g_ceph_context, info.method, url, NULL, ¶ms, opt_region); bufferlist response; - int ret = req.forward_request(key, info, MAX_REST_RESPONSE, &in_data, &response, null_yield); + int ret = req.forward_request(dpp(), key, info, MAX_REST_RESPONSE, &in_data, &response, null_yield); int parse_ret = parser.parse(response.c_str(), response.length()); if (parse_ret < 0) { @@ -1653,7 +1653,7 @@ static int commit_period(RGWRealm& realm, RGWPeriod& 
period, if (store->get_zone()->get_id() == master_zone) { // read the current period RGWPeriod current_period; - int ret = current_period.init(g_ceph_context, + int ret = current_period.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, realm.get_id(), null_yield); if (ret < 0) { @@ -1726,23 +1726,23 @@ static int commit_period(RGWRealm& realm, RGWPeriod& period, } // the master zone gave us back the period that it committed, so it's // safe to save it as our latest epoch - ret = period.store_info(false, null_yield); + ret = period.store_info(dpp(), false, null_yield); if (ret < 0) { cerr << "Error storing committed period " << period.get_id() << ": " << cpp_strerror(ret) << std::endl; return ret; } - ret = period.set_latest_epoch(null_yield, period.get_epoch()); + ret = period.set_latest_epoch(dpp(), null_yield, period.get_epoch()); if (ret < 0) { cerr << "Error updating period epoch: " << cpp_strerror(ret) << std::endl; return ret; } - ret = period.reflect(null_yield); + ret = period.reflect(dpp(), null_yield); if (ret < 0) { cerr << "Error updating local objects: " << cpp_strerror(ret) << std::endl; return ret; } - realm.notify_new_period(period, null_yield); + realm.notify_new_period(dpp(), period, null_yield); return ret; } @@ -1754,7 +1754,7 @@ static int update_period(const string& realm_id, const string& realm_name, Formatter *formatter, bool force) { RGWRealm realm(realm_id, realm_name); - int ret = realm.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + int ret = realm.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0 ) { cerr << "Error initializing realm " << cpp_strerror(-ret) << std::endl; return ret; @@ -1764,19 +1764,19 @@ static int update_period(const string& realm_id, const string& realm_name, epoch = atoi(period_epoch.c_str()); } RGWPeriod period(period_id, epoch); - ret = period.init(g_ceph_context, static_cast(store)->svc()->sysobj, realm.get_id(), null_yield); + ret = 
period.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, realm.get_id(), null_yield); if (ret < 0) { cerr << "period init failed: " << cpp_strerror(-ret) << std::endl; return ret; } period.fork(); - ret = period.update(null_yield); + ret = period.update(dpp(), null_yield); if(ret < 0) { // Dropping the error message here, as both the ret codes were handled in // period.update() return ret; } - ret = period.store_info(false, null_yield); + ret = period.store_info(dpp(), false, null_yield); if (ret < 0) { cerr << "failed to store period: " << cpp_strerror(-ret) << std::endl; return ret; @@ -1837,7 +1837,7 @@ static int do_period_pull(RGWRESTConn *remote_conn, const string& url, cerr << "request failed: " << cpp_strerror(-ret) << std::endl; return ret; } - ret = period->init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield, false); + ret = period->init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield, false); if (ret < 0) { cerr << "faile to init period " << cpp_strerror(-ret) << std::endl; return ret; @@ -1848,12 +1848,12 @@ static int do_period_pull(RGWRESTConn *remote_conn, const string& url, cout << "failed to decode JSON input: " << e.what() << std::endl; return -EINVAL; } - ret = period->store_info(false, null_yield); + ret = period->store_info(dpp(), false, null_yield); if (ret < 0) { cerr << "Error storing period " << period->get_id() << ": " << cpp_strerror(ret) << std::endl; } // store latest epoch (ignore errors) - period->update_latest_epoch(period->get_epoch(), null_yield); + period->update_latest_epoch(dpp(), period->get_epoch(), null_yield); return 0; } @@ -1862,7 +1862,7 @@ static int read_current_period_id(rgw::sal::RadosStore* store, const std::string std::string* period_id) { RGWRealm realm(realm_id, realm_name); - int ret = realm.init(g_ceph_context, store->svc()->sysobj, null_yield); + int ret = realm.init(dpp(), g_ceph_context, store->svc()->sysobj, null_yield); if (ret < 0) { std::cerr << "failed to 
read realm: " << cpp_strerror(-ret) << std::endl; return ret; @@ -1892,14 +1892,14 @@ static void get_md_sync_status(list& status) { RGWMetaSyncStatusManager sync(static_cast(store), static_cast(store)->svc()->rados->get_async_processor()); - int ret = sync.init(); + int ret = sync.init(dpp()); if (ret < 0) { status.push_back(string("failed to retrieve sync info: sync.init() failed: ") + cpp_strerror(-ret)); return; } rgw_meta_sync_status sync_status; - ret = sync.read_sync_status(&sync_status); + ret = sync.read_sync_status(dpp(), &sync_status); if (ret < 0) { status.push_back(string("failed to read sync status: ") + cpp_strerror(-ret)); return; @@ -1958,7 +1958,7 @@ static void get_md_sync_status(list& status) map master_shards_info; string master_period = static_cast(store)->svc()->zone->get_current_period_id(); - ret = sync.read_master_log_shards_info(master_period, &master_shards_info); + ret = sync.read_master_log_shards_info(dpp(), master_period, &master_shards_info); if (ret < 0) { status.push_back(string("failed to fetch master sync status: ") + cpp_strerror(-ret)); return; @@ -1996,7 +1996,7 @@ static void get_md_sync_status(list& status) push_ss(ss, status) << "behind shards: " << "[" << shards_behind_set << "]"; map master_pos; - ret = sync.read_master_log_shards_next(sync_status.sync_info.period, shards_behind, &master_pos); + ret = sync.read_master_log_shards_next(dpp(), sync_status.sync_info.period, shards_behind, &master_pos); if (ret < 0) { derr << "ERROR: failed to fetch master next positions (" << cpp_strerror(-ret) << ")" << dendl; } else { @@ -2044,7 +2044,7 @@ static void get_data_sync_status(const rgw_zone_id& source_zone, list& s } RGWDataSyncStatusManager sync(static_cast(store), static_cast(store)->svc()->rados->get_async_processor(), source_zone, nullptr); - int ret = sync.init(); + int ret = sync.init(dpp()); if (ret < 0) { push_ss(ss, status, tab) << string("failed to retrieve sync info: ") + cpp_strerror(-ret); flush_ss(ss, status); @@ 
-2052,14 +2052,14 @@ static void get_data_sync_status(const rgw_zone_id& source_zone, list& s } rgw_data_sync_status sync_status; - ret = sync.read_sync_status(&sync_status); + ret = sync.read_sync_status(dpp(), &sync_status); if (ret < 0 && ret != -ENOENT) { push_ss(ss, status, tab) << string("failed read sync status: ") + cpp_strerror(-ret); return; } set recovering_shards; - ret = sync.read_recovering_shards(sync_status.sync_info.num_shards, recovering_shards); + ret = sync.read_recovering_shards(dpp(), sync_status.sync_info.num_shards, recovering_shards); if (ret < 0 && ret != ENOENT) { push_ss(ss, status, tab) << string("failed read recovering shards: ") + cpp_strerror(-ret); return; @@ -2116,7 +2116,7 @@ static void get_data_sync_status(const rgw_zone_id& source_zone, list& s map source_shards_info; - ret = sync.read_source_log_shards_info(&source_shards_info); + ret = sync.read_source_log_shards_info(dpp(), &source_shards_info); if (ret < 0) { push_ss(ss, status, tab) << string("failed to fetch source sync status: ") + cpp_strerror(-ret); return; @@ -2151,7 +2151,7 @@ static void get_data_sync_status(const rgw_zone_id& source_zone, list& s push_ss(ss, status, tab) << "behind shards: " << "[" << shards_behind_set << "]" ; map master_pos; - ret = sync.read_source_log_shards_next(shards_behind, &master_pos); + ret = sync.read_source_log_shards_next(dpp(), shards_behind, &master_pos); if (ret < 0) { derr << "ERROR: failed to fetch next positions (" << cpp_strerror(-ret) << ")" << dendl; } else { @@ -2246,7 +2246,7 @@ std::ostream& operator<<(std::ostream& out, const indented& h) { return out << std::setw(h.w) << h.header << std::setw(1) << ' '; } -static int bucket_source_sync_status(rgw::sal::RadosStore* store, const RGWZone& zone, +static int bucket_source_sync_status(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, const RGWZone& zone, const RGWZone& source, RGWRESTConn *conn, const RGWBucketInfo& bucket_info, rgw_sync_bucket_pipe pipe, @@ -2261,14 
+2261,14 @@ static int bucket_source_sync_status(rgw::sal::RadosStore* store, const RGWZone& } if (!pipe.source.bucket) { - lderr(store->ctx()) << __func__ << "(): missing source bucket" << dendl; + ldpp_dout(dpp, -1) << __func__ << "(): missing source bucket" << dendl; return -EINVAL; } std::unique_ptr source_bucket; int r = init_bucket(nullptr, *pipe.source.bucket, &source_bucket); if (r < 0) { - lderr(store->ctx()) << "failed to read source bucket info: " << cpp_strerror(r) << dendl; + ldpp_dout(dpp, -1) << "failed to read source bucket info: " << cpp_strerror(r) << dendl; return r; } @@ -2276,9 +2276,9 @@ static int bucket_source_sync_status(rgw::sal::RadosStore* store, const RGWZone& pipe.dest.bucket = bucket_info.bucket; std::vector status; - r = rgw_bucket_sync_status(dpp(), store, pipe, bucket_info, &source_bucket->get_info(), &status); + r = rgw_bucket_sync_status(dpp, store, pipe, bucket_info, &source_bucket->get_info(), &status); if (r < 0) { - lderr(store->ctx()) << "failed to read bucket sync status: " << cpp_strerror(r) << dendl; + ldpp_dout(dpp, -1) << "failed to read bucket sync status: " << cpp_strerror(r) << dendl; return r; } @@ -2307,9 +2307,9 @@ static int bucket_source_sync_status(rgw::sal::RadosStore* store, const RGWZone& out << indented{width} << "incremental sync: " << num_inc << "/" << total_shards << " shards\n"; BucketIndexShardsManager remote_markers; - r = rgw_read_remote_bilog_info(conn, source_bucket->get_key(), remote_markers, null_yield); + r = rgw_read_remote_bilog_info(dpp, conn, source_bucket->get_key(), remote_markers, null_yield); if (r < 0) { - lderr(store->ctx()) << "failed to read remote log: " << cpp_strerror(r) << dendl; + ldpp_dout(dpp, -1) << "failed to read remote log: " << cpp_strerror(r) << dendl; return r; } @@ -2421,7 +2421,7 @@ static int sync_info(std::optional opt_target_zone, std::optionalalloc_child(*eff_bucket, nullopt)); } - ret = bucket_handler->init(null_yield); + ret = bucket_handler->init(dpp(), 
null_yield); if (ret < 0) { cerr << "ERROR: failed to init bucket sync policy handler: " << cpp_strerror(-ret) << " (ret=" << ret << ")" << std::endl; return ret; @@ -2527,7 +2527,7 @@ static int bucket_sync_info(rgw::sal::RadosStore* store, const RGWBucketInfo& in int r = store->get_sync_policy_handler(dpp(), std::nullopt, info.bucket, &handler, null_yield); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed to get policy handler for bucket (" << info.bucket << "): r=" << r << ": " << cpp_strerror(-r) << dendl; + ldpp_dout(dpp(), -1) << "ERROR: failed to get policy handler for bucket (" << info.bucket << "): r=" << r << ": " << cpp_strerror(-r) << dendl; return r; } @@ -2568,7 +2568,7 @@ static int bucket_sync_status(rgw::sal::RadosStore* store, const RGWBucketInfo& int r = store->get_sync_policy_handler(dpp(), std::nullopt, info.bucket, &handler, null_yield); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed to get policy handler for bucket (" << info.bucket << "): r=" << r << ": " << cpp_strerror(-r) << dendl; + ldpp_dout(dpp(), -1) << "ERROR: failed to get policy handler for bucket (" << info.bucket << "): r=" << r << ": " << cpp_strerror(-r) << dendl; return r; } @@ -2580,13 +2580,13 @@ static int bucket_sync_status(rgw::sal::RadosStore* store, const RGWBucketInfo& if (!source_zone_id.empty()) { auto z = zonegroup.zones.find(source_zone_id); if (z == zonegroup.zones.end()) { - lderr(store->ctx()) << "Source zone not found in zonegroup " + ldpp_dout(dpp(), -1) << "Source zone not found in zonegroup " << zonegroup.get_name() << dendl; return -EINVAL; } auto c = zone_conn_map.find(source_zone_id); if (c == zone_conn_map.end()) { - lderr(store->ctx()) << "No connection to zone " << z->second.name << dendl; + ldpp_dout(dpp(), -1) << "No connection to zone " << z->second.name << dendl; return -EINVAL; } zone_ids.insert(source_zone_id); @@ -2613,7 +2613,7 @@ static int bucket_sync_status(rgw::sal::RadosStore* store, const RGWBucketInfo& continue; } if 
(pipe.source.zone.value_or(rgw_zone_id()) == z->second.id) { - bucket_source_sync_status(store, zone, z->second, + bucket_source_sync_status(dpp(), store, zone, z->second, c->second, info, pipe, width, out); @@ -2781,7 +2781,7 @@ static int trim_sync_error_log(int shard_id, const string& marker, int delay_ms) shard_id); // call cls_log_trim() until it returns -ENODATA for (;;) { - int ret = static_cast(store)->svc()->cls->timelog.trim(oid, {}, {}, {}, marker, nullptr, + int ret = static_cast(store)->svc()->cls->timelog.trim(dpp(), oid, {}, {}, {}, marker, nullptr, null_yield); if (ret == -ENODATA) { return 0; @@ -2875,7 +2875,7 @@ public: b(_bucket) {} int init() { - int ret = zonegroup.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + int ret = zonegroup.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl; return ret; @@ -2906,7 +2906,7 @@ public: int write_policy() { if (!b) { - int ret = zonegroup.update(null_yield); + int ret = zonegroup.update(dpp(), null_yield); if (ret < 0) { cerr << "failed to update zonegroup: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -3941,12 +3941,12 @@ int main(int argc, const char **argv) return EINVAL; } RGWPeriod period(period_id); - int ret = period.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + int ret = period.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "period.init failed: " << cpp_strerror(-ret) << std::endl; return -ret; } - ret = period.delete_obj(null_yield); + ret = period.delete_obj(dpp(), null_yield); if (ret < 0) { cerr << "ERROR: couldn't delete period: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -3962,7 +3962,7 @@ int main(int argc, const char **argv) } if (staging) { RGWRealm realm(realm_id, realm_name); - int ret = realm.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); 
+ int ret = realm.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0 ) { cerr << "Error initializing realm " << cpp_strerror(-ret) << std::endl; return -ret; @@ -3973,7 +3973,7 @@ int main(int argc, const char **argv) epoch = 1; } RGWPeriod period(period_id, epoch); - int ret = period.init(g_ceph_context, static_cast(store)->svc()->sysobj, realm_id, + int ret = period.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, realm_id, null_yield, realm_name); if (ret < 0) { cerr << "period init failed: " << cpp_strerror(-ret) << std::endl; @@ -3998,7 +3998,7 @@ int main(int argc, const char **argv) case OPT::PERIOD_LIST: { list periods; - int ret = static_cast(store)->svc()->zone->list_periods(periods); + int ret = static_cast(store)->svc()->zone->list_periods(dpp(), periods); if (ret < 0) { cerr << "failed to list periods: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4027,13 +4027,13 @@ int main(int argc, const char **argv) if (url.empty()) { // load current period for endpoints RGWRealm realm(realm_id, realm_name); - int ret = realm.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + int ret = realm.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init realm: " << cpp_strerror(-ret) << std::endl; return -ret; } RGWPeriod current_period(realm.get_current_period()); - ret = current_period.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + ret = current_period.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init current period: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4074,7 +4074,7 @@ int main(int argc, const char **argv) RGWRealm realm(g_ceph_context, static_cast(store)->svc()->sysobj); if (!realm_name.empty()) { // look up realm_id for the given realm_name - int ret = realm.read_id(realm_name, realm_id, null_yield); + int ret = 
realm.read_id(dpp(), realm_name, realm_id, null_yield); if (ret < 0) { cerr << "ERROR: failed to read realm for " << realm_name << ": " << cpp_strerror(-ret) << std::endl; @@ -4082,7 +4082,7 @@ int main(int argc, const char **argv) } } else { // use default realm_id when none is given - int ret = realm.read_default_id(realm_id, null_yield); + int ret = realm.read_default_id(dpp(), realm_id, null_yield); if (ret < 0 && ret != -ENOENT) { // on ENOENT, use empty realm_id cerr << "ERROR: failed to read default realm: " << cpp_strerror(-ret) << std::endl; @@ -4092,7 +4092,7 @@ int main(int argc, const char **argv) } RGWPeriodConfig period_config; - int ret = period_config.read(static_cast(store)->svc()->sysobj, realm_id, null_yield); + int ret = period_config.read(dpp(), static_cast(store)->svc()->sysobj, realm_id, null_yield); if (ret < 0 && ret != -ENOENT) { cerr << "ERROR: failed to read period config: " << cpp_strerror(-ret) << std::endl; @@ -4123,7 +4123,7 @@ int main(int argc, const char **argv) if (opt_cmd != OPT::GLOBAL_QUOTA_GET) { // write the modified period config - ret = period_config.write(static_cast(store)->svc()->sysobj, realm_id, null_yield); + ret = period_config.write(dpp(), static_cast(store)->svc()->sysobj, realm_id, null_yield); if (ret < 0) { cerr << "ERROR: failed to write period config: " << cpp_strerror(-ret) << std::endl; @@ -4157,7 +4157,7 @@ int main(int argc, const char **argv) } if (set_default) { - ret = realm.set_as_default(null_yield); + ret = realm.set_as_default(dpp(), null_yield); if (ret < 0) { cerr << "failed to set realm " << realm_name << " as default: " << cpp_strerror(-ret) << std::endl; } @@ -4174,12 +4174,12 @@ int main(int argc, const char **argv) cerr << "missing realm name or id" << std::endl; return EINVAL; } - int ret = realm.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + int ret = realm.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "realm.init 
failed: " << cpp_strerror(-ret) << std::endl; return -ret; } - ret = realm.delete_obj(null_yield); + ret = realm.delete_obj(dpp(), null_yield); if (ret < 0) { cerr << "ERROR: couldn't : " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4190,7 +4190,7 @@ int main(int argc, const char **argv) case OPT::REALM_GET: { RGWRealm realm(realm_id, realm_name); - int ret = realm.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + int ret = realm.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { if (ret == -ENOENT && realm_name.empty() && realm_id.empty()) { cerr << "missing realm name or id, or default realm not found" << std::endl; @@ -4207,7 +4207,7 @@ int main(int argc, const char **argv) { RGWRealm realm(g_ceph_context, static_cast(store)->svc()->sysobj); string default_id; - int ret = realm.read_default_id(default_id, null_yield); + int ret = realm.read_default_id(dpp(), default_id, null_yield); if (ret == -ENOENT) { cout << "No default realm is set" << std::endl; return -ret; @@ -4222,12 +4222,12 @@ int main(int argc, const char **argv) { RGWRealm realm(g_ceph_context, static_cast(store)->svc()->sysobj); string default_id; - int ret = realm.read_default_id(default_id, null_yield); + int ret = realm.read_default_id(dpp(), default_id, null_yield); if (ret < 0 && ret != -ENOENT) { cerr << "could not determine default realm: " << cpp_strerror(-ret) << std::endl; } list realms; - ret = static_cast(store)->svc()->zone->list_realms(realms); + ret = static_cast(store)->svc()->zone->list_realms(dpp(), realms); if (ret < 0) { cerr << "failed to list realms: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4246,7 +4246,7 @@ int main(int argc, const char **argv) return -ret; } list periods; - ret = static_cast(store)->svc()->zone->list_periods(period_id, periods, null_yield); + ret = static_cast(store)->svc()->zone->list_periods(dpp(), period_id, periods, null_yield); if (ret < 0) { cerr << "list periods failed: 
" << cpp_strerror(-ret) << std::endl; return -ret; @@ -4270,12 +4270,12 @@ int main(int argc, const char **argv) cerr << "missing realm name or id" << std::endl; return EINVAL; } - int ret = realm.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + int ret = realm.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "realm.init failed: " << cpp_strerror(-ret) << std::endl; return -ret; } - ret = realm.rename(realm_new_name, null_yield); + ret = realm.rename(dpp(), realm_new_name, null_yield); if (ret < 0) { cerr << "realm.rename failed: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4293,7 +4293,7 @@ int main(int argc, const char **argv) } RGWRealm realm(realm_id, realm_name); bool new_realm = false; - int ret = realm.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + int ret = realm.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0 && ret != -ENOENT) { cerr << "failed to init realm: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4319,7 +4319,7 @@ int main(int argc, const char **argv) return 1; } } else { - ret = realm.update(null_yield); + ret = realm.update(dpp(), null_yield); if (ret < 0) { cerr << "ERROR: couldn't store realm info: " << cpp_strerror(-ret) << std::endl; return 1; @@ -4327,7 +4327,7 @@ int main(int argc, const char **argv) } if (set_default) { - ret = realm.set_as_default(null_yield); + ret = realm.set_as_default(dpp(), null_yield); if (ret < 0) { cerr << "failed to set realm " << realm_name << " as default: " << cpp_strerror(-ret) << std::endl; } @@ -4340,12 +4340,12 @@ int main(int argc, const char **argv) case OPT::REALM_DEFAULT: { RGWRealm realm(realm_id, realm_name); - int ret = realm.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + int ret = realm.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init realm: " << 
cpp_strerror(-ret) << std::endl; return -ret; } - ret = realm.set_as_default(null_yield); + ret = realm.set_as_default(dpp(), null_yield); if (ret < 0) { cerr << "failed to set realm as default: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4382,7 +4382,7 @@ int main(int argc, const char **argv) return -ret; } RGWRealm realm; - realm.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield, false); + realm.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield, false); try { decode_json_obj(realm, &p); } catch (const JSONDecoder::err& e) { @@ -4408,7 +4408,7 @@ int main(int argc, const char **argv) << cpp_strerror(ret) << std::endl; return -ret; } else if (ret ==-EEXIST) { - ret = realm.update(null_yield); + ret = realm.update(dpp(), null_yield); if (ret < 0) { cerr << "Error storing realm " << realm.get_id() << ": " << cpp_strerror(ret) << std::endl; @@ -4416,7 +4416,7 @@ int main(int argc, const char **argv) } if (set_default) { - ret = realm.set_as_default(null_yield); + ret = realm.set_as_default(dpp(), null_yield); if (ret < 0) { cerr << "failed to set realm " << realm_name << " as default: " << cpp_strerror(-ret) << std::endl; } @@ -4435,21 +4435,21 @@ int main(int argc, const char **argv) } RGWZoneGroup zonegroup(zonegroup_id,zonegroup_name); - int ret = zonegroup.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + int ret = zonegroup.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to initialize zonegroup " << zonegroup_name << " id " << zonegroup_id << " :" << cpp_strerror(-ret) << std::endl; return -ret; } RGWZoneParams zone(zone_id, zone_name); - ret = zone.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + ret = zone.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl; return -ret; } if (zone.realm_id != 
zonegroup.realm_id) { zone.realm_id = zonegroup.realm_id; - ret = zone.update(null_yield); + ret = zone.update(dpp(), null_yield); if (ret < 0) { cerr << "failed to save zone info: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4469,7 +4469,7 @@ int main(int argc, const char **argv) bool *psync_from_all = (sync_from_all_specified ? &sync_from_all : nullptr); string *predirect_zone = (redirect_zone_set ? &redirect_zone : nullptr); - ret = zonegroup.add_zone(zone, + ret = zonegroup.add_zone(dpp(), zone, (is_master_set ? &is_master : NULL), (is_read_only_set ? &read_only : NULL), endpoints, ptier_type, @@ -4494,7 +4494,7 @@ int main(int argc, const char **argv) return EINVAL; } RGWRealm realm(realm_id, realm_name); - int ret = realm.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + int ret = realm.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init realm: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4509,7 +4509,7 @@ int main(int argc, const char **argv) } if (set_default) { - ret = zonegroup.set_as_default(null_yield); + ret = zonegroup.set_as_default(dpp(), null_yield); if (ret < 0) { cerr << "failed to set zonegroup " << zonegroup_name << " as default: " << cpp_strerror(-ret) << std::endl; } @@ -4527,13 +4527,13 @@ int main(int argc, const char **argv) } RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name); - int ret = zonegroup.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + int ret = zonegroup.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl; return -ret; } - ret = zonegroup.set_as_default(null_yield); + ret = zonegroup.set_as_default(dpp(), null_yield); if (ret < 0) { cerr << "failed to set zonegroup as default: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4547,13 +4547,13 @@ int main(int argc, const char **argv) 
return EINVAL; } RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name); - int ret = zonegroup.init(g_ceph_context, static_cast(store)->svc()->sysobj, + int ret = zonegroup.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl; return -ret; } - ret = zonegroup.delete_obj(null_yield); + ret = zonegroup.delete_obj(dpp(), null_yield); if (ret < 0) { cerr << "ERROR: couldn't delete zonegroup: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4563,7 +4563,7 @@ int main(int argc, const char **argv) case OPT::ZONEGROUP_GET: { RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name); - int ret = zonegroup.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + int ret = zonegroup.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4576,7 +4576,7 @@ int main(int argc, const char **argv) case OPT::ZONEGROUP_LIST: { RGWZoneGroup zonegroup; - int ret = zonegroup.init(g_ceph_context, static_cast(store)->svc()->sysobj, + int ret = zonegroup.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield, false); if (ret < 0) { cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl; @@ -4584,13 +4584,13 @@ int main(int argc, const char **argv) } list zonegroups; - ret = static_cast(store)->svc()->zone->list_zonegroups(zonegroups); + ret = static_cast(store)->svc()->zone->list_zonegroups(dpp(), zonegroups); if (ret < 0) { cerr << "failed to list zonegroups: " << cpp_strerror(-ret) << std::endl; return -ret; } string default_zonegroup; - ret = zonegroup.read_default_id(default_zonegroup, null_yield); + ret = zonegroup.read_default_id(dpp(), default_zonegroup, null_yield); if (ret < 0 && ret != -ENOENT) { cerr << "could not determine default zonegroup: " << cpp_strerror(-ret) << std::endl; } @@ -4604,7 
+4604,7 @@ int main(int argc, const char **argv) case OPT::ZONEGROUP_MODIFY: { RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name); - int ret = zonegroup.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + int ret = zonegroup.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4618,7 +4618,7 @@ int main(int argc, const char **argv) } if (is_master_set) { - zonegroup.update_master(is_master, null_yield); + zonegroup.update_master(dpp(), is_master, null_yield); need_update = true; } @@ -4638,7 +4638,7 @@ int main(int argc, const char **argv) } else if (!realm_name.empty()) { // get realm id from name RGWRealm realm{g_ceph_context, static_cast(store)->svc()->sysobj}; - ret = realm.read_id(realm_name, zonegroup.realm_id, null_yield); + ret = realm.read_id(dpp(), realm_name, zonegroup.realm_id, null_yield); if (ret < 0) { cerr << "failed to find realm by name " << realm_name << std::endl; return -ret; @@ -4654,7 +4654,7 @@ int main(int argc, const char **argv) } if (need_update) { - ret = zonegroup.update(null_yield); + ret = zonegroup.update(dpp(), null_yield); if (ret < 0) { cerr << "failed to update zonegroup: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4662,7 +4662,7 @@ int main(int argc, const char **argv) } if (set_default) { - ret = zonegroup.set_as_default(null_yield); + ret = zonegroup.set_as_default(dpp(), null_yield); if (ret < 0) { cerr << "failed to set zonegroup " << zonegroup_name << " as default: " << cpp_strerror(-ret) << std::endl; } @@ -4675,7 +4675,7 @@ int main(int argc, const char **argv) case OPT::ZONEGROUP_SET: { RGWRealm realm(realm_id, realm_name); - int ret = realm.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + int ret = realm.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); bool default_realm_not_exist = (ret == -ENOENT && 
realm_id.empty() && realm_name.empty()); if (ret < 0 && !default_realm_not_exist ) { @@ -4684,7 +4684,7 @@ int main(int argc, const char **argv) } RGWZoneGroup zonegroup; - ret = zonegroup.init(g_ceph_context, static_cast(store)->svc()->sysobj, + ret = zonegroup.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield, false); if (ret < 0) { cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl; @@ -4702,7 +4702,7 @@ int main(int argc, const char **argv) cerr << "ERROR: couldn't create zonegroup info: " << cpp_strerror(-ret) << std::endl; return 1; } else if (ret == -EEXIST) { - ret = zonegroup.update(null_yield); + ret = zonegroup.update(dpp(), null_yield); if (ret < 0) { cerr << "ERROR: couldn't store zonegroup info: " << cpp_strerror(-ret) << std::endl; return 1; @@ -4710,7 +4710,7 @@ int main(int argc, const char **argv) } if (set_default) { - ret = zonegroup.set_as_default(null_yield); + ret = zonegroup.set_as_default(dpp(), null_yield); if (ret < 0) { cerr << "failed to set zonegroup " << zonegroup_name << " as default: " << cpp_strerror(-ret) << std::endl; } @@ -4723,7 +4723,7 @@ int main(int argc, const char **argv) case OPT::ZONEGROUP_REMOVE: { RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name); - int ret = zonegroup.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + int ret = zonegroup.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4748,7 +4748,7 @@ int main(int argc, const char **argv) } } - ret = zonegroup.remove_zone(zone_id, null_yield); + ret = zonegroup.remove_zone(dpp(), zone_id, null_yield); if (ret < 0) { cerr << "failed to remove zone: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4769,12 +4769,12 @@ int main(int argc, const char **argv) return EINVAL; } RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name); - int ret = zonegroup.init(g_ceph_context, 
static_cast(store)->svc()->sysobj, null_yield); + int ret = zonegroup.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl; return -ret; } - ret = zonegroup.rename(zonegroup_new_name, null_yield); + ret = zonegroup.rename(dpp(), zonegroup_new_name, null_yield); if (ret < 0) { cerr << "failed to rename zonegroup: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4784,7 +4784,7 @@ int main(int argc, const char **argv) case OPT::ZONEGROUP_PLACEMENT_LIST: { RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name); - int ret = zonegroup.init(g_ceph_context, static_cast(store)->svc()->sysobj, + int ret = zonegroup.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl; @@ -4803,7 +4803,7 @@ int main(int argc, const char **argv) } RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name); - int ret = zonegroup.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + int ret = zonegroup.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4840,7 +4840,7 @@ int main(int argc, const char **argv) } RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name); - int ret = zonegroup.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + int ret = zonegroup.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4883,8 +4883,8 @@ int main(int argc, const char **argv) zonegroup.default_placement = rule; } - zonegroup.post_process_params(null_yield); - ret = zonegroup.update(null_yield); + zonegroup.post_process_params(dpp(), null_yield); + ret = zonegroup.update(dpp(), null_yield); if (ret < 0) { 
cerr << "failed to update zonegroup: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4904,7 +4904,7 @@ int main(int argc, const char **argv) RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name); /* if the user didn't provide zonegroup info , create stand alone zone */ if (!zonegroup_id.empty() || !zonegroup_name.empty()) { - ret = zonegroup.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + ret = zonegroup.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "unable to initialize zonegroup " << zonegroup_name << ": " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4915,7 +4915,7 @@ int main(int argc, const char **argv) } RGWZoneParams zone(zone_id, zone_name); - ret = zone.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield, false); + ret = zone.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield, false); if (ret < 0) { cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -4942,7 +4942,7 @@ int main(int argc, const char **argv) string *ptier_type = (tier_type_specified ? &tier_type : nullptr); bool *psync_from_all = (sync_from_all_specified ? &sync_from_all : nullptr); string *predirect_zone = (redirect_zone_set ? &redirect_zone : nullptr); - ret = zonegroup.add_zone(zone, + ret = zonegroup.add_zone(dpp(), zone, (is_master_set ? &is_master : NULL), (is_read_only_set ? 
&read_only : NULL), endpoints, @@ -4960,7 +4960,7 @@ int main(int argc, const char **argv) } if (set_default) { - ret = zone.set_as_default(null_yield); + ret = zone.set_as_default(dpp(), null_yield); if (ret < 0) { cerr << "failed to set zone " << zone_name << " as default: " << cpp_strerror(-ret) << std::endl; } @@ -4973,7 +4973,7 @@ int main(int argc, const char **argv) case OPT::ZONE_DEFAULT: { RGWZoneGroup zonegroup(zonegroup_id,zonegroup_name); - int ret = zonegroup.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + int ret = zonegroup.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "WARNING: failed to initialize zonegroup " << zonegroup_name << std::endl; } @@ -4982,12 +4982,12 @@ int main(int argc, const char **argv) return EINVAL; } RGWZoneParams zone(zone_id, zone_name); - ret = zone.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + ret = zone.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl; return -ret; } - ret = zone.set_as_default(null_yield); + ret = zone.set_as_default(dpp(), null_yield); if (ret < 0) { cerr << "failed to set zone as default: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -5001,14 +5001,14 @@ int main(int argc, const char **argv) return EINVAL; } RGWZoneParams zone(zone_id, zone_name); - int ret = zone.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + int ret = zone.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl; return -ret; } list zonegroups; - ret = static_cast(store)->svc()->zone->list_zonegroups(zonegroups); + ret = static_cast(store)->svc()->zone->list_zonegroups(dpp(), zonegroups); if (ret < 0) { cerr << "failed to list zonegroups: " << cpp_strerror(-ret) << std::endl; return 
-ret; @@ -5016,19 +5016,19 @@ int main(int argc, const char **argv) for (list::iterator iter = zonegroups.begin(); iter != zonegroups.end(); ++iter) { RGWZoneGroup zonegroup(string(), *iter); - int ret = zonegroup.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + int ret = zonegroup.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "WARNING: failed to initialize zonegroup " << zonegroup_name << std::endl; continue; } - ret = zonegroup.remove_zone(zone.get_id(), null_yield); + ret = zonegroup.remove_zone(dpp(), zone.get_id(), null_yield); if (ret < 0 && ret != -ENOENT) { cerr << "failed to remove zone " << zone_name << " from zonegroup " << zonegroup.get_name() << ": " << cpp_strerror(-ret) << std::endl; } } - ret = zone.delete_obj(null_yield); + ret = zone.delete_obj(dpp(), null_yield); if (ret < 0) { cerr << "failed to delete zone " << zone_name << ": " << cpp_strerror(-ret) << std::endl; return -ret; @@ -5038,7 +5038,7 @@ int main(int argc, const char **argv) case OPT::ZONE_GET: { RGWZoneParams zone(zone_id, zone_name); - int ret = zone.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + int ret = zone.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -5050,13 +5050,13 @@ int main(int argc, const char **argv) case OPT::ZONE_SET: { RGWZoneParams zone(zone_name); - int ret = zone.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield, + int ret = zone.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield, false); if (ret < 0) { return -ret; } - ret = zone.read(null_yield); + ret = zone.read(dpp(), null_yield); if (ret < 0 && ret != -ENOENT) { cerr << "zone.read() returned ret=" << ret << std::endl; return -ret; @@ -5071,7 +5071,7 @@ int main(int argc, const char **argv) if(zone.realm_id.empty()) { RGWRealm 
realm(realm_id, realm_name); - int ret = realm.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + int ret = realm.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0 && ret != -ENOENT) { cerr << "failed to init realm: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -5109,19 +5109,19 @@ int main(int argc, const char **argv) } cerr << "zone id " << zone.get_id(); - ret = zone.fix_pool_names(null_yield); + ret = zone.fix_pool_names(dpp(), null_yield); if (ret < 0) { cerr << "ERROR: couldn't fix zone: " << cpp_strerror(-ret) << std::endl; return -ret; } - ret = zone.write(false, null_yield); + ret = zone.write(dpp(), false, null_yield); if (ret < 0) { cerr << "ERROR: couldn't create zone: " << cpp_strerror(-ret) << std::endl; return 1; } if (set_default) { - ret = zone.set_as_default(null_yield); + ret = zone.set_as_default(dpp(), null_yield); if (ret < 0) { cerr << "failed to set zone " << zone_name << " as default: " << cpp_strerror(-ret) << std::endl; } @@ -5134,20 +5134,20 @@ int main(int argc, const char **argv) case OPT::ZONE_LIST: { list zones; - int ret = static_cast(store)->svc()->zone->list_zones(zones); + int ret = static_cast(store)->svc()->zone->list_zones(dpp(), zones); if (ret < 0) { cerr << "failed to list zones: " << cpp_strerror(-ret) << std::endl; return -ret; } RGWZoneParams zone; - ret = zone.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield, false); + ret = zone.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield, false); if (ret < 0) { cerr << "failed to init zone: " << cpp_strerror(-ret) << std::endl; return -ret; } string default_zone; - ret = zone.read_default_id(default_zone, null_yield); + ret = zone.read_default_id(dpp(), default_zone, null_yield); if (ret < 0 && ret != -ENOENT) { cerr << "could not determine default zone: " << cpp_strerror(-ret) << std::endl; } @@ -5161,7 +5161,7 @@ int main(int argc, const char **argv) case 
OPT::ZONE_MODIFY: { RGWZoneParams zone(zone_id, zone_name); - int ret = zone.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + int ret = zone.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init zone: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -5184,7 +5184,7 @@ int main(int argc, const char **argv) } else if (!realm_name.empty()) { // get realm id from name RGWRealm realm{g_ceph_context, static_cast(store)->svc()->sysobj}; - ret = realm.read_id(realm_name, zone.realm_id, null_yield); + ret = realm.read_id(dpp(), realm_name, zone.realm_id, null_yield); if (ret < 0) { cerr << "failed to find realm by name " << realm_name << std::endl; return -ret; @@ -5211,7 +5211,7 @@ int main(int argc, const char **argv) } if (need_zone_update) { - ret = zone.update(null_yield); + ret = zone.update(dpp(), null_yield); if (ret < 0) { cerr << "failed to save zone info: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -5219,7 +5219,7 @@ int main(int argc, const char **argv) } RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name); - ret = zonegroup.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + ret = zonegroup.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -5229,7 +5229,7 @@ int main(int argc, const char **argv) bool *psync_from_all = (sync_from_all_specified ? &sync_from_all : nullptr); string *predirect_zone = (redirect_zone_set ? &redirect_zone : nullptr); - ret = zonegroup.add_zone(zone, + ret = zonegroup.add_zone(dpp(), zone, (is_master_set ? &is_master : NULL), (is_read_only_set ? 
&read_only : NULL), endpoints, ptier_type, @@ -5242,14 +5242,14 @@ int main(int argc, const char **argv) return -ret; } - ret = zonegroup.update(null_yield); + ret = zonegroup.update(dpp(), null_yield); if (ret < 0) { cerr << "failed to update zonegroup: " << cpp_strerror(-ret) << std::endl; return -ret; } if (set_default) { - ret = zone.set_as_default(null_yield); + ret = zone.set_as_default(dpp(), null_yield); if (ret < 0) { cerr << "failed to set zone " << zone_name << " as default: " << cpp_strerror(-ret) << std::endl; } @@ -5270,23 +5270,23 @@ int main(int argc, const char **argv) return EINVAL; } RGWZoneParams zone(zone_id,zone_name); - int ret = zone.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + int ret = zone.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl; return -ret; } - ret = zone.rename(zone_new_name, null_yield); + ret = zone.rename(dpp(), zone_new_name, null_yield); if (ret < 0) { cerr << "failed to rename zone " << zone_name << " to " << zone_new_name << ": " << cpp_strerror(-ret) << std::endl; return -ret; } RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name); - ret = zonegroup.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + ret = zonegroup.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "WARNING: failed to initialize zonegroup " << zonegroup_name << std::endl; } else { - ret = zonegroup.rename_zone(zone, null_yield); + ret = zonegroup.rename_zone(dpp(), zone, null_yield); if (ret < 0) { cerr << "Error in zonegroup rename for " << zone_name << ": " << cpp_strerror(-ret) << std::endl; return -ret; @@ -5310,7 +5310,7 @@ int main(int argc, const char **argv) } RGWZoneParams zone(zone_id, zone_name); - int ret = zone.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + int ret = zone.init(dpp(), g_ceph_context, 
static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init zone: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -5319,7 +5319,7 @@ int main(int argc, const char **argv) if (opt_cmd == OPT::ZONE_PLACEMENT_ADD || opt_cmd == OPT::ZONE_PLACEMENT_MODIFY) { RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name); - ret = zonegroup.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + ret = zonegroup.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -5393,7 +5393,7 @@ int main(int argc, const char **argv) } } - ret = zone.update(null_yield); + ret = zone.update(dpp(), null_yield); if (ret < 0) { cerr << "failed to save zone info: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -5406,7 +5406,7 @@ int main(int argc, const char **argv) case OPT::ZONE_PLACEMENT_LIST: { RGWZoneParams zone(zone_id, zone_name); - int ret = zone.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + int ret = zone.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -5423,7 +5423,7 @@ int main(int argc, const char **argv) } RGWZoneParams zone(zone_id, zone_name); - int ret = zone.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + int ret = zone.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -5722,7 +5722,7 @@ int main(int argc, const char **argv) // load the period RGWPeriod period(period_id); - int ret = period.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + int ret = period.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "period init failed: " << 
cpp_strerror(-ret) << std::endl; return -ret; @@ -5758,13 +5758,13 @@ int main(int argc, const char **argv) { // read realm and staging period RGWRealm realm(realm_id, realm_name); - int ret = realm.init(g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); + int ret = realm.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, null_yield); if (ret < 0) { cerr << "Error initializing realm: " << cpp_strerror(-ret) << std::endl; return -ret; } RGWPeriod period(RGWPeriod::get_staging_id(realm.get_id()), 1); - ret = period.init(g_ceph_context, static_cast(store)->svc()->sysobj, realm.get_id(), null_yield); + ret = period.init(dpp(), g_ceph_context, static_cast(store)->svc()->sysobj, realm.get_id(), null_yield); if (ret < 0) { cerr << "period init failed: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -6028,7 +6028,7 @@ int main(int argc, const char **argv) } else { /* list users in groups of max-keys, then perform user-bucket * limit-check on each group */ - ret = store->meta_list_keys_init(metadata_key, string(), &handle); + ret = store->meta_list_keys_init(dpp(), metadata_key, string(), &handle); if (ret < 0) { cerr << "ERROR: buckets limit check can't get user metadata_key: " << cpp_strerror(-ret) << std::endl; @@ -6151,7 +6151,7 @@ int main(int argc, const char **argv) if (opt_cmd == OPT::BUCKET_STATS) { if (bucket_name.empty() && !bucket_id.empty()) { rgw_bucket bucket; - if (!rgw_find_bucket_by_id(store->ctx(), store, marker, bucket_id, &bucket)) { + if (!rgw_find_bucket_by_id(dpp(), store->ctx(), store, marker, bucket_id, &bucket)) { cerr << "failure: no such bucket id" << std::endl; return -ENOENT; } @@ -6210,7 +6210,7 @@ int main(int argc, const char **argv) formatter->reset(); formatter->open_array_section("logs"); RGWAccessHandle h; - int r = static_cast(store)->getRados()->log_list_init(date, &h); + int r = static_cast(store)->getRados()->log_list_init(dpp(), date, &h); if (r == -ENOENT) { // no logs. 
} else { @@ -6255,7 +6255,7 @@ int main(int argc, const char **argv) if (opt_cmd == OPT::LOG_SHOW) { RGWAccessHandle h; - int r = static_cast(store)->getRados()->log_show_init(oid, &h); + int r = static_cast(store)->getRados()->log_show_init(dpp(), oid, &h); if (r < 0) { cerr << "error opening log " << oid << ": " << cpp_strerror(-r) << std::endl; return -r; @@ -6326,7 +6326,7 @@ next: cout << std::endl; } if (opt_cmd == OPT::LOG_RM) { - int r = static_cast(store)->getRados()->log_remove(oid); + int r = static_cast(store)->getRados()->log_remove(dpp(), oid); if (r < 0) { cerr << "error removing log " << oid << ": " << cpp_strerror(-r) << std::endl; return -r; @@ -6340,7 +6340,7 @@ next: exit(1); } - int ret = static_cast(store)->svc()->zone->add_bucket_placement(pool, null_yield); + int ret = static_cast(store)->svc()->zone->add_bucket_placement(dpp(), pool, null_yield); if (ret < 0) cerr << "failed to add bucket placement: " << cpp_strerror(-ret) << std::endl; } @@ -6351,14 +6351,14 @@ next: exit(1); } - int ret = static_cast(store)->svc()->zone->remove_bucket_placement(pool, null_yield); + int ret = static_cast(store)->svc()->zone->remove_bucket_placement(dpp(), pool, null_yield); if (ret < 0) cerr << "failed to remove bucket placement: " << cpp_strerror(-ret) << std::endl; } if (opt_cmd == OPT::POOLS_LIST) { set pools; - int ret = static_cast(store)->svc()->zone->list_placement_set(pools, null_yield); + int ret = static_cast(store)->svc()->zone->list_placement_set(dpp(), pools, null_yield); if (ret < 0) { cerr << "could not list placement set: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -6462,7 +6462,7 @@ next: return 1; } - ret = RGWUsage::clear(store); + ret = RGWUsage::clear(dpp(), store); if (ret < 0) { return ret; } @@ -6488,7 +6488,7 @@ next: } RGWOLHInfo olh; rgw_obj obj(bucket->get_key(), object); - ret = static_cast(store)->getRados()->get_olh(bucket->get_info(), obj, &olh); + ret = static_cast(store)->getRados()->get_olh(dpp(), 
bucket->get_info(), obj, &olh); if (ret < 0) { cerr << "ERROR: failed reading olh: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -6549,7 +6549,7 @@ next: rgw_cls_bi_entry entry; - ret = static_cast(store)->getRados()->bi_get(bucket->get_info(), obj, bi_index_type, &entry); + ret = static_cast(store)->getRados()->bi_get(dpp(), bucket->get_info(), obj, bi_index_type, &entry); if (ret < 0) { cerr << "ERROR: bi_get(): " << cpp_strerror(-ret) << std::endl; return -ret; @@ -6953,7 +6953,7 @@ next: entry.old_num_shards = num_source_shards; entry.new_num_shards = num_shards; - return reshard.add(entry); + return reshard.add(dpp(), entry); } if (opt_cmd == OPT::RESHARD_LIST) { @@ -7013,7 +7013,7 @@ next: RGWBucketReshard br(static_cast(store), bucket->get_info(), bucket->get_attrs(), nullptr /* no callback */); list status; - int r = br.get_status(&status); + int r = br.get_status(dpp(), &status); if (r < 0) { cerr << "ERROR: could not get resharding status for bucket " << bucket_name << std::endl; @@ -7056,7 +7056,7 @@ next: // we did not encounter an error, so let's work with the bucket RGWBucketReshard br(static_cast(store), bucket->get_info(), bucket->get_attrs(), nullptr /* no callback */); - int ret = br.cancel(); + int ret = br.cancel(dpp()); if (ret < 0) { if (ret == -EBUSY) { cerr << "There is ongoing resharding, please retry after " << @@ -7078,7 +7078,7 @@ next: entry.bucket_name = bucket_name; //entry.bucket_id = bucket_id; - ret = reshard.remove(entry); + ret = reshard.remove(dpp(), entry); if (ret < 0 && ret != -ENOENT) { cerr << "Error in updating reshard log with bucket " << bucket_name << ": " << cpp_strerror(-ret) << std::endl; @@ -7097,7 +7097,7 @@ next: rgw_obj_index_key index_key; key.get_index_key(&index_key); oid_list.push_back(index_key); - ret = bucket->remove_objs_from_index(oid_list); + ret = bucket->remove_objs_from_index(dpp(), oid_list); if (ret < 0) { cerr << "ERROR: remove_obj_from_index() returned error: " << cpp_strerror(-ret) << 
std::endl; return 1; @@ -7341,7 +7341,7 @@ next: info.job_name = job_id; info.num_shards = num_shards; - int ret = search.init(job_id, &info, detail); + int ret = search.init(dpp(), job_id, &info, detail); if (ret < 0) { cerr << "could not init search, ret=" << ret << std::endl; return -ret; @@ -7370,7 +7370,7 @@ next: cerr << "ERROR: --job-id not specified" << std::endl; return EINVAL; } - int ret = search.init(job_id, NULL); + int ret = search.init(dpp(), job_id, NULL); if (ret < 0) { if (ret == -ENOENT) { cerr << "job not found" << std::endl; @@ -7395,7 +7395,7 @@ next: } RGWOrphanStore orphan_store(store); - int ret = orphan_store.init(); + int ret = orphan_store.init(dpp()); if (ret < 0){ cerr << "connection to cluster failed!" << std::endl; return -ret; @@ -7439,7 +7439,7 @@ next: "so at most one of the two should be specified" << std::endl; return EINVAL; } - ret = static_cast(store)->ctl()->user->reset_stats(user->get_id(), null_yield); + ret = static_cast(store)->ctl()->user->reset_stats(dpp(), user->get_id(), null_yield); if (ret < 0) { cerr << "ERROR: could not reset user stats: " << cpp_strerror(-ret) << std::endl; @@ -7454,7 +7454,7 @@ next: cerr << "ERROR: could not init bucket: " << cpp_strerror(-ret) << std::endl; return -ret; } - ret = bucket->sync_user_stats(null_yield); + ret = bucket->sync_user_stats(dpp(), null_yield); if (ret < 0) { cerr << "ERROR: could not sync bucket stats: " << cpp_strerror(-ret) << std::endl; @@ -7473,7 +7473,7 @@ next: RGWStorageStats stats; ceph::real_time last_stats_sync; ceph::real_time last_stats_update; - int ret = static_cast(store)->ctl()->user->read_stats(user->get_id(), &stats, null_yield, + int ret = static_cast(store)->ctl()->user->read_stats(dpp(), user->get_id(), &stats, null_yield, &last_stats_sync, &last_stats_update); if (ret < 0) { @@ -7535,7 +7535,7 @@ next: } void *handle; int max = 1000; - int ret = store->meta_list_keys_init(metadata_key, marker, &handle); + int ret = 
store->meta_list_keys_init(dpp(), metadata_key, marker, &handle); if (ret < 0) { cerr << "ERROR: can't get key: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -7623,7 +7623,7 @@ next: meta_log->init_list_entries(i, {}, {}, marker, &handle); bool truncated; do { - int ret = meta_log->list_entries(handle, 1000, entries, NULL, &truncated); + int ret = meta_log->list_entries(dpp(), handle, 1000, entries, NULL, &truncated); if (ret < 0) { cerr << "ERROR: meta_log->list_entries(): " << cpp_strerror(-ret) << std::endl; return -ret; @@ -7664,7 +7664,7 @@ next: for (; i < g_ceph_context->_conf->rgw_md_log_max_shards; i++) { RGWMetadataLogInfo info; - meta_log->get_info(i, &info); + meta_log->get_info(dpp(), i, &info); ::encode_json("info", info, formatter.get()); @@ -7690,7 +7690,7 @@ next: } auto num_shards = g_conf()->rgw_md_log_max_shards; - ret = crs.run(create_admin_meta_log_trim_cr(dpp(), static_cast(store), &http, num_shards)); + ret = crs.run(dpp(), create_admin_meta_log_trim_cr(dpp(), static_cast(store), &http, num_shards)); if (ret < 0) { cerr << "automated mdlog trim failed with " << cpp_strerror(ret) << std::endl; return -ret; @@ -7732,7 +7732,7 @@ next: // trim until -ENODATA do { - ret = meta_log->trim(shard_id, {}, {}, {}, marker); + ret = meta_log->trim(dpp(), shard_id, {}, {}, {}, marker); } while (ret == 0); if (ret < 0 && ret != -ENODATA) { cerr << "ERROR: meta_log->trim(): " << cpp_strerror(-ret) << std::endl; @@ -7751,14 +7751,14 @@ next: if (opt_cmd == OPT::METADATA_SYNC_STATUS) { RGWMetaSyncStatusManager sync(static_cast(store), static_cast(store)->svc()->rados->get_async_processor()); - int ret = sync.init(); + int ret = sync.init(dpp()); if (ret < 0) { cerr << "ERROR: sync.init() returned ret=" << ret << std::endl; return -ret; } rgw_meta_sync_status sync_status; - ret = sync.read_sync_status(&sync_status); + ret = sync.read_sync_status(dpp(), &sync_status); if (ret < 0) { cerr << "ERROR: sync.read_sync_status() returned ret=" << ret << 
std::endl; return -ret; @@ -7792,12 +7792,12 @@ next: if (opt_cmd == OPT::METADATA_SYNC_INIT) { RGWMetaSyncStatusManager sync(static_cast(store), static_cast(store)->svc()->rados->get_async_processor()); - int ret = sync.init(); + int ret = sync.init(dpp()); if (ret < 0) { cerr << "ERROR: sync.init() returned ret=" << ret << std::endl; return -ret; } - ret = sync.init_sync_status(); + ret = sync.init_sync_status(dpp()); if (ret < 0) { cerr << "ERROR: sync.init_sync_status() returned ret=" << ret << std::endl; return -ret; @@ -7808,13 +7808,13 @@ next: if (opt_cmd == OPT::METADATA_SYNC_RUN) { RGWMetaSyncStatusManager sync(static_cast(store), static_cast(store)->svc()->rados->get_async_processor()); - int ret = sync.init(); + int ret = sync.init(dpp()); if (ret < 0) { cerr << "ERROR: sync.init() returned ret=" << ret << std::endl; return -ret; } - ret = sync.run(null_yield); + ret = sync.run(dpp(), null_yield); if (ret < 0) { cerr << "ERROR: sync.run() returned ret=" << ret << std::endl; return -ret; @@ -7828,7 +7828,7 @@ next: } RGWDataSyncStatusManager sync(static_cast(store), static_cast(store)->svc()->rados->get_async_processor(), source_zone, nullptr); - int ret = sync.init(); + int ret = sync.init(dpp()); if (ret < 0) { cerr << "ERROR: sync.init() returned ret=" << ret << std::endl; return -ret; @@ -7839,7 +7839,7 @@ next: set pending_buckets; set recovering_buckets; rgw_data_sync_marker sync_marker; - ret = sync.read_shard_status(shard_id, pending_buckets, recovering_buckets, &sync_marker, + ret = sync.read_shard_status(dpp(), shard_id, pending_buckets, recovering_buckets, &sync_marker, max_entries_specified ? 
max_entries : 20); if (ret < 0 && ret != -ENOENT) { cerr << "ERROR: sync.read_shard_status() returned ret=" << ret << std::endl; @@ -7853,7 +7853,7 @@ next: formatter->close_section(); formatter->flush(cout); } else { - ret = sync.read_sync_status(&sync_status); + ret = sync.read_sync_status(dpp(), &sync_status); if (ret < 0 && ret != -ENOENT) { cerr << "ERROR: sync.read_sync_status() returned ret=" << ret << std::endl; return -ret; @@ -7892,13 +7892,13 @@ next: RGWDataSyncStatusManager sync(static_cast(store), static_cast(store)->svc()->rados->get_async_processor(), source_zone, nullptr); - int ret = sync.init(); + int ret = sync.init(dpp()); if (ret < 0) { cerr << "ERROR: sync.init() returned ret=" << ret << std::endl; return -ret; } - ret = sync.init_sync_status(); + ret = sync.init_sync_status(dpp()); if (ret < 0) { cerr << "ERROR: sync.init_sync_status() returned ret=" << ret << std::endl; return -ret; @@ -7915,19 +7915,19 @@ next: int ret = static_cast(store)->svc()->sync_modules->get_manager()->create_instance(g_ceph_context, static_cast(store)->svc()->zone->get_zone().tier_type, store->get_zone()->get_params().tier_config, &sync_module); if (ret < 0) { - lderr(cct) << "ERROR: failed to init sync module instance, ret=" << ret << dendl; + ldpp_dout(dpp(), -1) << "ERROR: failed to init sync module instance, ret=" << ret << dendl; return ret; } RGWDataSyncStatusManager sync(static_cast(store), static_cast(store)->svc()->rados->get_async_processor(), source_zone, nullptr, sync_module); - ret = sync.init(); + ret = sync.init(dpp()); if (ret < 0) { cerr << "ERROR: sync.init() returned ret=" << ret << std::endl; return -ret; } - ret = sync.run(); + ret = sync.run(dpp()); if (ret < 0) { cerr << "ERROR: sync.run() returned ret=" << ret << std::endl; return -ret; @@ -7960,12 +7960,12 @@ next: RGWBucketPipeSyncStatusManager sync(static_cast(store), source_zone, opt_sb, bucket->get_key()); - ret = sync.init(); + ret = sync.init(dpp()); if (ret < 0) { cerr << "ERROR: 
sync.init() returned ret=" << ret << std::endl; return -ret; } - ret = sync.init_sync_status(); + ret = sync.init_sync_status(dpp()); if (ret < 0) { cerr << "ERROR: sync.init_sync_status() returned ret=" << ret << std::endl; return -ret; @@ -8004,7 +8004,7 @@ next: opt_source_zone, opt_source_bucket, opt_retry_delay_ms, timeout_at); if (ret < 0) { - lderr(store->ctx()) << "bucket sync checkpoint failed: " << cpp_strerror(ret) << dendl; + ldpp_dout(dpp(), -1) << "bucket sync checkpoint failed: " << cpp_strerror(ret) << dendl; return -ret; } } @@ -8067,12 +8067,12 @@ next: } RGWBucketPipeSyncStatusManager sync(static_cast(store), source_zone, opt_source_bucket, bucket->get_key()); - ret = sync.init(); + ret = sync.init(dpp()); if (ret < 0) { cerr << "ERROR: sync.init() returned ret=" << ret << std::endl; return -ret; } - ret = sync.read_sync_status(); + ret = sync.read_sync_status(dpp()); if (ret < 0) { cerr << "ERROR: sync.read_sync_status() returned ret=" << ret << std::endl; return -ret; @@ -8099,13 +8099,13 @@ next: } RGWBucketPipeSyncStatusManager sync(static_cast(store), source_zone, opt_source_bucket, bucket->get_key()); - ret = sync.init(); + ret = sync.init(dpp()); if (ret < 0) { cerr << "ERROR: sync.init() returned ret=" << ret << std::endl; return -ret; } - ret = sync.run(); + ret = sync.run(dpp()); if (ret < 0) { cerr << "ERROR: sync.run() returned ret=" << ret << std::endl; return -ret; @@ -8130,7 +8130,7 @@ next: do { list entries; - ret = static_cast(store)->svc()->bilog_rados->log_list(bucket->get_info(), shard_id, marker, max_entries - count, entries, &truncated); + ret = static_cast(store)->svc()->bilog_rados->log_list(dpp(), bucket->get_info(), shard_id, marker, max_entries - count, entries, &truncated); if (ret < 0) { cerr << "ERROR: list_bi_log_entries(): " << cpp_strerror(-ret) << std::endl; return -ret; @@ -8194,7 +8194,7 @@ next: do { list entries; - ret = static_cast(store)->svc()->cls->timelog.list(oid, {}, {}, max_entries - count, entries, 
marker, &marker, &truncated, + ret = static_cast(store)->svc()->cls->timelog.list(dpp(), oid, {}, {}, max_entries - count, entries, marker, &marker, &truncated, null_yield); if (ret == -ENOENT) { break; @@ -8612,7 +8612,7 @@ next: cerr << "ERROR: could not init bucket: " << cpp_strerror(-ret) << std::endl; return -ret; } - ret = static_cast(store)->svc()->bilog_rados->log_trim(bucket->get_info(), shard_id, start_marker, end_marker); + ret = static_cast(store)->svc()->bilog_rados->log_trim(dpp(), bucket->get_info(), shard_id, start_marker, end_marker); if (ret < 0) { cerr << "ERROR: trim_bi_log_entries(): " << cpp_strerror(-ret) << std::endl; return -ret; @@ -8630,7 +8630,7 @@ next: return -ret; } map markers; - ret = static_cast(store)->svc()->bilog_rados->get_log_status(bucket->get_info(), shard_id, + ret = static_cast(store)->svc()->bilog_rados->get_log_status(dpp(), bucket->get_info(), shard_id, &markers, null_yield); if (ret < 0) { cerr << "ERROR: get_bi_log_status(): " << cpp_strerror(-ret) << std::endl; @@ -8660,7 +8660,7 @@ next: cerr << "trim manager init failed with " << cpp_strerror(ret) << std::endl; return -ret; } - ret = crs.run(trim.create_admin_bucket_trim_cr(&http)); + ret = crs.run(dpp(), trim.create_admin_bucket_trim_cr(&http)); if (ret < 0) { cerr << "automated bilog trim failed with " << cpp_strerror(ret) << std::endl; return -ret; @@ -8700,14 +8700,14 @@ next: do { std::vector entries; if (specified_shard_id) { - ret = datalog_svc->list_entries(shard_id, max_entries - count, + ret = datalog_svc->list_entries(dpp(), shard_id, max_entries - count, entries, marker.empty() ? 
std::nullopt : std::make_optional(marker), &marker, &truncated); } else { - ret = datalog_svc->list_entries(max_entries - count, entries, + ret = datalog_svc->list_entries(dpp(), max_entries - count, entries, log_marker, &truncated); } if (ret < 0) { @@ -8739,7 +8739,7 @@ next: list entries; RGWDataChangesLogInfo info; - static_cast(store)->svc()->datalog_rados->get_info(i, &info); + static_cast(store)->svc()->datalog_rados->get_info(dpp(), i, &info); ::encode_json("info", info, formatter.get()); @@ -8762,7 +8762,7 @@ next: auto num_shards = g_conf()->rgw_data_log_num_shards; std::vector markers(num_shards); - ret = crs.run(create_admin_data_log_trim_cr(static_cast(store), &http, num_shards, markers)); + ret = crs.run(dpp(), create_admin_data_log_trim_cr(dpp(), static_cast(store), &http, num_shards, markers)); if (ret < 0) { cerr << "automated datalog trim failed with " << cpp_strerror(ret) << std::endl; return -ret; @@ -8799,7 +8799,7 @@ next: // loop until -ENODATA do { auto datalog = static_cast(store)->svc()->datalog_rados; - ret = datalog->trim_entries(shard_id, marker); + ret = datalog->trim_entries(dpp(), shard_id, marker); } while (ret == 0); if (ret < 0 && ret != -ENODATA) { @@ -8814,7 +8814,7 @@ next: return -EINVAL; } auto datalog = static_cast(store)->svc()->datalog_rados; - ret = datalog->change_format(*opt_log_type, null_yield); + ret = datalog->change_format(dpp(), *opt_log_type, null_yield); if (ret < 0) { cerr << "ERROR: change_format(): " << cpp_strerror(-ret) << std::endl; return -ret; @@ -8824,7 +8824,7 @@ next: if (opt_cmd == OPT::DATALOG_PRUNE) { auto datalog = static_cast(store)->svc()->datalog_rados; std::optional through; - ret = datalog->trim_generations(through); + ret = datalog->trim_generations(dpp(), through); if (ret < 0) { cerr << "ERROR: trim_generations(): " << cpp_strerror(-ret) << std::endl; @@ -8914,7 +8914,7 @@ next: null_yield, dpp(), MDLOG_STATUS_WRITE, [&] { - return 
static_cast(store)->svc()->cls->mfa.create_mfa(user->get_id(), config, &objv_tracker, mtime, null_yield); + return static_cast(store)->svc()->cls->mfa.create_mfa(dpp(), user->get_id(), config, &objv_tracker, mtime, null_yield); }); if (ret < 0) { cerr << "MFA creation failed, error: " << cpp_strerror(-ret) << std::endl; @@ -8950,7 +8950,7 @@ next: null_yield, dpp(), MDLOG_STATUS_WRITE, [&] { - return static_cast(store)->svc()->cls->mfa.remove_mfa(user->get_id(), totp_serial, &objv_tracker, mtime, null_yield); + return static_cast(store)->svc()->cls->mfa.remove_mfa(dpp(), user->get_id(), totp_serial, &objv_tracker, mtime, null_yield); }); if (ret < 0) { cerr << "MFA removal failed, error: " << cpp_strerror(-ret) << std::endl; @@ -8980,7 +8980,7 @@ next: } rados::cls::otp::otp_info_t result; - int ret = static_cast(store)->svc()->cls->mfa.get_mfa(user->get_id(), totp_serial, &result, null_yield); + int ret = static_cast(store)->svc()->cls->mfa.get_mfa(dpp(), user->get_id(), totp_serial, &result, null_yield); if (ret < 0) { if (ret == -ENOENT || ret == -ENODATA) { cerr << "MFA serial id not found" << std::endl; @@ -9002,7 +9002,7 @@ next: } list result; - int ret = static_cast(store)->svc()->cls->mfa.list_mfa(user->get_id(), &result, null_yield); + int ret = static_cast(store)->svc()->cls->mfa.list_mfa(dpp(), user->get_id(), &result, null_yield); if (ret < 0) { cerr << "MFA listing failed, error: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -9030,7 +9030,7 @@ next: } list result; - int ret = static_cast(store)->svc()->cls->mfa.check_mfa(user->get_id(), totp_serial, totp_pin.front(), null_yield); + int ret = static_cast(store)->svc()->cls->mfa.check_mfa(dpp(), user->get_id(), totp_serial, totp_pin.front(), null_yield); if (ret < 0) { cerr << "MFA check failed, error: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -9055,7 +9055,7 @@ next: } rados::cls::otp::otp_info_t config; - int ret = static_cast(store)->svc()->cls->mfa.get_mfa(user->get_id(), 
totp_serial, &config, null_yield); + int ret = static_cast(store)->svc()->cls->mfa.get_mfa(dpp(), user->get_id(), totp_serial, &config, null_yield); if (ret < 0) { if (ret == -ENOENT || ret == -ENODATA) { cerr << "MFA serial id not found" << std::endl; @@ -9067,7 +9067,7 @@ next: ceph::real_time now; - ret = static_cast(store)->svc()->cls->mfa.otp_get_current_time(user->get_id(), &now, null_yield); + ret = static_cast(store)->svc()->cls->mfa.otp_get_current_time(dpp(), user->get_id(), &now, null_yield); if (ret < 0) { cerr << "ERROR: failed to fetch current time from osd: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -9094,7 +9094,7 @@ next: null_yield, dpp(), MDLOG_STATUS_WRITE, [&] { - return static_cast(store)->svc()->cls->mfa.create_mfa(user->get_id(), config, &objv_tracker, mtime, null_yield); + return static_cast(store)->svc()->cls->mfa.create_mfa(dpp(), user->get_id(), config, &objv_tracker, mtime, null_yield); }); if (ret < 0) { cerr << "MFA update failed, error: " << cpp_strerror(-ret) << std::endl; @@ -9186,7 +9186,7 @@ next: RGWPubSub ps(static_cast(store), tenant); - ret = ps.remove_topic(topic_name, null_yield); + ret = ps.remove_topic(dpp(), topic_name, null_yield); if (ret < 0) { cerr << "ERROR: could not remove topic: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -9230,7 +9230,7 @@ next: RGWPubSub ps(static_cast(store), tenant); auto sub = ps.get_sub(sub_name); - ret = sub->unsubscribe(topic_name, null_yield); + ret = sub->unsubscribe(dpp(), topic_name, null_yield); if (ret < 0) { cerr << "ERROR: could not get subscription info: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -9365,7 +9365,7 @@ next: cerr << "ERROR: lua package name was not provided (via --package)" << std::endl; return EINVAL; } - const auto rc = rgw::lua::add_package(store, null_yield, *script_package, bool(allow_compilation)); + const auto rc = rgw::lua::add_package(dpp(), store, null_yield, *script_package, bool(allow_compilation)); if (rc < 0) { cerr << 
"ERROR: failed to add lua package: " << script_package << " .error: " << rc << std::endl; return -rc; @@ -9382,7 +9382,7 @@ next: cerr << "ERROR: lua package name was not provided (via --package)" << std::endl; return EINVAL; } - const auto rc = rgw::lua::remove_package(store, null_yield, *script_package); + const auto rc = rgw::lua::remove_package(dpp(), store, null_yield, *script_package); if (rc == -ENOENT) { cerr << "WARNING: package " << script_package << " did not exists or already removed" << std::endl; return 0; @@ -9400,7 +9400,7 @@ next: if (opt_cmd == OPT::SCRIPT_PACKAGE_LIST) { #ifdef WITH_RADOSGW_LUA_PACKAGES rgw::lua::packages_t packages; - const auto rc = rgw::lua::list_packages(store, null_yield, packages); + const auto rc = rgw::lua::list_packages(dpp(), store, null_yield, packages); if (rc == -ENOENT) { std::cout << "no lua packages in allowlist" << std::endl; } else if (rc < 0) { diff --git a/src/rgw/rgw_auth.cc b/src/rgw/rgw_auth.cc index a0de33d88ab..9d7ce187673 100644 --- a/src/rgw/rgw_auth.cc +++ b/src/rgw/rgw_auth.cc @@ -410,7 +410,7 @@ void rgw::auth::WebIdentityApplier::load_acct_info(const DoutPrefixProvider* dpp //Check if user_id.buckets already exists, may have been from the time, when shadow users didnt exist RGWStorageStats stats; - int ret = user->read_stats(null_yield, &stats); + int ret = user->read_stats(dpp, null_yield, &stats); if (ret < 0 && ret != -ENOENT) { ldpp_dout(dpp, 0) << "ERROR: reading stats for the user returned error " << ret << dendl; return; diff --git a/src/rgw/rgw_auth_s3.cc b/src/rgw/rgw_auth_s3.cc index 629d1b9825b..0a34e81e88d 100644 --- a/src/rgw/rgw_auth_s3.cc +++ b/src/rgw/rgw_auth_s3.cc @@ -77,7 +77,7 @@ get_canon_amz_hdr(const meta_map_t& meta_map) * ?get the canonical representation of the object's location */ static std::string -get_canon_resource(const char* const request_uri, +get_canon_resource(const DoutPrefixProvider *dpp, const char* const request_uri, const std::map& sub_resources) { 
std::string dest; @@ -107,7 +107,7 @@ get_canon_resource(const char* const request_uri, } } - dout(10) << "get_canon_resource(): dest=" << dest << dendl; + ldpp_dout(dpp, 10) << "get_canon_resource(): dest=" << dest << dendl; return dest; } @@ -116,6 +116,7 @@ get_canon_resource(const char* const request_uri, * compute a request's signature */ void rgw_create_s3_canonical_header( + const DoutPrefixProvider *dpp, const char* const method, const char* const content_md5, const char* const content_type, @@ -150,7 +151,7 @@ void rgw_create_s3_canonical_header( dest.append(get_canon_amz_hdr(meta_map)); dest.append(get_canon_amz_hdr(qs_map)); - dest.append(get_canon_resource(request_uri, sub_resources)); + dest.append(get_canon_resource(dpp, request_uri, sub_resources)); dest_str = dest; } @@ -177,7 +178,8 @@ static inline void get_v2_qs_map(const req_info& info, * get the header authentication information required to * compute a request's signature */ -bool rgw_create_s3_canonical_header(const req_info& info, +bool rgw_create_s3_canonical_header(const DoutPrefixProvider *dpp, + const req_info& info, utime_t* const header_time, std::string& dest, const bool qsr) @@ -186,7 +188,7 @@ bool rgw_create_s3_canonical_header(const req_info& info, if (content_md5) { for (const char *p = content_md5; *p; p++) { if (!is_base64_for_content_md5(*p)) { - dout(0) << "NOTICE: bad content-md5 provided (not base64)," + ldpp_dout(dpp, 0) << "NOTICE: bad content-md5 provided (not base64)," << " aborting request p=" << *p << " " << (int)*p << dendl; return false; } @@ -207,7 +209,7 @@ bool rgw_create_s3_canonical_header(const req_info& info, if (str == NULL) { req_date = info.env->get("HTTP_DATE"); if (!req_date) { - dout(0) << "NOTICE: missing date for auth header" << dendl; + ldpp_dout(dpp, 0) << "NOTICE: missing date for auth header" << dendl; return false; } date = req_date; @@ -216,11 +218,11 @@ bool rgw_create_s3_canonical_header(const req_info& info, if (header_time) { struct tm t; if 
(!parse_rfc2616(req_date, &t)) { - dout(0) << "NOTICE: failed to parse date for auth header" << dendl; + ldpp_dout(dpp, 0) << "NOTICE: failed to parse date for auth header" << dendl; return false; } if (t.tm_year < 70) { - dout(0) << "NOTICE: bad date (predates epoch): " << req_date << dendl; + ldpp_dout(dpp, 0) << "NOTICE: bad date (predates epoch): " << req_date << dendl; return false; } *header_time = utime_t(internal_timegm(&t), 0); @@ -238,7 +240,7 @@ bool rgw_create_s3_canonical_header(const req_info& info, request_uri = info.effective_uri; } - rgw_create_s3_canonical_header(info.method, content_md5, content_type, + rgw_create_s3_canonical_header(dpp, info.method, content_md5, content_type, date.c_str(), meta_map, qs_map, request_uri.c_str(), sub_resources, dest); return true; diff --git a/src/rgw/rgw_auth_s3.h b/src/rgw/rgw_auth_s3.h index bc2945076a9..7c513274065 100644 --- a/src/rgw/rgw_auth_s3.h +++ b/src/rgw/rgw_auth_s3.h @@ -419,6 +419,7 @@ public: } /* namespace rgw */ void rgw_create_s3_canonical_header( + const DoutPrefixProvider *dpp, const char *method, const char *content_md5, const char *content_type, @@ -428,16 +429,17 @@ void rgw_create_s3_canonical_header( const char *request_uri, const std::map& sub_resources, std::string& dest_str); -bool rgw_create_s3_canonical_header(const req_info& info, +bool rgw_create_s3_canonical_header(const DoutPrefixProvider *dpp, + const req_info& info, utime_t *header_time, /* out */ std::string& dest, /* out */ bool qsr); static inline std::tuple -rgw_create_s3_canonical_header(const req_info& info, const bool qsr) { +rgw_create_s3_canonical_header(const DoutPrefixProvider *dpp, const req_info& info, const bool qsr) { std::string dest; utime_t header_time; - const bool ok = rgw_create_s3_canonical_header(info, &header_time, dest, qsr); + const bool ok = rgw_create_s3_canonical_header(dpp, info, &header_time, dest, qsr); return std::make_tuple(ok, dest, header_time); } diff --git a/src/rgw/rgw_bucket.cc 
b/src/rgw/rgw_bucket.cc index a1a95ace655..5ab35c5e1a8 100644 --- a/src/rgw/rgw_bucket.cc +++ b/src/rgw/rgw_bucket.cc @@ -331,7 +331,7 @@ int rgw_remove_bucket_bypass_gc(rgw::sal::Store* store, rgw::sal::Bucket* bucket if (ret < 0) return ret; - ret = bucket->get_bucket_stats(RGW_NO_SHARD, &bucket_ver, &master_ver, stats, NULL); + ret = bucket->get_bucket_stats(dpp, RGW_NO_SHARD, &bucket_ver, &master_ver, stats, NULL); if (ret < 0) return ret; @@ -365,7 +365,7 @@ int rgw_remove_bucket_bypass_gc(rgw::sal::Store* store, rgw::sal::Bucket* bucket ret = obj->get_obj_state(dpp, &obj_ctx, &astate, y, false); if (ret == -ENOENT) { - dout(1) << "WARNING: cannot find obj state for obj " << obj << dendl; + ldpp_dout(dpp, 1) << "WARNING: cannot find obj state for obj " << obj << dendl; continue; } if (ret < 0) { @@ -375,16 +375,16 @@ int rgw_remove_bucket_bypass_gc(rgw::sal::Store* store, rgw::sal::Bucket* bucket if (astate->manifest) { RGWObjManifest& manifest = *astate->manifest; - RGWObjManifest::obj_iterator miter = manifest.obj_begin(); + RGWObjManifest::obj_iterator miter = manifest.obj_begin(dpp); std::unique_ptr head_obj = bucket->get_object(manifest.get_obj().key); rgw_raw_obj raw_head_obj; head_obj->get_raw_obj(&raw_head_obj); - for (; miter != manifest.obj_end() && max_aio--; ++miter) { + for (; miter != manifest.obj_end(dpp) && max_aio--; ++miter) { if (!max_aio) { ret = handles->drain(); if (ret < 0) { - lderr(store->ctx()) << "ERROR: could not drain handles as aio completion returned with " << ret << dendl; + ldpp_dout(dpp, -1) << "ERROR: could not drain handles as aio completion returned with " << ret << dendl; return ret; } max_aio = concurrent_max; @@ -396,16 +396,16 @@ int rgw_remove_bucket_bypass_gc(rgw::sal::Store* store, rgw::sal::Bucket* bucket continue; } - ret = store->delete_raw_obj_aio(last_obj, handles.get()); + ret = store->delete_raw_obj_aio(dpp, last_obj, handles.get()); if (ret < 0) { - lderr(store->ctx()) << "ERROR: delete obj aio failed with " 
<< ret << dendl; + ldpp_dout(dpp, -1) << "ERROR: delete obj aio failed with " << ret << dendl; return ret; } } // for all shadow objs ret = head_obj->delete_obj_aio(dpp, astate, handles.get(), keep_index_consistent, null_yield); if (ret < 0) { - lderr(store->ctx()) << "ERROR: delete obj aio failed with " << ret << dendl; + ldpp_dout(dpp, -1) << "ERROR: delete obj aio failed with " << ret << dendl; return ret; } } @@ -413,7 +413,7 @@ int rgw_remove_bucket_bypass_gc(rgw::sal::Store* store, rgw::sal::Bucket* bucket if (!max_aio) { ret = handles->drain(); if (ret < 0) { - lderr(store->ctx()) << "ERROR: could not drain handles as aio completion returned with " << ret << dendl; + ldpp_dout(dpp, -1) << "ERROR: could not drain handles as aio completion returned with " << ret << dendl; return ret; } max_aio = concurrent_max; @@ -424,13 +424,13 @@ int rgw_remove_bucket_bypass_gc(rgw::sal::Store* store, rgw::sal::Bucket* bucket ret = handles->drain(); if (ret < 0) { - lderr(store->ctx()) << "ERROR: could not drain handles as aio completion returned with " << ret << dendl; + ldpp_dout(dpp, -1) << "ERROR: could not drain handles as aio completion returned with " << ret << dendl; return ret; } - bucket->sync_user_stats(y); + bucket->sync_user_stats(dpp, y); if (ret < 0) { - dout(1) << "WARNING: failed sync user stats before bucket delete. ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "WARNING: failed sync user stats before bucket delete. 
ret=" << ret << dendl; } RGWObjVersionTracker objv_tracker; @@ -440,7 +440,7 @@ int rgw_remove_bucket_bypass_gc(rgw::sal::Store* store, rgw::sal::Bucket* bucket // remain are detritus from a prior bug ret = bucket->remove_bucket(dpp, true, std::string(), std::string(), false, nullptr, y); if (ret < 0) { - lderr(store->ctx()) << "ERROR: could not remove bucket " << bucket << dendl; + ldpp_dout(dpp, -1) << "ERROR: could not remove bucket " << bucket << dendl; return ret; } @@ -500,14 +500,14 @@ int RGWBucket::init(rgw::sal::Store* _store, RGWBucketAdminOpState& op_state, return 0; } -bool rgw_find_bucket_by_id(CephContext *cct, rgw::sal::Store* store, +bool rgw_find_bucket_by_id(const DoutPrefixProvider *dpp, CephContext *cct, rgw::sal::Store* store, const string& marker, const string& bucket_id, rgw_bucket* bucket_out) { void *handle = NULL; bool truncated = false; string s; - int ret = store->meta_list_keys_init("bucket.instance", marker, &handle); + int ret = store->meta_list_keys_init(dpp, "bucket.instance", marker, &handle); if (ret < 0) { cerr << "ERROR: can't get key: " << cpp_strerror(-ret) << std::endl; store->meta_list_keys_complete(handle); @@ -821,7 +821,7 @@ int RGWBucket::check_bad_index_multipart(RGWBucketAdminOpState& op_state, if (objs_to_unlink.size() > listing_max_entries) { if (fix_index) { - int r = bucket->remove_objs_from_index(objs_to_unlink); + int r = bucket->remove_objs_from_index(dpp, objs_to_unlink); if (r < 0) { set_err_msg(err_msg, "ERROR: remove_obj_from_index() returned error: " + cpp_strerror(-r)); @@ -836,7 +836,7 @@ int RGWBucket::check_bad_index_multipart(RGWBucketAdminOpState& op_state, } if (fix_index) { - int r = bucket->remove_objs_from_index(objs_to_unlink); + int r = bucket->remove_objs_from_index(dpp, objs_to_unlink); if (r < 0) { set_err_msg(err_msg, "ERROR: remove_obj_from_index() returned error: " + cpp_strerror(-r)); @@ -866,7 +866,7 @@ int RGWBucket::check_object_index(const DoutPrefixProvider *dpp, return -EINVAL; } - 
bucket->set_tag_timeout(BUCKET_TAG_TIMEOUT); + bucket->set_tag_timeout(dpp, BUCKET_TAG_TIMEOUT); string prefix; string empty_delimiter; @@ -895,27 +895,28 @@ int RGWBucket::check_object_index(const DoutPrefixProvider *dpp, formatter->close_section(); - bucket->set_tag_timeout(0); + bucket->set_tag_timeout(dpp, 0); return 0; } -int RGWBucket::check_index(RGWBucketAdminOpState& op_state, +int RGWBucket::check_index(const DoutPrefixProvider *dpp, + RGWBucketAdminOpState& op_state, map& existing_stats, map& calculated_stats, std::string *err_msg) { bool fix_index = op_state.will_fix_index(); - int r = bucket->check_index(existing_stats, calculated_stats); + int r = bucket->check_index(dpp, existing_stats, calculated_stats); if (r < 0) { set_err_msg(err_msg, "failed to check index error=" + cpp_strerror(-r)); return r; } if (fix_index) { - r = bucket->rebuild_index(); + r = bucket->rebuild_index(dpp); if (r < 0) { set_err_msg(err_msg, "failed to rebuild index err=" + cpp_strerror(-r)); return r; @@ -948,13 +949,13 @@ int RGWBucket::sync(RGWBucketAdminOpState& op_state, const DoutPrefixProvider *d int shard_id = bucket->get_info().layout.current_index.layout.normal.num_shards? 
0 : -1; if (!sync) { - r = static_cast(store)->svc()->bilog_rados->log_stop(bucket->get_info(), -1); + r = static_cast(store)->svc()->bilog_rados->log_stop(dpp, bucket->get_info(), -1); if (r < 0) { set_err_msg(err_msg, "ERROR: failed writing stop bilog:" + cpp_strerror(-r)); return r; } } else { - r = static_cast(store)->svc()->bilog_rados->log_start(bucket->get_info(), -1); + r = static_cast(store)->svc()->bilog_rados->log_start(dpp, bucket->get_info(), -1); if (r < 0) { set_err_msg(err_msg, "ERROR: failed writing resync bilog:" + cpp_strerror(-r)); return r; @@ -1149,7 +1150,7 @@ int RGWBucketAdminOp::check_index(rgw::sal::Store* store, RGWBucketAdminOpState& if (ret < 0) return ret; - ret = bucket.check_index(op_state, existing_stats, calculated_stats); + ret = bucket.check_index(dpp, op_state, existing_stats, calculated_stats); if (ret < 0) return ret; @@ -1219,7 +1220,7 @@ static int bucket_stats(rgw::sal::Store* store, string bucket_ver, master_ver; string max_marker; - ret = bucket->get_bucket_stats(RGW_NO_SHARD, &bucket_ver, &master_ver, stats, &max_marker); + ret = bucket->get_bucket_stats(dpp, RGW_NO_SHARD, &bucket_ver, &master_ver, stats, &max_marker); if (ret < 0) { cerr << "error getting bucket stats bucket=" << bucket->get_name() << " ret=" << ret << std::endl; return ret; @@ -1326,7 +1327,7 @@ int RGWBucketAdminOp::limit_check(rgw::sal::Store* store, /* need stats for num_entries */ string bucket_ver, master_ver; std::map stats; - ret = bucket->get_bucket_stats(RGW_NO_SHARD, &bucket_ver, &master_ver, stats, nullptr); + ret = bucket->get_bucket_stats(dpp, RGW_NO_SHARD, &bucket_ver, &master_ver, stats, nullptr); if (ret < 0) continue; @@ -1457,7 +1458,7 @@ int RGWBucketAdminOp::info(rgw::sal::Store* store, bool truncated = true; formatter->open_array_section("buckets"); - ret = store->meta_list_keys_init("bucket", string(), &handle); + ret = store->meta_list_keys_init(dpp, "bucket", string(), &handle); while (ret == 0 && truncated) { std::list 
buckets; constexpr int max_keys = 1000; @@ -1527,7 +1528,7 @@ void get_stale_instances(rgw::sal::Store* store, const std::string& bucket_name, int r = store->get_bucket(dpp, nullptr, rbucket, &bucket, null_yield); if (r < 0){ // this can only happen if someone deletes us right when we're processing - lderr(store->ctx()) << "Bucket instance is invalid: " << bucket_instance + ldpp_dout(dpp, -1) << "Bucket instance is invalid: " << bucket_instance << cpp_strerror(-r) << dendl; continue; } @@ -1585,7 +1586,7 @@ void get_stale_instances(rgw::sal::Store* store, const std::string& bucket_name, r = reshard_lock.lock(); if (r < 0) { // most likely bucket is under reshard, return the sureshot stale instances - ldout(store->ctx(), 5) << __func__ + ldpp_dout(dpp, 5) << __func__ << "failed to take reshard lock; reshard underway likey" << dendl; return; } @@ -1612,7 +1613,7 @@ static int process_stale_instances(rgw::sal::Store* store, RGWBucketAdminOpState Formatter *formatter = flusher.get_formatter(); static constexpr auto default_max_keys = 1000; - int ret = store->meta_list_keys_init("bucket.instance", marker, &handle); + int ret = store->meta_list_keys_init(dpp, "bucket.instance", marker, &handle); if (ret < 0) { cerr << "ERROR: can't get key: " << cpp_strerror(-ret) << std::endl; return ret; @@ -1706,7 +1707,7 @@ static int fix_single_bucket_lc(rgw::sal::Store* store, return ret; } - return rgw::lc::fix_lc_shard_entry(store, store->get_rgwlc()->get_lc(), bucket.get()); + return rgw::lc::fix_lc_shard_entry(dpp, store, store->get_rgwlc()->get_lc(), bucket.get()); } static void format_lc_status(Formatter* formatter, @@ -1748,7 +1749,7 @@ int RGWBucketAdminOp::fix_lc_shards(rgw::sal::Store* store, process_single_lc_entry(store, formatter, user_id.tenant, bucket_name, dpp); formatter->flush(cout); } else { - int ret = store->meta_list_keys_init("bucket", marker, &handle); + int ret = store->meta_list_keys_init(dpp, "bucket", marker, &handle); if (ret < 0) { std::cerr << "ERROR: 
can't get key: " << cpp_strerror(-ret) << std::endl; return ret; @@ -1833,7 +1834,7 @@ static int fix_bucket_obj_expiry(const DoutPrefixProvider *dpp, do { int ret = bucket->list(dpp, params, listing_max_entries, results, null_yield); if (ret < 0) { - lderr(store->ctx()) << "ERROR failed to list objects in the bucket" << dendl; + ldpp_dout(dpp, -1) << "ERROR failed to list objects in the bucket" << dendl; return ret; } for (const auto& obj : results.objs) { @@ -1866,7 +1867,7 @@ int RGWBucketAdminOp::fix_obj_expiry(rgw::sal::Store* store, RGWBucket admin_bucket; int ret = admin_bucket.init(store, op_state, null_yield, dpp); if (ret < 0) { - lderr(store->ctx()) << "failed to initialize bucket" << dendl; + ldpp_dout(dpp, -1) << "failed to initialize bucket" << dendl; return ret; } std::unique_ptr bucket; @@ -1968,12 +1969,12 @@ public: */ ret = ctl.bucket->unlink_bucket(be.owner, be.bucket, y, dpp, false); if (ret < 0) { - lderr(svc.bucket->ctx()) << "could not unlink bucket=" << entry << " owner=" << be.owner << dendl; + ldpp_dout(dpp, -1) << "could not unlink bucket=" << entry << " owner=" << be.owner << dendl; } ret = svc.bucket->remove_bucket_entrypoint_info(ctx, entry, &objv_tracker, y, dpp); if (ret < 0) { - lderr(svc.bucket->ctx()) << "could not delete bucket=" << entry << dendl; + ldpp_dout(dpp, -1) << "could not delete bucket=" << entry << dendl; } /* idempotent */ return 0; @@ -2218,7 +2219,7 @@ public: ret = svc.bucket->store_bucket_entrypoint_info(ctx, RGWSI_Bucket::get_entrypoint_meta_key(new_be.bucket), new_be, true, mtime, &attrs, nullptr, y, dpp); if (ret < 0) { - ldout(cct, 0) << "ERROR: failed to put new bucket entrypoint for bucket=" << new_be.bucket << " ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to put new bucket entrypoint for bucket=" << new_be.bucket << " ret=" << ret << dendl; return ret; } @@ -2226,7 +2227,7 @@ public: ret = ctl.bucket->link_bucket(new_be.owner, new_be.bucket, new_be.creation_time, y, dpp, false); if (ret < 
0) { - ldout(cct, 0) << "ERROR: failed to link new bucket for bucket=" << new_be.bucket << " ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to link new bucket for bucket=" << new_be.bucket << " ret=" << ret << dendl; return ret; } @@ -2234,7 +2235,7 @@ public: ret = ctl.bucket->unlink_bucket(be.owner, entry_bucket, y, dpp, false); if (ret < 0) { - lderr(cct) << "could not unlink bucket=" << entry << " owner=" << be.owner << dendl; + ldpp_dout(dpp, -1) << "could not unlink bucket=" << entry << " owner=" << be.owner << dendl; } // if (ret == -ECANCELED) it means that there was a race here, and someone @@ -2248,13 +2249,13 @@ public: y, dpp); if (ret < 0) { - ldout(cct, 0) << "ERROR: failed to put new bucket entrypoint for bucket=" << new_be.bucket << " ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to put new bucket entrypoint for bucket=" << new_be.bucket << " ret=" << ret << dendl; return ret; } ret = ctl.bucket->remove_bucket_instance_info(be.bucket, old_bi, y, dpp); if (ret < 0) { - lderr(cct) << "could not delete bucket=" << entry << dendl; + ldpp_dout(dpp, -1) << "could not delete bucket=" << entry << dendl; } @@ -2411,7 +2412,7 @@ public: obj->get_bucket_info().encode(*bl); } - int put_check() override; + int put_check(const DoutPrefixProvider *dpp) override; int put_checked(const DoutPrefixProvider *dpp) override; int put_post(const DoutPrefixProvider *dpp) override; }; @@ -2456,7 +2457,7 @@ void init_default_bucket_layout(CephContext *cct, rgw::BucketLayout& layout, } } -int RGWMetadataHandlerPut_BucketInstance::put_check() +int RGWMetadataHandlerPut_BucketInstance::put_check(const DoutPrefixProvider *dpp) { int ret; @@ -2495,9 +2496,9 @@ int RGWMetadataHandlerPut_BucketInstance::put_check() bci.info.bucket.tenant = tenant_name; // if the sync module never writes data, don't require the zone to specify all placement targets if (bihandler->svc.zone->sync_module_supports_writes()) { - ret = 
bihandler->svc.zone->select_bucket_location_by_rule(bci.info.placement_rule, &rule_info, y); + ret = bihandler->svc.zone->select_bucket_location_by_rule(dpp, bci.info.placement_rule, &rule_info, y); if (ret < 0) { - ldout(cct, 0) << "ERROR: select_bucket_placement() returned " << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: select_bucket_placement() returned " << ret << dendl; return ret; } } @@ -2544,7 +2545,7 @@ int RGWMetadataHandlerPut_BucketInstance::put_post(const DoutPrefixProvider *dpp objv_tracker = bci.info.objv_tracker; - int ret = bihandler->svc.bi->init_index(bci.info); + int ret = bihandler->svc.bi->init_index(dpp, bci.info); if (ret < 0) { return ret; } @@ -2557,7 +2558,7 @@ public: RGWArchiveBucketInstanceMetadataHandler() {} int do_remove(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWObjVersionTracker& objv_tracker, optional_yield y, const DoutPrefixProvider *dpp) override { - ldout(cct, 0) << "SKIP: bucket instance removal is not allowed on archive zone: bucket.instance:" << entry << dendl; + ldpp_dout(dpp, 0) << "SKIP: bucket instance removal is not allowed on archive zone: bucket.instance:" << entry << dendl; return 0; } }; @@ -2853,7 +2854,7 @@ int RGWBucketCtl::convert_old_bucket_info(RGWSI_Bucket_X_Ctx& ctx, RGWSI_Bucket::get_entrypoint_meta_key(bucket), &entry_point, &ot, &ep_mtime, &attrs, y, dpp); if (ret < 0) { - ldout(cct, 0) << "ERROR: get_bucket_entrypoint_info() returned " << ret << " bucket=" << bucket << dendl; + ldpp_dout(dpp, 0) << "ERROR: get_bucket_entrypoint_info() returned " << ret << " bucket=" << bucket << dendl; return ret; } @@ -2868,7 +2869,7 @@ int RGWBucketCtl::convert_old_bucket_info(RGWSI_Bucket_X_Ctx& ctx, ret = do_store_linked_bucket_info(ctx, info, nullptr, false, ep_mtime, &ot.write_version, &attrs, true, y, dpp); if (ret < 0) { - ldout(cct, 0) << "ERROR: failed to put_linked_bucket_info(): " << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to put_linked_bucket_info(): " << ret << dendl; return ret; } 
@@ -2888,7 +2889,7 @@ int RGWBucketCtl::set_bucket_instance_attrs(RGWBucketInfo& bucket_info, /* an old bucket object, need to convert it */ int ret = convert_old_bucket_info(ctx, bucket, y, dpp); if (ret < 0) { - ldout(cct, 0) << "ERROR: failed converting old bucket info: " << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed converting old bucket info: " << ret << dendl; return ret; } } @@ -2948,16 +2949,16 @@ int RGWBucketCtl::do_link_bucket(RGWSI_Bucket_EP_Ctx& ctx, nullptr, &attrs, y, dpp); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << "ERROR: store->get_bucket_entrypoint_info() returned: " + ldpp_dout(dpp, 0) << "ERROR: store->get_bucket_entrypoint_info() returned: " << cpp_strerror(-ret) << dendl; } pattrs = &attrs; } } - ret = ctl.user->add_bucket(user_id, bucket, creation_time, y); + ret = ctl.user->add_bucket(dpp, user_id, bucket, creation_time, y); if (ret < 0) { - ldout(cct, 0) << "ERROR: error adding bucket to user directory:" + ldpp_dout(dpp, 0) << "ERROR: error adding bucket to user directory:" << " user=" << user_id << " bucket=" << bucket << " err=" << cpp_strerror(-ret) @@ -2981,7 +2982,7 @@ int RGWBucketCtl::do_link_bucket(RGWSI_Bucket_EP_Ctx& ctx, done_err: int r = do_unlink_bucket(ctx, user_id, bucket, true, y, dpp); if (r < 0) { - ldout(cct, 0) << "ERROR: failed unlinking bucket on error cleanup: " + ldpp_dout(dpp, 0) << "ERROR: failed unlinking bucket on error cleanup: " << cpp_strerror(-r) << dendl; } return ret; @@ -3001,7 +3002,7 @@ int RGWBucketCtl::do_unlink_bucket(RGWSI_Bucket_EP_Ctx& ctx, optional_yield y, const DoutPrefixProvider *dpp) { - int ret = ctl.user->remove_bucket(user_id, bucket, y); + int ret = ctl.user->remove_bucket(dpp, user_id, bucket, y); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: error removing bucket from directory: " << cpp_strerror(-ret)<< dendl; @@ -3056,7 +3057,7 @@ int RGWBucketCtl::chown(rgw::sal::Store* store, rgw::sal::Bucket* bucket, results.objs.clear(); int ret = bucket->list(dpp, params, 
max_entries, results, y); if (ret < 0) { - ldout(store->ctx(), 0) << "ERROR: list objects failed: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "ERROR: list objects failed: " << cpp_strerror(-ret) << dendl; return ret; } @@ -3068,7 +3069,7 @@ int RGWBucketCtl::chown(rgw::sal::Store* store, rgw::sal::Bucket* bucket, ret = r_obj->get_obj_attrs(&obj_ctx, y, dpp); if (ret < 0){ - ldout(store->ctx(), 0) << "ERROR: failed to read object " << obj.key.name << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to read object " << obj.key.name << cpp_strerror(-ret) << dendl; continue; } const auto& aiter = r_obj->get_attrs().find(RGW_ATTR_ACL); @@ -3112,7 +3113,7 @@ int RGWBucketCtl::chown(rgw::sal::Store* store, rgw::sal::Bucket* bucket, attrs[RGW_ATTR_ACL] = bl; ret = r_obj->set_obj_attrs(dpp, &obj_ctx, &attrs, nullptr, y); if (ret < 0) { - ldout(store->ctx(), 0) << "ERROR: modify attr failed " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "ERROR: modify attr failed " << cpp_strerror(-ret) << dendl; return ret; } } @@ -3141,7 +3142,8 @@ int RGWBucketCtl::read_buckets_stats(map& m, }); } -int RGWBucketCtl::sync_user_stats(const rgw_user& user_id, +int RGWBucketCtl::sync_user_stats(const DoutPrefixProvider *dpp, + const rgw_user& user_id, const RGWBucketInfo& bucket_info, optional_yield y, RGWBucketEnt* pent) @@ -3150,13 +3152,13 @@ int RGWBucketCtl::sync_user_stats(const rgw_user& user_id, if (!pent) { pent = &ent; } - int r = svc.bi->read_stats(bucket_info, pent, null_yield); + int r = svc.bi->read_stats(dpp, bucket_info, pent, null_yield); if (r < 0) { - ldout(cct, 20) << __func__ << "(): failed to read bucket stats (r=" << r << ")" << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): failed to read bucket stats (r=" << r << ")" << dendl; return r; } - return ctl.user->flush_bucket_stats(user_id, *pent, y); + return ctl.user->flush_bucket_stats(dpp, user_id, *pent, y); } int RGWBucketCtl::get_sync_policy_handler(std::optional zone, @@ 
-3169,7 +3171,7 @@ int RGWBucketCtl::get_sync_policy_handler(std::optional zone, return svc.bucket_sync->get_policy_handler(ctx, zone, bucket, phandler, y, dpp); }); if (r < 0) { - ldout(cct, 20) << __func__ << "(): failed to get policy handler for bucket=" << bucket << " (r=" << r << ")" << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): failed to get policy handler for bucket=" << bucket << " (r=" << r << ")" << dendl; return r; } return 0; diff --git a/src/rgw/rgw_bucket.h b/src/rgw/rgw_bucket.h index c867a80fd8f..ae0df7f6a1f 100644 --- a/src/rgw/rgw_bucket.h +++ b/src/rgw/rgw_bucket.h @@ -343,7 +343,8 @@ public: optional_yield y, std::string *err_msg = NULL); - int check_index(RGWBucketAdminOpState& op_state, + int check_index(const DoutPrefixProvider *dpp, + RGWBucketAdminOpState& op_state, map& existing_stats, map& calculated_stats, std::string *err_msg = NULL); @@ -704,7 +705,8 @@ public: const DoutPrefixProvider *dpp); /* quota related */ - int sync_user_stats(const rgw_user& user_id, const RGWBucketInfo& bucket_info, + int sync_user_stats(const DoutPrefixProvider *dpp, + const rgw_user& user_id, const RGWBucketInfo& bucket_info, optional_yield y, RGWBucketEnt* pent = nullptr); @@ -762,7 +764,7 @@ private: }; -bool rgw_find_bucket_by_id(CephContext *cct, rgw::sal::Store* store, const string& marker, +bool rgw_find_bucket_by_id(const DoutPrefixProvider *dpp, CephContext *cct, rgw::sal::Store* store, const string& marker, const string& bucket_id, rgw_bucket* bucket_out); #endif diff --git a/src/rgw/rgw_bucket_sync.cc b/src/rgw/rgw_bucket_sync.cc index 377bd8f0566..357b56d9a40 100644 --- a/src/rgw/rgw_bucket_sync.cc +++ b/src/rgw/rgw_bucket_sync.cc @@ -734,14 +734,14 @@ RGWBucketSyncPolicyHandler *RGWBucketSyncPolicyHandler::alloc_child(const rgw_bu return new RGWBucketSyncPolicyHandler(this, bucket, sync_policy); } -int RGWBucketSyncPolicyHandler::init(optional_yield y) +int RGWBucketSyncPolicyHandler::init(const DoutPrefixProvider *dpp, optional_yield y) { - 
int r = bucket_sync_svc->get_bucket_sync_hints(bucket.value_or(rgw_bucket()), + int r = bucket_sync_svc->get_bucket_sync_hints(dpp, bucket.value_or(rgw_bucket()), &source_hints, &target_hints, y); if (r < 0) { - ldout(bucket_sync_svc->ctx(), 0) << "ERROR: failed to initialize bucket sync policy handler: get_bucket_sync_hints() on bucket=" + ldpp_dout(dpp, 0) << "ERROR: failed to initialize bucket sync policy handler: get_bucket_sync_hints() on bucket=" << bucket << " returned r=" << r << dendl; return r; } diff --git a/src/rgw/rgw_bucket_sync.h b/src/rgw/rgw_bucket_sync.h index 488060b7a60..d1d09bbfc07 100644 --- a/src/rgw/rgw_bucket_sync.h +++ b/src/rgw/rgw_bucket_sync.h @@ -331,7 +331,7 @@ public: RGWBucketSyncPolicyHandler *alloc_child(const rgw_bucket& bucket, std::optional sync_policy) const; - int init(optional_yield y); + int init(const DoutPrefixProvider *dpp, optional_yield y); void reflect(RGWBucketSyncFlowManager::pipe_set *psource_pipes, RGWBucketSyncFlowManager::pipe_set *ptarget_pipes, diff --git a/src/rgw/rgw_cache.cc b/src/rgw/rgw_cache.cc index 6908e7f9d25..e82c142eb02 100644 --- a/src/rgw/rgw_cache.cc +++ b/src/rgw/rgw_cache.cc @@ -9,7 +9,7 @@ #define dout_subsys ceph_subsys_rgw -int ObjectCache::get(const string& name, ObjectCacheInfo& info, uint32_t mask, rgw_cache_entry_info *cache_info) +int ObjectCache::get(const DoutPrefixProvider *dpp, const string& name, ObjectCacheInfo& info, uint32_t mask, rgw_cache_entry_info *cache_info) { std::shared_lock rl{lock}; @@ -18,7 +18,7 @@ int ObjectCache::get(const string& name, ObjectCacheInfo& info, uint32_t mask, r } auto iter = cache_map.find(name); if (iter == cache_map.end()) { - ldout(cct, 10) << "cache get: name=" << name << " : miss" << dendl; + ldpp_dout(dpp, 10) << "cache get: name=" << name << " : miss" << dendl; if (perfcounter) { perfcounter->inc(l_rgw_cache_miss); } @@ -27,7 +27,7 @@ int ObjectCache::get(const string& name, ObjectCacheInfo& info, uint32_t mask, r if (expiry.count() && 
(ceph::coarse_mono_clock::now() - iter->second.info.time_added) > expiry) { - ldout(cct, 10) << "cache get: name=" << name << " : expiry miss" << dendl; + ldpp_dout(dpp, 10) << "cache get: name=" << name << " : expiry miss" << dendl; rl.unlock(); std::unique_lock wl{lock}; // write lock for insertion // check that wasn't already removed by other thread @@ -47,14 +47,14 @@ int ObjectCache::get(const string& name, ObjectCacheInfo& info, uint32_t mask, r ObjectCacheEntry *entry = &iter->second; if (lru_counter - entry->lru_promotion_ts > lru_window) { - ldout(cct, 20) << "cache get: touching lru, lru_counter=" << lru_counter + ldpp_dout(dpp, 20) << "cache get: touching lru, lru_counter=" << lru_counter << " promotion_ts=" << entry->lru_promotion_ts << dendl; rl.unlock(); std::unique_lock wl{lock}; // write lock for insertion /* need to redo this because entry might have dropped off the cache */ iter = cache_map.find(name); if (iter == cache_map.end()) { - ldout(cct, 10) << "lost race! cache get: name=" << name << " : miss" << dendl; + ldpp_dout(dpp, 10) << "lost race! 
cache get: name=" << name << " : miss" << dendl; if(perfcounter) perfcounter->inc(l_rgw_cache_miss); return -ENOENT; } @@ -62,24 +62,24 @@ int ObjectCache::get(const string& name, ObjectCacheInfo& info, uint32_t mask, r entry = &iter->second; /* check again, we might have lost a race here */ if (lru_counter - entry->lru_promotion_ts > lru_window) { - touch_lru(name, *entry, iter->second.lru_iter); + touch_lru(dpp, name, *entry, iter->second.lru_iter); } } ObjectCacheInfo& src = iter->second.info; if(src.status == -ENOENT) { - ldout(cct, 10) << "cache get: name=" << name << " : hit (negative entry)" << dendl; + ldpp_dout(dpp, 10) << "cache get: name=" << name << " : hit (negative entry)" << dendl; if (perfcounter) perfcounter->inc(l_rgw_cache_hit); return -ENODATA; } if ((src.flags & mask) != mask) { - ldout(cct, 10) << "cache get: name=" << name << " : type miss (requested=0x" + ldpp_dout(dpp, 10) << "cache get: name=" << name << " : type miss (requested=0x" << std::hex << mask << ", cached=0x" << src.flags << std::dec << ")" << dendl; if(perfcounter) perfcounter->inc(l_rgw_cache_miss); return -ENOENT; } - ldout(cct, 10) << "cache get: name=" << name << " : hit (requested=0x" + ldpp_dout(dpp, 10) << "cache get: name=" << name << " : hit (requested=0x" << std::hex << mask << ", cached=0x" << src.flags << std::dec << ")" << dendl; @@ -93,7 +93,8 @@ int ObjectCache::get(const string& name, ObjectCacheInfo& info, uint32_t mask, r return 0; } -bool ObjectCache::chain_cache_entry(std::initializer_list cache_info_entries, +bool ObjectCache::chain_cache_entry(const DoutPrefixProvider *dpp, + std::initializer_list cache_info_entries, RGWChainedCache::Entry *chained_entry) { std::unique_lock l{lock}; @@ -106,18 +107,18 @@ bool ObjectCache::chain_cache_entry(std::initializer_list entries.reserve(cache_info_entries.size()); /* first verify that all entries are still valid */ for (auto cache_info : cache_info_entries) { - ldout(cct, 10) << "chain_cache_entry: cache_locator=" + 
ldpp_dout(dpp, 10) << "chain_cache_entry: cache_locator=" << cache_info->cache_locator << dendl; auto iter = cache_map.find(cache_info->cache_locator); if (iter == cache_map.end()) { - ldout(cct, 20) << "chain_cache_entry: couldn't find cache locator" << dendl; + ldpp_dout(dpp, 20) << "chain_cache_entry: couldn't find cache locator" << dendl; return false; } auto entry = &iter->second; if (entry->gen != cache_info->gen) { - ldout(cct, 20) << "chain_cache_entry: entry.gen (" << entry->gen + ldpp_dout(dpp, 20) << "chain_cache_entry: entry.gen (" << entry->gen << ") != cache_info.gen (" << cache_info->gen << ")" << dendl; return false; @@ -136,7 +137,7 @@ bool ObjectCache::chain_cache_entry(std::initializer_list return true; } -void ObjectCache::put(const string& name, ObjectCacheInfo& info, rgw_cache_entry_info *cache_info) +void ObjectCache::put(const DoutPrefixProvider *dpp, const string& name, ObjectCacheInfo& info, rgw_cache_entry_info *cache_info) { std::unique_lock l{lock}; @@ -144,7 +145,7 @@ void ObjectCache::put(const string& name, ObjectCacheInfo& info, rgw_cache_entry return; } - ldout(cct, 10) << "cache put: name=" << name << " info.flags=0x" + ldpp_dout(dpp, 10) << "cache put: name=" << name << " info.flags=0x" << std::hex << info.flags << std::dec << dendl; auto [iter, inserted] = cache_map.emplace(name, ObjectCacheEntry{}); @@ -160,7 +161,7 @@ void ObjectCache::put(const string& name, ObjectCacheInfo& info, rgw_cache_entry entry.chained_entries.clear(); entry.gen++; - touch_lru(name, entry, entry.lru_iter); + touch_lru(dpp, name, entry, entry.lru_iter); target.status = info.status; @@ -190,16 +191,16 @@ void ObjectCache::put(const string& name, ObjectCacheInfo& info, rgw_cache_entry target.xattrs = info.xattrs; map::iterator iter; for (iter = target.xattrs.begin(); iter != target.xattrs.end(); ++iter) { - ldout(cct, 10) << "updating xattr: name=" << iter->first << " bl.length()=" << iter->second.length() << dendl; + ldpp_dout(dpp, 10) << "updating 
xattr: name=" << iter->first << " bl.length()=" << iter->second.length() << dendl; } } else if (info.flags & CACHE_FLAG_MODIFY_XATTRS) { map::iterator iter; for (iter = info.rm_xattrs.begin(); iter != info.rm_xattrs.end(); ++iter) { - ldout(cct, 10) << "removing xattr: name=" << iter->first << dendl; + ldpp_dout(dpp, 10) << "removing xattr: name=" << iter->first << dendl; target.xattrs.erase(iter->first); } for (iter = info.xattrs.begin(); iter != info.xattrs.end(); ++iter) { - ldout(cct, 10) << "appending xattr: name=" << iter->first << " bl.length()=" << iter->second.length() << dendl; + ldpp_dout(dpp, 10) << "appending xattr: name=" << iter->first << " bl.length()=" << iter->second.length() << dendl; target.xattrs[iter->first] = iter->second; } } @@ -211,7 +212,7 @@ void ObjectCache::put(const string& name, ObjectCacheInfo& info, rgw_cache_entry target.version = info.version; } -bool ObjectCache::remove(const string& name) +bool ObjectCache::remove(const DoutPrefixProvider *dpp, const string& name) { std::unique_lock l{lock}; @@ -223,7 +224,7 @@ bool ObjectCache::remove(const string& name) if (iter == cache_map.end()) return false; - ldout(cct, 10) << "removing " << name << " from cache" << dendl; + ldpp_dout(dpp, 10) << "removing " << name << " from cache" << dendl; ObjectCacheEntry& entry = iter->second; for (auto& kv : entry.chained_entries) { @@ -235,7 +236,7 @@ bool ObjectCache::remove(const string& name) return true; } -void ObjectCache::touch_lru(const string& name, ObjectCacheEntry& entry, +void ObjectCache::touch_lru(const DoutPrefixProvider *dpp, const string& name, ObjectCacheEntry& entry, std::list::iterator& lru_iter) { while (lru_size > (size_t)cct->_conf->rgw_cache_lru_size) { @@ -262,9 +263,9 @@ void ObjectCache::touch_lru(const string& name, ObjectCacheEntry& entry, lru.push_back(name); lru_size++; lru_iter--; - ldout(cct, 10) << "adding " << name << " to cache LRU end" << dendl; + ldpp_dout(dpp, 10) << "adding " << name << " to cache LRU end" 
<< dendl; } else { - ldout(cct, 10) << "moving " << name << " to cache LRU end" << dendl; + ldpp_dout(dpp, 10) << "moving " << name << " to cache LRU end" << dendl; lru.erase(lru_iter); lru.push_back(name); lru_iter = lru.end(); diff --git a/src/rgw/rgw_cache.h b/src/rgw/rgw_cache.h index 659b5518128..852780cc664 100644 --- a/src/rgw/rgw_cache.h +++ b/src/rgw/rgw_cache.h @@ -168,7 +168,7 @@ class ObjectCache { bool enabled; ceph::timespan expiry; - void touch_lru(const string& name, ObjectCacheEntry& entry, + void touch_lru(const DoutPrefixProvider *dpp, const string& name, ObjectCacheEntry& entry, std::list::iterator& lru_iter); void remove_lru(const string& name, std::list::iterator& lru_iter); void invalidate_lru(ObjectCacheEntry& entry); @@ -178,10 +178,10 @@ class ObjectCache { public: ObjectCache() : lru_size(0), lru_counter(0), lru_window(0), cct(NULL), enabled(false) { } ~ObjectCache(); - int get(const std::string& name, ObjectCacheInfo& bl, uint32_t mask, rgw_cache_entry_info *cache_info); - std::optional get(const std::string& name) { + int get(const DoutPrefixProvider *dpp, const std::string& name, ObjectCacheInfo& bl, uint32_t mask, rgw_cache_entry_info *cache_info); + std::optional get(const DoutPrefixProvider *dpp, const std::string& name) { std::optional info{std::in_place}; - auto r = get(name, *info, 0, nullptr); + auto r = get(dpp, name, *info, 0, nullptr); return r < 0 ? 
std::nullopt : info; } @@ -198,15 +198,16 @@ public: } } - void put(const std::string& name, ObjectCacheInfo& bl, rgw_cache_entry_info *cache_info); - bool remove(const std::string& name); + void put(const DoutPrefixProvider *dpp, const std::string& name, ObjectCacheInfo& bl, rgw_cache_entry_info *cache_info); + bool remove(const DoutPrefixProvider *dpp, const std::string& name); void set_ctx(CephContext *_cct) { cct = _cct; lru_window = cct->_conf->rgw_cache_lru_size / 2; expiry = std::chrono::seconds(cct->_conf.get_val( "rgw_cache_expiry_interval")); } - bool chain_cache_entry(std::initializer_list cache_info_entries, + bool chain_cache_entry(const DoutPrefixProvider *dpp, + std::initializer_list cache_info_entries, RGWChainedCache::Entry *chained_entry); void set_enabled(bool status); diff --git a/src/rgw/rgw_coroutine.cc b/src/rgw/rgw_coroutine.cc index 6278d695147..98397655ee4 100644 --- a/src/rgw/rgw_coroutine.cc +++ b/src/rgw/rgw_coroutine.cc @@ -228,15 +228,15 @@ RGWCoroutinesStack::~RGWCoroutinesStack() } } -int RGWCoroutinesStack::operate(RGWCoroutinesEnv *_env) +int RGWCoroutinesStack::operate(const DoutPrefixProvider *dpp, RGWCoroutinesEnv *_env) { env = _env; RGWCoroutine *op = *pos; op->stack = this; - ldout(cct, 20) << *op << ": operate()" << dendl; - int r = op->operate_wrapper(); + ldpp_dout(dpp, 20) << *op << ": operate()" << dendl; + int r = op->operate_wrapper(dpp); if (r < 0) { - ldout(cct, 20) << *op << ": operate() returned r=" << r << dendl; + ldpp_dout(dpp, 20) << *op << ": operate() returned r=" << r << dendl; } error_flag = op->is_error(); @@ -608,7 +608,7 @@ void RGWCoroutinesManager::io_complete(RGWCoroutine *cr, const rgw_io_id& io_id) cr->io_complete(io_id); } -int RGWCoroutinesManager::run(list& stacks) +int RGWCoroutinesManager::run(const DoutPrefixProvider *dpp, list& stacks) { int ret = 0; int blocked_count = 0; @@ -645,13 +645,13 @@ int RGWCoroutinesManager::run(list& stacks) lock.unlock(); - ret = stack->operate(&env); + ret = 
stack->operate(dpp, &env); lock.lock(); stack->set_is_scheduled(false); if (ret < 0) { - ldout(cct, 20) << "stack->operate() returned ret=" << ret << dendl; + ldpp_dout(dpp, 20) << "stack->operate() returned ret=" << ret << dendl; } if (stack->is_error()) { @@ -772,7 +772,7 @@ next: return ret; } -int RGWCoroutinesManager::run(RGWCoroutine *op) +int RGWCoroutinesManager::run(const DoutPrefixProvider *dpp, RGWCoroutine *op) { if (!op) { return 0; @@ -784,9 +784,9 @@ int RGWCoroutinesManager::run(RGWCoroutine *op) stacks.push_back(stack); - int r = run(stacks); + int r = run(dpp, stacks); if (r < 0) { - ldout(cct, 20) << "run(stacks) returned r=" << r << dendl; + ldpp_dout(dpp, 20) << "run(stacks) returned r=" << r << dendl; } else { r = op->get_ret_status(); } @@ -1074,12 +1074,12 @@ void RGWSimpleCoroutine::call_cleanup() request_cleanup(); } -int RGWSimpleCoroutine::operate() +int RGWSimpleCoroutine::operate(const DoutPrefixProvider *dpp) { int ret = 0; reenter(this) { yield return state_init(); - yield return state_send_request(); + yield return state_send_request(dpp); yield return state_request_complete(); yield return state_all_complete(); drain_all(); @@ -1099,9 +1099,9 @@ int RGWSimpleCoroutine::state_init() return 0; } -int RGWSimpleCoroutine::state_send_request() +int RGWSimpleCoroutine::state_send_request(const DoutPrefixProvider *dpp) { - int ret = send_request(); + int ret = send_request(dpp); if (ret < 0) { call_cleanup(); return set_state(RGWCoroutine_Error, ret); diff --git a/src/rgw/rgw_coroutine.h b/src/rgw/rgw_coroutine.h index da5681d68ce..b3287e505bd 100644 --- a/src/rgw/rgw_coroutine.h +++ b/src/rgw/rgw_coroutine.h @@ -271,14 +271,14 @@ protected: return status; } - virtual int operate_wrapper() { - return operate(); + virtual int operate_wrapper(const DoutPrefixProvider *dpp) { + return operate(dpp); } public: RGWCoroutine(CephContext *_cct) : status(_cct), _yield_ret(false), cct(_cct), stack(NULL), retcode(0), state(RGWCoroutine_Run) {} 
~RGWCoroutine() override; - virtual int operate() = 0; + virtual int operate(const DoutPrefixProvider *dpp) = 0; bool is_done() { return (state == RGWCoroutine_Done || state == RGWCoroutine_Error); } bool is_error() { return (state == RGWCoroutine_Error); } @@ -473,7 +473,7 @@ public: return id; } - int operate(RGWCoroutinesEnv *env); + int operate(const DoutPrefixProvider *dpp, RGWCoroutinesEnv *env); bool is_done() { return done_flag; @@ -662,8 +662,8 @@ public: } } - int run(list& ops); - int run(RGWCoroutine *op); + int run(const DoutPrefixProvider *dpp, list& ops); + int run(const DoutPrefixProvider *dpp, RGWCoroutine *op); void stop() { bool expected = false; if (going_down.compare_exchange_strong(expected, true)) { @@ -714,10 +714,10 @@ RGWAioCompletionNotifier *RGWCoroutinesStack::create_completion_notifier(T value class RGWSimpleCoroutine : public RGWCoroutine { bool called_cleanup; - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; int state_init(); - int state_send_request(); + int state_send_request(const DoutPrefixProvider *dpp); int state_request_complete(); int state_all_complete(); @@ -728,7 +728,7 @@ public: ~RGWSimpleCoroutine() override; virtual int init() { return 0; } - virtual int send_request() = 0; + virtual int send_request(const DoutPrefixProvider *dpp) = 0; virtual int request_complete() = 0; virtual int finish() { return 0; } virtual void request_cleanup() {} diff --git a/src/rgw/rgw_cors_s3.cc b/src/rgw/rgw_cors_s3.cc index 9f81744cd54..0275156d2c9 100644 --- a/src/rgw/rgw_cors_s3.cc +++ b/src/rgw/rgw_cors_s3.cc @@ -80,7 +80,7 @@ bool RGWCORSRule_S3::xml_end(const char *el) { if (obj) { for( ; obj; obj = iter.get_next()) { const char *s = obj->get_data().c_str(); - dout(10) << "RGWCORSRule::xml_end, el : " << el << ", data : " << s << dendl; + ldpp_dout(dpp, 10) << "RGWCORSRule::xml_end, el : " << el << ", data : " << s << dendl; if (strcasecmp(s, "GET") == 0) { allowed_methods |= RGW_CORS_GET; } else if 
(strcasecmp(s, "POST") == 0) { @@ -103,20 +103,20 @@ bool RGWCORSRule_S3::xml_end(const char *el) { if (xml_id != NULL) { string data = xml_id->get_data(); if (data.length() > 255) { - dout(0) << "RGWCORSRule has id of length greater than 255" << dendl; + ldpp_dout(dpp, 0) << "RGWCORSRule has id of length greater than 255" << dendl; return false; } - dout(10) << "RGWCORRule id : " << data << dendl; + ldpp_dout(dpp, 10) << "RGWCORRule id : " << data << dendl; id = data; } /*Check if there is atleast one AllowedOrigin*/ iter = find("AllowedOrigin"); if (!(obj = iter.get_next())) { - dout(0) << "RGWCORSRule does not have even one AllowedOrigin" << dendl; + ldpp_dout(dpp, 0) << "RGWCORSRule does not have even one AllowedOrigin" << dendl; return false; } for( ; obj; obj = iter.get_next()) { - dout(10) << "RGWCORSRule - origin : " << obj->get_data() << dendl; + ldpp_dout(dpp, 10) << "RGWCORSRule - origin : " << obj->get_data() << dendl; /*Just take the hostname*/ string host = obj->get_data(); if (validate_name_string(host) != 0) @@ -130,7 +130,7 @@ bool RGWCORSRule_S3::xml_end(const char *el) { unsigned long long ull = strtoull(obj->get_data().c_str(), &end, 10); if (*end != '\0') { - dout(0) << "RGWCORSRule's MaxAgeSeconds " << obj->get_data() << " is an invalid integer" << dendl; + ldpp_dout(dpp, 0) << "RGWCORSRule's MaxAgeSeconds " << obj->get_data() << " is an invalid integer" << dendl; return false; } if (ull >= 0x100000000ull) { @@ -138,13 +138,13 @@ bool RGWCORSRule_S3::xml_end(const char *el) { } else { max_age = (uint32_t)ull; } - dout(10) << "RGWCORSRule : max_age : " << max_age << dendl; + ldpp_dout(dpp, 10) << "RGWCORSRule : max_age : " << max_age << dendl; } /*Check and update ExposeHeader*/ iter = find("ExposeHeader"); if ((obj = iter.get_next())) { for(; obj; obj = iter.get_next()) { - dout(10) << "RGWCORSRule - exp_hdr : " << obj->get_data() << dendl; + ldpp_dout(dpp, 10) << "RGWCORSRule - exp_hdr : " << obj->get_data() << dendl; 
exposable_hdrs.push_back(obj->get_data()); } } @@ -152,7 +152,7 @@ bool RGWCORSRule_S3::xml_end(const char *el) { iter = find("AllowedHeader"); if ((obj = iter.get_next())) { for(; obj; obj = iter.get_next()) { - dout(10) << "RGWCORSRule - allowed_hdr : " << obj->get_data() << dendl; + ldpp_dout(dpp, 10) << "RGWCORSRule - allowed_hdr : " << obj->get_data() << dendl; string s = obj->get_data(); if (validate_name_string(s) != 0) return false; @@ -177,7 +177,7 @@ bool RGWCORSConfiguration_S3::xml_end(const char *el) { XMLObjIter iter = find("CORSRule"); RGWCORSRule_S3 *obj; if (!(obj = static_cast(iter.get_next()))) { - dout(0) << "CORSConfiguration should have atleast one CORSRule" << dendl; + ldpp_dout(dpp, 0) << "CORSConfiguration should have atleast one CORSRule" << dendl; return false; } for(; obj; obj = static_cast(iter.get_next())) { @@ -224,9 +224,9 @@ class CORSRuleExposeHeader_S3 : public XMLObj { XMLObj *RGWCORSXMLParser_S3::alloc_obj(const char *el) { if (strcmp(el, "CORSConfiguration") == 0) { - return new RGWCORSConfiguration_S3; + return new RGWCORSConfiguration_S3(dpp); } else if (strcmp(el, "CORSRule") == 0) { - return new RGWCORSRule_S3; + return new RGWCORSRule_S3(dpp); } else if (strcmp(el, "ID") == 0) { return new CORSRuleID_S3; } else if (strcmp(el, "AllowedOrigin") == 0) { diff --git a/src/rgw/rgw_cors_s3.h b/src/rgw/rgw_cors_s3.h index 2dff567c9e9..bc69c513b68 100644 --- a/src/rgw/rgw_cors_s3.h +++ b/src/rgw/rgw_cors_s3.h @@ -22,13 +22,15 @@ #include #include +#include #include "rgw_xml.h" #include "rgw_cors.h" class RGWCORSRule_S3 : public RGWCORSRule, public XMLObj { + const DoutPrefixProvider *dpp; public: - RGWCORSRule_S3() {} + RGWCORSRule_S3(const DoutPrefixProvider *dpp) : dpp(dpp) {} ~RGWCORSRule_S3() override {} bool xml_end(const char *el) override; @@ -37,8 +39,9 @@ class RGWCORSRule_S3 : public RGWCORSRule, public XMLObj class RGWCORSConfiguration_S3 : public RGWCORSConfiguration, public XMLObj { + const DoutPrefixProvider *dpp; 
public: - RGWCORSConfiguration_S3() {} + RGWCORSConfiguration_S3(const DoutPrefixProvider *dpp) : dpp(dpp) {} ~RGWCORSConfiguration_S3() override {} bool xml_end(const char *el) override; @@ -47,10 +50,11 @@ class RGWCORSConfiguration_S3 : public RGWCORSConfiguration, public XMLObj class RGWCORSXMLParser_S3 : public RGWXMLParser { + const DoutPrefixProvider *dpp; CephContext *cct; XMLObj *alloc_obj(const char *el) override; public: - explicit RGWCORSXMLParser_S3(CephContext *_cct) : cct(_cct) {} + explicit RGWCORSXMLParser_S3(const DoutPrefixProvider *_dpp, CephContext *_cct) : dpp(_dpp), cct(_cct) {} }; #endif /*CEPH_RGW_CORS_S3_H*/ diff --git a/src/rgw/rgw_cr_rados.cc b/src/rgw/rgw_cr_rados.cc index 98c0e3d45bb..fd33e6889b5 100644 --- a/src/rgw/rgw_cr_rados.cc +++ b/src/rgw/rgw_cr_rados.cc @@ -48,7 +48,7 @@ RGWAsyncRadosRequest *RGWAsyncRadosProcessor::RGWWQ::_dequeue() { } void RGWAsyncRadosProcessor::RGWWQ::_process(RGWAsyncRadosRequest *req, ThreadPool::TPHandle& handle) { - processor->handle_request(req); + processor->handle_request(this, req); processor->req_throttle.put(1); } @@ -89,8 +89,8 @@ void RGWAsyncRadosProcessor::stop() { } } -void RGWAsyncRadosProcessor::handle_request(RGWAsyncRadosRequest *req) { - req->send_request(); +void RGWAsyncRadosProcessor::handle_request(const DoutPrefixProvider *dpp, RGWAsyncRadosRequest *req) { + req->send_request(dpp); req->put(); } @@ -99,7 +99,7 @@ void RGWAsyncRadosProcessor::queue(RGWAsyncRadosRequest *req) { req_wq.queue(req); } -int RGWAsyncGetSystemObj::_send_request() +int RGWAsyncGetSystemObj::_send_request(const DoutPrefixProvider *dpp) { map *pattrs = want_attrs ? 
&attrs : nullptr; @@ -108,13 +108,13 @@ int RGWAsyncGetSystemObj::_send_request() .set_objv_tracker(&objv_tracker) .set_attrs(pattrs) .set_raw_attrs(raw_attrs) - .read(&bl, null_yield); + .read(dpp, &bl, null_yield); } -RGWAsyncGetSystemObj::RGWAsyncGetSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWSI_SysObj *_svc, +RGWAsyncGetSystemObj::RGWAsyncGetSystemObj(const DoutPrefixProvider *_dpp, RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWSI_SysObj *_svc, RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj, bool want_attrs, bool raw_attrs) - : RGWAsyncRadosRequest(caller, cn), obj_ctx(_svc), + : RGWAsyncRadosRequest(caller, cn), dpp(_dpp), obj_ctx(_svc), obj(_obj), want_attrs(want_attrs), raw_attrs(raw_attrs) { if (_objv_tracker) { @@ -122,9 +122,9 @@ RGWAsyncGetSystemObj::RGWAsyncGetSystemObj(RGWCoroutine *caller, RGWAioCompletio } } -int RGWSimpleRadosReadAttrsCR::send_request() +int RGWSimpleRadosReadAttrsCR::send_request(const DoutPrefixProvider *dpp) { - req = new RGWAsyncGetSystemObj(this, stack->create_completion_notifier(), + req = new RGWAsyncGetSystemObj(dpp, this, stack->create_completion_notifier(), svc, objv_tracker, obj, true, raw_attrs); async_rados->queue(req); return 0; @@ -141,21 +141,23 @@ int RGWSimpleRadosReadAttrsCR::request_complete() return req->get_ret_status(); } -int RGWAsyncPutSystemObj::_send_request() +int RGWAsyncPutSystemObj::_send_request(const DoutPrefixProvider *dpp) { auto obj_ctx = svc->init_obj_ctx(); auto sysobj = obj_ctx.get_obj(obj); return sysobj.wop() .set_objv_tracker(&objv_tracker) .set_exclusive(exclusive) - .write_data(bl, null_yield); + .write_data(dpp, bl, null_yield); } -RGWAsyncPutSystemObj::RGWAsyncPutSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, +RGWAsyncPutSystemObj::RGWAsyncPutSystemObj(const DoutPrefixProvider *_dpp, + RGWCoroutine *caller, + RGWAioCompletionNotifier *cn, RGWSI_SysObj *_svc, RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj, bool 
_exclusive, bufferlist _bl) - : RGWAsyncRadosRequest(caller, cn), svc(_svc), + : RGWAsyncRadosRequest(caller, cn), dpp(_dpp), svc(_svc), obj(_obj), exclusive(_exclusive), bl(std::move(_bl)) { if (_objv_tracker) { @@ -163,7 +165,7 @@ RGWAsyncPutSystemObj::RGWAsyncPutSystemObj(RGWCoroutine *caller, RGWAioCompletio } } -int RGWAsyncPutSystemObjAttrs::_send_request() +int RGWAsyncPutSystemObjAttrs::_send_request(const DoutPrefixProvider *dpp) { auto obj_ctx = svc->init_obj_ctx(); auto sysobj = obj_ctx.get_obj(obj); @@ -171,14 +173,14 @@ int RGWAsyncPutSystemObjAttrs::_send_request() .set_objv_tracker(&objv_tracker) .set_exclusive(false) .set_attrs(attrs) - .write_attrs(null_yield); + .write_attrs(dpp, null_yield); } -RGWAsyncPutSystemObjAttrs::RGWAsyncPutSystemObjAttrs(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, +RGWAsyncPutSystemObjAttrs::RGWAsyncPutSystemObjAttrs(const DoutPrefixProvider *_dpp, RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWSI_SysObj *_svc, RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj, map _attrs) - : RGWAsyncRadosRequest(caller, cn), svc(_svc), + : RGWAsyncRadosRequest(caller, cn), dpp(_dpp), svc(_svc), obj(_obj), attrs(std::move(_attrs)) { if (_objv_tracker) { @@ -194,12 +196,12 @@ RGWOmapAppend::RGWOmapAppend(RGWAsyncRadosProcessor *_async_rados, rgw::sal::Rad { } -int RGWAsyncLockSystemObj::_send_request() +int RGWAsyncLockSystemObj::_send_request(const DoutPrefixProvider *dpp) { rgw_rados_ref ref; - int r = store->getRados()->get_raw_obj_ref(obj, &ref); + int r = store->getRados()->get_raw_obj_ref(dpp, obj, &ref); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl; return r; } @@ -222,12 +224,12 @@ RGWAsyncLockSystemObj::RGWAsyncLockSystemObj(RGWCoroutine *caller, RGWAioComplet { } -int RGWAsyncUnlockSystemObj::_send_request() +int 
RGWAsyncUnlockSystemObj::_send_request(const DoutPrefixProvider *dpp) { rgw_rados_ref ref; - int r = store->getRados()->get_raw_obj_ref(obj, &ref); + int r = store->getRados()->get_raw_obj_ref(dpp, obj, &ref); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl; return r; } @@ -264,11 +266,11 @@ RGWRadosSetOmapKeysCR::RGWRadosSetOmapKeysCR(rgw::sal::RadosStore* _store, s << "]"; } -int RGWRadosSetOmapKeysCR::send_request() +int RGWRadosSetOmapKeysCR::send_request(const DoutPrefixProvider *dpp) { - int r = store->getRados()->get_raw_obj_ref(obj, &ref); + int r = store->getRados()->get_raw_obj_ref(dpp, obj, &ref); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl; return r; } @@ -303,10 +305,10 @@ RGWRadosGetOmapKeysCR::RGWRadosGetOmapKeysCR(rgw::sal::RadosStore* _store, set_description() << "get omap keys dest=" << obj << " marker=" << marker; } -int RGWRadosGetOmapKeysCR::send_request() { - int r = store->getRados()->get_raw_obj_ref(obj, &result->ref); +int RGWRadosGetOmapKeysCR::send_request(const DoutPrefixProvider *dpp) { + int r = store->getRados()->get_raw_obj_ref(dpp, obj, &result->ref); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl; return r; } @@ -341,10 +343,10 @@ RGWRadosGetOmapValsCR::RGWRadosGetOmapValsCR(rgw::sal::RadosStore* _store, set_description() << "get omap keys dest=" << obj << " marker=" << marker; } -int RGWRadosGetOmapValsCR::send_request() { - int r = store->getRados()->get_raw_obj_ref(obj, &result->ref); +int RGWRadosGetOmapValsCR::send_request(const DoutPrefixProvider *dpp) { + int r = 
store->getRados()->get_raw_obj_ref(dpp, obj, &result->ref); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl; return r; } @@ -376,10 +378,10 @@ RGWRadosRemoveOmapKeysCR::RGWRadosRemoveOmapKeysCR(rgw::sal::RadosStore* _store, set_description() << "remove omap keys dest=" << obj << " keys=" << keys; } -int RGWRadosRemoveOmapKeysCR::send_request() { - int r = store->getRados()->get_raw_obj_ref(obj, &ref); +int RGWRadosRemoveOmapKeysCR::send_request(const DoutPrefixProvider *dpp) { + int r = store->getRados()->get_raw_obj_ref(dpp, obj, &ref); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl; return r; } @@ -409,7 +411,7 @@ RGWRadosRemoveCR::RGWRadosRemoveCR(rgw::sal::RadosStore* store, const rgw_raw_ob set_description() << "remove dest=" << obj; } -int RGWRadosRemoveCR::send_request() +int RGWRadosRemoveCR::send_request(const DoutPrefixProvider *dpp) { auto rados = store->getRados()->get_rados_handle(); int r = rados->ioctx_create(obj.pool.name.c_str(), ioctx); @@ -464,7 +466,7 @@ void RGWSimpleRadosLockCR::request_cleanup() } } -int RGWSimpleRadosLockCR::send_request() +int RGWSimpleRadosLockCR::send_request(const DoutPrefixProvider *dpp) { set_status() << "sending request"; req = new RGWAsyncLockSystemObj(this, stack->create_completion_notifier(), @@ -501,7 +503,7 @@ void RGWSimpleRadosUnlockCR::request_cleanup() } } -int RGWSimpleRadosUnlockCR::send_request() +int RGWSimpleRadosUnlockCR::send_request(const DoutPrefixProvider *dpp) { set_status() << "sending request"; @@ -517,7 +519,7 @@ int RGWSimpleRadosUnlockCR::request_complete() return req->get_ret_status(); } -int RGWOmapAppend::operate() { +int RGWOmapAppend::operate(const DoutPrefixProvider *dpp) { reenter(this) { for (;;) 
{ if (!has_product() && going_down) { @@ -576,7 +578,7 @@ bool RGWOmapAppend::finish() { return (!is_done()); } -int RGWAsyncGetBucketInstanceInfo::_send_request() +int RGWAsyncGetBucketInstanceInfo::_send_request(const DoutPrefixProvider *dpp) { int r; if (!bucket.bucket_id.empty()) { @@ -595,7 +597,8 @@ int RGWAsyncGetBucketInstanceInfo::_send_request() return 0; } -RGWRadosBILogTrimCR::RGWRadosBILogTrimCR(rgw::sal::RadosStore* store, +RGWRadosBILogTrimCR::RGWRadosBILogTrimCR(const DoutPrefixProvider *dpp, + rgw::sal::RadosStore* store, const RGWBucketInfo& bucket_info, int shard_id, const std::string& start_marker, @@ -604,10 +607,10 @@ RGWRadosBILogTrimCR::RGWRadosBILogTrimCR(rgw::sal::RadosStore* store, start_marker(BucketIndexShardsManager::get_shard_marker(start_marker)), end_marker(BucketIndexShardsManager::get_shard_marker(end_marker)) { - bs.init(bucket_info, bucket_info.layout.current_index, shard_id); + bs.init(dpp, bucket_info, bucket_info.layout.current_index, shard_id); } -int RGWRadosBILogTrimCR::send_request() +int RGWRadosBILogTrimCR::send_request(const DoutPrefixProvider *dpp) { bufferlist in; cls_rgw_bi_log_trim_op call; @@ -629,7 +632,7 @@ int RGWRadosBILogTrimCR::request_complete() return r; } -int RGWAsyncFetchRemoteObj::_send_request() +int RGWAsyncFetchRemoteObj::_send_request(const DoutPrefixProvider *dpp) { RGWObjectCtx obj_ctx(store); @@ -675,7 +678,7 @@ int RGWAsyncFetchRemoteObj::_send_request() &bytes_transferred); if (r < 0) { - ldout(store->ctx(), 0) << "store->fetch_remote_obj() returned r=" << r << dendl; + ldpp_dout(dpp, 0) << "store->fetch_remote_obj() returned r=" << r << dendl; if (counters) { counters->inc(sync_counters::l_fetch_err, 1); } @@ -689,7 +692,7 @@ int RGWAsyncFetchRemoteObj::_send_request() return r; } -int RGWAsyncStatRemoteObj::_send_request() +int RGWAsyncStatRemoteObj::_send_request(const DoutPrefixProvider *dpp) { RGWObjectCtx obj_ctx(store); @@ -700,7 +703,8 @@ int RGWAsyncStatRemoteObj::_send_request() 
rgw::sal::RadosBucket bucket(store, src_bucket); rgw::sal::RadosObject src_obj(store, key, &bucket); - int r = store->getRados()->stat_remote_obj(obj_ctx, + int r = store->getRados()->stat_remote_obj(dpp, + obj_ctx, rgw_user(user_id), nullptr, /* req_info */ source_zone, @@ -720,19 +724,19 @@ int RGWAsyncStatRemoteObj::_send_request() petag); /* string *petag, */ if (r < 0) { - ldout(store->ctx(), 0) << "store->fetch_remote_obj() returned r=" << r << dendl; + ldpp_dout(dpp, 0) << "store->fetch_remote_obj() returned r=" << r << dendl; } return r; } -int RGWAsyncRemoveObj::_send_request() +int RGWAsyncRemoveObj::_send_request(const DoutPrefixProvider *dpp) { RGWObjectCtx obj_ctx(store); rgw_obj obj(bucket_info.bucket, key); - ldout(store->ctx(), 0) << __func__ << "(): deleting obj=" << obj << dendl; + ldpp_dout(dpp, 0) << __func__ << "(): deleting obj=" << obj << dendl; obj_ctx.set_atomic(obj); @@ -790,7 +794,7 @@ int RGWAsyncRemoveObj::_send_request() return ret; } -int RGWContinuousLeaseCR::operate() +int RGWContinuousLeaseCR::operate(const DoutPrefixProvider *dpp) { if (aborted) { caller->set_sleeping(false); @@ -816,8 +820,9 @@ int RGWContinuousLeaseCR::operate() return 0; } -RGWRadosTimelogAddCR::RGWRadosTimelogAddCR(rgw::sal::RadosStore* _store, const string& _oid, +RGWRadosTimelogAddCR::RGWRadosTimelogAddCR(const DoutPrefixProvider *_dpp, rgw::sal::RadosStore* _store, const string& _oid, const cls_log_entry& entry) : RGWSimpleCoroutine(_store->ctx()), + dpp(_dpp), store(_store), oid(_oid), cn(NULL) { @@ -826,12 +831,12 @@ RGWRadosTimelogAddCR::RGWRadosTimelogAddCR(rgw::sal::RadosStore* _store, const s entries.push_back(entry); } -int RGWRadosTimelogAddCR::send_request() +int RGWRadosTimelogAddCR::send_request(const DoutPrefixProvider *dpp) { set_status() << "sending request"; cn = stack->create_completion_notifier(); - return store->svc()->cls->timelog.add(oid, entries, cn->completion(), true, null_yield); + return store->svc()->cls->timelog.add(dpp, oid, 
entries, cn->completion(), true, null_yield); } int RGWRadosTimelogAddCR::request_complete() @@ -843,13 +848,14 @@ int RGWRadosTimelogAddCR::request_complete() return r; } -RGWRadosTimelogTrimCR::RGWRadosTimelogTrimCR(rgw::sal::RadosStore* store, +RGWRadosTimelogTrimCR::RGWRadosTimelogTrimCR(const DoutPrefixProvider *dpp, + rgw::sal::RadosStore* store, const std::string& oid, const real_time& start_time, const real_time& end_time, const std::string& from_marker, const std::string& to_marker) - : RGWSimpleCoroutine(store->ctx()), store(store), oid(oid), + : RGWSimpleCoroutine(store->ctx()), dpp(dpp), store(store), oid(oid), start_time(start_time), end_time(end_time), from_marker(from_marker), to_marker(to_marker) { @@ -858,12 +864,12 @@ RGWRadosTimelogTrimCR::RGWRadosTimelogTrimCR(rgw::sal::RadosStore* store, << " from_marker=" << from_marker << " to_marker=" << to_marker; } -int RGWRadosTimelogTrimCR::send_request() +int RGWRadosTimelogTrimCR::send_request(const DoutPrefixProvider *dpp) { set_status() << "sending request"; cn = stack->create_completion_notifier(); - return store->svc()->cls->timelog.trim(oid, start_time, end_time, from_marker, + return store->svc()->cls->timelog.trim(dpp, oid, start_time, end_time, from_marker, to_marker, cn->completion(), null_yield); } @@ -878,10 +884,11 @@ int RGWRadosTimelogTrimCR::request_complete() } -RGWSyncLogTrimCR::RGWSyncLogTrimCR(rgw::sal::RadosStore* store, const std::string& oid, +RGWSyncLogTrimCR::RGWSyncLogTrimCR(const DoutPrefixProvider *dpp, + rgw::sal::RadosStore* store, const std::string& oid, const std::string& to_marker, std::string *last_trim_marker) - : RGWRadosTimelogTrimCR(store, oid, real_time{}, real_time{}, + : RGWRadosTimelogTrimCR(dpp, store, oid, real_time{}, real_time{}, std::string{}, to_marker), cct(store->ctx()), last_trim_marker(last_trim_marker) { @@ -901,19 +908,20 @@ int RGWSyncLogTrimCR::request_complete() } -int RGWAsyncStatObj::_send_request() +int RGWAsyncStatObj::_send_request(const 
DoutPrefixProvider *dpp) { rgw_raw_obj raw_obj; store->getRados()->obj_to_raw(bucket_info.placement_rule, obj, &raw_obj); - return store->getRados()->raw_obj_stat(raw_obj, psize, pmtime, pepoch, + return store->getRados()->raw_obj_stat(dpp, raw_obj, psize, pmtime, pepoch, nullptr, nullptr, objv_tracker, null_yield); } -RGWStatObjCR::RGWStatObjCR(RGWAsyncRadosProcessor *async_rados, rgw::sal::RadosStore* store, +RGWStatObjCR::RGWStatObjCR(const DoutPrefixProvider *dpp, + RGWAsyncRadosProcessor *async_rados, rgw::sal::RadosStore* store, const RGWBucketInfo& _bucket_info, const rgw_obj& obj, uint64_t *psize, real_time* pmtime, uint64_t *pepoch, RGWObjVersionTracker *objv_tracker) - : RGWSimpleCoroutine(store->ctx()), store(store), async_rados(async_rados), + : RGWSimpleCoroutine(store->ctx()), dpp(dpp), store(store), async_rados(async_rados), bucket_info(_bucket_info), obj(obj), psize(psize), pmtime(pmtime), pepoch(pepoch), objv_tracker(objv_tracker) { @@ -927,9 +935,9 @@ void RGWStatObjCR::request_cleanup() } } -int RGWStatObjCR::send_request() +int RGWStatObjCR::send_request(const DoutPrefixProvider *dpp) { - req = new RGWAsyncStatObj(this, stack->create_completion_notifier(), + req = new RGWAsyncStatObj(dpp, this, stack->create_completion_notifier(), store, bucket_info, obj, psize, pmtime, pepoch, objv_tracker); async_rados->queue(req); return 0; @@ -949,11 +957,11 @@ RGWRadosNotifyCR::RGWRadosNotifyCR(rgw::sal::RadosStore* store, const rgw_raw_ob set_description() << "notify dest=" << obj; } -int RGWRadosNotifyCR::send_request() +int RGWRadosNotifyCR::send_request(const DoutPrefixProvider *dpp) { - int r = store->getRados()->get_raw_obj_ref(obj, &ref); + int r = store->getRados()->get_raw_obj_ref(dpp, obj, &ref); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl; return r; } diff --git a/src/rgw/rgw_cr_rados.h 
b/src/rgw/rgw_cr_rados.h index e6918c8cef5..fa94c6a1518 100644 --- a/src/rgw/rgw_cr_rados.h +++ b/src/rgw/rgw_cr_rados.h @@ -17,6 +17,8 @@ #include "services/svc_sys_obj.h" #include "services/svc_bucket.h" +#define dout_subsys ceph_subsys_rgw + class RGWAsyncRadosRequest : public RefCountedObject { RGWCoroutine *caller; RGWAioCompletionNotifier *notifier; @@ -26,7 +28,7 @@ class RGWAsyncRadosRequest : public RefCountedObject { ceph::mutex lock = ceph::make_mutex("RGWAsyncRadosRequest::lock"); protected: - virtual int _send_request() = 0; + virtual int _send_request(const DoutPrefixProvider *dpp) = 0; public: RGWAsyncRadosRequest(RGWCoroutine *_caller, RGWAioCompletionNotifier *_cn) : caller(_caller), notifier(_cn), retcode(0) { @@ -37,9 +39,9 @@ public: } } - void send_request() { + void send_request(const DoutPrefixProvider *dpp) { get(); - retcode = _send_request(); + retcode = _send_request(dpp); { std::lock_guard l{lock}; if (notifier) { @@ -74,7 +76,7 @@ protected: ThreadPool m_tp; Throttle req_throttle; - struct RGWWQ : public ThreadPool::WorkQueue { + struct RGWWQ : public DoutPrefixProvider, public ThreadPool::WorkQueue { RGWAsyncRadosProcessor *processor; RGWWQ(RGWAsyncRadosProcessor *p, ceph::timespan timeout, ceph::timespan suicide_timeout, @@ -93,6 +95,11 @@ protected: void _clear() override { ceph_assert(processor->m_req_queue.empty()); } + + CephContext *get_cct() const { return processor->cct; } + unsigned get_subsys() const { return ceph_subsys_rgw; } + std::ostream& gen_prefix(std::ostream& out) const { return out << "rgw async rados processor: ";} + } req_wq; public: @@ -100,12 +107,13 @@ public: ~RGWAsyncRadosProcessor() {} void start(); void stop(); - void handle_request(RGWAsyncRadosRequest *req); + void handle_request(const DoutPrefixProvider *dpp, RGWAsyncRadosRequest *req); void queue(RGWAsyncRadosRequest *req); bool is_going_down() { return going_down; } + }; template @@ -121,7 +129,7 @@ class RGWSimpleWriteOnlyAsyncCR : public 
RGWSimpleCoroutine { P params; const DoutPrefixProvider *dpp; protected: - int _send_request() override; + int _send_request(const DoutPrefixProvider *dpp) override; public: Request(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, @@ -153,7 +161,7 @@ class RGWSimpleWriteOnlyAsyncCR : public RGWSimpleCoroutine { } } - int send_request() override { + int send_request(const DoutPrefixProvider *dpp) override { req = new Request(this, stack->create_completion_notifier(), store, @@ -184,9 +192,10 @@ class RGWSimpleAsyncCR : public RGWSimpleCoroutine { std::shared_ptr result; const DoutPrefixProvider *dpp; protected: - int _send_request() override; + int _send_request(const DoutPrefixProvider *dpp) override; public: - Request(RGWCoroutine *caller, + Request(const DoutPrefixProvider *dpp, + RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RadosStore* _store, const P& _params, @@ -220,8 +229,9 @@ class RGWSimpleAsyncCR : public RGWSimpleCoroutine { } } - int send_request() override { - req = new Request(this, + int send_request(const DoutPrefixProvider *dpp) override { + req = new Request(dpp, + this, stack->create_completion_notifier(), store, params, @@ -254,14 +264,15 @@ private: class Request : public RGWAsyncRadosRequest { std::shared_ptr action; protected: - int _send_request() override { + int _send_request(const DoutPrefixProvider *dpp) override { if (!action) { return 0; } return action->operate(); } public: - Request(RGWCoroutine *caller, + Request(const DoutPrefixProvider *dpp, + RGWCoroutine *caller, RGWAioCompletionNotifier *cn, std::shared_ptr& _action) : RGWAsyncRadosRequest(caller, cn), action(_action) {} @@ -290,8 +301,8 @@ private: } } - int send_request() override { - req = new Request(this, + int send_request(const DoutPrefixProvider *dpp) override { + req = new Request(dpp, this, stack->create_completion_notifier(), action); @@ -305,14 +316,16 @@ private: class RGWAsyncGetSystemObj : public RGWAsyncRadosRequest { + const 
DoutPrefixProvider *dpp; RGWSysObjectCtx obj_ctx; rgw_raw_obj obj; const bool want_attrs; const bool raw_attrs; protected: - int _send_request() override; + int _send_request(const DoutPrefixProvider *dpp) override; public: - RGWAsyncGetSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWSI_SysObj *_svc, + RGWAsyncGetSystemObj(const DoutPrefixProvider *dpp, + RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWSI_SysObj *_svc, RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj, bool want_attrs, bool raw_attrs); @@ -322,15 +335,17 @@ public: }; class RGWAsyncPutSystemObj : public RGWAsyncRadosRequest { + const DoutPrefixProvider *dpp; RGWSI_SysObj *svc; rgw_raw_obj obj; bool exclusive; bufferlist bl; protected: - int _send_request() override; + int _send_request(const DoutPrefixProvider *dpp) override; public: - RGWAsyncPutSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWSI_SysObj *_svc, + RGWAsyncPutSystemObj(const DoutPrefixProvider *dpp, RGWCoroutine *caller, + RGWAioCompletionNotifier *cn, RGWSI_SysObj *_svc, RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj, bool _exclusive, bufferlist _bl); @@ -338,14 +353,15 @@ public: }; class RGWAsyncPutSystemObjAttrs : public RGWAsyncRadosRequest { + const DoutPrefixProvider *dpp; RGWSI_SysObj *svc; rgw_raw_obj obj; map attrs; protected: - int _send_request() override; + int _send_request(const DoutPrefixProvider *dpp) override; public: - RGWAsyncPutSystemObjAttrs(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWSI_SysObj *_svc, + RGWAsyncPutSystemObjAttrs(const DoutPrefixProvider *dpp, RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWSI_SysObj *_svc, RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj, map _attrs); @@ -360,7 +376,7 @@ class RGWAsyncLockSystemObj : public RGWAsyncRadosRequest { uint32_t duration_secs; protected: - int _send_request() override; + int _send_request(const DoutPrefixProvider *dpp) override; public: 
RGWAsyncLockSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RadosStore* _store, RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj, @@ -374,7 +390,7 @@ class RGWAsyncUnlockSystemObj : public RGWAsyncRadosRequest { string cookie; protected: - int _send_request() override; + int _send_request(const DoutPrefixProvider *dpp) override; public: RGWAsyncUnlockSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RadosStore* _store, RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj, @@ -383,6 +399,7 @@ public: template class RGWSimpleRadosReadCR : public RGWSimpleCoroutine { + const DoutPrefixProvider *dpp; RGWAsyncRadosProcessor *async_rados; RGWSI_SysObj *svc; @@ -394,11 +411,12 @@ class RGWSimpleRadosReadCR : public RGWSimpleCoroutine { RGWAsyncGetSystemObj *req{nullptr}; public: - RGWSimpleRadosReadCR(RGWAsyncRadosProcessor *_async_rados, RGWSI_SysObj *_svc, + RGWSimpleRadosReadCR(const DoutPrefixProvider *_dpp, + RGWAsyncRadosProcessor *_async_rados, RGWSI_SysObj *_svc, const rgw_raw_obj& _obj, T *_result, bool empty_on_enoent = true, RGWObjVersionTracker *objv_tracker = nullptr) - : RGWSimpleCoroutine(_svc->ctx()), async_rados(_async_rados), svc(_svc), + : RGWSimpleCoroutine(_svc->ctx()), dpp(_dpp), async_rados(_async_rados), svc(_svc), obj(_obj), result(_result), empty_on_enoent(empty_on_enoent), objv_tracker(objv_tracker) {} ~RGWSimpleRadosReadCR() override { @@ -412,7 +430,7 @@ public: } } - int send_request() override; + int send_request(const DoutPrefixProvider *dpp) override; int request_complete() override; virtual int handle_data(T& data) { @@ -421,9 +439,9 @@ public: }; template -int RGWSimpleRadosReadCR::send_request() +int RGWSimpleRadosReadCR::send_request(const DoutPrefixProvider *dpp) { - req = new RGWAsyncGetSystemObj(this, stack->create_completion_notifier(), svc, + req = new RGWAsyncGetSystemObj(dpp, this, stack->create_completion_notifier(), svc, objv_tracker, obj, false, false); 
async_rados->queue(req); return 0; @@ -460,6 +478,7 @@ int RGWSimpleRadosReadCR::request_complete() } class RGWSimpleRadosReadAttrsCR : public RGWSimpleCoroutine { + const DoutPrefixProvider *dpp; RGWAsyncRadosProcessor *async_rados; RGWSI_SysObj *svc; @@ -470,10 +489,11 @@ class RGWSimpleRadosReadAttrsCR : public RGWSimpleCoroutine { RGWAsyncGetSystemObj *req = nullptr; public: - RGWSimpleRadosReadAttrsCR(RGWAsyncRadosProcessor *_async_rados, RGWSI_SysObj *_svc, + RGWSimpleRadosReadAttrsCR(const DoutPrefixProvider *_dpp, RGWAsyncRadosProcessor *_async_rados, RGWSI_SysObj *_svc, const rgw_raw_obj& _obj, map *_pattrs, bool _raw_attrs, RGWObjVersionTracker* objv_tracker = nullptr) : RGWSimpleCoroutine(_svc->ctx()), + dpp(_dpp), async_rados(_async_rados), svc(_svc), obj(_obj), pattrs(_pattrs), @@ -491,12 +511,13 @@ public: } } - int send_request() override; + int send_request(const DoutPrefixProvider *dpp) override; int request_complete() override; }; template class RGWSimpleRadosWriteCR : public RGWSimpleCoroutine { + const DoutPrefixProvider *dpp; RGWAsyncRadosProcessor *async_rados; RGWSI_SysObj *svc; bufferlist bl; @@ -505,10 +526,11 @@ class RGWSimpleRadosWriteCR : public RGWSimpleCoroutine { RGWAsyncPutSystemObj *req{nullptr}; public: - RGWSimpleRadosWriteCR(RGWAsyncRadosProcessor *_async_rados, RGWSI_SysObj *_svc, + RGWSimpleRadosWriteCR(const DoutPrefixProvider *_dpp, + RGWAsyncRadosProcessor *_async_rados, RGWSI_SysObj *_svc, const rgw_raw_obj& _obj, const T& _data, RGWObjVersionTracker *objv_tracker = nullptr) - : RGWSimpleCoroutine(_svc->ctx()), async_rados(_async_rados), + : RGWSimpleCoroutine(_svc->ctx()), dpp(_dpp), async_rados(_async_rados), svc(_svc), obj(_obj), objv_tracker(objv_tracker) { encode(_data, bl); } @@ -524,8 +546,8 @@ public: } } - int send_request() override { - req = new RGWAsyncPutSystemObj(this, stack->create_completion_notifier(), + int send_request(const DoutPrefixProvider *dpp) override { + req = new RGWAsyncPutSystemObj(dpp, this, 
stack->create_completion_notifier(), svc, objv_tracker, obj, false, std::move(bl)); async_rados->queue(req); return 0; @@ -540,6 +562,7 @@ public: }; class RGWSimpleRadosWriteAttrsCR : public RGWSimpleCoroutine { + const DoutPrefixProvider *dpp; RGWAsyncRadosProcessor *async_rados; RGWSI_SysObj *svc; RGWObjVersionTracker *objv_tracker; @@ -549,11 +572,12 @@ class RGWSimpleRadosWriteAttrsCR : public RGWSimpleCoroutine { RGWAsyncPutSystemObjAttrs *req = nullptr; public: - RGWSimpleRadosWriteAttrsCR(RGWAsyncRadosProcessor *_async_rados, + RGWSimpleRadosWriteAttrsCR(const DoutPrefixProvider *_dpp, + RGWAsyncRadosProcessor *_async_rados, RGWSI_SysObj *_svc, const rgw_raw_obj& _obj, map _attrs, RGWObjVersionTracker *objv_tracker = nullptr) - : RGWSimpleCoroutine(_svc->ctx()), async_rados(_async_rados), + : RGWSimpleCoroutine(_svc->ctx()), dpp(_dpp), async_rados(_async_rados), svc(_svc), objv_tracker(objv_tracker), obj(_obj), attrs(std::move(_attrs)) { } @@ -568,8 +592,8 @@ public: } } - int send_request() override { - req = new RGWAsyncPutSystemObjAttrs(this, stack->create_completion_notifier(), + int send_request(const DoutPrefixProvider *dpp) override { + req = new RGWAsyncPutSystemObjAttrs(dpp, this, stack->create_completion_notifier(), svc, objv_tracker, obj, std::move(attrs)); async_rados->queue(req); return 0; @@ -598,7 +622,7 @@ public: const rgw_raw_obj& _obj, map& _entries); - int send_request() override; + int send_request(const DoutPrefixProvider *dpp) override; int request_complete() override; }; @@ -615,7 +639,7 @@ class RGWRadosGetOmapKeysCR : public RGWSimpleCoroutine { const string& _marker, int _max_entries, ResultPtr result); - int send_request() override; + int send_request(const DoutPrefixProvider *dpp) override; int request_complete() override; private: @@ -640,7 +664,7 @@ class RGWRadosGetOmapValsCR : public RGWSimpleCoroutine { const string& _marker, int _max_entries, ResultPtr result); - int send_request() override; + int send_request(const 
DoutPrefixProvider *dpp) override; int request_complete() override; private: @@ -668,7 +692,7 @@ public: const rgw_raw_obj& _obj, const set& _keys); - int send_request() override; + int send_request(const DoutPrefixProvider *dpp) override; int request_complete() override; }; @@ -684,7 +708,7 @@ public: RGWRadosRemoveCR(rgw::sal::RadosStore* store, const rgw_raw_obj& obj, RGWObjVersionTracker* objv_tracker = nullptr); - int send_request() override; + int send_request(const DoutPrefixProvider *dpp) override; int request_complete() override; }; @@ -710,7 +734,7 @@ public: } void request_cleanup() override; - int send_request() override; + int send_request(const DoutPrefixProvider *dpp) override; int request_complete() override; static std::string gen_random_cookie(CephContext* cct) { @@ -741,7 +765,7 @@ public: } void request_cleanup() override; - int send_request() override; + int send_request(const DoutPrefixProvider *dpp) override; int request_complete() override; }; @@ -766,7 +790,7 @@ public: RGWOmapAppend(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RadosStore* _store, const rgw_raw_obj& _obj, uint64_t _window_size = OMAP_APPEND_MAX_ENTRIES_DEFAULT); - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; void flush_pending(); bool append(const string& s); bool finish(); @@ -831,7 +855,7 @@ class RGWAsyncGetBucketInstanceInfo : public RGWAsyncRadosRequest { const DoutPrefixProvider *dpp; protected: - int _send_request() override; + int _send_request(const DoutPrefixProvider *dpp) override; public: RGWAsyncGetBucketInstanceInfo(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RadosStore* _store, const rgw_bucket& bucket, @@ -869,7 +893,7 @@ public: } } - int send_request() override { + int send_request(const DoutPrefixProvider *dpp) override { req = new RGWAsyncGetBucketInstanceInfo(this, stack->create_completion_notifier(), store, bucket, dpp); async_rados->queue(req); return 0; @@ -891,11 +915,12 @@ class 
RGWRadosBILogTrimCR : public RGWSimpleCoroutine { std::string end_marker; boost::intrusive_ptr cn; public: - RGWRadosBILogTrimCR(rgw::sal::RadosStore* store, const RGWBucketInfo& bucket_info, + RGWRadosBILogTrimCR(const DoutPrefixProvider *dpp, + rgw::sal::RadosStore* store, const RGWBucketInfo& bucket_info, int shard_id, const std::string& start_marker, const std::string& end_marker); - int send_request() override; + int send_request(const DoutPrefixProvider *dpp) override; int request_complete() override; }; @@ -922,7 +947,7 @@ class RGWAsyncFetchRemoteObj : public RGWAsyncRadosRequest { const DoutPrefixProvider *dpp; protected: - int _send_request() override; + int _send_request(const DoutPrefixProvider *dpp) override; public: RGWAsyncFetchRemoteObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RadosStore* _store, const rgw_zone_id& _source_zone, @@ -1025,7 +1050,7 @@ public: } } - int send_request() override { + int send_request(const DoutPrefixProvider *dpp) override { req = new RGWAsyncFetchRemoteObj(this, stack->create_completion_notifier(), store, source_zone, user_id, src_bucket, dest_placement_rule, dest_bucket_info, key, dest_key, versioned_epoch, copy_if_newer, filter, @@ -1053,7 +1078,7 @@ class RGWAsyncStatRemoteObj : public RGWAsyncRadosRequest { map *pheaders; protected: - int _send_request() override; + int _send_request(const DoutPrefixProvider *dpp) override; public: RGWAsyncStatRemoteObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RadosStore* _store, const rgw_zone_id& _source_zone, @@ -1124,7 +1149,7 @@ public: } } - int send_request() override { + int send_request(const DoutPrefixProvider *dpp) override { req = new RGWAsyncStatRemoteObj(this, stack->create_completion_notifier(), store, source_zone, src_bucket, key, pmtime, psize, petag, pattrs, pheaders); async_rados->queue(req); @@ -1155,7 +1180,7 @@ class RGWAsyncRemoveObj : public RGWAsyncRadosRequest { rgw_zone_set zones_trace; protected: - int 
_send_request() override; + int _send_request(const DoutPrefixProvider *dpp) override; public: RGWAsyncRemoveObj(const DoutPrefixProvider *_dpp, RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RadosStore* _store, @@ -1255,7 +1280,7 @@ public: } } - int send_request() override { + int send_request(const DoutPrefixProvider *dpp) override { req = new RGWAsyncRemoveObj(dpp, this, stack->create_completion_notifier(), store, source_zone, bucket_info, key, owner, owner_display_name, versioned, versioned_epoch, delete_marker, del_if_older, timestamp, zones_trace); @@ -1295,7 +1320,7 @@ public: interval(_interval), caller(_caller) {} - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; bool is_locked() const { return locked; @@ -1316,6 +1341,7 @@ public: }; class RGWRadosTimelogAddCR : public RGWSimpleCoroutine { + const DoutPrefixProvider *dpp; rgw::sal::RadosStore* store; list entries; @@ -1324,14 +1350,15 @@ class RGWRadosTimelogAddCR : public RGWSimpleCoroutine { boost::intrusive_ptr cn; public: - RGWRadosTimelogAddCR(rgw::sal::RadosStore* _store, const string& _oid, + RGWRadosTimelogAddCR(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* _store, const string& _oid, const cls_log_entry& entry); - int send_request() override; + int send_request(const DoutPrefixProvider *dpp) override; int request_complete() override; }; class RGWRadosTimelogTrimCR : public RGWSimpleCoroutine { + const DoutPrefixProvider *dpp; rgw::sal::RadosStore* store; boost::intrusive_ptr cn; protected: @@ -1342,12 +1369,13 @@ class RGWRadosTimelogTrimCR : public RGWSimpleCoroutine { std::string to_marker; public: - RGWRadosTimelogTrimCR(rgw::sal::RadosStore* store, const std::string& oid, + RGWRadosTimelogTrimCR(const DoutPrefixProvider *dpp, + rgw::sal::RadosStore* store, const std::string& oid, const real_time& start_time, const real_time& end_time, const std::string& from_marker, const std::string& to_marker); - int send_request() override; + int 
send_request(const DoutPrefixProvider *dpp) override; int request_complete() override; }; @@ -1358,12 +1386,14 @@ class RGWSyncLogTrimCR : public RGWRadosTimelogTrimCR { public: static constexpr const char* max_marker = "99999999"; - RGWSyncLogTrimCR(rgw::sal::RadosStore* store, const std::string& oid, + RGWSyncLogTrimCR(const DoutPrefixProvider *dpp, + rgw::sal::RadosStore* store, const std::string& oid, const std::string& to_marker, std::string *last_trim_marker); int request_complete() override; }; class RGWAsyncStatObj : public RGWAsyncRadosRequest { + const DoutPrefixProvider *dpp; rgw::sal::RadosStore* store; RGWBucketInfo bucket_info; rgw_obj obj; @@ -1372,17 +1402,18 @@ class RGWAsyncStatObj : public RGWAsyncRadosRequest { uint64_t *pepoch; RGWObjVersionTracker *objv_tracker; protected: - int _send_request() override; + int _send_request(const DoutPrefixProvider *dpp) override; public: - RGWAsyncStatObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RadosStore* store, + RGWAsyncStatObj(const DoutPrefixProvider *dpp, RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RadosStore* store, const RGWBucketInfo& _bucket_info, const rgw_obj& obj, uint64_t *psize = nullptr, real_time *pmtime = nullptr, uint64_t *pepoch = nullptr, RGWObjVersionTracker *objv_tracker = nullptr) - : RGWAsyncRadosRequest(caller, cn), store(store), obj(obj), psize(psize), + : RGWAsyncRadosRequest(caller, cn), dpp(dpp), store(store), obj(obj), psize(psize), pmtime(pmtime), pepoch(pepoch), objv_tracker(objv_tracker) {} }; class RGWStatObjCR : public RGWSimpleCoroutine { + const DoutPrefixProvider *dpp; rgw::sal::RadosStore* store; RGWAsyncRadosProcessor *async_rados; RGWBucketInfo bucket_info; @@ -1393,7 +1424,7 @@ class RGWStatObjCR : public RGWSimpleCoroutine { RGWObjVersionTracker *objv_tracker; RGWAsyncStatObj *req = nullptr; public: - RGWStatObjCR(RGWAsyncRadosProcessor *async_rados, rgw::sal::RadosStore* store, + RGWStatObjCR(const DoutPrefixProvider *dpp, 
RGWAsyncRadosProcessor *async_rados, rgw::sal::RadosStore* store, const RGWBucketInfo& _bucket_info, const rgw_obj& obj, uint64_t *psize = nullptr, real_time* pmtime = nullptr, uint64_t *pepoch = nullptr, RGWObjVersionTracker *objv_tracker = nullptr); @@ -1402,7 +1433,7 @@ class RGWStatObjCR : public RGWSimpleCoroutine { } void request_cleanup() override; - int send_request() override; + int send_request(const DoutPrefixProvider *dpp) override; int request_complete() override; }; @@ -1421,7 +1452,7 @@ public: bufferlist& request, uint64_t timeout_ms, bufferlist *response); - int send_request() override; + int send_request(const DoutPrefixProvider *dpp) override; int request_complete() override; }; diff --git a/src/rgw/rgw_cr_rest.cc b/src/rgw/rgw_cr_rest.cc index 06c36522250..f290d11929e 100644 --- a/src/rgw/rgw_cr_rest.cc +++ b/src/rgw/rgw_cr_rest.cc @@ -87,7 +87,7 @@ RGWStreamReadHTTPResourceCRF::~RGWStreamReadHTTPResourceCRF() } } -int RGWStreamReadHTTPResourceCRF::init() +int RGWStreamReadHTTPResourceCRF::init(const DoutPrefixProvider *dpp) { env->stack->init_new_io(req); @@ -191,7 +191,7 @@ RGWStreamWriteHTTPResourceCRF::~RGWStreamWriteHTTPResourceCRF() } } -void RGWStreamWriteHTTPResourceCRF::send_ready(const rgw_rest_obj& rest_obj) +void RGWStreamWriteHTTPResourceCRF::send_ready(const DoutPrefixProvider *dpp, const rgw_rest_obj& rest_obj) { req->set_send_length(rest_obj.content_len); for (auto h : rest_obj.attrs) { @@ -264,10 +264,10 @@ RGWStreamSpliceCR::RGWStreamSpliceCR(CephContext *_cct, RGWHTTPManager *_mgr, in_crf(_in_crf), out_crf(_out_crf) {} RGWStreamSpliceCR::~RGWStreamSpliceCR() { } -int RGWStreamSpliceCR::operate() { +int RGWStreamSpliceCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { { - int ret = in_crf->init(); + int ret = in_crf->init(dpp); if (ret < 0) { return set_cr_error(ret); } @@ -303,7 +303,7 @@ int RGWStreamSpliceCR::operate() { if (ret < 0) { return set_cr_error(ret); } - out_crf->send_ready(in_crf->get_rest_obj()); + 
out_crf->send_ready(dpp, in_crf->get_rest_obj()); ret = out_crf->send(); if (ret < 0) { return set_cr_error(ret); diff --git a/src/rgw/rgw_cr_rest.h b/src/rgw/rgw_cr_rest.h index 0776c4284f6..914eebee02d 100644 --- a/src/rgw/rgw_cr_rest.h +++ b/src/rgw/rgw_cr_rest.h @@ -69,13 +69,13 @@ public: request_cleanup(); } - int send_request() override { + int send_request(const DoutPrefixProvider *dpp) override { auto op = boost::intrusive_ptr( new RGWRESTReadResource(conn, path, params, &extra_headers, http_manager)); init_new_io(op.get()); - int ret = op->aio_read(); + int ret = op->aio_read(dpp); if (ret < 0) { log_error() << "failed to send http operation: " << op->to_str() << " ret=" << ret << std::endl; @@ -186,15 +186,15 @@ class RGWSendRawRESTResourceCR: public RGWSimpleCoroutine { request_cleanup(); } - int send_request() override { + int send_request(const DoutPrefixProvider *dpp) override { auto op = boost::intrusive_ptr( new RGWRESTSendResource(conn, method, path, params, &headers, http_manager)); init_new_io(op.get()); - int ret = op->aio_send(input_bl); + int ret = op->aio_send(dpp, input_bl); if (ret < 0) { - lsubdout(cct, rgw, 0) << "ERROR: failed to send request" << dendl; + ldpp_subdout(dpp, rgw, 0) << "ERROR: failed to send request" << dendl; op->put(); return ret; } @@ -341,7 +341,7 @@ public: request_cleanup(); } - int send_request() override { + int send_request(const DoutPrefixProvider *dpp) override { auto op = boost::intrusive_ptr( new RGWRESTDeleteResource(conn, path, params, nullptr, http_manager)); @@ -349,9 +349,9 @@ public: bufferlist bl; - int ret = op->aio_send(bl); + int ret = op->aio_send(dpp, bl); if (ret < 0) { - lsubdout(cct, rgw, 0) << "ERROR: failed to send DELETE request" << dendl; + ldpp_subdout(dpp, rgw, 0) << "ERROR: failed to send DELETE request" << dendl; op->put(); return ret; } @@ -421,7 +421,7 @@ protected: boost::asio::coroutine read_state; public: - virtual int init() = 0; + virtual int init(const DoutPrefixProvider *dpp) = 
0; virtual int read(bufferlist *data, uint64_t max, bool *need_retry) = 0; /* reentrant */ virtual int decode_rest_obj(map& headers, bufferlist& extra_data) = 0; virtual bool has_attrs() = 0; @@ -436,7 +436,7 @@ protected: public: virtual int init() = 0; - virtual void send_ready(const rgw_rest_obj& rest_obj) = 0; + virtual void send_ready(const DoutPrefixProvider *dpp, const rgw_rest_obj& rest_obj) = 0; virtual int send() = 0; virtual int write(bufferlist& data, bool *need_retry) = 0; /* reentrant */ virtual int drain_writes(bool *need_retry) = 0; /* reentrant */ @@ -486,7 +486,7 @@ public: } ~RGWStreamReadHTTPResourceCRF(); - int init() override; + int init(const DoutPrefixProvider *dpp) override; int read(bufferlist *data, uint64_t max, bool *need_retry) override; /* reentrant */ int decode_rest_obj(map& headers, bufferlist& extra_data) override; bool has_attrs() override; @@ -549,7 +549,7 @@ public: int init() override { return 0; } - void send_ready(const rgw_rest_obj& rest_obj) override; + void send_ready(const DoutPrefixProvider *dpp, const rgw_rest_obj& rest_obj) override; int send() override; int write(bufferlist& data, bool *need_retry) override; /* reentrant */ void write_drain_notify(uint64_t pending_size); @@ -586,5 +586,5 @@ public: std::shared_ptr& _out_crf); ~RGWStreamSpliceCR(); - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; diff --git a/src/rgw/rgw_cr_tools.cc b/src/rgw/rgw_cr_tools.cc index f6eee0d207e..2d54386b5d3 100644 --- a/src/rgw/rgw_cr_tools.cc +++ b/src/rgw/rgw_cr_tools.cc @@ -16,7 +16,7 @@ #define dout_subsys ceph_subsys_rgw template<> -int RGWUserCreateCR::Request::_send_request() +int RGWUserCreateCR::Request::_send_request(const DoutPrefixProvider *dpp) { CephContext *cct = store->ctx(); @@ -91,19 +91,19 @@ int RGWUserCreateCR::Request::_send_request() } template<> -int RGWGetUserInfoCR::Request::_send_request() +int RGWGetUserInfoCR::Request::_send_request(const DoutPrefixProvider *dpp) { return 
store->ctl()->user->get_info_by_uid(dpp, params.user, result.get(), null_yield); } template<> -int RGWGetBucketInfoCR::Request::_send_request() +int RGWGetBucketInfoCR::Request::_send_request(const DoutPrefixProvider *dpp) { return store->get_bucket(dpp, nullptr, params.tenant, params.bucket_name, &result->bucket, null_yield); } template<> -int RGWBucketCreateLocalCR::Request::_send_request() +int RGWBucketCreateLocalCR::Request::_send_request(const DoutPrefixProvider *dpp) { CephContext *cct = store->ctx(); auto& zone_svc = store->svc()->zone; @@ -115,7 +115,7 @@ int RGWBucketCreateLocalCR::Request::_send_request() if (!placement_rule.empty() && !zone_svc->get_zone_params().valid_placement(placement_rule)) { - ldout(cct, 0) << "placement target (" << placement_rule << ")" + ldpp_dout(dpp, 0) << "placement target (" << placement_rule << ")" << " doesn't exist in the placement targets of zonegroup" << " (" << zone_svc->get_zonegroup().api_name << ")" << dendl; return -ERR_INVALID_LOCATION_CONSTRAINT; @@ -158,11 +158,11 @@ int RGWBucketCreateLocalCR::Request::_send_request() rgw_bucket bucket; bucket.tenant = user.tenant; bucket.name = bucket_name; - ret = zone_svc->select_bucket_placement(*user_info, zonegroup_id, + ret = zone_svc->select_bucket_placement(dpp, *user_info, zonegroup_id, placement_rule, &selected_placement_rule, nullptr, null_yield); if (selected_placement_rule != bucket_info.placement_rule) { - ldout(cct, 0) << "bucket already exists on a different placement rule: " + ldpp_dout(dpp, 0) << "bucket already exists on a different placement rule: " << " selected_rule= " << selected_placement_rule << " existing_rule= " << bucket_info.placement_rule << dendl; return -EEXIST; @@ -228,7 +228,7 @@ int RGWBucketCreateLocalCR::Request::_send_request() } template<> -int RGWObjectSimplePutCR::Request::_send_request() +int RGWObjectSimplePutCR::Request::_send_request(const DoutPrefixProvider *dpp) { RGWDataAccess::ObjectRef obj; @@ -253,7 +253,7 @@ int 
RGWObjectSimplePutCR::Request::_send_request() } template<> -int RGWBucketLifecycleConfigCR::Request::_send_request() +int RGWBucketLifecycleConfigCR::Request::_send_request(const DoutPrefixProvider *dpp) { CephContext *cct = store->ctx(); @@ -275,7 +275,7 @@ int RGWBucketLifecycleConfigCR::Request::_send_request() } template<> -int RGWBucketGetSyncPolicyHandlerCR::Request::_send_request() +int RGWBucketGetSyncPolicyHandlerCR::Request::_send_request(const DoutPrefixProvider *dpp) { int r = store->ctl()->bucket->get_sync_policy_handler(params.zone, params.bucket, diff --git a/src/rgw/rgw_crypt.cc b/src/rgw/rgw_crypt.cc index 1d00ca6f6c7..9f7e6925642 100644 --- a/src/rgw/rgw_crypt.cc +++ b/src/rgw/rgw_crypt.cc @@ -646,7 +646,7 @@ RGWGetObj_BlockDecrypt::RGWGetObj_BlockDecrypt(CephContext* cct, RGWGetObj_BlockDecrypt::~RGWGetObj_BlockDecrypt() { } -int RGWGetObj_BlockDecrypt::read_manifest(bufferlist& manifest_bl) { +int RGWGetObj_BlockDecrypt::read_manifest(const DoutPrefixProvider *dpp, bufferlist& manifest_bl) { parts_len.clear(); RGWObjManifest manifest; if (manifest_bl.length()) { @@ -654,11 +654,11 @@ int RGWGetObj_BlockDecrypt::read_manifest(bufferlist& manifest_bl) { try { decode(manifest, miter); } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: couldn't decode manifest" << dendl; + ldpp_dout(dpp, 0) << "ERROR: couldn't decode manifest" << dendl; return -EIO; } RGWObjManifest::obj_iterator mi; - for (mi = manifest.obj_begin(); mi != manifest.obj_end(); ++mi) { + for (mi = manifest.obj_begin(dpp); mi != manifest.obj_end(dpp); ++mi) { if (mi.get_cur_stripe() == 0) { parts_len.push_back(0); } @@ -666,7 +666,7 @@ int RGWGetObj_BlockDecrypt::read_manifest(bufferlist& manifest_bl) { } if (cct->_conf->subsys.should_gather()) { for (size_t i = 0; icct, 5) << "ERROR: Invalid value for header " + ldpp_dout(s, 5) << "ERROR: Invalid value for header " << "x-amz-server-side-encryption-customer-algorithm" << dendl; s->err.message = "The requested encryption 
algorithm is not valid, must be AES256."; @@ -917,7 +917,7 @@ int rgw_s3_prepare_encrypt(struct req_state* s, } if (s->cct->_conf->rgw_crypt_require_ssl && !rgw_transport_is_secure(s->cct, *s->info.env)) { - ldout(s->cct, 5) << "ERROR: Insecure request, rgw_crypt_require_ssl is set" << dendl; + ldpp_dout(s, 5) << "ERROR: Insecure request, rgw_crypt_require_ssl is set" << dendl; return -ERR_INVALID_REQUEST; } @@ -926,7 +926,7 @@ int rgw_s3_prepare_encrypt(struct req_state* s, key_bin = from_base64( get_crypt_attribute(s->info.env, parts, X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY) ); } catch (...) { - ldout(s->cct, 5) << "ERROR: rgw_s3_prepare_encrypt invalid encryption " + ldpp_dout(s, 5) << "ERROR: rgw_s3_prepare_encrypt invalid encryption " << "key which contains character that is not base64 encoded." << dendl; s->err.message = "Requests specifying Server Side Encryption with Customer " @@ -935,7 +935,7 @@ int rgw_s3_prepare_encrypt(struct req_state* s, } if (key_bin.size() != AES_256_CBC::AES_256_KEYSIZE) { - ldout(s->cct, 5) << "ERROR: invalid encryption key size" << dendl; + ldpp_dout(s, 5) << "ERROR: invalid encryption key size" << dendl; s->err.message = "Requests specifying Server Side Encryption with Customer " "provided keys must provide an appropriate secret key."; return -EINVAL; @@ -948,7 +948,7 @@ int rgw_s3_prepare_encrypt(struct req_state* s, try { keymd5_bin = from_base64(keymd5); } catch (...) { - ldout(s->cct, 5) << "ERROR: rgw_s3_prepare_encrypt invalid encryption key " + ldpp_dout(s, 5) << "ERROR: rgw_s3_prepare_encrypt invalid encryption key " << "md5 which contains character that is not base64 encoded." 
<< dendl; s->err.message = "Requests specifying Server Side Encryption with Customer " @@ -957,7 +957,7 @@ int rgw_s3_prepare_encrypt(struct req_state* s, } if (keymd5_bin.size() != CEPH_CRYPTO_MD5_DIGESTSIZE) { - ldout(s->cct, 5) << "ERROR: Invalid key md5 size" << dendl; + ldpp_dout(s, 5) << "ERROR: Invalid key md5 size" << dendl; s->err.message = "Requests specifying Server Side Encryption with Customer " "provided keys must provide an appropriate secret key md5."; return -EINVAL; @@ -969,7 +969,7 @@ int rgw_s3_prepare_encrypt(struct req_state* s, key_hash.Final(key_hash_res); if (memcmp(key_hash_res, keymd5_bin.c_str(), CEPH_CRYPTO_MD5_DIGESTSIZE) != 0) { - ldout(s->cct, 5) << "ERROR: Invalid key md5 hash" << dendl; + ldpp_dout(s, 5) << "ERROR: Invalid key md5 hash" << dendl; s->err.message = "The calculated MD5 hash of the key did not match the hash that was provided."; return -EINVAL; } @@ -990,7 +990,7 @@ int rgw_s3_prepare_encrypt(struct req_state* s, std::string_view customer_key = get_crypt_attribute(s->info.env, parts, X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY); if (!customer_key.empty()) { - ldout(s->cct, 5) << "ERROR: SSE-C encryption request is missing the header " + ldpp_dout(s, 5) << "ERROR: SSE-C encryption request is missing the header " << "x-amz-server-side-encryption-customer-algorithm" << dendl; s->err.message = "Requests specifying Server Side Encryption with Customer " @@ -1001,7 +1001,7 @@ int rgw_s3_prepare_encrypt(struct req_state* s, std::string_view customer_key_md5 = get_crypt_attribute(s->info.env, parts, X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5); if (!customer_key_md5.empty()) { - ldout(s->cct, 5) << "ERROR: SSE-C encryption request is missing the header " + ldpp_dout(s, 5) << "ERROR: SSE-C encryption request is missing the header " << "x-amz-server-side-encryption-customer-algorithm" << dendl; s->err.message = "Requests specifying Server Side Encryption with Customer " @@ -1017,7 +1017,7 @@ int rgw_s3_prepare_encrypt(struct 
req_state* s, if (s->cct->_conf->rgw_crypt_require_ssl && !rgw_transport_is_secure(s->cct, *s->info.env)) { - ldout(s->cct, 5) << "ERROR: insecure request, rgw_crypt_require_ssl is set" << dendl; + ldpp_dout(s, 5) << "ERROR: insecure request, rgw_crypt_require_ssl is set" << dendl; return -ERR_INVALID_REQUEST; } @@ -1030,7 +1030,7 @@ int rgw_s3_prepare_encrypt(struct req_state* s, std::string_view key_id = get_crypt_attribute(s->info.env, parts, X_AMZ_SERVER_SIDE_ENCRYPTION_AWS_KMS_KEY_ID); if (key_id.empty()) { - ldout(s->cct, 5) << "ERROR: not provide a valid key id" << dendl; + ldpp_dout(s, 5) << "ERROR: not provide a valid key id" << dendl; s->err.message = "Server Side Encryption with KMS managed key requires " "HTTP header x-amz-server-side-encryption-aws-kms-key-id"; return -ERR_INVALID_ACCESS_KEY; @@ -1044,12 +1044,12 @@ int rgw_s3_prepare_encrypt(struct req_state* s, std::string actual_key; res = make_actual_key_from_kms(s->cct, attrs, actual_key); if (res != 0) { - ldout(s->cct, 5) << "ERROR: failed to retrieve actual key from key_id: " << key_id << dendl; + ldpp_dout(s, 5) << "ERROR: failed to retrieve actual key from key_id: " << key_id << dendl; s->err.message = "Failed to retrieve the actual key, kms-keyid: " + std::string(key_id); return res; } if (actual_key.size() != AES_256_KEYSIZE) { - ldout(s->cct, 5) << "ERROR: key obtained from key_id:" << + ldpp_dout(s, 5) << "ERROR: key obtained from key_id:" << key_id << " is not 256 bit size" << dendl; s->err.message = "KMS provided an invalid key for the given kms-keyid."; return -ERR_INVALID_ACCESS_KEY; @@ -1069,7 +1069,7 @@ int rgw_s3_prepare_encrypt(struct req_state* s, } else if (req_sse == "AES256") { /* if a default encryption key was provided, we will use it for SSE-S3 */ } else { - ldout(s->cct, 5) << "ERROR: Invalid value for header x-amz-server-side-encryption" + ldpp_dout(s, 5) << "ERROR: Invalid value for header x-amz-server-side-encryption" << dendl; s->err.message = "Server Side Encryption 
with KMS managed key requires " "HTTP header x-amz-server-side-encryption : aws:kms or AES256"; @@ -1081,7 +1081,7 @@ int rgw_s3_prepare_encrypt(struct req_state* s, get_crypt_attribute(s->info.env, parts, X_AMZ_SERVER_SIDE_ENCRYPTION_AWS_KMS_KEY_ID); if (!key_id.empty()) { - ldout(s->cct, 5) << "ERROR: SSE-KMS encryption request is missing the header " + ldpp_dout(s, 5) << "ERROR: SSE-KMS encryption request is missing the header " << "x-amz-server-side-encryption" << dendl; s->err.message = "Server Side Encryption with KMS managed key requires " @@ -1096,7 +1096,7 @@ int rgw_s3_prepare_encrypt(struct req_state* s, try { master_encryption_key = from_base64(s->cct->_conf->rgw_crypt_default_encryption_key); } catch (...) { - ldout(s->cct, 5) << "ERROR: rgw_s3_prepare_encrypt invalid default encryption key " + ldpp_dout(s, 5) << "ERROR: rgw_s3_prepare_encrypt invalid default encryption key " << "which contains character that is not base64 encoded." << dendl; s->err.message = "Requests specifying Server Side Encryption with Customer " @@ -1105,7 +1105,7 @@ int rgw_s3_prepare_encrypt(struct req_state* s, } if (master_encryption_key.size() != 256 / 8) { - ldout(s->cct, 0) << "ERROR: failed to decode 'rgw crypt default encryption key' to 256 bit string" << dendl; + ldpp_dout(s, 0) << "ERROR: failed to decode 'rgw crypt default encryption key' to 256 bit string" << dendl; /* not an error to return; missing encryption does not inhibit processing */ return 0; } @@ -1143,7 +1143,7 @@ int rgw_s3_prepare_decrypt(struct req_state* s, { int res = 0; std::string stored_mode = get_str_attribute(attrs, RGW_ATTR_CRYPT_MODE); - ldout(s->cct, 15) << "Encryption mode: " << stored_mode << dendl; + ldpp_dout(s, 15) << "Encryption mode: " << stored_mode << dendl; const char *req_sse = s->info.env->get("HTTP_X_AMZ_SERVER_SIDE_ENCRYPTION", NULL); if (nullptr != req_sse && (s->op == OP_GET || s->op == OP_HEAD)) { @@ -1153,21 +1153,21 @@ int rgw_s3_prepare_decrypt(struct req_state* s, if 
(stored_mode == "SSE-C-AES256") { if (s->cct->_conf->rgw_crypt_require_ssl && !rgw_transport_is_secure(s->cct, *s->info.env)) { - ldout(s->cct, 5) << "ERROR: Insecure request, rgw_crypt_require_ssl is set" << dendl; + ldpp_dout(s, 5) << "ERROR: Insecure request, rgw_crypt_require_ssl is set" << dendl; return -ERR_INVALID_REQUEST; } const char *req_cust_alg = s->info.env->get("HTTP_X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM", NULL); if (nullptr == req_cust_alg) { - ldout(s->cct, 5) << "ERROR: Request for SSE-C encrypted object missing " + ldpp_dout(s, 5) << "ERROR: Request for SSE-C encrypted object missing " << "x-amz-server-side-encryption-customer-algorithm" << dendl; s->err.message = "Requests specifying Server Side Encryption with Customer " "provided keys must provide a valid encryption algorithm."; return -EINVAL; } else if (strcmp(req_cust_alg, "AES256") != 0) { - ldout(s->cct, 5) << "ERROR: The requested encryption algorithm is not valid, must be AES256." << dendl; + ldpp_dout(s, 5) << "ERROR: The requested encryption algorithm is not valid, must be AES256." << dendl; s->err.message = "The requested encryption algorithm is not valid, must be AES256."; return -ERR_INVALID_ENCRYPTION_ALGORITHM; } @@ -1176,7 +1176,7 @@ int rgw_s3_prepare_decrypt(struct req_state* s, try { key_bin = from_base64(s->info.env->get("HTTP_X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY", "")); } catch (...) { - ldout(s->cct, 5) << "ERROR: rgw_s3_prepare_decrypt invalid encryption key " + ldpp_dout(s, 5) << "ERROR: rgw_s3_prepare_decrypt invalid encryption key " << "which contains character that is not base64 encoded." 
<< dendl; s->err.message = "Requests specifying Server Side Encryption with Customer " @@ -1185,7 +1185,7 @@ int rgw_s3_prepare_decrypt(struct req_state* s, } if (key_bin.size() != AES_256_CBC::AES_256_KEYSIZE) { - ldout(s->cct, 5) << "ERROR: Invalid encryption key size" << dendl; + ldpp_dout(s, 5) << "ERROR: Invalid encryption key size" << dendl; s->err.message = "Requests specifying Server Side Encryption with Customer " "provided keys must provide an appropriate secret key."; return -EINVAL; @@ -1197,7 +1197,7 @@ int rgw_s3_prepare_decrypt(struct req_state* s, try { keymd5_bin = from_base64(keymd5); } catch (...) { - ldout(s->cct, 5) << "ERROR: rgw_s3_prepare_decrypt invalid encryption key md5 " + ldpp_dout(s, 5) << "ERROR: rgw_s3_prepare_decrypt invalid encryption key md5 " << "which contains character that is not base64 encoded." << dendl; s->err.message = "Requests specifying Server Side Encryption with Customer " @@ -1207,7 +1207,7 @@ int rgw_s3_prepare_decrypt(struct req_state* s, if (keymd5_bin.size() != CEPH_CRYPTO_MD5_DIGESTSIZE) { - ldout(s->cct, 5) << "ERROR: Invalid key md5 size " << dendl; + ldpp_dout(s, 5) << "ERROR: Invalid key md5 size " << dendl; s->err.message = "Requests specifying Server Side Encryption with Customer " "provided keys must provide an appropriate secret key md5."; return -EINVAL; @@ -1235,7 +1235,7 @@ int rgw_s3_prepare_decrypt(struct req_state* s, if (stored_mode == "SSE-KMS") { if (s->cct->_conf->rgw_crypt_require_ssl && !rgw_transport_is_secure(s->cct, *s->info.env)) { - ldout(s->cct, 5) << "ERROR: Insecure request, rgw_crypt_require_ssl is set" << dendl; + ldpp_dout(s, 5) << "ERROR: Insecure request, rgw_crypt_require_ssl is set" << dendl; return -ERR_INVALID_REQUEST; } /* try to retrieve actual key */ @@ -1243,12 +1243,12 @@ int rgw_s3_prepare_decrypt(struct req_state* s, std::string actual_key; res = reconstitute_actual_key_from_kms(s->cct, attrs, actual_key); if (res != 0) { - ldout(s->cct, 10) << "ERROR: failed to 
retrieve actual key from key_id: " << key_id << dendl; + ldpp_dout(s, 10) << "ERROR: failed to retrieve actual key from key_id: " << key_id << dendl; s->err.message = "Failed to retrieve the actual key, kms-keyid: " + key_id; return res; } if (actual_key.size() != AES_256_KEYSIZE) { - ldout(s->cct, 0) << "ERROR: key obtained from key_id:" << + ldpp_dout(s, 0) << "ERROR: key obtained from key_id:" << key_id << " is not 256 bit size" << dendl; s->err.message = "KMS provided an invalid key for the given kms-keyid."; return -ERR_INVALID_ACCESS_KEY; @@ -1269,7 +1269,7 @@ int rgw_s3_prepare_decrypt(struct req_state* s, try { master_encryption_key = from_base64(std::string(s->cct->_conf->rgw_crypt_default_encryption_key)); } catch (...) { - ldout(s->cct, 5) << "ERROR: rgw_s3_prepare_decrypt invalid default encryption key " + ldpp_dout(s, 5) << "ERROR: rgw_s3_prepare_decrypt invalid default encryption key " << "which contains character that is not base64 encoded." << dendl; s->err.message = "The default encryption key is not valid base64."; @@ -1277,12 +1277,12 @@ int rgw_s3_prepare_decrypt(struct req_state* s, } if (master_encryption_key.size() != 256 / 8) { - ldout(s->cct, 0) << "ERROR: failed to decode 'rgw crypt default encryption key' to 256 bit string" << dendl; + ldpp_dout(s, 0) << "ERROR: failed to decode 'rgw crypt default encryption key' to 256 bit string" << dendl; return -EIO; } std::string attr_key_selector = get_str_attribute(attrs, RGW_ATTR_CRYPT_KEYSEL); if (attr_key_selector.size() != AES_256_CBC::AES_256_KEYSIZE) { - ldout(s->cct, 0) << "ERROR: missing or invalid " RGW_ATTR_CRYPT_KEYSEL << dendl; + ldpp_dout(s, 0) << "ERROR: missing or invalid " RGW_ATTR_CRYPT_KEYSEL << dendl; return -EIO; } uint8_t actual_key[AES_256_KEYSIZE]; diff --git a/src/rgw/rgw_crypt.h b/src/rgw/rgw_crypt.h index f397941632e..ff221549d6f 100644 --- a/src/rgw/rgw_crypt.h +++ b/src/rgw/rgw_crypt.h @@ -115,7 +115,7 @@ public: off_t bl_len) override; virtual int flush() override; - 
int read_manifest(bufferlist& manifest_bl); + int read_manifest(const DoutPrefixProvider *dpp, bufferlist& manifest_bl); }; /* RGWGetObj_BlockDecrypt */ diff --git a/src/rgw/rgw_data_sync.cc b/src/rgw/rgw_data_sync.cc index f7905c9cf77..f43fdc96cf4 100644 --- a/src/rgw/rgw_data_sync.cc +++ b/src/rgw/rgw_data_sync.cc @@ -94,7 +94,7 @@ bool RGWReadDataSyncStatusMarkersCR::spawn_next() return false; } using CR = RGWSimpleRadosReadCR; - spawn(new CR(env->async_rados, env->svc->sysobj, + spawn(new CR(env->dpp, env->async_rados, env->svc->sysobj, rgw_raw_obj(env->svc->zone->get_zone_params().log_pool, RGWDataSyncStatusManager::shard_obj_name(sc->source_zone, shard_id)), &markers[shard_id]), false); @@ -149,22 +149,22 @@ public: rgw_data_sync_status *_status) : RGWCoroutine(_sc->cct), sc(_sc), sync_env(sc->env), sync_status(_status) {} - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int RGWReadDataSyncStatusCoroutine::operate() +int RGWReadDataSyncStatusCoroutine::operate(const DoutPrefixProvider *dpp) { reenter(this) { // read sync info using ReadInfoCR = RGWSimpleRadosReadCR; yield { bool empty_on_enoent = false; // fail on ENOENT - call(new ReadInfoCR(sync_env->async_rados, sync_env->svc->sysobj, + call(new ReadInfoCR(dpp, sync_env->async_rados, sync_env->svc->sysobj, rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, RGWDataSyncStatusManager::sync_status_oid(sc->source_zone)), &sync_status->sync_info, empty_on_enoent)); } if (retcode < 0) { - ldout(sync_env->cct, 4) << "failed to read sync status info with " + ldpp_dout(dpp, 4) << "failed to read sync status info with " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); } @@ -173,7 +173,7 @@ int RGWReadDataSyncStatusCoroutine::operate() yield call(new ReadMarkersCR(sc, sync_status->sync_info.num_shards, sync_status->sync_markers)); if (retcode < 0) { - ldout(sync_env->cct, 4) << "failed to read sync status markers with " + ldpp_dout(dpp, 4) << "failed to 
read sync status markers with " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); } @@ -207,7 +207,7 @@ public: } } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { yield { char buf[16]; @@ -223,9 +223,9 @@ public: init_new_io(http_op); - int ret = http_op->aio_read(); + int ret = http_op->aio_read(dpp); if (ret < 0) { - ldout(sync_env->cct, 0) << "ERROR: failed to read from " << p << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to read from " << p << dendl; log_error() << "failed to send http operation: " << http_op->to_str() << " ret=" << ret << std::endl; return set_cr_error(ret); } @@ -288,7 +288,7 @@ public: } } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { yield { char buf[16]; @@ -308,9 +308,9 @@ public: if (sync_env->counters) { timer.emplace(sync_env->counters, sync_counters::l_poll); } - int ret = http_op->aio_read(); + int ret = http_op->aio_read(dpp); if (ret < 0) { - ldout(sync_env->cct, 0) << "ERROR: failed to read from " << p << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to read from " << p << dendl; log_error() << "failed to send http operation: " << http_op->to_str() << " ret=" << ret << std::endl; if (sync_env->counters) { sync_env->counters->inc(sync_counters::l_poll_err); @@ -385,7 +385,7 @@ public: : RGWSimpleCoroutine(sc->cct), sc(sc), sync_env(sc->env), http_op(NULL), shard_id(_shard_id), marker(_marker), max_entries(_max_entries), result(_result) {} - int send_request() override { + int send_request(const DoutPrefixProvider *dpp) override { RGWRESTConn *conn = sc->conn; char buf[32]; @@ -407,9 +407,9 @@ public: http_op = new RGWRESTReadResource(conn, p, pairs, NULL, sync_env->http_manager); init_new_io(http_op); - int ret = http_op->aio_read(); + int ret = http_op->aio_read(dpp); if (ret < 0) { - ldout(sync_env->cct, 0) << "ERROR: failed to read from " << p << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to read from " << p 
<< dendl; log_error() << "failed to send http operation: " << http_op->to_str() << " ret=" << ret << std::endl; http_op->put(); return ret; @@ -422,7 +422,7 @@ public: int ret = http_op->wait(result, null_yield); http_op->put(); if (ret < 0 && ret != -ENOENT) { - ldout(sync_env->cct, 0) << "ERROR: failed to list remote datalog shard, ret=" << ret << dendl; + ldpp_dout(sync_env->dpp, 0) << "ERROR: failed to list remote datalog shard, ret=" << ret << dendl; return ret; } return 0; @@ -502,7 +502,7 @@ public: } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { int ret; reenter(this) { using LockCR = RGWSimpleRadosLockCR; @@ -514,7 +514,7 @@ public: return set_cr_error(retcode); } using WriteInfoCR = RGWSimpleRadosWriteCR; - yield call(new WriteInfoCR(sync_env->async_rados, sync_env->svc->sysobj, + yield call(new WriteInfoCR(dpp, sync_env->async_rados, sync_env->svc->sysobj, rgw_raw_obj{pool, sync_status_oid}, status->sync_info)); if (retcode < 0) { @@ -559,7 +559,7 @@ public: marker.timestamp = info.last_update; const auto& oid = RGWDataSyncStatusManager::shard_obj_name(sc->source_zone, i); using WriteMarkerCR = RGWSimpleRadosWriteCR; - spawn(new WriteMarkerCR(sync_env->async_rados, sync_env->svc->sysobj, + spawn(new WriteMarkerCR(dpp, sync_env->async_rados, sync_env->svc->sysobj, rgw_raw_obj{pool, oid}, marker), true); } } @@ -572,7 +572,7 @@ public: } status->sync_info.state = rgw_data_sync_info::StateBuildingFullSyncMaps; - yield call(new WriteInfoCR(sync_env->async_rados, sync_env->svc->sysobj, + yield call(new WriteInfoCR(dpp, sync_env->async_rados, sync_env->svc->sysobj, rgw_raw_obj{pool, sync_status_oid}, status->sync_info)); if (retcode < 0) { @@ -601,12 +601,12 @@ RGWRemoteDataLog::RGWRemoteDataLog(const DoutPrefixProvider *dpp, { } -int RGWRemoteDataLog::read_log_info(rgw_datalog_info *log_info) +int RGWRemoteDataLog::read_log_info(const DoutPrefixProvider *dpp, rgw_datalog_info *log_info) { rgw_http_param_pair pairs[] = { { 
"type", "data" }, { NULL, NULL } }; - int ret = sc.conn->get_json_resource("/admin/log", pairs, null_yield, *log_info); + int ret = sc.conn->get_json_resource(dpp, "/admin/log", pairs, null_yield, *log_info); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: failed to fetch datalog info" << dendl; return ret; @@ -617,20 +617,20 @@ int RGWRemoteDataLog::read_log_info(rgw_datalog_info *log_info) return 0; } -int RGWRemoteDataLog::read_source_log_shards_info(map *shards_info) +int RGWRemoteDataLog::read_source_log_shards_info(const DoutPrefixProvider *dpp, map *shards_info) { rgw_datalog_info log_info; - int ret = read_log_info(&log_info); + int ret = read_log_info(dpp, &log_info); if (ret < 0) { return ret; } - return run(new RGWReadRemoteDataLogInfoCR(&sc, log_info.num_shards, shards_info)); + return run(dpp, new RGWReadRemoteDataLogInfoCR(&sc, log_info.num_shards, shards_info)); } -int RGWRemoteDataLog::read_source_log_shards_next(map shard_markers, map *result) +int RGWRemoteDataLog::read_source_log_shards_next(const DoutPrefixProvider *dpp, map shard_markers, map *result) { - return run(new RGWListRemoteDataLogCR(&sc, shard_markers, 1, result)); + return run(dpp, new RGWListRemoteDataLogCR(&sc, shard_markers, 1, result)); } int RGWRemoteDataLog::init(const rgw_zone_id& _source_zone, RGWRESTConn *_conn, RGWSyncErrorLogger *_error_logger, @@ -663,7 +663,7 @@ void RGWRemoteDataLog::finish() stop(); } -int RGWRemoteDataLog::read_sync_status(rgw_data_sync_status *sync_status) +int RGWRemoteDataLog::read_sync_status(const DoutPrefixProvider *dpp, rgw_data_sync_status *sync_status) { // cannot run concurrently with run_sync(), so run in a separate manager RGWCoroutinesManager crs(cct, cr_registry); @@ -679,12 +679,12 @@ int RGWRemoteDataLog::read_sync_status(rgw_data_sync_status *sync_status) RGWDataSyncCtx sc_local = sc; sc_local.env = &sync_env_local; - ret = crs.run(new RGWReadDataSyncStatusCoroutine(&sc_local, sync_status)); + ret = crs.run(dpp, new 
RGWReadDataSyncStatusCoroutine(&sc_local, sync_status)); http_manager.stop(); return ret; } -int RGWRemoteDataLog::read_recovering_shards(const int num_shards, set& recovering_shards) +int RGWRemoteDataLog::read_recovering_shards(const DoutPrefixProvider *dpp, const int num_shards, set& recovering_shards) { // cannot run concurrently with run_sync(), so run in a separate manager RGWCoroutinesManager crs(cct, cr_registry); @@ -704,7 +704,7 @@ int RGWRemoteDataLog::read_recovering_shards(const int num_shards, set& rec omapkeys.resize(num_shards); uint64_t max_entries{1}; - ret = crs.run(new RGWReadDataSyncRecoveringShardsCR(&sc_local, max_entries, num_shards, omapkeys)); + ret = crs.run(dpp, new RGWReadDataSyncRecoveringShardsCR(&sc_local, max_entries, num_shards, omapkeys)); http_manager.stop(); if (ret == 0) { @@ -718,7 +718,7 @@ int RGWRemoteDataLog::read_recovering_shards(const int num_shards, set& rec return ret; } -int RGWRemoteDataLog::init_sync_status(int num_shards) +int RGWRemoteDataLog::init_sync_status(const DoutPrefixProvider *dpp, int num_shards) { rgw_data_sync_status sync_status; sync_status.sync_info.num_shards = num_shards; @@ -735,7 +735,7 @@ int RGWRemoteDataLog::init_sync_status(int num_shards) auto instance_id = ceph::util::generate_random_number(); RGWDataSyncCtx sc_local = sc; sc_local.env = &sync_env_local; - ret = crs.run(new RGWInitDataSyncStatusCoroutine(&sc_local, num_shards, instance_id, tn, &sync_status)); + ret = crs.run(dpp, new RGWInitDataSyncStatusCoroutine(&sc_local, num_shards, instance_id, tn, &sync_status)); http_manager.stop(); return ret; } @@ -820,7 +820,7 @@ public: delete entries_index; } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { entries_index = new RGWShardedOmapCRManager(sync_env->async_rados, store, this, num_shards, sync_env->svc->zone->get_zone_params().log_pool, @@ -839,12 +839,12 @@ public: entrypoint, pairs, &result)); } if (retcode < 0) { - 
ldout(sync_env->cct, 0) << "ERROR: failed to fetch metadata for section bucket.instance" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to fetch metadata for section bucket.instance" << dendl; return set_cr_error(retcode); } for (iter = result.keys.begin(); iter != result.keys.end(); ++iter) { - ldout(sync_env->cct, 20) << "list metadata: section=bucket.instance key=" << *iter << dendl; + ldpp_dout(dpp, 20) << "list metadata: section=bucket.instance key=" << *iter << dendl; key = *iter; yield { @@ -879,18 +879,18 @@ public: int shard_id = (int)iter->first; rgw_data_sync_marker& marker = iter->second; marker.total_entries = entries_index->get_total_entries(shard_id); - spawn(new RGWSimpleRadosWriteCR(sync_env->async_rados, sync_env->svc->sysobj, + spawn(new RGWSimpleRadosWriteCR(dpp, sync_env->async_rados, sync_env->svc->sysobj, rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, RGWDataSyncStatusManager::shard_obj_name(sc->source_zone, shard_id)), marker), true); } } else { - yield call(sync_env->error_logger->log_error_cr(sc->conn->get_remote_id(), "data.init", "", + yield call(sync_env->error_logger->log_error_cr(dpp, sc->conn->get_remote_id(), "data.init", "", EIO, string("failed to build bucket instances map"))); } while (collect(&ret, NULL)) { if (ret < 0) { - yield call(sync_env->error_logger->log_error_cr(sc->conn->get_remote_id(), "data.init", "", + yield call(sync_env->error_logger->log_error_cr(dpp, sc->conn->get_remote_id(), "data.init", "", -ret, string("failed to store sync status: ") + cpp_strerror(-ret))); req_ret = ret; } @@ -933,7 +933,7 @@ public: tn->log(20, SSTR("updating marker marker_oid=" << marker_oid << " marker=" << new_marker)); - return new RGWSimpleRadosWriteCR(sync_env->async_rados, sync_env->svc->sysobj, + return new RGWSimpleRadosWriteCR(sync_env->dpp, sync_env->async_rados, sync_env->svc->sysobj, rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, marker_oid), sync_marker); } @@ -1014,7 +1014,7 @@ public: 
SSTR(bucket_shard_str{_sync_pair.dest_bs} << "<-" << bucket_shard_str{_sync_pair.source_bs} ))) { } - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; struct all_bucket_info { @@ -1196,7 +1196,7 @@ public: ~RGWRunBucketsSyncBySourceCR() override { } - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; class RGWRunBucketSourcesSyncCR : public RGWCoroutine { @@ -1240,7 +1240,7 @@ public: const RGWSyncTraceNodeRef& _tn_parent, ceph::real_time* progress); - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; void handle_complete_stack(uint64_t stack_id) { auto iter = shard_progress.find(stack_id); @@ -1291,7 +1291,7 @@ public: tn = sync_env->sync_tracer->add_node(_tn_parent, "entry", obligation.key); } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { if (state->obligation) { // this is already syncing in another DataSyncSingleEntryCR @@ -1350,7 +1350,7 @@ public: if (sync_status < 0) { // write actual sync failures for 'radosgw-admin sync error list' if (sync_status != -EBUSY && sync_status != -EAGAIN) { - yield call(sync_env->error_logger->log_error_cr(sc->conn->get_remote_id(), "data", complete->key, + yield call(sync_env->error_logger->log_error_cr(dpp, sc->conn->get_remote_id(), "data", complete->key, -sync_status, string("failed to sync bucket instance: ") + cpp_strerror(-sync_status))); if (retcode < 0) { tn->log(0, SSTR("ERROR: failed to log sync failure: retcode=" << retcode)); @@ -1493,7 +1493,7 @@ public: modified_shards.insert(keys.begin(), keys.end()); } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { int r; while (true) { switch (sync_marker.state) { @@ -1614,7 +1614,7 @@ public: sync_marker.state = rgw_data_sync_marker::IncrementalSync; sync_marker.marker = sync_marker.next_step_marker; sync_marker.next_step_marker.clear(); - call(new RGWSimpleRadosWriteCR(sync_env->async_rados, 
sync_env->svc->sysobj, + call(new RGWSimpleRadosWriteCR(sync_env->dpp, sync_env->async_rados, sync_env->svc->sysobj, rgw_raw_obj(pool, status_oid), sync_marker)); } @@ -1811,7 +1811,7 @@ public: } RGWCoroutine *alloc_finisher_cr() override { - return new RGWSimpleRadosReadCR(sync_env->async_rados, sync_env->svc->sysobj, + return new RGWSimpleRadosReadCR(sync_env->dpp, sync_env->async_rados, sync_env->svc->sysobj, rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, RGWDataSyncStatusManager::shard_obj_name(sc->source_zone, shard_id)), &sync_marker); } @@ -1858,7 +1858,7 @@ public: } } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { /* read sync status */ @@ -1944,7 +1944,7 @@ public: } RGWCoroutine *set_sync_info_cr() { - return new RGWSimpleRadosWriteCR(sync_env->async_rados, sync_env->svc->sysobj, + return new RGWSimpleRadosWriteCR(sync_env->dpp, sync_env->async_rados, sync_env->svc->sysobj, rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, RGWDataSyncStatusManager::sync_status_oid(sc->source_zone)), sync_status.sync_info); } @@ -2310,7 +2310,7 @@ public: } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { #define MAX_RACE_RETRIES_OBJ_FETCH 10 @@ -2431,7 +2431,7 @@ public: key, dest_key, versioned_epoch, true, std::static_pointer_cast(filter), - zones_trace, sync_env->counters, sync_env->dpp)); + zones_trace, sync_env->counters, dpp)); } if (retcode < 0) { if (*need_retry) { @@ -2601,14 +2601,14 @@ void RGWRemoteDataLog::wakeup(int shard_id, set& keys) { data_sync_cr->wakeup(shard_id, keys); } -int RGWRemoteDataLog::run_sync(int num_shards) +int RGWRemoteDataLog::run_sync(const DoutPrefixProvider *dpp, int num_shards) { lock.lock(); data_sync_cr = new RGWDataSyncControlCR(&sc, num_shards, tn); data_sync_cr->get(); // run() will drop a ref, so take another lock.unlock(); - int r = run(data_sync_cr); + int r = run(dpp, data_sync_cr); lock.lock(); 
data_sync_cr->put(); @@ -2627,7 +2627,7 @@ CephContext *RGWDataSyncStatusManager::get_cct() const return store->ctx(); } -int RGWDataSyncStatusManager::init() +int RGWDataSyncStatusManager::init(const DoutPrefixProvider *dpp) { RGWZone *zone_def; @@ -2663,7 +2663,7 @@ int RGWDataSyncStatusManager::init() } rgw_datalog_info datalog_info; - r = source_log.read_log_info(&datalog_info); + r = source_log.read_log_info(dpp, &datalog_info); if (r < 0) { ldpp_dout(this, 5) << "ERROR: master.read_log_info() returned r=" << r << dendl; finalize(); @@ -2726,7 +2726,7 @@ public: : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env), instance_key(bs.get_key()), info(_info) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { yield { rgw_http_param_pair pairs[] = { { "type" , "bucket-index" }, @@ -2767,7 +2767,7 @@ public: status(_status), objv_tracker(objv_tracker) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { /* fetch current position in logs */ yield call(new RGWReadRemoteBucketIndexLogInfoCR(sc, sync_pair.source_bs, &info)); @@ -2805,7 +2805,7 @@ public: if (write_status) { map attrs; status.encode_all_attrs(attrs); - call(new RGWSimpleRadosWriteAttrsCR(sync_env->async_rados, sync_env->svc->sysobj, obj, attrs, &objv_tracker)); + call(new RGWSimpleRadosWriteAttrsCR(dpp, sync_env->async_rados, sync_env->svc->sysobj, obj, attrs, &objv_tracker)); } else { call(new RGWRadosRemoveCR(store, obj, &objv_tracker)); } @@ -2940,13 +2940,13 @@ public: oid(RGWBucketPipeSyncStatusManager::status_oid(sc->source_zone, sync_pair)), status(_status), objv_tracker(objv_tracker) {} - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int RGWReadBucketPipeSyncStatusCoroutine::operate() +int RGWReadBucketPipeSyncStatusCoroutine::operate(const DoutPrefixProvider *dpp) { reenter(this) { - yield call(new RGWSimpleRadosReadAttrsCR(sync_env->async_rados, 
sync_env->svc->sysobj, + yield call(new RGWSimpleRadosReadAttrsCR(dpp, sync_env->async_rados, sync_env->svc->sysobj, rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, oid), &attrs, true, objv_tracker)); if (retcode == -ENOENT) { @@ -2954,7 +2954,7 @@ int RGWReadBucketPipeSyncStatusCoroutine::operate() return set_cr_done(); } if (retcode < 0) { - ldout(sync_env->cct, 0) << "ERROR: failed to call fetch bucket shard info oid=" << oid << " ret=" << retcode << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to call fetch bucket shard info oid=" << oid << " ret=" << retcode << dendl; return set_cr_error(retcode); } status->decode_from_attrs(sync_env->cct, attrs); @@ -2991,10 +2991,10 @@ public: error_oid = RGWDataSyncStatusManager::shard_obj_name(sc->source_zone, shard_id) + ".retry"; } - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int RGWReadRecoveringBucketShardsCoroutine::operate() +int RGWReadRecoveringBucketShardsCoroutine::operate(const DoutPrefixProvider *dpp) { reenter(this){ //read recovering bucket shards @@ -3009,7 +3009,7 @@ int RGWReadRecoveringBucketShardsCoroutine::operate() } if (retcode < 0) { - ldout(sync_env->cct, 0) << "failed to read recovering bucket shards with " + ldpp_dout(dpp, 0) << "failed to read recovering bucket shards with " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); } @@ -3061,19 +3061,19 @@ public: status_oid = RGWDataSyncStatusManager::shard_obj_name(sc->source_zone, shard_id); } - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int RGWReadPendingBucketShardsCoroutine::operate() +int RGWReadPendingBucketShardsCoroutine::operate(const DoutPrefixProvider *dpp) { reenter(this){ //read sync status marker using CR = RGWSimpleRadosReadCR; - yield call(new CR(sync_env->async_rados, sync_env->svc->sysobj, + yield call(new CR(dpp, sync_env->async_rados, sync_env->svc->sysobj, rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, 
status_oid), sync_marker)); if (retcode < 0) { - ldout(sync_env->cct,0) << "failed to read sync status marker with " + ldpp_dout(dpp, 0) << "failed to read sync status marker with " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); } @@ -3090,7 +3090,7 @@ int RGWReadPendingBucketShardsCoroutine::operate() } if (retcode < 0) { - ldout(sync_env->cct,0) << "failed to read remote data log info with " + ldpp_dout(dpp, 0) << "failed to read remote data log info with " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); } @@ -3111,7 +3111,7 @@ int RGWReadPendingBucketShardsCoroutine::operate() return 0; } -int RGWRemoteDataLog::read_shard_status(int shard_id, set& pending_buckets, set& recovering_buckets, rgw_data_sync_marker *sync_marker, const int max_entries) +int RGWRemoteDataLog::read_shard_status(const DoutPrefixProvider *dpp, int shard_id, set& pending_buckets, set& recovering_buckets, rgw_data_sync_marker *sync_marker, const int max_entries) { // cannot run concurrently with run_sync(), so run in a separate manager RGWCoroutinesManager crs(store->ctx(), store->getRados()->get_cr_registry()); @@ -3132,7 +3132,7 @@ int RGWRemoteDataLog::read_shard_status(int shard_id, set& pending_bucke RGWCoroutinesStack* pending_stack = new RGWCoroutinesStack(store->ctx(), &crs); pending_stack->call(new RGWReadPendingBucketShardsCoroutine(&sc_local, shard_id, pending_buckets, sync_marker, max_entries)); stacks.push_back(pending_stack); - ret = crs.run(stacks); + ret = crs.run(dpp, stacks); http_manager.stop(); return ret; } @@ -3268,7 +3268,7 @@ public: instance_key(bs.get_key()), marker_position(_marker_position), result(_result) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { yield { rgw_http_param_pair pairs[] = { { "rgwx-bucket-instance", instance_key.c_str() }, @@ -3306,7 +3306,7 @@ public: : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env), instance_key(bs.get_key()), marker(_marker), 
result(_result) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { if (sync_env->counters) { timer.emplace(sync_env->counters, sync_counters::l_poll); @@ -3363,7 +3363,7 @@ public: sync_marker.encode_attr(attrs); tn->log(20, SSTR("updating marker marker_oid=" << marker_oid << " marker=" << new_marker)); - return new RGWSimpleRadosWriteAttrsCR(sync_env->async_rados, sync_env->svc->sysobj, + return new RGWSimpleRadosWriteAttrsCR(sync_env->dpp, sync_env->async_rados, sync_env->svc->sysobj, rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, marker_oid), attrs, &objv_tracker); } @@ -3391,11 +3391,11 @@ class RGWWriteBucketShardIncSyncStatus : public RGWCoroutine { sync_marker(sync_marker), stable_timestamp(stable_timestamp), objv_tracker(objv_tracker) {} - int operate() { + int operate(const DoutPrefixProvider *dpp) { reenter(this) { sync_marker.encode_attr(attrs); - yield call(new RGWSimpleRadosWriteAttrsCR(sync_env->async_rados, sync_env->svc->sysobj, + yield call(new RGWSimpleRadosWriteAttrsCR(sync_env->dpp, sync_env->async_rados, sync_env->svc->sysobj, obj, attrs, &objv_tracker)); if (retcode < 0) { return set_cr_error(retcode); @@ -3579,7 +3579,7 @@ public: zones_trace.insert(sync_env->svc->zone->get_zone().id, _sync_pipe.info.dest_bs.get_key()); } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { /* skip entries that are not complete */ if (op_state != CLS_RGW_STATE_COMPLETE) { @@ -3644,7 +3644,7 @@ public: } } if (!error_ss.str().empty()) { - yield call(sync_env->error_logger->log_error_cr(sc->conn->get_remote_id(), "data", error_ss.str(), -retcode, string("failed to sync object") + cpp_strerror(-sync_status))); + yield call(sync_env->error_logger->log_error_cr(dpp, sc->conn->get_remote_id(), "data", error_ss.str(), -retcode, string("failed to sync object") + cpp_strerror(-sync_status))); } done: if (sync_status == 0) { @@ -3751,10 +3751,10 @@ public: 
prefix_handler.set_rules(sync_pipe.get_rules()); } - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int RGWBucketShardFullSyncCR::operate() +int RGWBucketShardFullSyncCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { list_marker = sync_info.full_marker.position; @@ -3841,7 +3841,7 @@ int RGWBucketShardFullSyncCR::operate() sync_info.state = rgw_bucket_shard_sync_info::StateIncrementalSync; map attrs; sync_info.encode_state_attr(attrs); - call(new RGWSimpleRadosWriteAttrsCR(sync_env->async_rados, sync_env->svc->sysobj, + call(new RGWSimpleRadosWriteAttrsCR(dpp, sync_env->async_rados, sync_env->svc->sysobj, rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, status_oid), attrs)); } @@ -3926,10 +3926,10 @@ public: return boost::starts_with(key.name, iter->first); } - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int RGWBucketShardIncrementalSyncCR::operate() +int RGWBucketShardIncrementalSyncCR::operate(const DoutPrefixProvider *dpp) { int ret; reenter(this) { @@ -3954,7 +3954,7 @@ int RGWBucketShardIncrementalSyncCR::operate() for (; entries_iter != entries_end; ++entries_iter) { auto e = *entries_iter; if (e.op == RGWModifyOp::CLS_RGW_OP_SYNCSTOP) { - ldout(sync_env->cct, 20) << "syncstop on " << e.timestamp << dendl; + ldpp_dout(dpp, 20) << "syncstop on " << e.timestamp << dendl; syncstopped = true; entries_end = std::next(entries_iter); // stop after this entry break; @@ -3999,7 +3999,7 @@ int RGWBucketShardIncrementalSyncCR::operate() sync_info.inc_marker.position = cur_id; if (entry->op == RGWModifyOp::CLS_RGW_OP_SYNCSTOP || entry->op == RGWModifyOp::CLS_RGW_OP_RESYNC) { - ldout(sync_env->cct, 20) << "detected syncstop or resync on " << entries_iter->timestamp << ", skipping entry" << dendl; + ldpp_dout(dpp, 20) << "detected syncstop or resync on " << entries_iter->timestamp << ", skipping entry" << dendl; marker_tracker.try_update_high_marker(cur_id, 0, 
entry->timestamp); continue; } @@ -4243,12 +4243,13 @@ class RGWGetBucketPeersCR : public RGWCoroutine { const rgw_bucket& _source_bucket) : sync_env(_sync_env), source_bucket(_source_bucket) {} int operate() override { - int r = sync_env->svc->bucket_sync->get_bucket_sync_hints(source_bucket, + int r = sync_env->svc->bucket_sync->get_bucket_sync_hints(sync_env->dpp, + source_bucket, nullptr, &targets, null_yield); if (r < 0) { - ldout(sync_env->cct, 0) << "ERROR: " << __func__ << "(): failed to fetch bucket sync hints for bucket=" << source_bucket << dendl; + ldpp_dout(sync_env->dpp, 0) << "ERROR: " << __func__ << "(): failed to fetch bucket sync hints for bucket=" << source_bucket << dendl; return r; } @@ -4278,7 +4279,7 @@ public: << ":source_zone=" << source_zone.value_or(rgw_zone_id("*")).id))) { } - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; std::ostream& operator<<(std::ostream& out, std::optional& bs) { @@ -4310,7 +4311,7 @@ RGWRunBucketSourcesSyncCR::RGWRunBucketSourcesSyncCR(RGWDataSyncCtx *_sc, } } -int RGWRunBucketSourcesSyncCR::operate() +int RGWRunBucketSourcesSyncCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { yield call(new RGWGetBucketPeersCR(sync_env, target_bucket, sc->source_zone, source_bucket, &pipes, tn)); @@ -4319,16 +4320,16 @@ int RGWRunBucketSourcesSyncCR::operate() return set_cr_error(retcode); } - ldpp_dout(sync_env->dpp, 20) << __func__ << "(): requested source_bs=" << source_bs << " target_bs=" << target_bs << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): requested source_bs=" << source_bs << " target_bs=" << target_bs << dendl; if (pipes.empty()) { - ldpp_dout(sync_env->dpp, 20) << __func__ << "(): no relevant sync pipes found" << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): no relevant sync pipes found" << dendl; return set_cr_done(); } for (siter = pipes.begin(); siter != pipes.end(); ++siter) { { - ldpp_dout(sync_env->dpp, 20) << __func__ << "(): sync pipe=" << *siter << 
dendl; + ldpp_dout(dpp, 20) << __func__ << "(): sync pipe=" << *siter << dendl; source_num_shards = siter->source.get_bucket_info().layout.current_index.layout.normal.num_shards; target_num_shards = siter->target.get_bucket_info().layout.current_index.layout.normal.num_shards; @@ -4350,7 +4351,7 @@ int RGWRunBucketSourcesSyncCR::operate() } } - ldpp_dout(sync_env->dpp, 20) << __func__ << "(): num shards=" << num_shards << " cur_shard=" << cur_shard << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): num shards=" << num_shards << " cur_shard=" << cur_shard << dendl; for (; num_shards > 0; --num_shards, ++cur_shard) { /* @@ -4364,7 +4365,7 @@ int RGWRunBucketSourcesSyncCR::operate() sync_pair.dest_bs.shard_id = -1; } - ldpp_dout(sync_env->dpp, 20) << __func__ << "(): sync_pair=" << sync_pair << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): sync_pair=" << sync_pair << dendl; cur_progress = (progress ? &shard_progress[prealloc_stack_id()] : nullptr); @@ -4420,20 +4421,20 @@ public: SSTR(bucket))) { } - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int RGWSyncGetBucketInfoCR::operate() +int RGWSyncGetBucketInfoCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { - yield call(new RGWGetBucketInstanceInfoCR(sync_env->async_rados, sync_env->store, bucket, pbucket_info, pattrs, sync_env->dpp)); + yield call(new RGWGetBucketInstanceInfoCR(sync_env->async_rados, sync_env->store, bucket, pbucket_info, pattrs, dpp)); if (retcode == -ENOENT) { /* bucket instance info has not been synced in yet, fetch it now */ yield { tn->log(10, SSTR("no local info for bucket:" << ": fetching metadata")); string raw_key = string("bucket.instance:") + bucket.get_key(); - meta_sync_env.init(sync_env->dpp, cct, sync_env->store, sync_env->svc->zone->get_master_conn(), sync_env->async_rados, + meta_sync_env.init(dpp, cct, sync_env->store, sync_env->svc->zone->get_master_conn(), sync_env->async_rados, sync_env->http_manager, sync_env->error_logger, 
sync_env->sync_tracer); call(new RGWMetaSyncSingleEntryCR(&meta_sync_env, raw_key, @@ -4447,7 +4448,7 @@ int RGWSyncGetBucketInfoCR::operate() return set_cr_error(retcode); } - yield call(new RGWGetBucketInstanceInfoCR(sync_env->async_rados, sync_env->store, bucket, pbucket_info, pattrs, sync_env->dpp)); + yield call(new RGWGetBucketInstanceInfoCR(sync_env->async_rados, sync_env->store, bucket, pbucket_info, pattrs, dpp)); } if (retcode < 0) { tn->log(0, SSTR("ERROR: failed to retrieve bucket info for bucket=" << bucket_str{bucket})); @@ -4538,14 +4539,14 @@ public: get_policy_params.bucket = bucket; } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { for (i = 0; i < 2; ++i) { yield call(new RGWBucketGetSyncPolicyHandlerCR(sync_env->async_rados, sync_env->store, get_policy_params, policy, - sync_env->dpp)); + dpp)); if (retcode < 0 && retcode != -ENOENT) { return set_cr_error(retcode); @@ -4575,7 +4576,7 @@ public: }; -int RGWGetBucketPeersCR::operate() +int RGWGetBucketPeersCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { if (pipes) { @@ -4634,7 +4635,7 @@ int RGWGetBucketPeersCR::operate() for (hiter = get_hint_targets_action->targets.begin(); hiter != get_hint_targets_action->targets.end(); ++hiter) { - ldpp_dout(sync_env->dpp, 20) << "Got sync hint for bucket=" << *source_bucket << ": " << hiter->get_key() << dendl; + ldpp_dout(dpp, 20) << "Got sync hint for bucket=" << *source_bucket << ": " << hiter->get_key() << dendl; target_policy = make_shared(); yield call(new RGWSyncGetBucketSyncPolicyHandlerCR(sync_env, @@ -4672,7 +4673,7 @@ int RGWGetBucketPeersCR::operate() return 0; } -int RGWRunBucketsSyncBySourceCR::operate() +int RGWRunBucketsSyncBySourceCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { return set_cr_done(); @@ -4681,7 +4682,7 @@ int RGWRunBucketsSyncBySourceCR::operate() return 0; } -int RGWRunBucketSyncCoroutine::operate() +int RGWRunBucketSyncCoroutine::operate(const 
DoutPrefixProvider *dpp) { reenter(this) { yield call(new RGWReadBucketPipeSyncStatusCoroutine(sc, sync_pair, &sync_status, &objv_tracker)); @@ -4771,7 +4772,7 @@ RGWCoroutine *RGWRemoteBucketManager::run_sync_cr(int num) return new RGWRunBucketSyncCoroutine(&sc, nullptr, sync_pairs[num], sync_env->sync_tracer->root_node, nullptr); } -int RGWBucketPipeSyncStatusManager::init() +int RGWBucketPipeSyncStatusManager::init(const DoutPrefixProvider *dpp) { int ret = http_manager.start(); if (ret < 0) { @@ -4791,7 +4792,7 @@ int RGWBucketPipeSyncStatusManager::init() rgw_sync_pipe_info_set pipes; - ret = cr_mgr.run(new RGWGetBucketPeersCR(&sync_env, + ret = cr_mgr.run(dpp, new RGWGetBucketPeersCR(&sync_env, dest_bucket, source_zone, source_bucket, @@ -4825,7 +4826,7 @@ int RGWBucketPipeSyncStatusManager::init() return 0; } -int RGWBucketPipeSyncStatusManager::init_sync_status() +int RGWBucketPipeSyncStatusManager::init_sync_status(const DoutPrefixProvider *dpp) { list stacks; // pass an empty objv tracker to each so that the version gets incremented @@ -4842,10 +4843,10 @@ int RGWBucketPipeSyncStatusManager::init_sync_status() stacks.push_back(stack); } - return cr_mgr.run(stacks); + return cr_mgr.run(dpp, stacks); } -int RGWBucketPipeSyncStatusManager::read_sync_status() +int RGWBucketPipeSyncStatusManager::read_sync_status(const DoutPrefixProvider *dpp) { list stacks; @@ -4858,7 +4859,7 @@ int RGWBucketPipeSyncStatusManager::read_sync_status() stacks.push_back(stack); } - int ret = cr_mgr.run(stacks); + int ret = cr_mgr.run(dpp, stacks); if (ret < 0) { ldpp_dout(this, 0) << "ERROR: failed to read sync status for " << bucket_str{dest_bucket} << dendl; @@ -4868,7 +4869,7 @@ int RGWBucketPipeSyncStatusManager::read_sync_status() return 0; } -int RGWBucketPipeSyncStatusManager::run() +int RGWBucketPipeSyncStatusManager::run(const DoutPrefixProvider *dpp) { list stacks; @@ -4881,7 +4882,7 @@ int RGWBucketPipeSyncStatusManager::run() stacks.push_back(stack); } - int ret = 
cr_mgr.run(stacks); + int ret = cr_mgr.run(dpp, stacks); if (ret < 0) { ldpp_dout(this, 0) << "ERROR: failed to read sync status for " << bucket_str{dest_bucket} << dendl; @@ -4925,7 +4926,8 @@ string RGWBucketPipeSyncStatusManager::obj_status_oid(const rgw_bucket_sync_pipe return prefix + ":" + obj->get_name() + ":" + obj->get_instance(); } -int rgw_read_remote_bilog_info(RGWRESTConn* conn, +int rgw_read_remote_bilog_info(const DoutPrefixProvider *dpp, + RGWRESTConn* conn, const rgw_bucket& bucket, BucketIndexShardsManager& markers, optional_yield y) @@ -4938,9 +4940,9 @@ int rgw_read_remote_bilog_info(RGWRESTConn* conn, { nullptr, nullptr } }; rgw_bucket_index_marker_info result; - int r = conn->get_json_resource("/admin/log/", params, y, result); + int r = conn->get_json_resource(dpp, "/admin/log/", params, y, result); if (r < 0) { - lderr(conn->get_ctx()) << "failed to fetch remote log markers: " << cpp_strerror(r) << dendl; + ldpp_dout(dpp, -1) << "failed to fetch remote log markers: " << cpp_strerror(r) << dendl; return r; } r = markers.from_string(result.max_marker, -1); @@ -5052,7 +5054,7 @@ int rgw_bucket_sync_status(const DoutPrefixProvider *dpp, sc.init(&env, nullptr, *pipe.source.zone); RGWCoroutinesManager crs(store->ctx(), store->getRados()->get_cr_registry()); - return crs.run(new RGWCollectBucketSyncStatusCR(store, &sc, + return crs.run(dpp, new RGWCollectBucketSyncStatusCR(store, &sc, *psource_bucket_info, dest_bucket_info, status)); diff --git a/src/rgw/rgw_data_sync.h b/src/rgw/rgw_data_sync.h index 42b97b7b9ce..5dadf04f290 100644 --- a/src/rgw/rgw_data_sync.h +++ b/src/rgw/rgw_data_sync.h @@ -376,14 +376,14 @@ public: PerfCounters* _counters); void finish(); - int read_log_info(rgw_datalog_info *log_info); - int read_source_log_shards_info(map *shards_info); - int read_source_log_shards_next(map shard_markers, map *result); - int read_sync_status(rgw_data_sync_status *sync_status); - int read_recovering_shards(const int num_shards, set& 
recovering_shards); - int read_shard_status(int shard_id, set& lagging_buckets,set& recovering_buckets, rgw_data_sync_marker* sync_marker, const int max_entries); - int init_sync_status(int num_shards); - int run_sync(int num_shards); + int read_log_info(const DoutPrefixProvider *dpp, rgw_datalog_info *log_info); + int read_source_log_shards_info(const DoutPrefixProvider *dpp, map *shards_info); + int read_source_log_shards_next(const DoutPrefixProvider *dpp, map shard_markers, map *result); + int read_sync_status(const DoutPrefixProvider *dpp, rgw_data_sync_status *sync_status); + int read_recovering_shards(const DoutPrefixProvider *dpp, const int num_shards, set& recovering_shards); + int read_shard_status(const DoutPrefixProvider *dpp, int shard_id, set& lagging_buckets,set& recovering_buckets, rgw_data_sync_marker* sync_marker, const int max_entries); + int init_sync_status(const DoutPrefixProvider *dpp, int num_shards); + int run_sync(const DoutPrefixProvider *dpp, int num_shards); void wakeup(int shard_id, set& keys); }; @@ -421,36 +421,36 @@ public: ~RGWDataSyncStatusManager() { finalize(); } - int init(); + int init(const DoutPrefixProvider *dpp); void finalize(); static string shard_obj_name(const rgw_zone_id& source_zone, int shard_id); static string sync_status_oid(const rgw_zone_id& source_zone); - int read_sync_status(rgw_data_sync_status *sync_status) { - return source_log.read_sync_status(sync_status); + int read_sync_status(const DoutPrefixProvider *dpp, rgw_data_sync_status *sync_status) { + return source_log.read_sync_status(dpp, sync_status); } - int read_recovering_shards(const int num_shards, set& recovering_shards) { - return source_log.read_recovering_shards(num_shards, recovering_shards); + int read_recovering_shards(const DoutPrefixProvider *dpp, const int num_shards, set& recovering_shards) { + return source_log.read_recovering_shards(dpp, num_shards, recovering_shards); } - int read_shard_status(int shard_id, set& lagging_buckets, set& 
recovering_buckets, rgw_data_sync_marker *sync_marker, const int max_entries) { - return source_log.read_shard_status(shard_id, lagging_buckets, recovering_buckets,sync_marker, max_entries); + int read_shard_status(const DoutPrefixProvider *dpp, int shard_id, set& lagging_buckets, set& recovering_buckets, rgw_data_sync_marker *sync_marker, const int max_entries) { + return source_log.read_shard_status(dpp, shard_id, lagging_buckets, recovering_buckets,sync_marker, max_entries); } - int init_sync_status() { return source_log.init_sync_status(num_shards); } + int init_sync_status(const DoutPrefixProvider *dpp) { return source_log.init_sync_status(dpp, num_shards); } - int read_log_info(rgw_datalog_info *log_info) { - return source_log.read_log_info(log_info); + int read_log_info(const DoutPrefixProvider *dpp, rgw_datalog_info *log_info) { + return source_log.read_log_info(dpp, log_info); } - int read_source_log_shards_info(map *shards_info) { - return source_log.read_source_log_shards_info(shards_info); + int read_source_log_shards_info(const DoutPrefixProvider *dpp, map *shards_info) { + return source_log.read_source_log_shards_info(dpp, shards_info); } - int read_source_log_shards_next(map shard_markers, map *result) { - return source_log.read_source_log_shards_next(shard_markers, result); + int read_source_log_shards_next(const DoutPrefixProvider *dpp, map shard_markers, map *result) { + return source_log.read_source_log_shards_next(dpp, shard_markers, result); } - int run() { return source_log.run_sync(num_shards); } + int run(const DoutPrefixProvider *dpp) { return source_log.run_sync(dpp, num_shards); } void wakeup(int shard_id, set& keys) { return source_log.wakeup(shard_id, keys); } void stop() { @@ -614,7 +614,8 @@ public: class BucketIndexShardsManager; -int rgw_read_remote_bilog_info(RGWRESTConn* conn, +int rgw_read_remote_bilog_info(const DoutPrefixProvider *dpp, + RGWRESTConn* conn, const rgw_bucket& bucket, BucketIndexShardsManager& markers, 
optional_yield y); @@ -654,10 +655,10 @@ public: const rgw_bucket& dest_bucket); ~RGWBucketPipeSyncStatusManager(); - int init(); + int init(const DoutPrefixProvider *dpp); map& get_sync_status() { return sync_status; } - int init_sync_status(); + int init_sync_status(const DoutPrefixProvider *dpp); static string status_oid(const rgw_zone_id& source_zone, const rgw_bucket_sync_pair_info& bs); static string obj_status_oid(const rgw_bucket_sync_pipe& sync_pipe, @@ -669,8 +670,8 @@ public: unsigned get_subsys() const override; std::ostream& gen_prefix(std::ostream& out) const override; - int read_sync_status(); - int run(); + int read_sync_status(const DoutPrefixProvider *dpp); + int run(const DoutPrefixProvider *dpp); }; /// read the sync status of all bucket shards from the given source zone diff --git a/src/rgw/rgw_datalog.cc b/src/rgw/rgw_datalog.cc index bdfa7e5bace..1acb04d7092 100644 --- a/src/rgw/rgw_datalog.cc +++ b/src/rgw/rgw_datalog.cc @@ -89,6 +89,7 @@ public: } } ~RGWDataChangesOmap() override = default; + void prepare(ceph::real_time ut, const std::string& key, ceph::buffer::list&& entry, entries& out) override { if (!std::holds_alternative(out)) { @@ -100,31 +101,31 @@ public: cls_log_add_prepare_entry(e, utime_t(ut), {}, key, entry); std::get(out).push_back(std::move(e)); } - int push(int index, entries&& items) override { + int push(const DoutPrefixProvider *dpp, int index, entries&& items) override { lr::ObjectWriteOperation op; cls_log_add(op, std::get(items), true); - auto r = rgw_rados_operate(ioctx, oids[index], &op, null_yield); + auto r = rgw_rados_operate(dpp, ioctx, oids[index], &op, null_yield); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": failed to push to " << oids[index] << cpp_strerror(-r) << dendl; } return r; } - int push(int index, ceph::real_time now, + int push(const DoutPrefixProvider *dpp, int index, ceph::real_time now, const std::string& key, ceph::buffer::list&& bl) 
override { lr::ObjectWriteOperation op; cls_log_add(op, utime_t(now), {}, key, bl); - auto r = rgw_rados_operate(ioctx, oids[index], &op, null_yield); + auto r = rgw_rados_operate(dpp, ioctx, oids[index], &op, null_yield); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": failed to push to " << oids[index] << cpp_strerror(-r) << dendl; } return r; } - int list(int index, int max_entries, + int list(const DoutPrefixProvider *dpp, int index, int max_entries, std::vector& entries, std::optional marker, std::string* out_marker, bool* truncated) override { @@ -132,13 +133,13 @@ public: lr::ObjectReadOperation op; cls_log_list(op, {}, {}, std::string(marker.value_or("")), max_entries, log_entries, out_marker, truncated); - auto r = rgw_rados_operate(ioctx, oids[index], &op, nullptr, null_yield); + auto r = rgw_rados_operate(dpp, ioctx, oids[index], &op, nullptr, null_yield); if (r == -ENOENT) { *truncated = false; return 0; } if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": failed to list " << oids[index] << cpp_strerror(-r) << dendl; return r; @@ -152,7 +153,7 @@ public: try { decode(log_entry.entry, liter); } catch (ceph::buffer::error& err) { - lderr(cct) << __PRETTY_FUNCTION__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": failed to decode data changes log entry: " << err.what() << dendl; return -EIO; @@ -161,14 +162,14 @@ public: } return 0; } - int get_info(int index, RGWDataChangesLogInfo *info) override { + int get_info(const DoutPrefixProvider *dpp, int index, RGWDataChangesLogInfo *info) override { cls_log_header header; lr::ObjectReadOperation op; cls_log_info(op, &header); - auto r = rgw_rados_operate(ioctx, oids[index], &op, nullptr, null_yield); + auto r = rgw_rados_operate(dpp, ioctx, oids[index], &op, nullptr, null_yield); if (r == -ENOENT) r = 0; if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": failed to 
get info from " << oids[index] << cpp_strerror(-r) << dendl; } else { @@ -177,26 +178,26 @@ public: } return r; } - int trim(int index, std::string_view marker) override { + int trim(const DoutPrefixProvider *dpp, int index, std::string_view marker) override { lr::ObjectWriteOperation op; cls_log_trim(op, {}, {}, {}, std::string(marker)); - auto r = rgw_rados_operate(ioctx, oids[index], &op, null_yield); + auto r = rgw_rados_operate(dpp, ioctx, oids[index], &op, null_yield); if (r == -ENOENT) r = -ENODATA; if (r < 0 && r != -ENODATA) { - lderr(cct) << __PRETTY_FUNCTION__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": failed to get info from " << oids[index] << cpp_strerror(-r) << dendl; } return r; } - int trim(int index, std::string_view marker, + int trim(const DoutPrefixProvider *dpp, int index, std::string_view marker, lr::AioCompletion* c) override { lr::ObjectWriteOperation op; cls_log_trim(op, {}, {}, {}, std::string(marker)); auto r = ioctx.aio_operate(oids[index], c, &op, 0); if (r == -ENOENT) r = -ENODATA; if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": failed to get info from " << oids[index] << cpp_strerror(-r) << dendl; } @@ -205,19 +206,19 @@ public: std::string_view max_marker() const override { return "99999999"sv; } - int is_empty() override { + int is_empty(const DoutPrefixProvider *dpp) override { for (auto shard = 0u; shard < oids.size(); ++shard) { std::list log_entries; lr::ObjectReadOperation op; std::string out_marker; bool truncated; cls_log_list(op, {}, {}, {}, 1, log_entries, &out_marker, &truncated); - auto r = rgw_rados_operate(ioctx, oids[shard], &op, nullptr, null_yield); + auto r = rgw_rados_operate(dpp, ioctx, oids[shard], &op, nullptr, null_yield); if (r == -ENOENT) { continue; } if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": failed to list " << oids[shard] << cpp_strerror(-r) << dendl; return r; @@ -251,36 +252,36 @@ public: } 
std::get(out).push_back(std::move(entry)); } - int push(int index, entries&& items) override { - auto r = fifos[index].push(std::get(items), null_yield); + int push(const DoutPrefixProvider *dpp, int index, entries&& items) override { + auto r = fifos[index].push(dpp, std::get(items), null_yield); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": unable to push to FIFO: " << get_oid(index) << ": " << cpp_strerror(-r) << dendl; } return r; } - int push(int index, ceph::real_time, + int push(const DoutPrefixProvider *dpp, int index, ceph::real_time, const std::string&, ceph::buffer::list&& bl) override { - auto r = fifos[index].push(std::move(bl), null_yield); + auto r = fifos[index].push(dpp, std::move(bl), null_yield); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": unable to push to FIFO: " << get_oid(index) << ": " << cpp_strerror(-r) << dendl; } return r; } - int list(int index, int max_entries, + int list(const DoutPrefixProvider *dpp, int index, int max_entries, std::vector& entries, std::optional marker, std::string* out_marker, bool* truncated) override { std::vector log_entries; bool more = false; - auto r = fifos[index].list(max_entries, marker, &log_entries, &more, + auto r = fifos[index].list(dpp, max_entries, marker, &log_entries, &more, null_yield); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": unable to list FIFO: " << get_oid(index) << ": " << cpp_strerror(-r) << dendl; return r; @@ -293,7 +294,7 @@ public: try { decode(log_entry.entry, liter); } catch (const buffer::error& err) { - lderr(cct) << __PRETTY_FUNCTION__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": failed to decode data changes log entry: " << err.what() << dendl; return -EIO; @@ -307,17 +308,17 @@ public: } return 0; } - int get_info(int index, RGWDataChangesLogInfo *info) override { + int get_info(const DoutPrefixProvider *dpp, int 
index, RGWDataChangesLogInfo *info) override { auto& fifo = fifos[index]; - auto r = fifo.read_meta(null_yield); + auto r = fifo.read_meta(dpp, null_yield); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": unable to get FIFO metadata: " << get_oid(index) << ": " << cpp_strerror(-r) << dendl; return r; } rados::cls::fifo::info m; - fifo.meta(m, null_yield); + fifo.meta(dpp, m, null_yield); auto p = m.head_part_num; if (p < 0) { info->marker = ""s; @@ -325,9 +326,9 @@ public: return 0; } rgw::cls::fifo::part_info h; - r = fifo.get_part_info(p, &h, null_yield); + r = fifo.get_part_info(dpp, p, &h, null_yield); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": unable to get part info: " << get_oid(index) << "/" << p << ": " << cpp_strerror(-r) << dendl; return r; @@ -336,22 +337,22 @@ public: info->last_update = h.max_time; return 0; } - int trim(int index, std::string_view marker) override { - auto r = fifos[index].trim(marker, false, null_yield); + int trim(const DoutPrefixProvider *dpp, int index, std::string_view marker) override { + auto r = fifos[index].trim(dpp, marker, false, null_yield); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": unable to trim FIFO: " << get_oid(index) << ": " << cpp_strerror(-r) << dendl; } return r; } - int trim(int index, std::string_view marker, + int trim(const DoutPrefixProvider *dpp, int index, std::string_view marker, librados::AioCompletion* c) override { int r = 0; if (marker == rgw::cls::fifo::marker(0, 0).to_string()) { rgw_complete_aio_completion(c, -ENODATA); } else { - fifos[index].trim(marker, false, c, null_yield); + fifos[index].trim(dpp, marker, false, c, null_yield); } return r; } @@ -360,14 +361,14 @@ public: rgw::cls::fifo::marker::max().to_string(); return std::string_view(mm); } - int is_empty() override { + int is_empty(const DoutPrefixProvider *dpp) override { 
std::vector log_entries; bool more = false; for (auto shard = 0u; shard < fifos.size(); ++shard) { - auto r = fifos[shard].list(1, {}, &log_entries, &more, + auto r = fifos[shard].list(dpp, 1, {}, &log_entries, &more, null_yield); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": unable to list FIFO: " << get_oid(shard) << ": " << cpp_strerror(-r) << dendl; return r; @@ -445,7 +446,7 @@ bs::error_code DataLogBackends::handle_empty_to(uint64_t new_tail) noexcept { } -int RGWDataChangesLog::start(const RGWZone* _zone, +int RGWDataChangesLog::start(const DoutPrefixProvider *dpp, const RGWZone* _zone, const RGWZoneParams& zoneparams, librados::Rados* lr) { @@ -456,16 +457,16 @@ int RGWDataChangesLog::start(const RGWZone* _zone, // Should be guaranteed by `set_enum_allowed` ceph_assert(defbacking); auto log_pool = zoneparams.log_pool; - auto r = rgw_init_ioctx(lr, log_pool, ioctx, true, false); + auto r = rgw_init_ioctx(dpp, lr, log_pool, ioctx, true, false); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": Failed to initialized ioctx, r=" << r << ", pool=" << log_pool << dendl; return -r; } auto besr = logback_generations::init( - ioctx, metadata_log_oid(), [this](uint64_t gen_id, int shard) { + dpp, ioctx, metadata_log_oid(), [this](uint64_t gen_id, int shard) { return get_oid(gen_id, shard); }, num_shards, *defbacking, null_yield, *this); @@ -492,7 +493,7 @@ int RGWDataChangesLog::choose_oid(const rgw_bucket_shard& bs) { return static_cast(r); } -int RGWDataChangesLog::renew_entries() +int RGWDataChangesLog::renew_entries(const DoutPrefixProvider *dpp) { if (!zone->log_data) return 0; @@ -528,11 +529,11 @@ int RGWDataChangesLog::renew_entries() auto now = real_clock::now(); - auto ret = be->push(index, std::move(entries)); + auto ret = be->push(dpp, index, std::move(entries)); if (ret < 0) { /* we don't really need to have a special handling for failed cases here, * as 
this is just an optimization. */ - lderr(cct) << "ERROR: svc.cls->timelog.add() returned " << ret << dendl; + ldpp_dout(dpp, -1) << "ERROR: svc.cls->timelog.add() returned " << ret << dendl; return ret; } @@ -677,7 +678,7 @@ int RGWDataChangesLog::add_entry(const DoutPrefixProvider *dpp, const RGWBucketI ldpp_dout(dpp, 20) << "RGWDataChangesLog::add_entry() sending update with now=" << now << " cur_expiration=" << expiration << dendl; auto be = bes->head(); - ret = be->push(index, now, change.key, std::move(bl)); + ret = be->push(dpp, index, now, change.key, std::move(bl)); now = real_clock::now(); @@ -700,7 +701,7 @@ int RGWDataChangesLog::add_entry(const DoutPrefixProvider *dpp, const RGWBucketI return ret; } -int DataLogBackends::list(int shard, int max_entries, +int DataLogBackends::list(const DoutPrefixProvider *dpp, int shard, int max_entries, std::vector& entries, std::optional marker, std::string* out_marker, bool* truncated) @@ -716,7 +717,7 @@ int DataLogBackends::list(int shard, int max_entries, auto be = i->second; l.unlock(); gen_id = be->gen_id; - auto r = be->list(shard, max_entries, gentries, + auto r = be->list(dpp, shard, max_entries, gentries, gen_id == start_id ? 
start_cursor : std::string{}, &out_cursor, truncated); if (r < 0) @@ -740,16 +741,16 @@ int DataLogBackends::list(int shard, int max_entries, return 0; } -int RGWDataChangesLog::list_entries(int shard, int max_entries, +int RGWDataChangesLog::list_entries(const DoutPrefixProvider *dpp, int shard, int max_entries, std::vector& entries, std::optional marker, std::string* out_marker, bool* truncated) { assert(shard < num_shards); - return bes->list(shard, max_entries, entries, marker, out_marker, truncated); + return bes->list(dpp, shard, max_entries, entries, marker, out_marker, truncated); } -int RGWDataChangesLog::list_entries(int max_entries, +int RGWDataChangesLog::list_entries(const DoutPrefixProvider *dpp, int max_entries, std::vector& entries, LogMarker& marker, bool *ptruncated) { @@ -757,7 +758,7 @@ int RGWDataChangesLog::list_entries(int max_entries, entries.clear(); for (; marker.shard < num_shards && int(entries.size()) < max_entries; marker.shard++, marker.marker.reset()) { - int ret = list_entries(marker.shard, max_entries - entries.size(), + int ret = list_entries(dpp, marker.shard, max_entries - entries.size(), entries, marker.marker, NULL, &truncated); if (ret == -ENOENT) { continue; @@ -774,18 +775,18 @@ int RGWDataChangesLog::list_entries(int max_entries, return 0; } -int RGWDataChangesLog::get_info(int shard_id, RGWDataChangesLogInfo *info) +int RGWDataChangesLog::get_info(const DoutPrefixProvider *dpp, int shard_id, RGWDataChangesLogInfo *info) { assert(shard_id < num_shards); auto be = bes->head(); - auto r = be->get_info(shard_id, info); + auto r = be->get_info(dpp, shard_id, info); if (!info->marker.empty()) { info->marker = gencursor(be->gen_id, info->marker); } return r; } -int DataLogBackends::trim_entries(int shard_id, std::string_view marker) +int DataLogBackends::trim_entries(const DoutPrefixProvider *dpp, int shard_id, std::string_view marker) { auto [target_gen, cursor] = cursorgen(marker); std::unique_lock l(m); @@ -798,7 +799,7 @@ 
int DataLogBackends::trim_entries(int shard_id, std::string_view marker) be = upper_bound(be->gen_id)->second) { l.unlock(); auto c = be->gen_id == target_gen ? cursor : be->max_marker(); - r = be->trim(shard_id, c); + r = be->trim(dpp, shard_id, c); if (r == -ENOENT) r = -ENODATA; if (r == -ENODATA && be->gen_id < target_gen) @@ -808,10 +809,10 @@ int DataLogBackends::trim_entries(int shard_id, std::string_view marker) return r; } -int RGWDataChangesLog::trim_entries(int shard_id, std::string_view marker) +int RGWDataChangesLog::trim_entries(const DoutPrefixProvider *dpp, int shard_id, std::string_view marker) { assert(shard_id < num_shards); - return bes->trim_entries(shard_id, marker); + return bes->trim_entries(dpp, shard_id, marker); } class GenTrim : public rgw::cls::fifo::Completion { @@ -824,15 +825,15 @@ public: const uint64_t tail_gen; boost::intrusive_ptr be; - GenTrim(DataLogBackends* bes, int shard_id, uint64_t target_gen, + GenTrim(const DoutPrefixProvider *dpp, DataLogBackends* bes, int shard_id, uint64_t target_gen, std::string cursor, uint64_t head_gen, uint64_t tail_gen, boost::intrusive_ptr be, lr::AioCompletion* super) - : Completion(super), bes(bes), shard_id(shard_id), target_gen(target_gen), + : Completion(dpp, super), bes(bes), shard_id(shard_id), target_gen(target_gen), cursor(std::move(cursor)), head_gen(head_gen), tail_gen(tail_gen), be(std::move(be)) {} - void handle(Ptr&& p, int r) { + void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) { auto gen_id = be->gen_id; be.reset(); if (r == -ENOENT) @@ -855,11 +856,11 @@ public: be = i->second; } auto c = be->gen_id == target_gen ? 
cursor : be->max_marker(); - be->trim(shard_id, c, call(std::move(p))); + be->trim(dpp, shard_id, c, call(std::move(p))); } }; -void DataLogBackends::trim_entries(int shard_id, std::string_view marker, +void DataLogBackends::trim_entries(const DoutPrefixProvider *dpp, int shard_id, std::string_view marker, librados::AioCompletion* c) { auto [target_gen, cursor] = cursorgen(marker); @@ -873,15 +874,15 @@ void DataLogBackends::trim_entries(int shard_id, std::string_view marker, } auto be = begin()->second; l.unlock(); - auto gt = std::make_unique(this, shard_id, target_gen, + auto gt = std::make_unique(dpp, this, shard_id, target_gen, std::string(cursor), head_gen, tail_gen, be, c); auto cc = be->gen_id == target_gen ? cursor : be->max_marker(); - be->trim(shard_id, cc, GenTrim::call(std::move(gt))); + be->trim(dpp, shard_id, cc, GenTrim::call(std::move(gt))); } -int DataLogBackends::trim_generations(std::optional& through) { +int DataLogBackends::trim_generations(const DoutPrefixProvider *dpp, std::optional& through) { if (size() != 1) { std::vector candidates; { @@ -894,7 +895,7 @@ int DataLogBackends::trim_generations(std::optional& through) { std::optional highest; for (auto& be : candidates) { - auto r = be->is_empty(); + auto r = be->is_empty(dpp); if (r < 0) { return r; } else if (r == 1) { @@ -908,21 +909,21 @@ int DataLogBackends::trim_generations(std::optional& through) { if (!highest) { return 0; } - auto ec = empty_to(*highest, null_yield); + auto ec = empty_to(dpp, *highest, null_yield); if (ec) { return ceph::from_error_code(ec); } } - return ceph::from_error_code(remove_empty(null_yield)); + return ceph::from_error_code(remove_empty(dpp, null_yield)); } -int RGWDataChangesLog::trim_entries(int shard_id, std::string_view marker, +int RGWDataChangesLog::trim_entries(const DoutPrefixProvider *dpp, int shard_id, std::string_view marker, librados::AioCompletion* c) { assert(shard_id < num_shards); - bes->trim_entries(shard_id, marker, c); + 
bes->trim_entries(dpp, shard_id, marker, c); return 0; } @@ -943,10 +944,11 @@ void RGWDataChangesLog::renew_run() noexcept { static constexpr auto runs_per_prune = 150; auto run = 0; for (;;) { - dout(2) << "RGWDataChangesLog::ChangesRenewThread: start" << dendl; - int r = renew_entries(); + const DoutPrefix dp(cct, dout_subsys, "rgw data changes log: "); + ldpp_dout(&dp, 2) << "RGWDataChangesLog::ChangesRenewThread: start" << dendl; + int r = renew_entries(&dp); if (r < 0) { - dout(0) << "ERROR: RGWDataChangesLog::renew_entries returned error r=" << r << dendl; + ldpp_dout(&dp, 0) << "ERROR: RGWDataChangesLog::renew_entries returned error r=" << r << dendl; } if (going_down()) @@ -954,16 +956,16 @@ void RGWDataChangesLog::renew_run() noexcept { if (run == runs_per_prune) { std::optional through; - dout(2) << "RGWDataChangesLog::ChangesRenewThread: pruning old generations" << dendl; - trim_generations(through); + ldpp_dout(&dp, 2) << "RGWDataChangesLog::ChangesRenewThread: pruning old generations" << dendl; + trim_generations(&dp, through); if (r < 0) { derr << "RGWDataChangesLog::ChangesRenewThread: failed pruning r=" << r << dendl; } else if (through) { - dout(2) << "RGWDataChangesLog::ChangesRenewThread: pruned generations " + ldpp_dout(&dp, 2) << "RGWDataChangesLog::ChangesRenewThread: pruned generations " << "through " << *through << "." << dendl; } else { - dout(2) << "RGWDataChangesLog::ChangesRenewThread: nothing to prune." + ldpp_dout(&dp, 2) << "RGWDataChangesLog::ChangesRenewThread: nothing to prune." 
<< dendl; } run = 0; @@ -1003,10 +1005,10 @@ std::string RGWDataChangesLog::max_marker() const { "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"); } -int RGWDataChangesLog::change_format(log_type type, optional_yield y) { - return ceph::from_error_code(bes->new_backing(type, y)); +int RGWDataChangesLog::change_format(const DoutPrefixProvider *dpp, log_type type, optional_yield y) { + return ceph::from_error_code(bes->new_backing(dpp, type, y)); } -int RGWDataChangesLog::trim_generations(std::optional& through) { - return bes->trim_generations(through); +int RGWDataChangesLog::trim_generations(const DoutPrefixProvider *dpp, std::optional& through) { + return bes->trim_generations(dpp, through); } diff --git a/src/rgw/rgw_datalog.h b/src/rgw/rgw_datalog.h index 116796f259e..79872bb1571 100644 --- a/src/rgw/rgw_datalog.h +++ b/src/rgw/rgw_datalog.h @@ -146,12 +146,12 @@ public: --i; return i->second; } - int list(int shard, int max_entries, + int list(const DoutPrefixProvider *dpp, int shard, int max_entries, std::vector& entries, std::optional marker, std::string* out_marker, bool* truncated); - int trim_entries(int shard_id, std::string_view marker); - void trim_entries(int shard_id, std::string_view marker, + int trim_entries(const DoutPrefixProvider *dpp, int shard_id, std::string_view marker); + void trim_entries(const DoutPrefixProvider *dpp, int shard_id, std::string_view marker, librados::AioCompletion* c); void set_zero(RGWDataChangesBE* be) { emplace(0, be); @@ -161,7 +161,7 @@ public: bs::error_code handle_new_gens(entries_t e) noexcept override; bs::error_code handle_empty_to(uint64_t new_tail) noexcept override; - int trim_generations(std::optional& through); + int trim_generations(const DoutPrefixProvider *dpp, std::optional& through); }; class RGWDataChangesLog { @@ -218,30 +218,30 @@ class RGWDataChangesLog { int choose_oid(const rgw_bucket_shard& bs); bool going_down() const; bool filter_bucket(const DoutPrefixProvider *dpp, const rgw_bucket& 
bucket, optional_yield y) const; - int renew_entries(); + int renew_entries(const DoutPrefixProvider *dpp); public: RGWDataChangesLog(CephContext* cct); ~RGWDataChangesLog(); - int start(const RGWZone* _zone, const RGWZoneParams& zoneparams, + int start(const DoutPrefixProvider *dpp, const RGWZone* _zone, const RGWZoneParams& zoneparams, librados::Rados* lr); int add_entry(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, int shard_id); int get_log_shard_id(rgw_bucket& bucket, int shard_id); - int list_entries(int shard, int max_entries, + int list_entries(const DoutPrefixProvider *dpp, int shard, int max_entries, std::vector& entries, std::optional marker, std::string* out_marker, bool* truncated); - int trim_entries(int shard_id, std::string_view marker); - int trim_entries(int shard_id, std::string_view marker, + int trim_entries(const DoutPrefixProvider *dpp, int shard_id, std::string_view marker); + int trim_entries(const DoutPrefixProvider *dpp, int shard_id, std::string_view marker, librados::AioCompletion* c); // :( - int get_info(int shard_id, RGWDataChangesLogInfo *info); + int get_info(const DoutPrefixProvider *dpp, int shard_id, RGWDataChangesLogInfo *info); using LogMarker = RGWDataChangesLogMarker; - int list_entries(int max_entries, + int list_entries(const DoutPrefixProvider *dpp, int max_entries, std::vector& entries, LogMarker& marker, bool* ptruncated); @@ -266,8 +266,8 @@ public: std::string get_oid(uint64_t gen_id, int shard_id) const; - int change_format(log_type type, optional_yield y); - int trim_generations(std::optional& through); + int change_format(const DoutPrefixProvider *dpp, log_type type, optional_yield y); + int trim_generations(const DoutPrefixProvider *dpp, std::optional& through); }; class RGWDataChangesBE : public boost::intrusive_ref_counter { @@ -296,21 +296,21 @@ public: const std::string& key, ceph::buffer::list&& entry, entries& out) = 0; - virtual int push(int index, entries&& items) = 0; - virtual int 
push(int index, ceph::real_time now, + virtual int push(const DoutPrefixProvider *dpp, int index, entries&& items) = 0; + virtual int push(const DoutPrefixProvider *dpp, int index, ceph::real_time now, const std::string& key, ceph::buffer::list&& bl) = 0; - virtual int list(int shard, int max_entries, + virtual int list(const DoutPrefixProvider *dpp, int shard, int max_entries, std::vector& entries, std::optional marker, std::string* out_marker, bool* truncated) = 0; - virtual int get_info(int index, RGWDataChangesLogInfo *info) = 0; - virtual int trim(int index, std::string_view marker) = 0; - virtual int trim(int index, std::string_view marker, + virtual int get_info(const DoutPrefixProvider *dpp, int index, RGWDataChangesLogInfo *info) = 0; + virtual int trim(const DoutPrefixProvider *dpp, int index, std::string_view marker) = 0; + virtual int trim(const DoutPrefixProvider *dpp, int index, std::string_view marker, librados::AioCompletion* c) = 0; virtual std::string_view max_marker() const = 0; // 1 on empty, 0 on non-empty, negative on error. 
- virtual int is_empty() = 0; + virtual int is_empty(const DoutPrefixProvider *dpp) = 0; }; diff --git a/src/rgw/rgw_dencoder.cc b/src/rgw/rgw_dencoder.cc index ea68dac22e1..5a5865495d9 100644 --- a/src/rgw/rgw_dencoder.cc +++ b/src/rgw/rgw_dencoder.cc @@ -40,113 +40,6 @@ void RGWObjManifestPart::generate_test_instances(std::list& o.push_back(p); } -void RGWObjManifest::obj_iterator::seek(uint64_t o) -{ - ofs = o; - if (manifest->explicit_objs) { - explicit_iter = manifest->objs.upper_bound(ofs); - if (explicit_iter != manifest->objs.begin()) { - --explicit_iter; - } - if (ofs < manifest->obj_size) { - update_explicit_pos(); - } else { - ofs = manifest->obj_size; - } - update_location(); - return; - } - if (o < manifest->get_head_size()) { - rule_iter = manifest->rules.begin(); - stripe_ofs = 0; - stripe_size = manifest->get_head_size(); - if (rule_iter != manifest->rules.end()) { - cur_part_id = rule_iter->second.start_part_num; - cur_override_prefix = rule_iter->second.override_prefix; - } - update_location(); - return; - } - - rule_iter = manifest->rules.upper_bound(ofs); - next_rule_iter = rule_iter; - if (rule_iter != manifest->rules.begin()) { - --rule_iter; - } - - if (rule_iter == manifest->rules.end()) { - update_location(); - return; - } - - RGWObjManifestRule& rule = rule_iter->second; - - if (rule.part_size > 0) { - cur_part_id = rule.start_part_num + (ofs - rule.start_ofs) / rule.part_size; - } else { - cur_part_id = rule.start_part_num; - } - part_ofs = rule.start_ofs + (cur_part_id - rule.start_part_num) * rule.part_size; - - if (rule.stripe_max_size > 0) { - cur_stripe = (ofs - part_ofs) / rule.stripe_max_size; - - stripe_ofs = part_ofs + cur_stripe * rule.stripe_max_size; - if (!cur_part_id && manifest->get_head_size() > 0) { - cur_stripe++; - } - } else { - cur_stripe = 0; - stripe_ofs = part_ofs; - } - - if (!rule.part_size) { - stripe_size = rule.stripe_max_size; - stripe_size = std::min(manifest->get_obj_size() - stripe_ofs, stripe_size); - } 
else { - uint64_t next = std::min(stripe_ofs + rule.stripe_max_size, part_ofs + rule.part_size); - stripe_size = next - stripe_ofs; - } - - cur_override_prefix = rule.override_prefix; - - update_location(); -} - -void RGWObjManifest::obj_iterator::update_location() -{ - if (manifest->explicit_objs) { - if (manifest->empty()) { - location = rgw_obj_select{}; - } else { - location = explicit_iter->second.loc; - } - return; - } - - if (ofs < manifest->get_head_size()) { - location = manifest->get_obj(); - location.set_placement_rule(manifest->get_head_placement_rule()); - return; - } - - manifest->get_implicit_location(cur_part_id, cur_stripe, ofs, &cur_override_prefix, &location); -} - -void RGWObjManifest::obj_iterator::update_explicit_pos() -{ - ofs = explicit_iter->first; - stripe_ofs = ofs; - - map::iterator next_iter = explicit_iter; - ++next_iter; - if (next_iter != manifest->objs.end()) { - stripe_size = next_iter->first - ofs; - } else { - stripe_size = manifest->obj_size - ofs; - } -} - void RGWObjManifest::generate_test_instances(std::list& o) { RGWObjManifest *m = new RGWObjManifest; @@ -167,58 +60,6 @@ void RGWObjManifest::generate_test_instances(std::list& o) o.push_back(new RGWObjManifest); } -void RGWObjManifest::get_implicit_location(uint64_t cur_part_id, uint64_t cur_stripe, uint64_t ofs, string *override_prefix, rgw_obj_select *location) -{ - rgw_obj loc; - - string& oid = loc.key.name; - string& ns = loc.key.ns; - - if (!override_prefix || override_prefix->empty()) { - oid = prefix; - } else { - oid = *override_prefix; - } - - if (!cur_part_id) { - if (ofs < max_head_size) { - location->set_placement_rule(head_placement_rule); - *location = obj; - return; - } else { - char buf[16]; - snprintf(buf, sizeof(buf), "%d", (int)cur_stripe); - oid += buf; - ns = shadow_ns; - } - } else { - char buf[32]; - if (cur_stripe == 0) { - snprintf(buf, sizeof(buf), ".%d", (int)cur_part_id); - oid += buf; - ns= RGW_OBJ_NS_MULTIPART; - } else { - snprintf(buf, 
sizeof(buf), ".%d_%d", (int)cur_part_id, (int)cur_stripe); - oid += buf; - ns = shadow_ns; - } - } - - if (!tail_placement.bucket.name.empty()) { - loc.bucket = tail_placement.bucket; - } else { - loc.bucket = obj.bucket; - } - - // Always overwrite instance with tail_instance - // to get the right shadow object location - loc.key.set_instance(tail_instance); - - location->set_placement_rule(tail_placement.placement_rule); - *location = loc; -} - - void rgw_log_entry::generate_test_instances(list& o) { diff --git a/src/rgw/rgw_etag_verifier.cc b/src/rgw/rgw_etag_verifier.cc index 285d64cd7a9..6a455e18b23 100644 --- a/src/rgw/rgw_etag_verifier.cc +++ b/src/rgw/rgw_etag_verifier.cc @@ -7,7 +7,8 @@ namespace rgw::putobj { -int create_etag_verifier(CephContext* cct, DataProcessor* filter, +int create_etag_verifier(const DoutPrefixProvider *dpp, + CephContext* cct, DataProcessor* filter, const bufferlist& manifest_bl, const std::optional& compression, etag_verifier_ptr& verifier) @@ -18,14 +19,14 @@ int create_etag_verifier(CephContext* cct, DataProcessor* filter, auto miter = manifest_bl.cbegin(); decode(manifest, miter); } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: couldn't decode manifest" << dendl; + ldpp_dout(dpp, 0) << "ERROR: couldn't decode manifest" << dendl; return -EIO; } RGWObjManifestRule rule; bool found = manifest.get_rule(0, &rule); if (!found) { - lderr(cct) << "ERROR: manifest->get_rule() could not find rule" << dendl; + ldpp_dout(dpp, -1) << "ERROR: manifest->get_rule() could not find rule" << dendl; return -EIO; } @@ -43,11 +44,11 @@ int create_etag_verifier(CephContext* cct, DataProcessor* filter, * MPU part. These part ETags then become the input for the MPU object * Etag. 
*/ - for (auto mi = manifest.obj_begin(); mi != manifest.obj_end(); ++mi) { + for (auto mi = manifest.obj_begin(dpp); mi != manifest.obj_end(dpp); ++mi) { if (cur_part_ofs == mi.get_part_ofs()) continue; cur_part_ofs = mi.get_part_ofs(); - ldout(cct, 20) << "MPU Part offset:" << cur_part_ofs << dendl; + ldpp_dout(dpp, 20) << "MPU Part offset:" << cur_part_ofs << dendl; part_ofs.push_back(cur_part_ofs); } @@ -64,12 +65,12 @@ int create_etag_verifier(CephContext* cct, DataProcessor* filter, }; block = std::lower_bound(block, blocks.end(), ofs, less); if (block == blocks.end() || block->new_ofs != ofs) { - ldout(cct, 4) << "no match for compressed offset " << ofs + ldpp_dout(dpp, 4) << "no match for compressed offset " << ofs << ", disabling etag verification" << dendl; return -EIO; } ofs = block->old_ofs; - ldout(cct, 20) << "MPU Part uncompressed offset:" << ofs << dendl; + ldpp_dout(dpp, 20) << "MPU Part uncompressed offset:" << ofs << dendl; } } diff --git a/src/rgw/rgw_etag_verifier.h b/src/rgw/rgw_etag_verifier.h index dac6ddab5f8..48007cf1699 100644 --- a/src/rgw/rgw_etag_verifier.h +++ b/src/rgw/rgw_etag_verifier.h @@ -75,7 +75,8 @@ constexpr auto max_etag_verifier_size = std::max( ); using etag_verifier_ptr = ceph::static_ptr; -int create_etag_verifier(CephContext* cct, DataProcessor* next, +int create_etag_verifier(const DoutPrefixProvider *dpp, + CephContext* cct, DataProcessor* next, const bufferlist& manifest_bl, const std::optional& compression, etag_verifier_ptr& verifier); diff --git a/src/rgw/rgw_file.cc b/src/rgw/rgw_file.cc index 9f465a8f005..fa3a426f1ab 100644 --- a/src/rgw/rgw_file.cc +++ b/src/rgw/rgw_file.cc @@ -1957,7 +1957,7 @@ namespace rgw { attrbl.append(val.c_str(), val.size() + 1); } - op_ret = rgw_get_request_metadata(state->cct, state->info, attrs); + op_ret = rgw_get_request_metadata(this, state->cct, state->info, attrs); if (op_ret < 0) { goto done; } diff --git a/src/rgw/rgw_gc.cc b/src/rgw/rgw_gc.cc index 5e3070d3fcc..6e907c5dbf9 
100644 --- a/src/rgw/rgw_gc.cc +++ b/src/rgw/rgw_gc.cc @@ -50,7 +50,7 @@ void RGWGC::initialize(CephContext *_cct, RGWRados *_store) { op.create(false); const uint64_t queue_size = cct->_conf->rgw_gc_max_queue_size, num_deferred_entries = cct->_conf->rgw_gc_max_deferred; gc_log_init2(op, queue_size, num_deferred_entries); - store->gc_operate(obj_names[i], &op); + store->gc_operate(this, obj_names[i], &op); } } @@ -76,13 +76,13 @@ int RGWGC::send_chain(cls_rgw_obj_chain& chain, const string& tag) ldpp_dout(this, 20) << "RGWGC::send_chain - on object name: " << obj_names[i] << "tag is: " << tag << dendl; - auto ret = store->gc_operate(obj_names[i], &op); + auto ret = store->gc_operate(this, obj_names[i], &op); if (ret != -ECANCELED && ret != -EPERM) { return ret; } ObjectWriteOperation set_entry_op; cls_rgw_gc_set_entry(set_entry_op, cct->_conf->rgw_gc_obj_min_wait, info); - return store->gc_operate(obj_names[i], &set_entry_op); + return store->gc_operate(this, obj_names[i], &set_entry_op); } struct defer_chain_state { @@ -188,7 +188,7 @@ int RGWGC::remove(int index, int num_entries) ObjectWriteOperation op; cls_rgw_gc_queue_remove_entries(op, num_entries); - return store->gc_operate(obj_names[index], &op); + return store->gc_operate(this, obj_names[index], &op); } int RGWGC::list(int *index, string& marker, uint32_t max, bool expired_only, std::list& result, bool *truncated, bool& processing_queue) @@ -606,7 +606,7 @@ int RGWGC::process(int index, int max_secs, bool expired_only, if (obj.pool != last_pool) { delete ctx; ctx = new IoCtx; - ret = rgw_init_ioctx(store->get_rados_handle(), obj.pool, *ctx); + ret = rgw_init_ioctx(this, store->get_rados_handle(), obj.pool, *ctx); if (ret < 0) { if (transitioned_objects_cache[index]) { goto done; diff --git a/src/rgw/rgw_json_enc.cc b/src/rgw/rgw_json_enc.cc index 7fc619186af..b3ceff68e75 100644 --- a/src/rgw/rgw_json_enc.cc +++ b/src/rgw/rgw_json_enc.cc @@ -143,8 +143,11 @@ void RGWObjManifest::dump(Formatter *f) const 
::encode_json("tail_instance", tail_instance, f); ::encode_json("tail_placement", tail_placement, f); - f->dump_object("begin_iter", begin_iter); - f->dump_object("end_iter", end_iter); + // nullptr being passed into iterators since there + // is no cct and we aren't doing anything with these + // iterators that would write do the log + f->dump_object("begin_iter", obj_begin(nullptr)); + f->dump_object("end_iter", obj_end(nullptr)); } void rgw_log_entry::dump(Formatter *f) const diff --git a/src/rgw/rgw_lc.cc b/src/rgw/rgw_lc.cc index 572e51986d6..df3b10e24d2 100644 --- a/src/rgw/rgw_lc.cc +++ b/src/rgw/rgw_lc.cc @@ -1573,7 +1573,7 @@ int RGWLC::bucket_lc_post(int index, int max_lock_sec, << dendl; do { - int ret = lock->try_lock(lock_duration, null_yield); + int ret = lock->try_lock(this, lock_duration, null_yield); if (ret == -EBUSY || ret == -EEXIST) { /* already locked by another lc processor */ ldpp_dout(this, 0) << "RGWLC::bucket_lc_post() failed to acquire lock on " @@ -1716,7 +1716,7 @@ int RGWLC::process(int index, int max_lock_secs, LCWorker* worker, utime_t time(max_lock_secs, 0); - int ret = lock->try_lock(time, null_yield); + int ret = lock->try_lock(this, time, null_yield); if (ret == -EBUSY || ret == -EEXIST) { /* already locked by another lc processor */ ldpp_dout(this, 0) << "RGWLC::process() failed to acquire lock on " @@ -1949,7 +1949,8 @@ static std::string get_lc_shard_name(const rgw_bucket& bucket){ } template -static int guard_lc_modify(rgw::sal::Store* store, +static int guard_lc_modify(const DoutPrefixProvider *dpp, + rgw::sal::Store* store, rgw::sal::Lifecycle* sal_lc, const rgw_bucket& bucket, const string& cookie, const F& f) { @@ -1974,21 +1975,21 @@ static int guard_lc_modify(rgw::sal::Store* store, int ret; do { - ret = lock->try_lock(time, null_yield); + ret = lock->try_lock(dpp, time, null_yield); if (ret == -EBUSY || ret == -EEXIST) { - ldout(cct, 0) << "RGWLC::RGWPutLC() failed to acquire lock on " + ldpp_dout(dpp, 0) << 
"RGWLC::RGWPutLC() failed to acquire lock on " << oid << ", sleep 5, try again" << dendl; sleep(5); // XXX: return retryable error continue; } if (ret < 0) { - ldout(cct, 0) << "RGWLC::RGWPutLC() failed to acquire lock on " + ldpp_dout(dpp, 0) << "RGWLC::RGWPutLC() failed to acquire lock on " << oid << ", ret=" << ret << dendl; break; } ret = f(sal_lc, oid, entry); if (ret < 0) { - ldout(cct, 0) << "RGWLC::RGWPutLC() failed to set entry on " + ldpp_dout(dpp, 0) << "RGWLC::RGWPutLC() failed to set entry on " << oid << ", ret=" << ret << dendl; } break; @@ -2016,7 +2017,7 @@ int RGWLC::set_bucket_config(rgw::sal::Bucket* bucket, rgw_bucket& b = bucket->get_key(); - ret = guard_lc_modify(store, sal_lc.get(), b, cookie, + ret = guard_lc_modify(this, store, sal_lc.get(), b, cookie, [&](rgw::sal::Lifecycle* sal_lc, const string& oid, const rgw::sal::Lifecycle::LCEntry& entry) { return sal_lc->set_entry(oid, entry); @@ -2035,13 +2036,13 @@ int RGWLC::remove_bucket_config(rgw::sal::Bucket* bucket, rgw_bucket& b = bucket->get_key(); if (ret < 0) { - ldout(cct, 0) << "RGWLC::RGWDeleteLC() failed to set attrs on bucket=" + ldpp_dout(this, 0) << "RGWLC::RGWDeleteLC() failed to set attrs on bucket=" << b.name << " returned err=" << ret << dendl; return ret; } - ret = guard_lc_modify(store, sal_lc.get(), b, cookie, + ret = guard_lc_modify(this, store, sal_lc.get(), b, cookie, [&](rgw::sal::Lifecycle* sal_lc, const string& oid, const rgw::sal::Lifecycle::LCEntry& entry) { return sal_lc->rm_entry(oid, entry); @@ -2058,7 +2059,8 @@ RGWLC::~RGWLC() namespace rgw::lc { -int fix_lc_shard_entry(rgw::sal::Store* store, +int fix_lc_shard_entry(const DoutPrefixProvider *dpp, + rgw::sal::Store* store, rgw::sal::Lifecycle* sal_lc, rgw::sal::Bucket* bucket) { @@ -2079,19 +2081,19 @@ int fix_lc_shard_entry(rgw::sal::Store* store, // We are not dropping the old marker here as that would be caught by the next LC process update int ret = sal_lc->get_entry(lc_oid, shard_name, entry); if (ret == 
0) { - ldout(store->ctx(), 5) << "Entry already exists, nothing to do" << dendl; + ldpp_dout(dpp, 5) << "Entry already exists, nothing to do" << dendl; return ret; // entry is already existing correctly set to marker } - ldout(store->ctx(), 5) << "lc_get_entry errored ret code=" << ret << dendl; + ldpp_dout(dpp, 5) << "lc_get_entry errored ret code=" << ret << dendl; if (ret == -ENOENT) { - ldout(store->ctx(), 1) << "No entry for bucket=" << bucket + ldpp_dout(dpp, 1) << "No entry for bucket=" << bucket << " creating " << dendl; // TODO: we have too many ppl making cookies like this! char cookie_buf[COOKIE_LEN + 1]; gen_rand_alphanumeric(store->ctx(), cookie_buf, sizeof(cookie_buf) - 1); std::string cookie = cookie_buf; - ret = guard_lc_modify( + ret = guard_lc_modify(dpp, store, sal_lc, bucket->get_key(), cookie, [&lc_oid](rgw::sal::Lifecycle* slc, const string& oid, diff --git a/src/rgw/rgw_lc.h b/src/rgw/rgw_lc.h index 4bcac3cd49d..e34e77d2d00 100644 --- a/src/rgw/rgw_lc.h +++ b/src/rgw/rgw_lc.h @@ -546,7 +546,8 @@ public: namespace rgw::lc { -int fix_lc_shard_entry(rgw::sal::Store* store, +int fix_lc_shard_entry(const DoutPrefixProvider *dpp, + rgw::sal::Store* store, rgw::sal::Lifecycle* sal_lc, rgw::sal::Bucket* bucket); diff --git a/src/rgw/rgw_lib.h b/src/rgw/rgw_lib.h index d7c6a632179..836e4f0f6ac 100644 --- a/src/rgw/rgw_lib.h +++ b/src/rgw/rgw_lib.h @@ -18,13 +18,15 @@ #include "services/svc_zone_utils.h" #include "include/ceph_assert.h" +#define dout_subsys ceph_subsys_rgw + class OpsLogSocket; namespace rgw { class RGWLibFrontend; - class RGWLib { + class RGWLib : public DoutPrefixProvider { RGWFrontendConfig* fec; RGWLibFrontend* fe; OpsLogSocket* olog; @@ -44,6 +46,10 @@ namespace rgw { rgw::LDAPHelper* get_ldh() { return ldh; } + CephContext *get_cct() const override { return cct.get(); } + unsigned get_subsys() const { return dout_subsys; } + std::ostream& gen_prefix(std::ostream& out) const { return out << "lib rgw: "; } + int init(); int 
init(vector& args); int stop(); diff --git a/src/rgw/rgw_lib_frontend.h b/src/rgw/rgw_lib_frontend.h index 0f2276f4896..461befd6bad 100644 --- a/src/rgw/rgw_lib_frontend.h +++ b/src/rgw/rgw_lib_frontend.h @@ -66,7 +66,7 @@ namespace rgw { } /* enqueue_req */ /* "regular" requests */ - void handle_request(RGWRequest* req) override; // async handler, deletes req + void handle_request(const DoutPrefixProvider *dpp, RGWRequest* req) override; // async handler, deletes req int process_request(RGWLibRequest* req); int process_request(RGWLibRequest* req, RGWLibIO* io); void set_access_key(RGWAccessKey& key) { access_key = key; } diff --git a/src/rgw/rgw_loadgen.cc b/src/rgw/rgw_loadgen.cc index e8de0f4498e..22c133864b7 100644 --- a/src/rgw/rgw_loadgen.cc +++ b/src/rgw/rgw_loadgen.cc @@ -16,7 +16,7 @@ void RGWLoadGenRequestEnv::set_date(utime_t& tm) date_str = rgw_to_asctime(tm); } -int RGWLoadGenRequestEnv::sign(RGWAccessKey& access_key) +int RGWLoadGenRequestEnv::sign(const DoutPrefixProvider *dpp, RGWAccessKey& access_key) { meta_map_t meta_map; map sub_resources; @@ -24,7 +24,8 @@ int RGWLoadGenRequestEnv::sign(RGWAccessKey& access_key) string canonical_header; string digest; - rgw_create_s3_canonical_header(request_method.c_str(), + rgw_create_s3_canonical_header(dpp, + request_method.c_str(), nullptr, /* const char *content_md5 */ content_type.c_str(), date_str.c_str(), diff --git a/src/rgw/rgw_loadgen.h b/src/rgw/rgw_loadgen.h index 44f434f4878..5a0abca57f7 100644 --- a/src/rgw/rgw_loadgen.h +++ b/src/rgw/rgw_loadgen.h @@ -27,7 +27,7 @@ struct RGWLoadGenRequestEnv { } void set_date(utime_t& tm); - int sign(RGWAccessKey& access_key); + int sign(const DoutPrefixProvider *dpp, RGWAccessKey& access_key); }; /* XXX does RGWLoadGenIO actually want to perform stream/HTTP I/O, diff --git a/src/rgw/rgw_loadgen_process.cc b/src/rgw/rgw_loadgen_process.cc index 2e2da2c613d..a83ae89f0bc 100644 --- a/src/rgw/rgw_loadgen_process.cc +++ b/src/rgw/rgw_loadgen_process.cc @@ -113,7 
+113,7 @@ void RGWLoadGenProcess::gen_request(const string& method, req_wq.queue(req); } /* RGWLoadGenProcess::gen_request */ -void RGWLoadGenProcess::handle_request(RGWRequest* r) +void RGWLoadGenProcess::handle_request(const DoutPrefixProvider *dpp, RGWRequest* r) { RGWLoadGenRequest* req = static_cast(r); @@ -127,7 +127,7 @@ void RGWLoadGenProcess::handle_request(RGWRequest* r) env.request_method = req->method; env.uri = req->resource; env.set_date(tm); - env.sign(access_key); + env.sign(dpp, access_key); RGWLoadGenIO real_client_io(&env); RGWRestfulIO client_io(cct, &real_client_io); diff --git a/src/rgw/rgw_log.cc b/src/rgw/rgw_log.cc index adbbc8358cb..70182f1d79a 100644 --- a/src/rgw/rgw_log.cc +++ b/src/rgw/rgw_log.cc @@ -86,7 +86,7 @@ string render_log_object_name(const string& format, } /* usage logger */ -class UsageLogger { +class UsageLogger : public DoutPrefixProvider { CephContext *cct; rgw::sal::Store* store; map usage_map; @@ -165,8 +165,12 @@ public: num_entries = 0; lock.unlock(); - store->log_usage(old_map); + store->log_usage(this, old_map); } + + CephContext *get_cct() const override { return cct; } + unsigned get_subsys() const override { return dout_subsys; } + std::ostream& gen_prefix(std::ostream& out) const override { return out << "rgw UsageLogger: "; } }; static UsageLogger *usage_logger = NULL; @@ -225,7 +229,7 @@ static void log_usage(struct req_state *s, const string& op_name) if (!s->is_err()) data.successful_ops = 1; - ldout(s->cct, 30) << "log_usage: bucket_name=" << bucket_name + ldpp_dout(s, 30) << "log_usage: bucket_name=" << bucket_name << " tenant=" << s->bucket_tenant << ", bytes_sent=" << bytes_sent << ", bytes_received=" << bytes_received << ", success=" << data.successful_ops << dendl; @@ -341,12 +345,12 @@ int rgw_log_op(rgw::sal::Store* store, RGWREST* const rest, struct req_state *s, return 0; if (s->bucket_name.empty()) { - ldout(s->cct, 5) << "nothing to log for operation" << dendl; + ldpp_dout(s, 5) << "nothing to 
log for operation" << dendl; return -EINVAL; } if (s->err.ret == -ERR_NO_SUCH_BUCKET || rgw::sal::Bucket::empty(s->bucket.get())) { if (!s->cct->_conf->rgw_log_nonexistent_bucket) { - ldout(s->cct, 5) << "bucket " << s->bucket_name << " doesn't exist, not logging" << dendl; + ldpp_dout(s, 5) << "bucket " << s->bucket_name << " doesn't exist, not logging" << dendl; return 0; } bucket_id = ""; @@ -356,7 +360,7 @@ int rgw_log_op(rgw::sal::Store* store, RGWREST* const rest, struct req_state *s, entry.bucket = rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name); if (check_utf8(entry.bucket.c_str(), entry.bucket.size()) != 0) { - ldout(s->cct, 5) << "not logging op on bucket with non-utf8 name" << dendl; + ldpp_dout(s, 5) << "not logging op on bucket with non-utf8 name" << dendl; return 0; } @@ -462,14 +466,14 @@ int rgw_log_op(rgw::sal::Store* store, RGWREST* const rest, struct req_state *s, if (s->cct->_conf->rgw_ops_log_rados) { string oid = render_log_object_name(s->cct->_conf->rgw_log_object_name, &bdt, entry.bucket_id, entry.bucket); - ret = store->log_op(oid, bl); + ret = store->log_op(s, oid, bl); } if (olog) { olog->log(entry); } if (ret < 0) - ldout(s->cct, 0) << "ERROR: failed to log entry" << dendl; + ldpp_dout(s, 0) << "ERROR: failed to log entry" << dendl; return ret; } diff --git a/src/rgw/rgw_log_backing.cc b/src/rgw/rgw_log_backing.cc index 8ed52e8849c..1c20cc8791b 100644 --- a/src/rgw/rgw_log_backing.cc +++ b/src/rgw/rgw_log_backing.cc @@ -31,22 +31,21 @@ inline std::ostream& operator <<(std::ostream& m, const shard_check& t) { namespace { /// Return the shard type, and a bool to see whether it has entries. 
std::pair -probe_shard(librados::IoCtx& ioctx, const std::string& oid, +probe_shard(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx, const std::string& oid, bool& fifo_unsupported, optional_yield y) { - auto cct = static_cast(ioctx.cct()); bool omap = false; { librados::ObjectReadOperation op; cls_log_header header; cls_log_info(op, &header); - auto r = rgw_rados_operate(ioctx, oid, &op, nullptr, y); + auto r = rgw_rados_operate(dpp, ioctx, oid, &op, nullptr, y); if (r == -ENOENT) { return { shard_check::dne, {} }; } if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " error probing for omap: r=" << r << ", oid=" << oid << dendl; return { shard_check::corrupt, {} }; @@ -56,26 +55,26 @@ probe_shard(librados::IoCtx& ioctx, const std::string& oid, } if (!fifo_unsupported) { std::unique_ptr fifo; - auto r = rgw::cls::fifo::FIFO::open(ioctx, oid, + auto r = rgw::cls::fifo::FIFO::open(dpp, ioctx, oid, &fifo, y, std::nullopt, true); if (r < 0 && !(r == -ENOENT || r == -ENODATA || r == -EPERM)) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " error probing for fifo: r=" << r << ", oid=" << oid << dendl; return { shard_check::corrupt, {} }; } if (fifo && omap) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " fifo and omap found: oid=" << oid << dendl; return { shard_check::corrupt, {} }; } if (fifo) { bool more = false; std::vector entries; - r = fifo->list(1, nullopt, &entries, &more, y); + r = fifo->list(dpp, 1, nullopt, &entries, &more, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": unable to list entries: r=" << r << ", oid=" << oid << dendl; return { shard_check::corrupt, {} }; @@ -94,9 +93,9 @@ probe_shard(librados::IoCtx& ioctx, const 
std::string& oid, librados::ObjectReadOperation op; cls_log_list(op, {}, {}, {}, 1, entries, &out_marker, &truncated); - auto r = rgw_rados_operate(ioctx, oid, &op, nullptr, y); + auto r = rgw_rados_operate(dpp, ioctx, oid, &op, nullptr, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": failed to list: r=" << r << ", oid=" << oid << dendl; return { shard_check::corrupt, {} }; } @@ -109,26 +108,25 @@ probe_shard(librados::IoCtx& ioctx, const std::string& oid, } tl::expected -handle_dne(librados::IoCtx& ioctx, +handle_dne(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx, log_type def, std::string oid, bool fifo_unsupported, optional_yield y) { - auto cct = static_cast(ioctx.cct()); if (def == log_type::fifo) { if (fifo_unsupported) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " WARNING: FIFO set as default but not supported by OSD. " << "Falling back to OMAP." 
<< dendl; return log_type::omap; } std::unique_ptr fifo; - auto r = rgw::cls::fifo::FIFO::create(ioctx, oid, + auto r = rgw::cls::fifo::FIFO::create(dpp, ioctx, oid, &fifo, y, std::nullopt); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " error creating FIFO: r=" << r << ", oid=" << oid << dendl; return tl::unexpected(bs::error_code(-r, bs::system_category())); @@ -139,17 +137,17 @@ handle_dne(librados::IoCtx& ioctx, } tl::expected -log_backing_type(librados::IoCtx& ioctx, +log_backing_type(const DoutPrefixProvider *dpp, + librados::IoCtx& ioctx, log_type def, int shards, const fu2::unique_function& get_oid, optional_yield y) { - auto cct = static_cast(ioctx.cct()); auto check = shard_check::dne; bool fifo_unsupported = false; for (int i = 0; i < shards; ++i) { - auto [c, e] = probe_shard(ioctx, get_oid(i), fifo_unsupported, y); + auto [c, e] = probe_shard(dpp, ioctx, get_oid(i), fifo_unsupported, y); if (c == shard_check::corrupt) return tl::unexpected(bs::error_code(EIO, bs::system_category())); if (c == shard_check::dne) continue; @@ -159,20 +157,20 @@ log_backing_type(librados::IoCtx& ioctx, } if (check != c) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " clashing types: check=" << check << ", c=" << c << dendl; return tl::unexpected(bs::error_code(EIO, bs::system_category())); } } if (check == shard_check::corrupt) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " should be unreachable!" << dendl; return tl::unexpected(bs::error_code(EIO, bs::system_category())); } if (check == shard_check::dne) - return handle_dne(ioctx, + return handle_dne(dpp, ioctx, def, get_oid(0), fifo_unsupported, @@ -181,20 +179,20 @@ log_backing_type(librados::IoCtx& ioctx, return (check == shard_check::fifo ? 
log_type::fifo : log_type::omap); } -bs::error_code log_remove(librados::IoCtx& ioctx, +bs::error_code log_remove(const DoutPrefixProvider *dpp, + librados::IoCtx& ioctx, int shards, const fu2::unique_function& get_oid, bool leave_zero, optional_yield y) { bs::error_code ec; - auto cct = static_cast(ioctx.cct()); for (int i = 0; i < shards; ++i) { auto oid = get_oid(i); rados::cls::fifo::info info; uint32_t part_header_size = 0, part_entry_overhead = 0; - auto r = rgw::cls::fifo::get_meta(ioctx, oid, nullopt, &info, + auto r = rgw::cls::fifo::get_meta(dpp, ioctx, oid, nullopt, &info, &part_header_size, &part_entry_overhead, 0, y, true); if (r == -ENOENT) continue; @@ -203,11 +201,11 @@ bs::error_code log_remove(librados::IoCtx& ioctx, librados::ObjectWriteOperation op; op.remove(); auto part_oid = info.part_oid(j); - auto subr = rgw_rados_operate(ioctx, part_oid, &op, null_yield); + auto subr = rgw_rados_operate(dpp, ioctx, part_oid, &op, null_yield); if (subr < 0 && subr != -ENOENT) { if (!ec) ec = bs::error_code(-subr, bs::system_category()); - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": failed removing FIFO part: part_oid=" << part_oid << ", subr=" << subr << dendl; } @@ -216,7 +214,7 @@ bs::error_code log_remove(librados::IoCtx& ioctx, if (r < 0 && r != -ENODATA) { if (!ec) ec = bs::error_code(-r, bs::system_category()); - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": failed checking FIFO part: oid=" << oid << ", r=" << r << dendl; } @@ -231,11 +229,11 @@ bs::error_code log_remove(librados::IoCtx& ioctx, } else { op.remove(); } - r = rgw_rados_operate(ioctx, oid, &op, null_yield); + r = rgw_rados_operate(dpp, ioctx, oid, &op, null_yield); if (r < 0 && r != -ENOENT) { if (!ec) ec = bs::error_code(-r, bs::system_category()); - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << 
__PRETTY_FUNCTION__ << ":" << __LINE__ << ": failed removing shard: oid=" << oid << ", r=" << r << dendl; } @@ -255,13 +253,14 @@ logback_generations::~logback_generations() { } } -bs::error_code logback_generations::setup(log_type def, +bs::error_code logback_generations::setup(const DoutPrefixProvider *dpp, + log_type def, optional_yield y) noexcept { try { - auto cct = static_cast(ioctx.cct()); // First, read. - auto res = read(y); + auto cct = static_cast(ioctx.cct()); + auto res = read(dpp, y); if (!res && res.error() != bs::errc::no_such_file_or_directory) { return res.error(); } @@ -272,7 +271,7 @@ bs::error_code logback_generations::setup(log_type def, // Are we the first? Then create generation 0 and the generations // metadata. librados::ObjectWriteOperation op; - auto type = log_backing_type(ioctx, def, shards, + auto type = log_backing_type(dpp, ioctx, def, shards, [this](int shard) { return this->get_oid(0, shard); }, y); @@ -295,16 +294,16 @@ bs::error_code logback_generations::setup(log_type def, lock.unlock(); op.write_full(bl); - auto r = rgw_rados_operate(ioctx, oid, &op, y); + auto r = rgw_rados_operate(dpp, ioctx, oid, &op, y); if (r < 0 && r != -EEXIST) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": failed writing oid=" << oid << ", r=" << r << dendl; bs::system_error(-r, bs::system_category()); } // Did someone race us? Then re-read. if (r != 0) { - res = read(y); + res = read(dpp, y); if (!res) return res.error(); if (res->first.empty()) @@ -314,7 +313,7 @@ bs::error_code logback_generations::setup(log_type def, // generation zero, incremented, then erased generation zero, // don't leave generation zero lying around. 
if (l.gen_id != 0) { - auto ec = log_remove(ioctx, shards, + auto ec = log_remove(dpp, ioctx, shards, [this](int shard) { return this->get_oid(0, shard); }, true, y); @@ -333,7 +332,7 @@ bs::error_code logback_generations::setup(log_type def, m.unlock(); auto ec = watch(); if (ec) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": failed to re-establish watch, unsafe to continue: oid=" << oid << ", ec=" << ec.message() << dendl; } @@ -343,11 +342,10 @@ bs::error_code logback_generations::setup(log_type def, } } -bs::error_code logback_generations::update(optional_yield y) noexcept +bs::error_code logback_generations::update(const DoutPrefixProvider *dpp, optional_yield y) noexcept { try { - auto cct = static_cast(ioctx.cct()); - auto res = read(y); + auto res = read(dpp, y); if (!res) { return res.error(); } @@ -361,7 +359,7 @@ bs::error_code logback_generations::update(optional_yield y) noexcept // Check consistency and prepare update if (es.empty()) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": INCONSISTENCY! Read empty update." << dendl; return bs::error_code(EFAULT, bs::system_category()); } @@ -370,12 +368,12 @@ bs::error_code logback_generations::update(optional_yield y) noexcept assert(cur_lowest != entries_.cend()); auto new_lowest = lowest_nomempty(es); if (new_lowest == es.cend()) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": INCONSISTENCY! Read update with no active head." << dendl; return bs::error_code(EFAULT, bs::system_category()); } if (new_lowest->first < cur_lowest->first) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": INCONSISTENCY! Tail moved wrong way." 
<< dendl; return bs::error_code(EFAULT, bs::system_category()); } @@ -389,7 +387,7 @@ bs::error_code logback_generations::update(optional_yield y) noexcept entries_t new_entries; if ((es.end() - 1)->first < (entries_.end() - 1)->first) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": INCONSISTENCY! Head moved wrong way." << dendl; return bs::error_code(EFAULT, bs::system_category()); } @@ -420,11 +418,10 @@ bs::error_code logback_generations::update(optional_yield y) noexcept return {}; } -auto logback_generations::read(optional_yield y) noexcept -> +auto logback_generations::read(const DoutPrefixProvider *dpp, optional_yield y) noexcept -> tl::expected, bs::error_code> { try { - auto cct = static_cast(ioctx.cct()); librados::ObjectReadOperation op; std::unique_lock l(m); cls_version_check(op, version, VER_COND_GE); @@ -433,14 +430,14 @@ auto logback_generations::read(optional_yield y) noexcept -> cls_version_read(op, &v2); cb::list bl; op.read(0, 0, &bl, nullptr); - auto r = rgw_rados_operate(ioctx, oid, &op, nullptr, y); + auto r = rgw_rados_operate(dpp, ioctx, oid, &op, nullptr, y); if (r < 0) { if (r == -ENOENT) { - ldout(cct, 5) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, 5) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": oid=" << oid << " not found" << dendl; } else { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": failed reading oid=" << oid << ", r=" << r << dendl; } @@ -459,7 +456,7 @@ auto logback_generations::read(optional_yield y) noexcept -> } } -bs::error_code logback_generations::write(entries_t&& e, +bs::error_code logback_generations::write(const DoutPrefixProvider *dpp, entries_t&& e, std::unique_lock&& l_, optional_yield y) noexcept { @@ -467,14 +464,13 @@ bs::error_code logback_generations::write(entries_t&& e, ceph_assert(l.mutex() == &m && l.owns_lock()); try { - auto cct = 
static_cast(ioctx.cct()); librados::ObjectWriteOperation op; cls_version_check(op, version, VER_COND_GE); cb::list bl; encode(e, bl); op.write_full(bl); cls_version_inc(op); - auto r = rgw_rados_operate(ioctx, oid, &op, y); + auto r = rgw_rados_operate(dpp, ioctx, oid, &op, y); if (r == 0) { entries_ = std::move(e); version.inc(); @@ -482,13 +478,13 @@ bs::error_code logback_generations::write(entries_t&& e, } l.unlock(); if (r < 0 && r != -ECANCELED) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": failed reading oid=" << oid << ", r=" << r << dendl; return { -r, bs::system_category() }; } if (r == -ECANCELED) { - auto ec = update(y); + auto ec = update(dpp, y); if (ec) { return ec; } else { @@ -518,12 +514,12 @@ bs::error_code logback_generations::watch() noexcept { return {}; } -bs::error_code logback_generations::new_backing(log_type type, +bs::error_code logback_generations::new_backing(const DoutPrefixProvider *dpp, + log_type type, optional_yield y) noexcept { - auto cct = static_cast(ioctx.cct()); static constexpr auto max_tries = 10; try { - auto ec = update(y); + auto ec = update(dpp, y); if (ec) return ec; auto tries = 0; entries_t new_entries; @@ -541,27 +537,27 @@ bs::error_code logback_generations::new_backing(log_type type, new_entries.emplace(newgenid, newgen); auto es = entries_; es.emplace(newgenid, std::move(newgen)); - ec = write(std::move(es), std::move(l), y); + ec = write(dpp, std::move(es), std::move(l), y); ++tries; } while (ec == bs::errc::operation_canceled && tries < max_tries); if (tries >= max_tries) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": exhausted retry attempts." 
<< dendl; return ec; } if (ec) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": write failed with ec=" << ec.message() << dendl; return ec; } cb::list bl, rbl; - auto r = rgw_rados_notify(ioctx, oid, bl, 10'000, &rbl, y); + auto r = rgw_rados_notify(dpp, ioctx, oid, bl, 10'000, &rbl, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": notify failed with r=" << r << dendl; return { -r, bs::system_category() }; } @@ -572,12 +568,12 @@ bs::error_code logback_generations::new_backing(log_type type, return {}; } -bs::error_code logback_generations::empty_to(uint64_t gen_id, +bs::error_code logback_generations::empty_to(const DoutPrefixProvider *dpp, + uint64_t gen_id, optional_yield y) noexcept { - auto cct = static_cast(ioctx.cct()); static constexpr auto max_tries = 10; try { - auto ec = update(y); + auto ec = update(dpp, y); if (ec) return ec; auto tries = 0; uint64_t newtail = 0; @@ -586,7 +582,7 @@ bs::error_code logback_generations::empty_to(uint64_t gen_id, { auto last = entries_.end() - 1; if (gen_id >= last->first) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": Attempt to trim beyond the possible." << dendl; return bs::error_code(EINVAL, bs::system_category()); } @@ -601,27 +597,27 @@ bs::error_code logback_generations::empty_to(uint64_t gen_id, newtail = i->first; i->second.pruned = ceph::real_clock::now(); } - ec = write(std::move(es), std::move(l), y); + ec = write(dpp, std::move(es), std::move(l), y); ++tries; } while (ec == bs::errc::operation_canceled && tries < max_tries); if (tries >= max_tries) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": exhausted retry attempts." 
<< dendl; return ec; } if (ec) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": write failed with ec=" << ec.message() << dendl; return ec; } cb::list bl, rbl; - auto r = rgw_rados_notify(ioctx, oid, bl, 10'000, &rbl, y); + auto r = rgw_rados_notify(dpp, ioctx, oid, bl, 10'000, &rbl, y); if (r < 0) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": notify failed with r=" << r << dendl; return { -r, bs::system_category() }; } @@ -632,11 +628,10 @@ bs::error_code logback_generations::empty_to(uint64_t gen_id, return {}; } -bs::error_code logback_generations::remove_empty(optional_yield y) noexcept { - auto cct = static_cast(ioctx.cct()); +bs::error_code logback_generations::remove_empty(const DoutPrefixProvider *dpp, optional_yield y) noexcept { static constexpr auto max_tries = 10; try { - auto ec = update(y); + auto ec = update(dpp, y); if (ec) return ec; auto tries = 0; entries_t new_entries; @@ -664,12 +659,12 @@ bs::error_code logback_generations::remove_empty(optional_yield y) noexcept { auto es2 = entries_; for (const auto& [gen_id, e] : es) { ceph_assert(e.pruned); - auto ec = log_remove(ioctx, shards, + auto ec = log_remove(dpp, ioctx, shards, [this, gen_id=gen_id](int shard) { return this->get_oid(gen_id, shard); }, (gen_id == 0), y); if (ec) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": Error pruning: gen_id=" << gen_id << " ec=" << ec.message() << dendl; } @@ -679,18 +674,18 @@ bs::error_code logback_generations::remove_empty(optional_yield y) noexcept { } l.lock(); es.clear(); - ec = write(std::move(es2), std::move(l), y); + ec = write(dpp, std::move(es2), std::move(l), y); ++tries; } while (ec == bs::errc::operation_canceled && tries < max_tries); if (tries >= max_tries) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << 
__LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": exhausted retry attempts." << dendl; return ec; } if (ec) { - lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ + ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": write failed with ec=" << ec.message() << dendl; return ec; } @@ -706,8 +701,9 @@ void logback_generations::handle_notify(uint64_t notify_id, bufferlist& bl) { auto cct = static_cast(ioctx.cct()); + const DoutPrefix dp(cct, dout_subsys, "logback generations handle_notify: "); if (notifier_id != my_id) { - auto ec = update(null_yield); + auto ec = update(&dp, null_yield); if (ec) { lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ diff --git a/src/rgw/rgw_log_backing.h b/src/rgw/rgw_log_backing.h index 6f755efb463..5b9f1bfd21c 100644 --- a/src/rgw/rgw_log_backing.h +++ b/src/rgw/rgw_log_backing.h @@ -74,7 +74,8 @@ inline std::ostream& operator <<(std::ostream& m, const log_type& t) { /// Look over the shards in a log and determine the type. 
tl::expected -log_backing_type(librados::IoCtx& ioctx, +log_backing_type(const DoutPrefixProvider *dpp, + librados::IoCtx& ioctx, log_type def, int shards, //< Total number of shards /// A function taking a shard number and @@ -147,10 +148,10 @@ private: entries_t entries_; tl::expected, bs::error_code> - read(optional_yield y) noexcept; - bs::error_code write(entries_t&& e, std::unique_lock&& l_, + read(const DoutPrefixProvider *dpp, optional_yield y) noexcept; + bs::error_code write(const DoutPrefixProvider *dpp, entries_t&& e, std::unique_lock&& l_, optional_yield y) noexcept; - bs::error_code setup(log_type def, optional_yield y) noexcept; + bs::error_code setup(const DoutPrefixProvider *dpp, log_type def, optional_yield y) noexcept; bs::error_code watch() noexcept; @@ -178,7 +179,7 @@ public: template static tl::expected, bs::error_code> - init(librados::IoCtx& ioctx_, std::string oid_, + init(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx_, std::string oid_, fu2::unique_function&& get_oid_, int shards_, log_type def, optional_yield y, Args&& ...args) noexcept { @@ -188,7 +189,7 @@ public: shards_, std::forward(args)...); std::unique_ptr lg(lgp); lgp = nullptr; - auto ec = lg->setup(def, y); + auto ec = lg->setup(dpp, def, y); if (ec) return tl::unexpected(ec); // Obnoxiousness for C++ Compiler in Bionic Beaver @@ -198,17 +199,17 @@ public: } } - bs::error_code update(optional_yield y) noexcept; + bs::error_code update(const DoutPrefixProvider *dpp, optional_yield y) noexcept; entries_t entries() const { return entries_; } - bs::error_code new_backing(log_type type, optional_yield y) noexcept; + bs::error_code new_backing(const DoutPrefixProvider *dpp, log_type type, optional_yield y) noexcept; - bs::error_code empty_to(uint64_t gen_id, optional_yield y) noexcept; + bs::error_code empty_to(const DoutPrefixProvider *dpp, uint64_t gen_id, optional_yield y) noexcept; - bs::error_code remove_empty(optional_yield y) noexcept; + bs::error_code 
remove_empty(const DoutPrefixProvider *dpp, optional_yield y) noexcept; // Callbacks, to be defined by descendant. @@ -264,10 +265,10 @@ class LazyFIFO { std::mutex m; std::unique_ptr fifo; - int lazy_init(optional_yield y) { + int lazy_init(const DoutPrefixProvider *dpp, optional_yield y) { std::unique_lock l(m); if (fifo) return 0; - auto r = rgw::cls::fifo::FIFO::create(ioctx, oid, &fifo, y); + auto r = rgw::cls::fifo::FIFO::create(dpp, ioctx, oid, &fifo, y); if (r) { fifo.reset(); } @@ -279,114 +280,120 @@ public: LazyFIFO(librados::IoCtx& ioctx, std::string oid) : ioctx(ioctx), oid(std::move(oid)) {} - int read_meta(optional_yield y) { - auto r = lazy_init(y); + int read_meta(const DoutPrefixProvider *dpp, optional_yield y) { + auto r = lazy_init(dpp, y); if (r < 0) return r; - return fifo->read_meta(y); + return fifo->read_meta(dpp, y); } - int meta(rados::cls::fifo::info& info, optional_yield y) { - auto r = lazy_init(y); + int meta(const DoutPrefixProvider *dpp, rados::cls::fifo::info& info, optional_yield y) { + auto r = lazy_init(dpp, y); if (r < 0) return r; info = fifo->meta(); return 0; } - int get_part_layout_info(std::uint32_t& part_header_size, + int get_part_layout_info(const DoutPrefixProvider *dpp, + std::uint32_t& part_header_size, std::uint32_t& part_entry_overhead, optional_yield y) { - auto r = lazy_init(y); + auto r = lazy_init(dpp, y); if (r < 0) return r; std::tie(part_header_size, part_entry_overhead) = fifo->get_part_layout_info(); return 0; } - int push(const ceph::buffer::list& bl, + int push(const DoutPrefixProvider *dpp, + const ceph::buffer::list& bl, optional_yield y) { - auto r = lazy_init(y); + auto r = lazy_init(dpp, y); if (r < 0) return r; - return fifo->push(bl, y); + return fifo->push(dpp, bl, y); } - int push(ceph::buffer::list& bl, + int push(const DoutPrefixProvider *dpp, + ceph::buffer::list& bl, librados::AioCompletion* c, optional_yield y) { - auto r = lazy_init(y); + auto r = lazy_init(dpp, y); if (r < 0) return r; - 
fifo->push(bl, c); + fifo->push(dpp, bl, c); return 0; } - int push(const std::vector& data_bufs, + int push(const DoutPrefixProvider *dpp, + const std::vector& data_bufs, optional_yield y) { - auto r = lazy_init(y); + auto r = lazy_init(dpp, y); if (r < 0) return r; - return fifo->push(data_bufs, y); + return fifo->push(dpp, data_bufs, y); } - int push(const std::vector& data_bufs, + int push(const DoutPrefixProvider *dpp, + const std::vector& data_bufs, librados::AioCompletion* c, optional_yield y) { - auto r = lazy_init(y); + auto r = lazy_init(dpp, y); if (r < 0) return r; - fifo->push(data_bufs, c); + fifo->push(dpp, data_bufs, c); return 0; } - int list(int max_entries, std::optional markstr, + int list(const DoutPrefixProvider *dpp, + int max_entries, std::optional markstr, std::vector* out, bool* more, optional_yield y) { - auto r = lazy_init(y); + auto r = lazy_init(dpp, y); if (r < 0) return r; - return fifo->list(max_entries, markstr, out, more, y); + return fifo->list(dpp, max_entries, markstr, out, more, y); } - int list(int max_entries, std::optional markstr, + int list(const DoutPrefixProvider *dpp, int max_entries, std::optional markstr, std::vector* out, bool* more, librados::AioCompletion* c, optional_yield y) { - auto r = lazy_init(y); + auto r = lazy_init(dpp, y); if (r < 0) return r; - fifo->list(max_entries, markstr, out, more, c); + fifo->list(dpp, max_entries, markstr, out, more, c); return 0; } - int trim(std::string_view markstr, bool exclusive, optional_yield y) { - auto r = lazy_init(y); + int trim(const DoutPrefixProvider *dpp, std::string_view markstr, bool exclusive, optional_yield y) { + auto r = lazy_init(dpp, y); if (r < 0) return r; - return fifo->trim(markstr, exclusive, y); + return fifo->trim(dpp, markstr, exclusive, y); } - int trim(std::string_view markstr, bool exclusive, librados::AioCompletion* c, + int trim(const DoutPrefixProvider *dpp, std::string_view markstr, bool exclusive, librados::AioCompletion* c, optional_yield 
y) { - auto r = lazy_init(y); + auto r = lazy_init(dpp, y); if (r < 0) return r; - fifo->trim(markstr, exclusive, c); + fifo->trim(dpp, markstr, exclusive, c); return 0; } - int get_part_info(int64_t part_num, rados::cls::fifo::part_header* header, + int get_part_info(const DoutPrefixProvider *dpp, int64_t part_num, rados::cls::fifo::part_header* header, optional_yield y) { - auto r = lazy_init(y); + auto r = lazy_init(dpp, y); if (r < 0) return r; - return fifo->get_part_info(part_num, header, y); + return fifo->get_part_info(dpp, part_num, header, y); } - int get_part_info(int64_t part_num, rados::cls::fifo::part_header* header, + int get_part_info(const DoutPrefixProvider *dpp, int64_t part_num, rados::cls::fifo::part_header* header, librados::AioCompletion* c, optional_yield y) { - auto r = lazy_init(y); + auto r = lazy_init(dpp, y); if (r < 0) return r; fifo->get_part_info(part_num, header, c); return 0; } - int get_head_info(fu2::unique_function< + int get_head_info(const DoutPrefixProvider *dpp, fu2::unique_function< void(int r, rados::cls::fifo::part_header&&)>&& f, librados::AioCompletion* c, optional_yield y) { - auto r = lazy_init(y); + auto r = lazy_init(dpp, y); if (r < 0) return r; - fifo->get_head_info(std::move(f), c); + fifo->get_head_info(dpp, std::move(f), c); return 0; } }; diff --git a/src/rgw/rgw_lua.cc b/src/rgw/rgw_lua.cc index 384ef2787c4..7d5ba3635ed 100644 --- a/src/rgw/rgw_lua.cc +++ b/src/rgw/rgw_lua.cc @@ -90,8 +90,8 @@ const std::string PACKAGE_LIST_OBJECT_NAME = "lua_package_allowlist"; namespace bp = boost::process; -int add_package(rgw::sal::Store* store, optional_yield y, const std::string& package_name, bool allow_compilation) { - // verify that luarocks can load this oackage +int add_package(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y, const std::string& package_name, bool allow_compilation) { + // verify that luarocks can load this package const auto p = bp::search_path("luarocks"); if (p.empty()) { 
return -ECHILD; @@ -123,7 +123,7 @@ int add_package(rgw::sal::Store* store, optional_yield y, const std::string& pac std::map new_package{{package_name, empty_bl}}; librados::ObjectWriteOperation op; op.omap_set(new_package); - ret = rgw_rados_operate(*(static_cast(store)->getRados()->get_lc_pool_ctx()), + ret = rgw_rados_operate(dpp, *(static_cast(store)->getRados()->get_lc_pool_ctx()), PACKAGE_LIST_OBJECT_NAME, &op, y); if (ret < 0) { @@ -132,10 +132,10 @@ int add_package(rgw::sal::Store* store, optional_yield y, const std::string& pac return 0; } -int remove_package(rgw::sal::Store* store, optional_yield y, const std::string& package_name) { +int remove_package(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y, const std::string& package_name) { librados::ObjectWriteOperation op; op.omap_rm_keys(std::set({package_name})); - const auto ret = rgw_rados_operate(*(static_cast(store)->getRados()->get_lc_pool_ctx()), + const auto ret = rgw_rados_operate(dpp, *(static_cast(store)->getRados()->get_lc_pool_ctx()), PACKAGE_LIST_OBJECT_NAME, &op, y); if (ret < 0) { @@ -145,7 +145,7 @@ int remove_package(rgw::sal::Store* store, optional_yield y, const std::string& return 0; } -int list_packages(rgw::sal::Store* store, optional_yield y, packages_t& packages) { +int list_packages(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y, packages_t& packages) { constexpr auto max_chunk = 1024U; std::string start_after; bool more = true; @@ -154,7 +154,7 @@ int list_packages(rgw::sal::Store* store, optional_yield y, packages_t& packages librados::ObjectReadOperation op; packages_t packages_chunk; op.omap_get_keys2(start_after, max_chunk, &packages_chunk, &more, &rval); - const auto ret = rgw_rados_operate(*(static_cast(store)->getRados()->get_lc_pool_ctx()), + const auto ret = rgw_rados_operate(dpp, *(static_cast(store)->getRados()->get_lc_pool_ctx()), PACKAGE_LIST_OBJECT_NAME, &op, nullptr, y); if (ret < 0) { @@ -167,7 +167,7 @@ int 
list_packages(rgw::sal::Store* store, optional_yield y, packages_t& packages return 0; } -int install_packages(rgw::sal::Store* store, optional_yield y, packages_t& failed_packages, std::string& output) { +int install_packages(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y, packages_t& failed_packages, std::string& output) { // luarocks directory cleanup boost::system::error_code ec; const auto& luarocks_path = store->get_luarocks_path(); @@ -180,7 +180,7 @@ int install_packages(rgw::sal::Store* store, optional_yield y, packages_t& faile } packages_t packages; - auto ret = list_packages(store, y, packages); + auto ret = list_packages(dpp, store, y, packages); if (ret == -ENOENT) { // allowlist is empty return 0; diff --git a/src/rgw/rgw_lua.h b/src/rgw/rgw_lua.h index 9af85d7685f..4aa4bb7e947 100644 --- a/src/rgw/rgw_lua.h +++ b/src/rgw/rgw_lua.h @@ -40,17 +40,17 @@ int delete_script(const DoutPrefixProvider *dpp, rgw::sal::Store* store, const s using packages_t = std::set; // add a lua package to the allowlist -int add_package(rgw::sal::Store* store, optional_yield y, const std::string& package_name, bool allow_compilation); +int add_package(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y, const std::string& package_name, bool allow_compilation); // remove a lua package from the allowlist -int remove_package(rgw::sal::Store* store, optional_yield y, const std::string& package_name); +int remove_package(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y, const std::string& package_name); // list lua packages in the allowlist -int list_packages(rgw::sal::Store* store, optional_yield y, packages_t& packages); +int list_packages(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y, packages_t& packages); // install all packages from the allowlist // return the list of packages that failed to install and the output of the install command -int install_packages(rgw::sal::Store* 
store, optional_yield y, packages_t& failed_packages, std::string& output); +int install_packages(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y, packages_t& failed_packages, std::string& output); #endif } diff --git a/src/rgw/rgw_lua_request.cc b/src/rgw/rgw_lua_request.cc index e01fb33b730..5a367250278 100644 --- a/src/rgw/rgw_lua_request.cc +++ b/src/rgw/rgw_lua_request.cc @@ -33,7 +33,7 @@ int RequestLog(lua_State* L) const auto rc = rgw_log_op(store, rest, s, op_name, olog); lua_pushinteger(L, rc); } else { - ldout(s->cct, 1) << "Lua ERROR: missing rados store, cannot use ops log" << dendl; + ldpp_dout(s, 1) << "Lua ERROR: missing rados store, cannot use ops log" << dendl; lua_pushinteger(L, -EINVAL); } @@ -805,11 +805,11 @@ int execute( // execute the lua script if (luaL_dostring(L, script.c_str()) != LUA_OK) { const std::string err(lua_tostring(L, -1)); - ldout(s->cct, 1) << "Lua ERROR: " << err << dendl; + ldpp_dout(s, 1) << "Lua ERROR: " << err << dendl; return -1; } } catch (const std::runtime_error& e) { - ldout(s->cct, 1) << "Lua ERROR: " << e.what() << dendl; + ldpp_dout(s, 1) << "Lua ERROR: " << e.what() << dendl; return -1; } diff --git a/src/rgw/rgw_main.cc b/src/rgw/rgw_main.cc index e947f5541b3..25377848dd0 100644 --- a/src/rgw/rgw_main.cc +++ b/src/rgw/rgw_main.cc @@ -422,7 +422,7 @@ int radosgw_Main(int argc, const char **argv) #ifdef WITH_RADOSGW_LUA_PACKAGES rgw::lua::packages_t failed_packages; std::string output; - r = rgw::lua::install_packages(store, null_yield, failed_packages, output); + r = rgw::lua::install_packages(&dp, store, null_yield, failed_packages, output); if (r < 0) { dout(1) << "ERROR: failed to install lua packages from allowlist" << dendl; } @@ -632,12 +632,12 @@ int radosgw_Main(int argc, const char **argv) // add a watcher to respond to realm configuration changes - RGWPeriodPusher pusher(store, null_yield); + RGWPeriodPusher pusher(&dp, store, null_yield); RGWFrontendPauser pauser(fes, 
implicit_tenant_context, &pusher); auto reloader = std::make_unique(store, service_map_meta, &pauser); - RGWRealmWatcher realm_watcher(g_ceph_context, store->get_zone()->get_realm()); + RGWRealmWatcher realm_watcher(&dp, g_ceph_context, store->get_zone()->get_realm()); realm_watcher.add_watcher(RGWRealmNotify::Reload, *reloader); realm_watcher.add_watcher(RGWRealmNotify::ZonesNeedPeriod, pusher); diff --git a/src/rgw/rgw_mdlog.h b/src/rgw/rgw_mdlog.h index 53f5df762b9..ae16cc5e673 100644 --- a/src/rgw/rgw_mdlog.h +++ b/src/rgw/rgw_mdlog.h @@ -102,9 +102,9 @@ public: oid = prefix + buf; } - int add_entry(const string& hash_key, const string& section, const string& key, bufferlist& bl); + int add_entry(const DoutPrefixProvider *dpp, const string& hash_key, const string& section, const string& key, bufferlist& bl); int get_shard_id(const string& hash_key, int *shard_id); - int store_entries_in_shard(list& entries, int shard_id, librados::AioCompletion *completion); + int store_entries_in_shard(const DoutPrefixProvider *dpp, list& entries, int shard_id, librados::AioCompletion *completion); struct LogListCtx { int cur_shard; @@ -123,17 +123,18 @@ public: const real_time& end_time, const string& marker, void **handle); void complete_list_entries(void *handle); - int list_entries(void *handle, + int list_entries(const DoutPrefixProvider *dpp, + void *handle, int max_entries, list& entries, string *out_marker, bool *truncated); - int trim(int shard_id, const real_time& from_time, const real_time& end_time, const string& start_marker, const string& end_marker); - int get_info(int shard_id, RGWMetadataLogInfo *info); - int get_info_async(int shard_id, RGWMetadataLogInfoCompletion *completion); - int lock_exclusive(int shard_id, timespan duration, string&zone_id, string& owner_id); - int unlock(int shard_id, string& zone_id, string& owner_id); + int trim(const DoutPrefixProvider *dpp, int shard_id, const real_time& from_time, const real_time& end_time, const string& 
start_marker, const string& end_marker); + int get_info(const DoutPrefixProvider *dpp, int shard_id, RGWMetadataLogInfo *info); + int get_info_async(const DoutPrefixProvider *dpp, int shard_id, RGWMetadataLogInfoCompletion *completion); + int lock_exclusive(const DoutPrefixProvider *dpp, int shard_id, timespan duration, string&zone_id, string& owner_id); + int unlock(const DoutPrefixProvider *dpp, int shard_id, string& zone_id, string& owner_id); int update_shards(list& shards); diff --git a/src/rgw/rgw_metadata.cc b/src/rgw/rgw_metadata.cc index 5de78f52a4e..726802e4a42 100644 --- a/src/rgw/rgw_metadata.cc +++ b/src/rgw/rgw_metadata.cc @@ -104,7 +104,7 @@ void RGWMetadataLogData::decode_json(JSONObj *obj) { } -int RGWMetadataLog::add_entry(const string& hash_key, const string& section, const string& key, bufferlist& bl) { +int RGWMetadataLog::add_entry(const DoutPrefixProvider *dpp, const string& hash_key, const string& section, const string& key, bufferlist& bl) { if (!svc.zone->need_to_log_metadata()) return 0; @@ -114,7 +114,7 @@ int RGWMetadataLog::add_entry(const string& hash_key, const string& section, con rgw_shard_name(prefix, cct->_conf->rgw_md_log_max_shards, hash_key, oid, &shard_id); mark_modified(shard_id); real_time now = real_clock::now(); - return svc.cls->timelog.add(oid, now, section, key, bl, null_yield); + return svc.cls->timelog.add(dpp, oid, now, section, key, bl, null_yield); } int RGWMetadataLog::get_shard_id(const string& hash_key, int *shard_id) @@ -125,13 +125,13 @@ int RGWMetadataLog::get_shard_id(const string& hash_key, int *shard_id) return 0; } -int RGWMetadataLog::store_entries_in_shard(list& entries, int shard_id, librados::AioCompletion *completion) +int RGWMetadataLog::store_entries_in_shard(const DoutPrefixProvider *dpp, list& entries, int shard_id, librados::AioCompletion *completion) { string oid; mark_modified(shard_id); rgw_shard_name(prefix, shard_id, oid); - return svc.cls->timelog.add(oid, entries, completion, false, 
null_yield); + return svc.cls->timelog.add(dpp, oid, entries, completion, false, null_yield); } void RGWMetadataLog::init_list_entries(int shard_id, const real_time& from_time, const real_time& end_time, @@ -154,7 +154,7 @@ void RGWMetadataLog::complete_list_entries(void *handle) { delete ctx; } -int RGWMetadataLog::list_entries(void *handle, +int RGWMetadataLog::list_entries(const DoutPrefixProvider *dpp, void *handle, int max_entries, list& entries, string *last_marker, @@ -167,7 +167,7 @@ int RGWMetadataLog::list_entries(void *handle, } std::string next_marker; - int ret = svc.cls->timelog.list(ctx->cur_oid, ctx->from_time, ctx->end_time, + int ret = svc.cls->timelog.list(dpp, ctx->cur_oid, ctx->from_time, ctx->end_time, max_entries, entries, ctx->marker, &next_marker, truncated, null_yield); if ((ret < 0) && (ret != -ENOENT)) @@ -184,14 +184,14 @@ int RGWMetadataLog::list_entries(void *handle, return 0; } -int RGWMetadataLog::get_info(int shard_id, RGWMetadataLogInfo *info) +int RGWMetadataLog::get_info(const DoutPrefixProvider *dpp, int shard_id, RGWMetadataLogInfo *info) { string oid; get_shard_oid(shard_id, oid); cls_log_header header; - int ret = svc.cls->timelog.info(oid, &header, null_yield); + int ret = svc.cls->timelog.info(dpp, oid, &header, null_yield); if ((ret < 0) && (ret != -ENOENT)) return ret; @@ -220,40 +220,40 @@ RGWMetadataLogInfoCompletion::~RGWMetadataLogInfoCompletion() completion->release(); } -int RGWMetadataLog::get_info_async(int shard_id, RGWMetadataLogInfoCompletion *completion) +int RGWMetadataLog::get_info_async(const DoutPrefixProvider *dpp, int shard_id, RGWMetadataLogInfoCompletion *completion) { string oid; get_shard_oid(shard_id, oid); completion->get(); // hold a ref until the completion fires - return svc.cls->timelog.info_async(completion->get_io_obj(), oid, + return svc.cls->timelog.info_async(dpp, completion->get_io_obj(), oid, &completion->get_header(), completion->get_completion()); } -int RGWMetadataLog::trim(int 
shard_id, const real_time& from_time, const real_time& end_time, +int RGWMetadataLog::trim(const DoutPrefixProvider *dpp, int shard_id, const real_time& from_time, const real_time& end_time, const string& start_marker, const string& end_marker) { string oid; get_shard_oid(shard_id, oid); - return svc.cls->timelog.trim(oid, from_time, end_time, start_marker, + return svc.cls->timelog.trim(dpp, oid, from_time, end_time, start_marker, end_marker, nullptr, null_yield); } -int RGWMetadataLog::lock_exclusive(int shard_id, timespan duration, string& zone_id, string& owner_id) { +int RGWMetadataLog::lock_exclusive(const DoutPrefixProvider *dpp, int shard_id, timespan duration, string& zone_id, string& owner_id) { string oid; get_shard_oid(shard_id, oid); - return svc.cls->lock.lock_exclusive(svc.zone->get_zone_params().log_pool, oid, duration, zone_id, owner_id); + return svc.cls->lock.lock_exclusive(dpp, svc.zone->get_zone_params().log_pool, oid, duration, zone_id, owner_id); } -int RGWMetadataLog::unlock(int shard_id, string& zone_id, string& owner_id) { +int RGWMetadataLog::unlock(const DoutPrefixProvider *dpp, int shard_id, string& zone_id, string& owner_id) { string oid; get_shard_oid(shard_id, oid); - return svc.cls->lock.unlock(svc.zone->get_zone_params().log_pool, oid, zone_id, owner_id); + return svc.cls->lock.unlock(dpp, svc.zone->get_zone_params().log_pool, oid, zone_id, owner_id); } void RGWMetadataLog::mark_modified(int shard_id) @@ -329,7 +329,7 @@ public: return -ENOTSUP; } - int list_keys_init(const string& marker, void **phandle) override { + int list_keys_init(const DoutPrefixProvider *dpp, const string& marker, void **phandle) override { iter_data *data = new iter_data; list sections; mgr->get_sections(sections); @@ -435,7 +435,7 @@ int RGWMetadataHandlerPut_SObj::put_pre(const DoutPrefixProvider *dpp) int RGWMetadataHandlerPut_SObj::put(const DoutPrefixProvider *dpp) { - int ret = put_check(); + int ret = put_check(dpp); if (ret != 0) { return ret; } @@ 
-525,11 +525,11 @@ int RGWMetadataHandler_GenericMetaBE::get_shard_id(const string& entry, int *sha }); } -int RGWMetadataHandler_GenericMetaBE::list_keys_init(const string& marker, void **phandle) +int RGWMetadataHandler_GenericMetaBE::list_keys_init(const DoutPrefixProvider *dpp, const string& marker, void **phandle) { auto op = std::make_unique(be_handler); - int ret = op->list_init(marker); + int ret = op->list_init(dpp, marker); if (ret < 0) { return ret; } @@ -771,12 +771,12 @@ struct list_keys_handle { RGWMetadataHandler *handler; }; -int RGWMetadataManager::list_keys_init(const string& section, void **handle) +int RGWMetadataManager::list_keys_init(const DoutPrefixProvider *dpp, const string& section, void **handle) { - return list_keys_init(section, string(), handle); + return list_keys_init(dpp, section, string(), handle); } -int RGWMetadataManager::list_keys_init(const string& section, +int RGWMetadataManager::list_keys_init(const DoutPrefixProvider *dpp, const string& section, const string& marker, void **handle) { string entry; @@ -791,7 +791,7 @@ int RGWMetadataManager::list_keys_init(const string& section, list_keys_handle *h = new list_keys_handle; h->handler = handler; - ret = handler->list_keys_init(marker, &h->handle); + ret = handler->list_keys_init(dpp, marker, &h->handle); if (ret < 0) { delete h; return ret; diff --git a/src/rgw/rgw_metadata.h b/src/rgw/rgw_metadata.h index e5cb101a0cc..c9adeee4d2e 100644 --- a/src/rgw/rgw_metadata.h +++ b/src/rgw/rgw_metadata.h @@ -87,7 +87,7 @@ public: RGWMDLogStatus op_type, std::function f) = 0; - virtual int list_keys_init(const string& marker, void **phandle) = 0; + virtual int list_keys_init(const DoutPrefixProvider *dpp, const string& marker, void **phandle) = 0; virtual int list_keys_next(void *handle, int max, list& keys, bool *truncated) = 0; virtual void list_keys_complete(void *handle) = 0; @@ -182,7 +182,7 @@ public: int get_shard_id(const string& entry, int *shard_id) override; - int 
list_keys_init(const std::string& marker, void **phandle) override; + int list_keys_init(const DoutPrefixProvider *dpp, const std::string& marker, void **phandle) override; int list_keys_next(void *handle, int max, std::list& keys, bool *truncated) override; void list_keys_complete(void *handle) override; @@ -254,8 +254,8 @@ public: RGWMDLogStatus op_type, std::function f); - int list_keys_init(const string& section, void **phandle); - int list_keys_init(const string& section, const string& marker, void **phandle); + int list_keys_init(const DoutPrefixProvider *dpp, const string& section, void **phandle); + int list_keys_init(const DoutPrefixProvider *dpp, const string& section, const string& marker, void **phandle); int list_keys_next(void *handle, int max, list& keys, bool *truncated); void list_keys_complete(void *handle); @@ -286,7 +286,7 @@ public: int put_pre(const DoutPrefixProvider *dpp) override; int put(const DoutPrefixProvider *dpp) override; - virtual int put_check() { + virtual int put_check(const DoutPrefixProvider *dpp) { return 0; } virtual int put_checked(const DoutPrefixProvider *dpp); diff --git a/src/rgw/rgw_multi.cc b/src/rgw/rgw_multi.cc index d66514f2d82..e3369f64b64 100644 --- a/src/rgw/rgw_multi.cc +++ b/src/rgw/rgw_multi.cc @@ -78,7 +78,7 @@ bool is_v2_upload_id(const string& upload_id) (strncmp(uid, MULTIPART_UPLOAD_ID_PREFIX_LEGACY, sizeof(MULTIPART_UPLOAD_ID_PREFIX_LEGACY) - 1) == 0); } -int list_multipart_parts(rgw::sal::Bucket* bucket, +int list_multipart_parts(const DoutPrefixProvider *dpp, rgw::sal::Bucket* bucket, CephContext *cct, const string& upload_id, const string& meta_oid, int num_parts, @@ -106,10 +106,10 @@ int list_multipart_parts(rgw::sal::Bucket* bucket, snprintf(buf, sizeof(buf), "%08d", marker); p.append(buf); - ret = obj->omap_get_vals(p, num_parts + 1, &parts_map, + ret = obj->omap_get_vals(dpp, p, num_parts + 1, &parts_map, nullptr, null_yield); } else { - ret = obj->omap_get_all(&parts_map, null_yield); + ret = 
obj->omap_get_all(dpp, &parts_map, null_yield); } if (ret < 0) { return ret; @@ -129,7 +129,7 @@ int list_multipart_parts(rgw::sal::Bucket* bucket, try { decode(info, bli); } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: could not part info, caught buffer::error" << + ldpp_dout(dpp, 0) << "ERROR: could not part info, caught buffer::error" << dendl; return -EIO; } @@ -141,7 +141,7 @@ int list_multipart_parts(rgw::sal::Bucket* bucket, * where one gateway doesn't support correctly sorted omap * keys for multipart upload just assume data is unsorted. */ - return list_multipart_parts(bucket, cct, upload_id, + return list_multipart_parts(dpp, bucket, cct, upload_id, meta_oid, num_parts, marker, parts, next_marker, truncated, true); } @@ -183,14 +183,14 @@ int list_multipart_parts(rgw::sal::Bucket* bucket, return 0; } -int list_multipart_parts(struct req_state *s, +int list_multipart_parts(const DoutPrefixProvider *dpp, struct req_state *s, const string& upload_id, const string& meta_oid, int num_parts, int marker, map& parts, int *next_marker, bool *truncated, bool assume_unsorted) { - return list_multipart_parts(s->bucket.get(), s->cct, upload_id, + return list_multipart_parts(dpp, s->bucket.get(), s->cct, upload_id, meta_oid, num_parts, marker, parts, next_marker, truncated, assume_unsorted); } @@ -213,7 +213,7 @@ int abort_multipart_upload(const DoutPrefixProvider *dpp, uint64_t parts_accounted_size = 0; do { - ret = list_multipart_parts(bucket, cct, + ret = list_multipart_parts(dpp, bucket, cct, mp_obj.get_upload_id(), mp_obj.get_meta(), 1000, marker, obj_parts, &marker, &truncated); if (ret < 0) { @@ -235,9 +235,9 @@ int abort_multipart_upload(const DoutPrefixProvider *dpp, if (ret < 0 && ret != -ENOENT) return ret; } else { - chain->update(&obj_part.manifest); - RGWObjManifest::obj_iterator oiter = obj_part.manifest.obj_begin(); - if (oiter != obj_part.manifest.obj_end()) { + chain->update(dpp, &obj_part.manifest); + RGWObjManifest::obj_iterator oiter = 
obj_part.manifest.obj_begin(dpp); + if (oiter != obj_part.manifest.obj_end(dpp)) { std::unique_ptr head = bucket->get_object(rgw_obj_key()); rgw_raw_obj raw_head = oiter.get_location().get_raw_obj(store); head->raw_obj_to_obj(raw_head); @@ -259,7 +259,7 @@ int abort_multipart_upload(const DoutPrefixProvider *dpp, return -ERR_NO_SUCH_UPLOAD; } //Delete objects inline if send chain to gc fails - chain->delete_inline(mp_obj.get_upload_id()); + chain->delete_inline(dpp, mp_obj.get_upload_id()); } std::unique_ptr del_op = meta_obj->get_delete_op(obj_ctx); diff --git a/src/rgw/rgw_multi.h b/src/rgw/rgw_multi.h index ca4b8817095..ffafe3d05b6 100644 --- a/src/rgw/rgw_multi.h +++ b/src/rgw/rgw_multi.h @@ -8,6 +8,7 @@ #include "rgw_xml.h" #include "rgw_obj_manifest.h" #include "rgw_compression_types.h" +#include "common/dout.h" namespace rgw { namespace sal { class Store; @@ -108,7 +109,8 @@ public: extern bool is_v2_upload_id(const string& upload_id); -extern int list_multipart_parts(rgw::sal::Bucket* bucket, +extern int list_multipart_parts(const DoutPrefixProvider *dpp, + rgw::sal::Bucket* bucket, CephContext *cct, const string& upload_id, const string& meta_oid, int num_parts, @@ -116,7 +118,8 @@ extern int list_multipart_parts(rgw::sal::Bucket* bucket, int *next_marker, bool *truncated, bool assume_unsorted = false); -extern int list_multipart_parts(struct req_state *s, +extern int list_multipart_parts(const DoutPrefixProvider *dpp, + struct req_state *s, const string& upload_id, const string& meta_oid, int num_parts, int marker, map& parts, diff --git a/src/rgw/rgw_notify.cc b/src/rgw/rgw_notify.cc index 696690ee6d9..38f6c0647a4 100644 --- a/src/rgw/rgw_notify.cc +++ b/src/rgw/rgw_notify.cc @@ -85,7 +85,7 @@ class Manager : public DoutPrefixProvider { librados::ObjectReadOperation op; queues_t queues_chunk; op.omap_get_keys2(start_after, max_chunk, &queues_chunk, &more, &rval); - const auto ret = rgw_rados_operate(rados_ioctx, Q_LIST_OBJECT_NAME, &op, nullptr, y); + 
const auto ret = rgw_rados_operate(this, rados_ioctx, Q_LIST_OBJECT_NAME, &op, nullptr, y); if (ret == -ENOENT) { // queue list object was not created - nothing to do return 0; @@ -209,7 +209,7 @@ class Manager : public DoutPrefixProvider { "" /*no tag*/); cls_2pc_queue_expire_reservations(op, stale_time); // check ownership and do reservation cleanup in one batch - auto ret = rgw_rados_operate(rados_ioctx, queue_name, &op, optional_yield(io_context, yield)); + auto ret = rgw_rados_operate(this, rados_ioctx, queue_name, &op, optional_yield(io_context, yield)); if (ret == -ENOENT) { // queue was deleted ldpp_dout(this, 5) << "INFO: queue: " @@ -268,7 +268,7 @@ class Manager : public DoutPrefixProvider { "" /*no tag*/); cls_2pc_queue_list_entries(op, start_marker, max_elements, &obl, &rval); // check ownership and list entries in one batch - auto ret = rgw_rados_operate(rados_ioctx, queue_name, &op, nullptr, optional_yield(io_context, yield)); + auto ret = rgw_rados_operate(this, rados_ioctx, queue_name, &op, nullptr, optional_yield(io_context, yield)); if (ret == -ENOENT) { // queue was deleted ldpp_dout(this, 5) << "INFO: queue: " @@ -344,7 +344,7 @@ class Manager : public DoutPrefixProvider { "" /*no tag*/); cls_2pc_queue_remove_entries(op, end_marker); // check ownership and deleted entries in one batch - const auto ret = rgw_rados_operate(rados_ioctx, queue_name, &op, optional_yield(io_context, yield)); + const auto ret = rgw_rados_operate(this, rados_ioctx, queue_name, &op, optional_yield(io_context, yield)); if (ret == -ENOENT) { // queue was deleted ldpp_dout(this, 5) << "INFO: queue: " @@ -416,7 +416,7 @@ class Manager : public DoutPrefixProvider { failover_time, LOCK_FLAG_MAY_RENEW); - ret = rgw_rados_operate(rados_ioctx, queue_name, &op, optional_yield(io_context, yield)); + ret = rgw_rados_operate(this, rados_ioctx, queue_name, &op, optional_yield(io_context, yield)); if (ret == -EBUSY) { // lock is already taken by another RGW ldpp_dout(this, 20) << 
"INFO: queue: " << queue_name << " owned (locked) by another daemon" << dendl; @@ -518,7 +518,7 @@ public: librados::ObjectWriteOperation op; op.create(true); cls_2pc_queue_init(op, topic_name, max_queue_size); - auto ret = rgw_rados_operate(rados_ioctx, topic_name, &op, y); + auto ret = rgw_rados_operate(this, rados_ioctx, topic_name, &op, y); if (ret == -EEXIST) { // queue already exists - nothing to do ldpp_dout(this, 20) << "INFO: queue for topic: " << topic_name << " already exists. nothing to do" << dendl; @@ -533,7 +533,7 @@ public: bufferlist empty_bl; std::map new_topic{{topic_name, empty_bl}}; op.omap_set(new_topic); - ret = rgw_rados_operate(rados_ioctx, Q_LIST_OBJECT_NAME, &op, y); + ret = rgw_rados_operate(this, rados_ioctx, Q_LIST_OBJECT_NAME, &op, y); if (ret < 0) { ldpp_dout(this, 1) << "ERROR: failed to add queue: " << topic_name << " to queue list. error: " << ret << dendl; return ret; @@ -545,7 +545,7 @@ public: int remove_persistent_topic(const std::string& topic_name, optional_yield y) { librados::ObjectWriteOperation op; op.remove(); - auto ret = rgw_rados_operate(rados_ioctx, topic_name, &op, y); + auto ret = rgw_rados_operate(this, rados_ioctx, topic_name, &op, y); if (ret == -ENOENT) { // queue already removed - nothing to do ldpp_dout(this, 20) << "INFO: queue for topic: " << topic_name << " already removed. nothing to do" << dendl; @@ -559,7 +559,7 @@ public: std::set topic_to_remove{{topic_name}}; op.omap_rm_keys(topic_to_remove); - ret = rgw_rados_operate(rados_ioctx, Q_LIST_OBJECT_NAME, &op, y); + ret = rgw_rados_operate(this, rados_ioctx, Q_LIST_OBJECT_NAME, &op, y); if (ret < 0) { ldpp_dout(this, 1) << "ERROR: failed to remove queue: " << topic_name << " from queue list. 
error: " << ret << dendl; return ret; @@ -761,7 +761,7 @@ bool notification_match(const rgw_pubsub_topic_filter& filter, const req_state* return true; } -int publish_reserve(EventType event_type, +int publish_reserve(const DoutPrefixProvider *dpp, EventType event_type, reservation_t& res, const RGWObjTags* req_tags) { @@ -780,7 +780,7 @@ int publish_reserve(EventType event_type, // notification does not apply to req_state continue; } - ldout(res.s->cct, 20) << "INFO: notification: '" << topic_filter.s3_id << + ldpp_dout(dpp, 20) << "INFO: notification: '" << topic_filter.s3_id << "' on topic: '" << topic_cfg.dest.arn_topic << "' and bucket: '" << res.s->bucket->get_name() << "' (unique topic: '" << topic_cfg.name << @@ -796,17 +796,17 @@ int publish_reserve(EventType event_type, int rval; const auto& queue_name = topic_cfg.dest.arn_topic; cls_2pc_queue_reserve(op, res.size, 1, &obl, &rval); - auto ret = rgw_rados_operate(res.store->getRados()->get_notif_pool_ctx(), + auto ret = rgw_rados_operate(dpp, res.store->getRados()->get_notif_pool_ctx(), queue_name, &op, res.s->yield, librados::OPERATION_RETURNVEC); if (ret < 0) { - ldout(res.s->cct, 1) << "ERROR: failed to reserve notification on queue: " << queue_name + ldpp_dout(dpp, 1) << "ERROR: failed to reserve notification on queue: " << queue_name << ". error: " << ret << dendl; // if no space is left in queue we ask client to slow down return (ret == -ENOSPC) ? -ERR_RATE_LIMITED : ret; } ret = cls_2pc_queue_reserve_result(obl, res_id); if (ret < 0) { - ldout(res.s->cct, 1) << "ERROR: failed to parse reservation id. error: " << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to parse reservation id. 
error: " << ret << dendl; return ret; } } @@ -846,7 +846,7 @@ int publish_commit(rgw::sal::Object* obj, // first cancel the existing reservation librados::ObjectWriteOperation op; cls_2pc_queue_abort(op, topic.res_id); - auto ret = rgw_rados_operate(res.store->getRados()->get_notif_pool_ctx(), + auto ret = rgw_rados_operate(dpp, res.store->getRados()->get_notif_pool_ctx(), topic.cfg.dest.arn_topic, &op, res.s->yield); if (ret < 0) { @@ -859,7 +859,7 @@ int publish_commit(rgw::sal::Object* obj, bufferlist obl; int rval; cls_2pc_queue_reserve(op, bl.length(), 1, &obl, &rval); - ret = rgw_rados_operate(res.store->getRados()->get_notif_pool_ctx(), + ret = rgw_rados_operate(dpp, res.store->getRados()->get_notif_pool_ctx(), queue_name, &op, res.s->yield, librados::OPERATION_RETURNVEC); if (ret < 0) { ldpp_dout(dpp, 1) << "ERROR: failed to reserve extra space on queue: " << queue_name @@ -875,7 +875,7 @@ int publish_commit(rgw::sal::Object* obj, std::vector bl_data_vec{std::move(bl)}; librados::ObjectWriteOperation op; cls_2pc_queue_commit(op, bl_data_vec, topic.res_id); - const auto ret = rgw_rados_operate(res.store->getRados()->get_notif_pool_ctx(), + const auto ret = rgw_rados_operate(dpp, res.store->getRados()->get_notif_pool_ctx(), queue_name, &op, res.s->yield); topic.res_id = cls_2pc_reservation::NO_ID; @@ -910,7 +910,7 @@ int publish_commit(rgw::sal::Object* obj, return 0; } -int publish_abort(reservation_t& res) { +int publish_abort(const DoutPrefixProvider *dpp, reservation_t& res) { for (auto& topic : res.topics) { if (!topic.cfg.dest.persistent || topic.res_id == cls_2pc_reservation::NO_ID) { // nothing to abort or already committed/aborted @@ -919,11 +919,11 @@ int publish_abort(reservation_t& res) { const auto& queue_name = topic.cfg.dest.arn_topic; librados::ObjectWriteOperation op; cls_2pc_queue_abort(op, topic.res_id); - const auto ret = rgw_rados_operate(res.store->getRados()->get_notif_pool_ctx(), + const auto ret = rgw_rados_operate(dpp, 
res.store->getRados()->get_notif_pool_ctx(), queue_name, &op, res.s->yield); if (ret < 0) { - ldout(res.s->cct, 1) << "ERROR: failed to abort reservation: " << topic.res_id << + ldpp_dout(dpp, 1) << "ERROR: failed to abort reservation: " << topic.res_id << " from queue: " << queue_name << ". error: " << ret << dendl; return ret; } @@ -933,7 +933,7 @@ int publish_abort(reservation_t& res) { } reservation_t::~reservation_t() { - publish_abort(*this); + publish_abort(dpp, *this); } } diff --git a/src/rgw/rgw_notify.h b/src/rgw/rgw_notify.h index 977d010cfd2..ba5f96c1fd0 100644 --- a/src/rgw/rgw_notify.h +++ b/src/rgw/rgw_notify.h @@ -52,14 +52,15 @@ struct reservation_t { cls_2pc_reservation::id_t res_id; }; + const DoutPrefixProvider *dpp; std::vector topics; rgw::sal::RadosStore* const store; const req_state* const s; size_t size; rgw::sal::Object* const object; - reservation_t(rgw::sal::RadosStore* _store, const req_state* _s, rgw::sal::Object* _object) : - store(_store), s(_s), object(_object) {} + reservation_t(const DoutPrefixProvider *_dpp, rgw::sal::RadosStore* _store, const req_state* _s, rgw::sal::Object* _object) : + dpp(_dpp), store(_store), s(_s), object(_object) {} // dtor doing resource leak guarding // aborting the reservation if not already committed or aborted @@ -67,7 +68,8 @@ struct reservation_t { }; // create a reservation on the 2-phase-commit queue -int publish_reserve(EventType event_type, +int publish_reserve(const DoutPrefixProvider *dpp, + EventType event_type, reservation_t& reservation, const RGWObjTags* req_tags); @@ -81,7 +83,7 @@ int publish_commit(rgw::sal::Object* obj, const DoutPrefixProvider *dpp); // cancel the reservation -int publish_abort(reservation_t& reservation); +int publish_abort(const DoutPrefixProvider *dpp, reservation_t& reservation); } diff --git a/src/rgw/rgw_obj_manifest.cc b/src/rgw/rgw_obj_manifest.cc index 7e5a4ec80af..ca5aa7e7505 100644 --- a/src/rgw/rgw_obj_manifest.cc +++ b/src/rgw/rgw_obj_manifest.cc @@ -5,6 
+5,7 @@ #include "services/svc_zone.h" #include "services/svc_tier_rados.h" +#include "rgw_rados.h" // RGW_OBJ_NS_SHADOW and RGW_OBJ_NS_MULTIPART #define dout_context g_ceph_context #define dout_subsys ceph_subsys_rgw @@ -35,36 +36,14 @@ int RGWObjManifest::generator::create_next(uint64_t ofs) manifest->get_implicit_location(cur_part_id, cur_stripe, ofs, NULL, &cur_obj); - manifest->update_iterators(); - return 0; } -const RGWObjManifest::obj_iterator& RGWObjManifest::obj_begin() -{ - return begin_iter; -} - -const RGWObjManifest::obj_iterator& RGWObjManifest::obj_end() -{ - return end_iter; -} - -RGWObjManifest::obj_iterator RGWObjManifest::obj_find(uint64_t ofs) -{ - if (ofs > obj_size) { - ofs = obj_size; - } - RGWObjManifest::obj_iterator iter(this); - iter.seek(ofs); - return iter; -} - -int RGWObjManifest::append(RGWObjManifest& m, const RGWZoneGroup& zonegroup, +int RGWObjManifest::append(const DoutPrefixProvider *dpp, RGWObjManifest& m, const RGWZoneGroup& zonegroup, const RGWZoneParams& zone_params) { if (explicit_objs || m.explicit_objs) { - return append_explicit(m, zonegroup, zone_params); + return append_explicit(dpp, m, zonegroup, zone_params); } if (rules.empty()) { @@ -84,7 +63,7 @@ int RGWObjManifest::append(RGWObjManifest& m, const RGWZoneGroup& zonegroup, map::iterator miter = m.rules.begin(); if (miter == m.rules.end()) { - return append_explicit(m, zonegroup, zone_params); + return append_explicit(dpp, m, zonegroup, zone_params); } for (; miter != m.rules.end(); ++miter) { @@ -138,9 +117,9 @@ int RGWObjManifest::append(RGWObjManifest& m, const RGWZoneGroup& zonegroup, return 0; } -int RGWObjManifest::append(RGWObjManifest& m, rgw::sal::Zone* zone_svc) +int RGWObjManifest::append(const DoutPrefixProvider *dpp, RGWObjManifest& m, rgw::sal::Zone* zone_svc) { - return append(m, zone_svc->get_zonegroup(), zone_svc->get_params()); + return append(dpp, m, zone_svc->get_zonegroup(), zone_svc->get_params()); } void 
RGWObjManifest::append_rules(RGWObjManifest& m, map::iterator& miter, @@ -155,14 +134,14 @@ void RGWObjManifest::append_rules(RGWObjManifest& m, map::iterator iter; uint64_t base = obj_size; @@ -252,7 +231,7 @@ void RGWObjManifest::obj_iterator::operator++() /* are we still pointing at the head? */ if (ofs < head_size) { rule_iter = manifest->rules.begin(); - RGWObjManifestRule *rule = &rule_iter->second; + const RGWObjManifestRule *rule = &rule_iter->second; ofs = std::min(head_size, obj_size); stripe_ofs = ofs; cur_stripe = 1; @@ -264,16 +243,16 @@ void RGWObjManifest::obj_iterator::operator++() return; } - RGWObjManifestRule *rule = &rule_iter->second; + const RGWObjManifestRule *rule = &rule_iter->second; stripe_ofs += rule->stripe_max_size; cur_stripe++; - dout(20) << "RGWObjManifest::operator++(): rule->part_size=" << rule->part_size << " rules.size()=" << manifest->rules.size() << dendl; + ldpp_dout(dpp, 20) << "RGWObjManifest::operator++(): rule->part_size=" << rule->part_size << " rules.size()=" << manifest->rules.size() << dendl; if (rule->part_size > 0) { /* multi part, multi stripes object */ - dout(20) << "RGWObjManifest::operator++(): stripe_ofs=" << stripe_ofs << " part_ofs=" << part_ofs << " rule->part_size=" << rule->part_size << dendl; + ldpp_dout(dpp, 20) << "RGWObjManifest::operator++(): stripe_ofs=" << stripe_ofs << " part_ofs=" << part_ofs << " rule->part_size=" << rule->part_size << dendl; if (stripe_ofs >= part_ofs + rule->part_size) { /* moved to the next part */ @@ -309,7 +288,7 @@ void RGWObjManifest::obj_iterator::operator++() stripe_size = 0; } - dout(20) << "RGWObjManifest::operator++(): result: ofs=" << ofs << " stripe_ofs=" << stripe_ofs << " part_ofs=" << part_ofs << " rule->part_size=" << rule->part_size << dendl; + ldpp_dout(dpp, 20) << "RGWObjManifest::operator++(): result: ofs=" << ofs << " stripe_ofs=" << stripe_ofs << " part_ofs=" << part_ofs << " rule->part_size=" << rule->part_size << dendl; update_location(); } @@ -363,8 
+342,164 @@ int RGWObjManifest::generator::create_begin(CephContext *cct, RGWObjManifest *_m // Normal object which not generated through copy operation manifest->set_tail_instance(_obj.key.instance); - manifest->update_iterators(); - return 0; } +void RGWObjManifest::obj_iterator::seek(uint64_t o) +{ + ofs = o; + if (manifest->explicit_objs) { + explicit_iter = manifest->objs.upper_bound(ofs); + if (explicit_iter != manifest->objs.begin()) { + --explicit_iter; + } + if (ofs < manifest->obj_size) { + update_explicit_pos(); + } else { + ofs = manifest->obj_size; + } + update_location(); + return; + } + if (o < manifest->get_head_size()) { + rule_iter = manifest->rules.begin(); + stripe_ofs = 0; + stripe_size = manifest->get_head_size(); + if (rule_iter != manifest->rules.end()) { + cur_part_id = rule_iter->second.start_part_num; + cur_override_prefix = rule_iter->second.override_prefix; + } + update_location(); + return; + } + + rule_iter = manifest->rules.upper_bound(ofs); + next_rule_iter = rule_iter; + if (rule_iter != manifest->rules.begin()) { + --rule_iter; + } + + if (rule_iter == manifest->rules.end()) { + update_location(); + return; + } + + const RGWObjManifestRule& rule = rule_iter->second; + + if (rule.part_size > 0) { + cur_part_id = rule.start_part_num + (ofs - rule.start_ofs) / rule.part_size; + } else { + cur_part_id = rule.start_part_num; + } + part_ofs = rule.start_ofs + (cur_part_id - rule.start_part_num) * rule.part_size; + + if (rule.stripe_max_size > 0) { + cur_stripe = (ofs - part_ofs) / rule.stripe_max_size; + + stripe_ofs = part_ofs + cur_stripe * rule.stripe_max_size; + if (!cur_part_id && manifest->get_head_size() > 0) { + cur_stripe++; + } + } else { + cur_stripe = 0; + stripe_ofs = part_ofs; + } + + if (!rule.part_size) { + stripe_size = rule.stripe_max_size; + stripe_size = std::min(manifest->get_obj_size() - stripe_ofs, stripe_size); + } else { + uint64_t next = std::min(stripe_ofs + rule.stripe_max_size, part_ofs + rule.part_size); + 
stripe_size = next - stripe_ofs; + } + + cur_override_prefix = rule.override_prefix; + + update_location(); +} + +void RGWObjManifest::obj_iterator::update_location() +{ + if (manifest->explicit_objs) { + if (manifest->empty()) { + location = rgw_obj_select{}; + } else { + location = explicit_iter->second.loc; + } + return; + } + + if (ofs < manifest->get_head_size()) { + location = manifest->get_obj(); + location.set_placement_rule(manifest->get_head_placement_rule()); + return; + } + + manifest->get_implicit_location(cur_part_id, cur_stripe, ofs, &cur_override_prefix, &location); +} + +void RGWObjManifest::obj_iterator::update_explicit_pos() +{ + ofs = explicit_iter->first; + stripe_ofs = ofs; + + auto next_iter = explicit_iter; + ++next_iter; + if (next_iter != manifest->objs.end()) { + stripe_size = next_iter->first - ofs; + } else { + stripe_size = manifest->obj_size - ofs; + } +} + +void RGWObjManifest::get_implicit_location(uint64_t cur_part_id, uint64_t cur_stripe, + uint64_t ofs, string *override_prefix, rgw_obj_select *location) const +{ + rgw_obj loc; + + string& oid = loc.key.name; + string& ns = loc.key.ns; + + if (!override_prefix || override_prefix->empty()) { + oid = prefix; + } else { + oid = *override_prefix; + } + + if (!cur_part_id) { + if (ofs < max_head_size) { + location->set_placement_rule(head_placement_rule); + *location = obj; + return; + } else { + char buf[16]; + snprintf(buf, sizeof(buf), "%d", (int)cur_stripe); + oid += buf; + ns = RGW_OBJ_NS_SHADOW; + } + } else { + char buf[32]; + if (cur_stripe == 0) { + snprintf(buf, sizeof(buf), ".%d", (int)cur_part_id); + oid += buf; + ns= RGW_OBJ_NS_MULTIPART; + } else { + snprintf(buf, sizeof(buf), ".%d_%d", (int)cur_part_id, (int)cur_stripe); + oid += buf; + ns = RGW_OBJ_NS_SHADOW; + } + } + + if (!tail_placement.bucket.name.empty()) { + loc.bucket = tail_placement.bucket; + } else { + loc.bucket = obj.bucket; + } + + // Always overwrite instance with tail_instance + // to get the right 
shadow object location + loc.key.set_instance(tail_instance); + + location->set_placement_rule(tail_placement.placement_rule); + *location = loc; +} diff --git a/src/rgw/rgw_obj_manifest.h b/src/rgw/rgw_obj_manifest.h index 2e1ec798255..f2567fddf66 100644 --- a/src/rgw/rgw_obj_manifest.h +++ b/src/rgw/rgw_obj_manifest.h @@ -163,17 +163,13 @@ protected: string tail_instance; /* tail object's instance */ - void convert_to_explicit(const RGWZoneGroup& zonegroup, const RGWZoneParams& zone_params); - int append_explicit(RGWObjManifest& m, const RGWZoneGroup& zonegroup, const RGWZoneParams& zone_params); + void convert_to_explicit(const DoutPrefixProvider *dpp, const RGWZoneGroup& zonegroup, const RGWZoneParams& zone_params); + int append_explicit(const DoutPrefixProvider *dpp, RGWObjManifest& m, const RGWZoneGroup& zonegroup, const RGWZoneParams& zone_params); void append_rules(RGWObjManifest& m, map::iterator& iter, string *override_prefix); - void update_iterators() { - begin_iter.seek(0); - end_iter.seek(obj_size); - } public: - RGWObjManifest() : begin_iter(this), end_iter(this) {} + RGWObjManifest() = default; RGWObjManifest(const RGWObjManifest& rhs) { *this = rhs; } @@ -188,13 +184,6 @@ public: tail_placement = rhs.tail_placement; rules = rhs.rules; tail_instance = rhs.tail_instance; - - begin_iter.set_manifest(this); - end_iter.set_manifest(this); - - begin_iter.seek(rhs.begin_iter.get_ofs()); - end_iter.seek(rhs.end_iter.get_ofs()); - return *this; } @@ -209,7 +198,8 @@ public: set_obj_size(_size); } - void get_implicit_location(uint64_t cur_part_id, uint64_t cur_stripe, uint64_t ofs, string *override_prefix, rgw_obj_select *location); + void get_implicit_location(uint64_t cur_part_id, uint64_t cur_stripe, uint64_t ofs, + string *override_prefix, rgw_obj_select *location) const; void set_trivial_rule(uint64_t tail_ofs, uint64_t stripe_max_size) { RGWObjManifestRule rule(0, tail_ofs, 0, stripe_max_size); @@ -318,34 +308,33 @@ public: 
decode(tail_placement.placement_rule, bl); } - update_iterators(); DECODE_FINISH(bl); } void dump(Formatter *f) const; static void generate_test_instances(list& o); - int append(RGWObjManifest& m, const RGWZoneGroup& zonegroup, + int append(const DoutPrefixProvider *dpp, RGWObjManifest& m, const RGWZoneGroup& zonegroup, const RGWZoneParams& zone_params); - int append(RGWObjManifest& m, rgw::sal::Zone* zone); + int append(const DoutPrefixProvider *dpp, RGWObjManifest& m, rgw::sal::Zone* zone); bool get_rule(uint64_t ofs, RGWObjManifestRule *rule); - bool empty() { + bool empty() const { if (explicit_objs) return objs.empty(); return rules.empty(); } - bool has_explicit_objs() { + bool has_explicit_objs() const { return explicit_objs; } - bool has_tail() { + bool has_tail() const { if (explicit_objs) { if (objs.size() == 1) { - map::iterator iter = objs.begin(); - rgw_obj& o = iter->second.loc; + auto iter = objs.begin(); + const rgw_obj& o = iter->second.loc; return !(obj == o); } return (objs.size() >= 2); @@ -364,7 +353,7 @@ public: } } - const rgw_obj& get_obj() { + const rgw_obj& get_obj() const { return obj; } @@ -373,11 +362,11 @@ public: tail_placement.bucket = _b; } - const rgw_bucket_placement& get_tail_placement() { + const rgw_bucket_placement& get_tail_placement() const { return tail_placement; } - const rgw_placement_rule& get_head_placement_rule() { + const rgw_placement_rule& get_head_placement_rule() const { return head_placement_rule; } @@ -385,7 +374,7 @@ public: prefix = _p; } - const string& get_prefix() { + const string& get_prefix() const { return prefix; } @@ -393,7 +382,7 @@ public: tail_instance = _ti; } - const string& get_tail_instance() { + const string& get_tail_instance() const { return tail_instance; } @@ -403,24 +392,23 @@ public: void set_obj_size(uint64_t s) { obj_size = s; - - update_iterators(); } - uint64_t get_obj_size() { + uint64_t get_obj_size() const { return obj_size; } - uint64_t get_head_size() { + uint64_t 
get_head_size() const { return head_size; } - uint64_t get_max_head_size() { + uint64_t get_max_head_size() const { return max_head_size; } class obj_iterator { - RGWObjManifest *manifest = nullptr; + const DoutPrefixProvider *dpp; + const RGWObjManifest *manifest = nullptr; uint64_t part_ofs = 0; /* where current part starts */ uint64_t stripe_ofs = 0; /* where current stripe starts */ uint64_t ofs = 0; /* current position within the object */ @@ -432,26 +420,18 @@ public: rgw_obj_select location; - map::iterator rule_iter; - map::iterator next_rule_iter; - - map::iterator explicit_iter; + map::const_iterator rule_iter; + map::const_iterator next_rule_iter; + map::const_iterator explicit_iter; void update_explicit_pos(); - - protected: - - void set_manifest(RGWObjManifest *m) { - manifest = m; - } - public: obj_iterator() = default; - explicit obj_iterator(RGWObjManifest *_m) - : obj_iterator(_m, 0) + explicit obj_iterator(const DoutPrefixProvider *_dpp, const RGWObjManifest *_m) + : obj_iterator(_dpp, _m, 0) {} - obj_iterator(RGWObjManifest *_m, uint64_t _ofs) : manifest(_m) { + obj_iterator(const DoutPrefixProvider *_dpp, const RGWObjManifest *_m, uint64_t _ofs) : dpp(_dpp), manifest(_m) { seek(_ofs); } void seek(uint64_t ofs); @@ -508,16 +488,14 @@ public: void update_location(); - friend class RGWObjManifest; void dump(Formatter *f) const; }; // class obj_iterator - const obj_iterator& obj_begin(); - const obj_iterator& obj_end(); - obj_iterator obj_find(uint64_t ofs); - - obj_iterator begin_iter; - obj_iterator end_iter; + obj_iterator obj_begin(const DoutPrefixProvider *dpp) const { return obj_iterator{dpp, this}; } + obj_iterator obj_end(const DoutPrefixProvider *dpp) const { return obj_iterator{dpp, this, obj_size}; } + obj_iterator obj_find(const DoutPrefixProvider *dpp, uint64_t ofs) const { + return obj_iterator{dpp, this, std::min(ofs, obj_size)}; + } /* * simple object generator. Using a simple single rule manifest. 
diff --git a/src/rgw/rgw_object_expirer_core.cc b/src/rgw/rgw_object_expirer_core.cc index ad8c1e2fd77..183c101de2a 100644 --- a/src/rgw/rgw_object_expirer_core.cc +++ b/src/rgw/rgw_object_expirer_core.cc @@ -85,7 +85,8 @@ static int objexp_hint_parse(CephContext *cct, cls_timeindex_entry &ti_entry, return 0; } -int RGWObjExpStore::objexp_hint_add(const ceph::real_time& delete_at, +int RGWObjExpStore::objexp_hint_add(const DoutPrefixProvider *dpp, + const ceph::real_time& delete_at, const string& tenant_name, const string& bucket_name, const string& bucket_id, @@ -106,15 +107,16 @@ int RGWObjExpStore::objexp_hint_add(const ceph::real_time& delete_at, string shard_name = objexp_hint_get_shardname(objexp_key_shard(obj_key, cct->_conf->rgw_objexp_hints_num_shards)); auto obj = rados_svc->obj(rgw_raw_obj(zone_svc->get_params().log_pool, shard_name)); - int r = obj.open(); + int r = obj.open(dpp); if (r < 0) { - ldout(cct, 0) << "ERROR: " << __func__ << "(): failed to open obj=" << obj << " (r=" << r << ")" << dendl; + ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): failed to open obj=" << obj << " (r=" << r << ")" << dendl; return r; } - return obj.operate(&op, null_yield); + return obj.operate(dpp, &op, null_yield); } -int RGWObjExpStore::objexp_hint_list(const string& oid, +int RGWObjExpStore::objexp_hint_list(const DoutPrefixProvider *dpp, + const string& oid, const ceph::real_time& start_time, const ceph::real_time& end_time, const int max_entries, @@ -128,13 +130,13 @@ int RGWObjExpStore::objexp_hint_list(const string& oid, out_marker, truncated); auto obj = rados_svc->obj(rgw_raw_obj(zone_svc->get_params().log_pool, oid)); - int r = obj.open(); + int r = obj.open(dpp); if (r < 0) { - ldout(cct, 0) << "ERROR: " << __func__ << "(): failed to open obj=" << obj << " (r=" << r << ")" << dendl; + ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): failed to open obj=" << obj << " (r=" << r << ")" << dendl; return r; } bufferlist obl; - int ret = obj.operate(&op, 
&obl, null_yield); + int ret = obj.operate(dpp, &op, &obl, null_yield); if ((ret < 0 ) && (ret != -ENOENT)) { return ret; @@ -147,7 +149,8 @@ int RGWObjExpStore::objexp_hint_list(const string& oid, return 0; } -static int cls_timeindex_trim_repeat(rgw_rados_ref ref, +static int cls_timeindex_trim_repeat(const DoutPrefixProvider *dpp, + rgw_rados_ref ref, const string& oid, const utime_t& from_time, const utime_t& to_time, @@ -158,7 +161,7 @@ static int cls_timeindex_trim_repeat(rgw_rados_ref ref, do { librados::ObjectWriteOperation op; cls_timeindex_trim(op, from_time, to_time, from_marker, to_marker); - int r = rgw_rados_operate(ref.pool.ioctx(), oid, &op, null_yield); + int r = rgw_rados_operate(dpp, ref.pool.ioctx(), oid, &op, null_yield); if (r == -ENODATA) done = true; else if (r < 0) @@ -168,20 +171,21 @@ static int cls_timeindex_trim_repeat(rgw_rados_ref ref, return 0; } -int RGWObjExpStore::objexp_hint_trim(const string& oid, +int RGWObjExpStore::objexp_hint_trim(const DoutPrefixProvider *dpp, + const string& oid, const ceph::real_time& start_time, const ceph::real_time& end_time, const string& from_marker, const string& to_marker) { auto obj = rados_svc->obj(rgw_raw_obj(zone_svc->get_params().log_pool, oid)); - int r = obj.open(); + int r = obj.open(dpp); if (r < 0) { - ldout(cct, 0) << "ERROR: " << __func__ << "(): failed to open obj=" << obj << " (r=" << r << ")" << dendl; + ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): failed to open obj=" << obj << " (r=" << r << ")" << dendl; return r; } auto& ref = obj.get_ref(); - int ret = cls_timeindex_trim_repeat(ref, oid, utime_t(start_time), utime_t(end_time), + int ret = cls_timeindex_trim_repeat(dpp, ref, oid, utime_t(start_time), utime_t(end_time), from_marker, to_marker); if ((ret < 0 ) && (ret != -ENOENT)) { return ret; @@ -255,22 +259,23 @@ void RGWObjectExpirer::garbage_chunk(const DoutPrefixProvider *dpp, return; } -void RGWObjectExpirer::trim_chunk(const string& shard, +void 
RGWObjectExpirer::trim_chunk(const DoutPrefixProvider *dpp, + const string& shard, const utime_t& from, const utime_t& to, const string& from_marker, const string& to_marker) { - ldout(store->ctx(), 20) << "trying to trim removal hints to=" << to + ldpp_dout(dpp, 20) << "trying to trim removal hints to=" << to << ", to_marker=" << to_marker << dendl; real_time rt_from = from.to_real_time(); real_time rt_to = to.to_real_time(); - int ret = exp_store.objexp_hint_trim(shard, rt_from, rt_to, + int ret = exp_store.objexp_hint_trim(dpp, shard, rt_from, rt_to, from_marker, to_marker); if (ret < 0) { - ldout(store->ctx(), 0) << "ERROR during trim: " << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR during trim: " << ret << dendl; } return; @@ -309,7 +314,7 @@ bool RGWObjectExpirer::process_single_shard(const DoutPrefixProvider *dpp, real_time rt_start = round_start.to_real_time(); list entries; - ret = exp_store.objexp_hint_list(shard, rt_last, rt_start, + ret = exp_store.objexp_hint_list(dpp, shard, rt_last, rt_start, num_entries, marker, entries, &out_marker, &truncated); if (ret < 0) { @@ -322,7 +327,7 @@ bool RGWObjectExpirer::process_single_shard(const DoutPrefixProvider *dpp, garbage_chunk(dpp, entries, need_trim); if (need_trim) { - trim_chunk(shard, last_run, round_start, marker, out_marker); + trim_chunk(dpp, shard, last_run, round_start, marker, out_marker); } utime_t now = ceph_clock_now(); diff --git a/src/rgw/rgw_object_expirer_core.h b/src/rgw/rgw_object_expirer_core.h index a2ec4250cb4..db73f5e5fb0 100644 --- a/src/rgw/rgw_object_expirer_core.h +++ b/src/rgw/rgw_object_expirer_core.h @@ -46,13 +46,15 @@ public: rados_svc(_rados_svc), zone_svc(_zone_svc) {} - int objexp_hint_add(const ceph::real_time& delete_at, + int objexp_hint_add(const DoutPrefixProvider *dpp, + const ceph::real_time& delete_at, const string& tenant_name, const string& bucket_name, const string& bucket_id, const rgw_obj_index_key& obj_key); - int objexp_hint_list(const string& oid, + int 
objexp_hint_list(const DoutPrefixProvider *dpp, + const string& oid, const ceph::real_time& start_time, const ceph::real_time& end_time, const int max_entries, @@ -61,7 +63,8 @@ public: string *out_marker, /* out */ bool *truncated); /* out */ - int objexp_hint_trim(const string& oid, + int objexp_hint_trim(const DoutPrefixProvider *dpp, + const string& oid, const ceph::real_time& start_time, const ceph::real_time& end_time, const string& from_marker, @@ -107,12 +110,13 @@ public: stop_processor(); } - int hint_add(const ceph::real_time& delete_at, + int hint_add(const DoutPrefixProvider *dpp, + const ceph::real_time& delete_at, const string& tenant_name, const string& bucket_name, const string& bucket_id, const rgw_obj_index_key& obj_key) { - return exp_store.objexp_hint_add(delete_at, tenant_name, bucket_name, + return exp_store.objexp_hint_add(dpp, delete_at, tenant_name, bucket_name, bucket_id, obj_key); } @@ -122,7 +126,8 @@ public: std::list& entries, /* in */ bool& need_trim); /* out */ - void trim_chunk(const std::string& shard, + void trim_chunk(const DoutPrefixProvider *dpp, + const std::string& shard, const utime_t& from, const utime_t& to, const string& from_marker, diff --git a/src/rgw/rgw_op.cc b/src/rgw/rgw_op.cc index 87a18f989a8..bbfdc749884 100644 --- a/src/rgw/rgw_op.cc +++ b/src/rgw/rgw_op.cc @@ -164,7 +164,8 @@ done: return r; } -static int decode_policy(CephContext *cct, +static int decode_policy(const DoutPrefixProvider *dpp, + CephContext *cct, bufferlist& bl, RGWAccessControlPolicy *policy) { @@ -172,11 +173,11 @@ static int decode_policy(CephContext *cct, try { policy->decode(iter); } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl; + ldpp_dout(dpp, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl; return -EIO; } if (cct->_conf->subsys.should_gather()) { - ldout(cct, 15) << __func__ << " Read AccessControlPolicy"; + ldpp_dout(dpp, 15) << __func__ << " Read 
AccessControlPolicy"; RGWAccessControlPolicy_S3 *s3policy = static_cast(policy); s3policy->to_xml(*_dout); *_dout << dendl; @@ -185,13 +186,14 @@ static int decode_policy(CephContext *cct, } -static int get_user_policy_from_attr(CephContext * const cct, +static int get_user_policy_from_attr(const DoutPrefixProvider *dpp, + CephContext * const cct, map& attrs, RGWAccessControlPolicy& policy /* out */) { auto aiter = attrs.find(RGW_ATTR_ACL); if (aiter != attrs.end()) { - int ret = decode_policy(cct, aiter->second, &policy); + int ret = decode_policy(dpp, cct, aiter->second, &policy); if (ret < 0) { return ret; } @@ -220,7 +222,7 @@ int rgw_op_get_bucket_policy_from_attr(const DoutPrefixProvider *dpp, map::iterator aiter = bucket_attrs.find(RGW_ATTR_ACL); if (aiter != bucket_attrs.end()) { - int ret = decode_policy(cct, aiter->second, policy); + int ret = decode_policy(dpp, cct, aiter->second, policy); if (ret < 0) return ret; } else { @@ -254,7 +256,7 @@ static int get_obj_policy_from_attr(const DoutPrefixProvider *dpp, ret = rop->get_attr(dpp, RGW_ATTR_ACL, bl, y); if (ret >= 0) { - ret = decode_policy(cct, bl, policy); + ret = decode_policy(dpp, cct, bl, policy); if (ret < 0) return ret; } else if (ret == -ENODATA) { @@ -651,7 +653,7 @@ int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::Store* st ret = acl_user->read_attrs(dpp, y); if (!ret) { - ret = get_user_policy_from_attr(s->cct, acl_user->get_attrs(), *s->user_acl); + ret = get_user_policy_from_attr(dpp, s->cct, acl_user->get_attrs(), *s->user_acl); } if (-ENOENT == ret) { /* In already existing clusters users won't have ACL. 
In such case @@ -688,7 +690,7 @@ int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::Store* st else ret = -EACCES; } } catch (const std::exception& e) { - lderr(s->cct) << "Error reading IAM User Policy: " << e.what() << dendl; + ldpp_dout(dpp, -1) << "Error reading IAM User Policy: " << e.what() << dendl; ret = -EACCES; } } @@ -1119,11 +1121,11 @@ int RGWPutBucketTags::verify_permission(optional_yield y) { void RGWPutBucketTags::execute(optional_yield y) { - op_ret = get_params(y); + op_ret = get_params(this, y); if (op_ret < 0) return; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; } @@ -1149,7 +1151,7 @@ int RGWDeleteBucketTags::verify_permission(optional_yield y) void RGWDeleteBucketTags::execute(optional_yield y) { bufferlist in_data; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -1197,7 +1199,7 @@ void RGWPutBucketReplication::execute(optional_yield y) { if (op_ret < 0) return; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -1235,7 +1237,7 @@ int RGWDeleteBucketReplication::verify_permission(optional_yield y) void RGWDeleteBucketReplication::execute(optional_yield y) { bufferlist in_data; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, 
in_data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -1552,7 +1554,7 @@ int RGWGetObj::read_user_manifest_part(rgw::sal::Bucket* bucket, } } - op_ret = rgw_policy_from_attrset(s->cct, part->get_attrs(), &obj_policy); + op_ret = rgw_policy_from_attrset(s, s->cct, part->get_attrs(), &obj_policy); if (op_ret < 0) return op_ret; @@ -2043,7 +2045,7 @@ static inline void rgw_cond_decode_objtags( bufferlist::const_iterator iter{&tags->second}; s->tagset.decode(iter); } catch (buffer::error& err) { - ldout(s->cct, 0) + ldpp_dout(s, 0) << "ERROR: caught buffer::error, couldn't decode TagSet" << dendl; } } @@ -2402,7 +2404,7 @@ void RGWGetUsage::execute(optional_yield y) RGWUsageIter usage_iter; while (s->bucket && is_truncated) { - op_ret = s->bucket->read_usage(start_epoch, end_epoch, max_entries, &is_truncated, + op_ret = s->bucket->read_usage(this, start_epoch, end_epoch, max_entries, &is_truncated, usage_iter, usage); if (op_ret == -ENOENT) { op_ret = 0; @@ -2426,7 +2428,7 @@ void RGWGetUsage::execute(optional_yield y) return; } - op_ret = s->user->read_stats(y, &stats); + op_ret = s->user->read_stats(this, y, &stats); if (op_ret < 0) { ldpp_dout(this, 0) << "ERROR: can't read user header" << dendl; return; @@ -2491,7 +2493,7 @@ void RGWStatAccount::execute(optional_yield y) } if (!lastmarker) { - lderr(s->cct) << "ERROR: rgw_read_user_buckets, stasis at marker=" + ldpp_dout(this, -1) << "ERROR: rgw_read_user_buckets, stasis at marker=" << marker << " uid=" << s->user->get_id() << dendl; break; } @@ -2573,7 +2575,7 @@ void RGWSetBucketVersioning::execute(optional_yield y) } } - op_ret = store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); 
if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -2649,7 +2651,7 @@ void RGWSetBucketWebsite::execute(optional_yield y) if (op_ret < 0) return; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << " forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -2683,7 +2685,7 @@ void RGWDeleteBucketWebsite::execute(optional_yield y) { bufferlist in_data; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "NOTICE: forward_to_master failed on bucket=" << s->bucket->get_name() << "returned err=" << op_ret << dendl; @@ -3129,7 +3131,7 @@ void RGWCreateBucket::execute(optional_yield y) if (need_metadata_upload()) { /* It's supposed that following functions WILL NOT change any special * attributes (like RGW_ATTR_ACL) if they are already present in attrs. 
*/ - op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false); + op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs, false); if (op_ret < 0) { return; } @@ -3222,7 +3224,7 @@ void RGWCreateBucket::execute(optional_yield y) attrs.clear(); - op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false); + op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs, false); if (op_ret < 0) { return; } @@ -3300,7 +3302,7 @@ void RGWDeleteBucket::execute(optional_yield y) } } - op_ret = s->bucket->sync_user_stats(y); + op_ret = s->bucket->sync_user_stats(this, y); if ( op_ret < 0) { ldpp_dout(this, 1) << "WARNING: failed to sync user stats before bucket delete: op_ret= " << op_ret << dendl; } @@ -3311,7 +3313,7 @@ void RGWDeleteBucket::execute(optional_yield y) } bufferlist in_data; - op_ret = store->forward_request_to_master(s->user.get(), &ot.read_version, in_data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), &ot.read_version, in_data, nullptr, s->info, y); if (op_ret < 0) { if (op_ret == -ENOENT) { /* adjust error, we want to return with NoSuchBucket and not @@ -3780,7 +3782,7 @@ void RGWPutObj::execute(optional_yield y) // make reservation for notification if needed std::unique_ptr res = store->get_notification(s->object.get(), s, rgw::notify::ObjectCreatedPut); - op_ret = res->publish_reserve(obj_tags.get()); + op_ret = res->publish_reserve(this, obj_tags.get()); if (op_ret < 0) { return; } @@ -4022,7 +4024,7 @@ void RGWPutObj::execute(optional_yield y) emplace_attr(RGW_ATTR_ETAG, std::move(bl)); populate_with_generic_attrs(s, attrs); - op_ret = rgw_get_request_metadata(s->cct, s->info, attrs); + op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs); if (op_ret < 0) { return; } @@ -4134,7 +4136,7 @@ void RGWPostObj::execute(optional_yield y) // make reservation for notification if needed std::unique_ptr res = store->get_notification(s->object.get(), s, rgw::notify::ObjectCreatedPost); - 
op_ret = res->publish_reserve(); + op_ret = res->publish_reserve(this); if (op_ret < 0) { return; } @@ -4366,7 +4368,7 @@ int RGWPutMetadataAccount::init_processing(optional_yield y) attrs.emplace(RGW_ATTR_ACL, std::move(acl_bl)); } - op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false); + op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs, false); if (op_ret < 0) { return op_ret; } @@ -4459,7 +4461,7 @@ void RGWPutMetadataBucket::execute(optional_yield y) return; } - op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false); + op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs, false); if (op_ret < 0) { return; } @@ -4554,7 +4556,7 @@ void RGWPutMetadataObject::execute(optional_yield y) return; } - op_ret = rgw_get_request_metadata(s->cct, s->info, attrs); + op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs); if (op_ret < 0) { return; } @@ -4769,7 +4771,7 @@ void RGWDeleteObj::execute(optional_yield y) rgw::notify::ObjectRemovedDeleteMarkerCreated : rgw::notify::ObjectRemovedDelete; std::unique_ptr res = store->get_notification(s->object.get(), s, event_type); - op_ret = res->publish_reserve(); + op_ret = res->publish_reserve(this); if (op_ret < 0) { return; } @@ -5038,7 +5040,7 @@ int RGWCopyObj::init_common() dest_policy.encode(aclbl); emplace_attr(RGW_ATTR_ACL, std::move(aclbl)); - op_ret = rgw_get_request_metadata(s->cct, s->info, attrs); + op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs); if (op_ret < 0) { return op_ret; } @@ -5086,7 +5088,7 @@ void RGWCopyObj::execute(optional_yield y) // make reservation for notification if needed std::unique_ptr res = store->get_notification(s->object.get(), s, rgw::notify::ObjectCreatedCopy); - op_ret = res->publish_reserve(); + op_ret = res->publish_reserve(this); if (op_ret < 0) { return; } @@ -5370,7 +5372,7 @@ void RGWPutACLs::execute(optional_yield y) if (s->canned_acl.empty()) { in_data.append(data); } - op_ret = 
store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -5395,7 +5397,7 @@ void RGWPutACLs::execute(optional_yield y) if (s->bucket_access_conf && s->bucket_access_conf->block_public_acls() && - new_policy.is_public()) { + new_policy.is_public(this)) { op_ret = -EACCES; return; } @@ -5495,7 +5497,7 @@ void RGWPutLC::execute(optional_yield y) ldpp_dout(this, 15) << "New LifecycleConfiguration:" << ss.str() << dendl; } - op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -5511,7 +5513,7 @@ void RGWPutLC::execute(optional_yield y) void RGWDeleteLC::execute(optional_yield y) { bufferlist data; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -5555,7 +5557,7 @@ void RGWPutCORS::execute(optional_yield y) if (op_ret < 0) return; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -5577,7 +5579,7 @@ int RGWDeleteCORS::verify_permission(optional_yield y) void RGWDeleteCORS::execute(optional_yield y) { bufferlist data; - op_ret = 
store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -5687,7 +5689,7 @@ void RGWSetRequestPayment::pre_exec() void RGWSetRequestPayment::execute(optional_yield y) { - op_ret = store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -5767,7 +5769,7 @@ void RGWInitMultipart::execute(optional_yield y) if (op_ret != 0) return; - op_ret = rgw_get_request_metadata(s->cct, s->info, attrs); + op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs); if (op_ret < 0) { return; } @@ -5775,7 +5777,7 @@ void RGWInitMultipart::execute(optional_yield y) // make reservation for notification if needed std::unique_ptr res = store->get_notification(s->object.get(), s, rgw::notify::ObjectCreatedPost); - op_ret = res->publish_reserve(); + op_ret = res->publish_reserve(this); if (op_ret < 0) { return; } @@ -5923,7 +5925,7 @@ void RGWCompleteMultipart::execute(optional_yield y) // make reservation for notification if needed std::unique_ptr res = store->get_notification(s->object.get(), s, rgw::notify::ObjectCreatedCompleteMultipartUpload); - op_ret = res->publish_reserve(); + op_ret = res->publish_reserve(this); if (op_ret < 0) { return; } @@ -5957,11 +5959,11 @@ void RGWCompleteMultipart::execute(optional_yield y) s->cct->_conf.get_val("rgw_mp_lock_max_time"); utime_t dur(max_lock_secs_mp, 0); - serializer = meta_obj->get_serializer("RGWCompleteMultipart"); - op_ret = serializer->try_lock(dur, y); + serializer = meta_obj->get_serializer(this, "RGWCompleteMultipart"); + op_ret = 
serializer->try_lock(this, dur, y); if (op_ret < 0) { ldpp_dout(this, 0) << "failed to acquire lock" << dendl; - if (op_ret == -ENOENT && check_previously_completed(this, parts)) { + if (op_ret == -ENOENT && check_previously_completed(parts)) { ldpp_dout(this, 1) << "NOTICE: This multipart completion is already completed" << dendl; op_ret = 0; return; @@ -5980,7 +5982,7 @@ void RGWCompleteMultipart::execute(optional_yield y) attrs = meta_obj->get_attrs(); do { - op_ret = list_multipart_parts(s, upload_id, meta_oid, max_parts, + op_ret = list_multipart_parts(this, s, upload_id, meta_oid, max_parts, marker, obj_parts, &marker, &truncated); if (op_ret == -ENOENT) { op_ret = -ERR_NO_SUCH_UPLOAD; @@ -6037,7 +6039,7 @@ void RGWCompleteMultipart::execute(optional_yield y) op_ret = -ERR_INVALID_PART; return; } else { - manifest.append(obj_part.manifest, store->get_zone()); + manifest.append(this, obj_part.manifest, store->get_zone()); } bool part_compressed = (obj_part.cs_info.compression_type != "none"); @@ -6149,13 +6151,13 @@ void RGWCompleteMultipart::execute(optional_yield y) } } -bool RGWCompleteMultipart::check_previously_completed(const DoutPrefixProvider* dpp, const RGWMultiCompleteUpload* parts) +bool RGWCompleteMultipart::check_previously_completed(const RGWMultiCompleteUpload* parts) { // re-calculate the etag from the parts and compare to the existing object s->object->set_bucket(s->bucket.get()); int ret = s->object->get_obj_attrs(s->obj_ctx, s->yield, this); if (ret < 0) { - ldpp_dout(dpp, 0) << __func__ << "() ERROR: get_obj_attrs() returned ret=" << ret << dendl; + ldpp_dout(this, 0) << __func__ << "() ERROR: get_obj_attrs() returned ret=" << ret << dendl; return false; } rgw::sal::Attrs sattrs = s->object->get_attrs(); @@ -6167,7 +6169,7 @@ bool RGWCompleteMultipart::check_previously_completed(const DoutPrefixProvider* char petag[CEPH_CRYPTO_MD5_DIGESTSIZE]; hex_to_buf(partetag.c_str(), petag, CEPH_CRYPTO_MD5_DIGESTSIZE); hash.Update((const unsigned char 
*)petag, sizeof(petag)); - ldpp_dout(dpp, 20) << __func__ << "() re-calculating multipart etag: part: " + ldpp_dout(this, 20) << __func__ << "() re-calculating multipart etag: part: " << index << ", etag: " << partetag << dendl; } @@ -6179,11 +6181,11 @@ bool RGWCompleteMultipart::check_previously_completed(const DoutPrefixProvider* "-%lld", (long long)parts->parts.size()); if (oetag.compare(final_etag_str) != 0) { - ldpp_dout(dpp, 1) << __func__ << "() NOTICE: etag mismatch: object etag:" + ldpp_dout(this, 1) << __func__ << "() NOTICE: etag mismatch: object etag:" << oetag << ", re-calculated etag:" << final_etag_str << dendl; return false; } - ldpp_dout(dpp, 5) << __func__ << "() object etag and re-calculated etag match, etag: " << oetag << dendl; + ldpp_dout(this, 5) << __func__ << "() object etag and re-calculated etag match, etag: " << oetag << dendl; return true; } @@ -6288,7 +6290,7 @@ void RGWListMultipart::execute(optional_yield y) if (op_ret < 0) return; - op_ret = list_multipart_parts(s, upload_id, meta_oid, max_parts, + op_ret = list_multipart_parts(this, s, upload_id, meta_oid, max_parts, marker, parts, NULL, &truncated); } @@ -6549,7 +6551,7 @@ void RGWDeleteMultiObj::execute(optional_yield y) rgw::notify::ObjectRemovedDeleteMarkerCreated : rgw::notify::ObjectRemovedDelete; std::unique_ptr res = store->get_notification(obj.get(), s, event_type); - op_ret = res->publish_reserve(); + op_ret = res->publish_reserve(this); if (op_ret < 0) { send_partial_response(*iter, false, "", op_ret); continue; @@ -7532,7 +7534,7 @@ int RGWHandler::do_init_permissions(const DoutPrefixProvider *dpp, optional_yiel { int ret = rgw_build_bucket_policies(dpp, store, s, y); if (ret < 0) { - ldpp_dout(s, 10) << "init_permissions on " << s->bucket + ldpp_dout(dpp, 10) << "init_permissions on " << s->bucket << " failed, ret=" << ret << dendl; return ret==-ENODATA ? 
-EACCES : ret; } @@ -7625,7 +7627,7 @@ void RGWPutBucketPolicy::execute(optional_yield y) return; } - op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 20) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -7760,7 +7762,7 @@ void RGWPutBucketObjectLock::execute(optional_yield y) try { RGWXMLDecoder::decode_xml("ObjectLockConfiguration", obj_lock, &parser, true); } catch (RGWXMLDecoder::err& err) { - ldout(s->cct, 5) << "unexpected xml:" << err << dendl; + ldpp_dout(this, 5) << "unexpected xml:" << err << dendl; op_ret = -ERR_MALFORMED_XML; return; } @@ -7771,9 +7773,9 @@ void RGWPutBucketObjectLock::execute(optional_yield y) return; } - op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { - ldout(s->cct, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl; + ldpp_dout(this, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl; return; } @@ -7930,7 +7932,7 @@ void RGWGetObjRetention::execute(optional_yield y) try { obj_retention.decode(iter); } catch (const buffer::error& e) { - ldout(s->cct, 0) << __func__ << "decode object retention config failed" << dendl; + ldpp_dout(this, 0) << __func__ << "decode object retention config failed" << dendl; op_ret = -EIO; return; } @@ -7977,7 +7979,7 @@ void RGWPutObjLegalHold::execute(optional_yield y) { try { RGWXMLDecoder::decode_xml("LegalHold", obj_legal_hold, &parser, true); } catch (RGWXMLDecoder::err &err) { - ldout(s->cct, 5) << "unexpected xml:" << err << dendl; + ldpp_dout(this, 5) << "unexpected xml:" << err << dendl; op_ret = -ERR_MALFORMED_XML; return; } @@ -8026,7 +8028,7 @@ void 
RGWGetObjLegalHold::execute(optional_yield y) try { obj_legal_hold.decode(iter); } catch (const buffer::error& e) { - ldout(s->cct, 0) << __func__ << "decode object legal hold config failed" << dendl; + ldpp_dout(this, 0) << __func__ << "decode object legal hold config failed" << dendl; op_ret = -EIO; return; } @@ -8049,7 +8051,7 @@ int RGWGetBucketPolicyStatus::verify_permission(optional_yield y) void RGWGetBucketPolicyStatus::execute(optional_yield y) { - isPublic = (s->iam_policy && rgw::IAM::is_public(*s->iam_policy)) || s->bucket_acl->is_public(); + isPublic = (s->iam_policy && rgw::IAM::is_public(*s->iam_policy)) || s->bucket_acl->is_public(this); } int RGWPutBucketPublicAccessBlock::verify_permission(optional_yield y) @@ -8095,7 +8097,7 @@ void RGWPutBucketPublicAccessBlock::execute(optional_yield y) return; } - op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; diff --git a/src/rgw/rgw_op.h b/src/rgw/rgw_op.h index 10255fe6f50..58303cbf0f9 100644 --- a/src/rgw/rgw_op.h +++ b/src/rgw/rgw_op.h @@ -516,7 +516,7 @@ public: void execute(optional_yield y) override; virtual void send_response() override = 0; - virtual int get_params(optional_yield y) = 0; + virtual int get_params(const DoutPrefixProvider *dpp, optional_yield y) = 0; const char* name() const override { return "put_bucket_tags"; } virtual uint32_t op_mask() override { return RGW_OP_TYPE_WRITE; } RGWOpType get_type() override { return RGW_OP_PUT_BUCKET_TAGGING; } @@ -1812,7 +1812,7 @@ public: int verify_permission(optional_yield y) override; void pre_exec() override; void execute(optional_yield y) override; - bool check_previously_completed(const DoutPrefixProvider* dpp, const RGWMultiCompleteUpload* parts); + bool check_previously_completed(const 
RGWMultiCompleteUpload* parts); void complete() override; virtual int get_params(optional_yield y) = 0; @@ -2032,7 +2032,7 @@ inline int get_system_versioning_params(req_state *s, string err; *olh_epoch = strict_strtol(epoch_str.c_str(), 10, &err); if (!err.empty()) { - lsubdout(s->cct, rgw, 0) << "failed to parse versioned-epoch param" + ldpp_subdout(s, rgw, 0) << "failed to parse versioned-epoch param" << dendl; return -EINVAL; } @@ -2076,7 +2076,8 @@ static inline void format_xattr(std::string &xattr) * On failure returns a negative error code. * */ -inline int rgw_get_request_metadata(CephContext* const cct, +inline int rgw_get_request_metadata(const DoutPrefixProvider *dpp, + CephContext* const cct, struct req_info& info, std::map& attrs, const bool allow_empty_attrs = true) @@ -2094,10 +2095,10 @@ inline int rgw_get_request_metadata(CephContext* const cct, std::string& xattr = kv.second; if (blocklisted_headers.count(name) == 1) { - lsubdout(cct, rgw, 10) << "skipping x>> " << name << dendl; + ldpp_subdout(dpp, rgw, 10) << "skipping x>> " << name << dendl; continue; } else if (allow_empty_attrs || !xattr.empty()) { - lsubdout(cct, rgw, 10) << "x>> " << name << ":" << xattr << dendl; + ldpp_subdout(dpp, rgw, 10) << "x>> " << name << ":" << xattr << dendl; format_xattr(xattr); std::string attr_name(RGW_ATTR_PREFIX); diff --git a/src/rgw/rgw_orphan.cc b/src/rgw/rgw_orphan.cc index 38a69ee5ba6..199f533b669 100644 --- a/src/rgw/rgw_orphan.cc +++ b/src/rgw/rgw_orphan.cc @@ -146,10 +146,10 @@ int RGWOrphanStore::list_jobs(map & job_list) return 0; } -int RGWOrphanStore::init() +int RGWOrphanStore::init(const DoutPrefixProvider *dpp) { const rgw_pool& log_pool = store->get_zone()->get_params().log_pool; - int r = rgw_init_ioctx(static_cast(store)->getRados()->get_rados_handle(), log_pool, ioctx); + int r = rgw_init_ioctx(dpp, static_cast(store)->getRados()->get_rados_handle(), log_pool, ioctx); if (r < 0) { cerr << "ERROR: failed to open log pool (" << log_pool << " 
ret=" << r << std::endl; return r; @@ -158,18 +158,18 @@ int RGWOrphanStore::init() return 0; } -int RGWOrphanStore::store_entries(const string& oid, const map& entries) +int RGWOrphanStore::store_entries(const DoutPrefixProvider *dpp, const string& oid, const map& entries) { librados::ObjectWriteOperation op; op.omap_set(entries); cout << "storing " << entries.size() << " entries at " << oid << std::endl; - ldout(store->ctx(), 20) << "storing " << entries.size() << " entries at " << oid << ": " << dendl; + ldpp_dout(dpp, 20) << "storing " << entries.size() << " entries at " << oid << ": " << dendl; for (map::const_iterator iter = entries.begin(); iter != entries.end(); ++iter) { - ldout(store->ctx(), 20) << " > " << iter->first << dendl; + ldpp_dout(dpp, 20) << " > " << iter->first << dendl; } - int ret = rgw_rados_operate(ioctx, oid, &op, null_yield); + int ret = rgw_rados_operate(dpp, ioctx, oid, &op, null_yield); if (ret < 0) { - lderr(store->ctx()) << "ERROR: " << __func__ << "(" << oid << ") returned ret=" << ret << dendl; + ldpp_dout(dpp, -1) << "ERROR: " << __func__ << "(" << oid << ") returned ret=" << ret << dendl; } return 0; @@ -188,9 +188,9 @@ int RGWOrphanStore::read_entries(const string& oid, const string& marker, mapctx()) << "ERROR: failed to read state ret=" << r << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to read state ret=" << r << dendl; return r; } @@ -220,11 +220,11 @@ int RGWOrphanSearch::init(const string& job_name, RGWOrphanSearchInfo *info, boo r = save_state(); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed to write state ret=" << r << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to write state ret=" << r << dendl; return r; } } else { - lderr(store->ctx()) << "ERROR: job not found" << dendl; + ldpp_dout(dpp, -1) << "ERROR: job not found" << dendl; return r; } @@ -246,7 +246,7 @@ int RGWOrphanSearch::init(const string& job_name, RGWOrphanSearchInfo *info, boo return 0; } -int RGWOrphanSearch::log_oids(map& log_shards, map >& 
oids) +int RGWOrphanSearch::log_oids(const DoutPrefixProvider *dpp, map& log_shards, map >& oids) { map >::iterator miter = oids.begin(); @@ -273,11 +273,11 @@ int RGWOrphanSearch::log_oids(map& log_shards, map entries; #define MAX_OMAP_SET_ENTRIES 100 for (int j = 0; cur != end && j != MAX_OMAP_SET_ENTRIES; ++cur, ++j) { - ldout(store->ctx(), 20) << "adding obj: " << *cur << dendl; + ldpp_dout(dpp, 20) << "adding obj: " << *cur << dendl; entries[*cur] = bufferlist(); } - int ret = orphan_store.store_entries(cur_info.oid, entries); + int ret = orphan_store.store_entries(dpp, cur_info.oid, entries); if (ret < 0) { return ret; } @@ -291,13 +291,13 @@ int RGWOrphanSearch::log_oids(map& log_shards, map(store)->getRados()->get_rados_handle(), search_info.pool, ioctx); + int ret = rgw_init_ioctx(dpp, static_cast(store)->getRados()->get_rados_handle(), search_info.pool, ioctx); if (ret < 0) { - lderr(store->ctx()) << __func__ << ": rgw_init_ioctx() returned ret=" << ret << dendl; + ldpp_dout(dpp, -1) << __func__ << ": rgw_init_ioctx() returned ret=" << ret << dendl; return ret; } @@ -351,7 +351,7 @@ int RGWOrphanSearch::build_all_oids_index() ++total; if (++count >= COUNT_BEFORE_FLUSH) { ldout(store->ctx(), 1) << "iterated through " << total << " objects" << dendl; - ret = log_oids(all_objs_index, oids); + ret = log_oids(dpp, all_objs_index, oids); if (ret < 0) { cerr << __func__ << ": ERROR: log_oids() returned ret=" << ret << std::endl; return ret; @@ -360,7 +360,7 @@ int RGWOrphanSearch::build_all_oids_index() oids.clear(); } } - ret = log_oids(all_objs_index, oids); + ret = log_oids(dpp, all_objs_index, oids); if (ret < 0) { cerr << __func__ << ": ERROR: log_oids() returned ret=" << ret << std::endl; return ret; @@ -369,14 +369,14 @@ int RGWOrphanSearch::build_all_oids_index() return 0; } -int RGWOrphanSearch::build_buckets_instance_index() +int RGWOrphanSearch::build_buckets_instance_index(const DoutPrefixProvider *dpp) { void *handle; int max = 1000; string section 
= "bucket.instance"; - int ret = store->meta_list_keys_init(section, string(), &handle); + int ret = store->meta_list_keys_init(dpp, section, string(), &handle); if (ret < 0) { - lderr(store->ctx()) << "ERROR: can't get key: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, -1) << "ERROR: can't get key: " << cpp_strerror(-ret) << dendl; return ret; } @@ -393,20 +393,20 @@ int RGWOrphanSearch::build_buckets_instance_index() list keys; ret = store->meta_list_keys_next(handle, max, keys, &truncated); if (ret < 0) { - lderr(store->ctx()) << "ERROR: lists_keys_next(): " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, -1) << "ERROR: lists_keys_next(): " << cpp_strerror(-ret) << dendl; return ret; } for (list::iterator iter = keys.begin(); iter != keys.end(); ++iter) { ++total; - ldout(store->ctx(), 10) << "bucket_instance=" << *iter << " total=" << total << dendl; + ldpp_dout(dpp, 10) << "bucket_instance=" << *iter << " total=" << total << dendl; int shard = orphan_shard(*iter); instances[shard].push_back(*iter); if (++count >= COUNT_BEFORE_FLUSH) { - ret = log_oids(buckets_instance_index, instances); + ret = log_oids(dpp, buckets_instance_index, instances); if (ret < 0) { - lderr(store->ctx()) << __func__ << ": ERROR: log_oids() returned ret=" << ret << dendl; + ldpp_dout(dpp, -1) << __func__ << ": ERROR: log_oids() returned ret=" << ret << dendl; return ret; } count = 0; @@ -418,16 +418,16 @@ int RGWOrphanSearch::build_buckets_instance_index() store->meta_list_keys_complete(handle); - ret = log_oids(buckets_instance_index, instances); + ret = log_oids(dpp, buckets_instance_index, instances); if (ret < 0) { - lderr(store->ctx()) << __func__ << ": ERROR: log_oids() returned ret=" << ret << dendl; + ldpp_dout(dpp, -1) << __func__ << ": ERROR: log_oids() returned ret=" << ret << dendl; return ret; } return 0; } -int RGWOrphanSearch::handle_stat_result(map >& oids, rgw::sal::Object::StatOp::Result& result) +int RGWOrphanSearch::handle_stat_result(const DoutPrefixProvider 
*dpp, map >& oids, rgw::sal::Object::StatOp::Result& result) { set obj_oids; rgw::sal::Bucket* bucket = result.obj->get_bucket(); @@ -445,12 +445,12 @@ int RGWOrphanSearch::handle_stat_result(map >& oids, rgw::sal: if (!detailed_mode && manifest.get_obj_size() <= manifest.get_head_size()) { - ldout(store->ctx(), 5) << "skipping object as it fits in a head" << dendl; + ldpp_dout(dpp, 5) << "skipping object as it fits in a head" << dendl; return 0; } RGWObjManifest::obj_iterator miter; - for (miter = manifest.obj_begin(); miter != manifest.obj_end(); ++miter) { + for (miter = manifest.obj_begin(dpp); miter != manifest.obj_end(dpp); ++miter) { const rgw_raw_obj& loc = miter.get_location().get_raw_obj(store); string s = loc.oid; obj_oids.insert(obj_fingerprint(s)); @@ -458,7 +458,7 @@ int RGWOrphanSearch::handle_stat_result(map >& oids, rgw::sal: } for (set::iterator iter = obj_oids.begin(); iter != obj_oids.end(); ++iter) { - ldout(store->ctx(), 20) << __func__ << ": oid for obj=" << result.obj << ": " << *iter << dendl; + ldpp_dout(dpp, 20) << __func__ << ": oid for obj=" << result.obj << ": " << *iter << dendl; int shard = orphan_shard(*iter); oids[shard].push_back(*iter); @@ -467,20 +467,20 @@ int RGWOrphanSearch::handle_stat_result(map >& oids, rgw::sal: return 0; } -int RGWOrphanSearch::pop_and_handle_stat_op(map >& oids, std::deque>& ops) +int RGWOrphanSearch::pop_and_handle_stat_op(const DoutPrefixProvider *dpp, map >& oids, std::deque>& ops) { rgw::sal::Object::StatOp* front_op = ops.front().get(); int ret = front_op->wait(); if (ret < 0) { if (ret != -ENOENT) { - lderr(store->ctx()) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, -1) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret) << dendl; } goto done; } - ret = handle_stat_result(oids, front_op->result); + ret = handle_stat_result(dpp, oids, front_op->result); if (ret < 0) { - lderr(store->ctx()) << "ERROR: handle_stat_response() returned error: " 
<< cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, -1) << "ERROR: handle_stat_response() returned error: " << cpp_strerror(-ret) << dendl; } done: ops.pop_front(); @@ -582,21 +582,21 @@ int RGWOrphanSearch::build_linked_oids_for_bucket(const DoutPrefixProvider *dpp, stat_ops.push_back(std::move(stat_op)); - ret = op->stat_async(); + ret = op->stat_async(dpp); if (ret < 0) { - lderr(store->ctx()) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, -1) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret) << dendl; return ret; } if (stat_ops.size() >= max_concurrent_ios) { - ret = pop_and_handle_stat_op(oids, stat_ops); + ret = pop_and_handle_stat_op(dpp, oids, stat_ops); if (ret < 0) { if (ret != -ENOENT) { - lderr(store->ctx()) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, -1) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret) << dendl; } } } if (oids.size() >= COUNT_BEFORE_FLUSH) { - ret = log_oids(linked_objs_index, oids); + ret = log_oids(dpp, linked_objs_index, oids); if (ret < 0) { cerr << __func__ << ": ERROR: log_oids() returned ret=" << ret << std::endl; return ret; @@ -607,10 +607,10 @@ int RGWOrphanSearch::build_linked_oids_for_bucket(const DoutPrefixProvider *dpp, } while (results.is_truncated); while (!stat_ops.empty()) { - ret = pop_and_handle_stat_op(oids, stat_ops); + ret = pop_and_handle_stat_op(dpp, oids, stat_ops); if (ret < 0) { if (ret != -ENOENT) { - lderr(store->ctx()) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, -1) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret) << dendl; } } } @@ -637,7 +637,7 @@ int RGWOrphanSearch::build_linked_oids_index(const DoutPrefixProvider *dpp) } if (ret < 0) { - lderr(store->ctx()) << __func__ << ": ERROR: read_entries() oid=" << oid << " returned ret=" << ret << dendl; + ldpp_dout(dpp, -1) << __func__ << ": ERROR: read_entries() oid=" << oid << " 
returned ret=" << ret << dendl; return ret; } @@ -646,10 +646,10 @@ int RGWOrphanSearch::build_linked_oids_index(const DoutPrefixProvider *dpp) } for (map::iterator eiter = entries.begin(); eiter != entries.end(); ++eiter) { - ldout(store->ctx(), 20) << " indexed entry: " << eiter->first << dendl; + ldpp_dout(dpp, 20) << " indexed entry: " << eiter->first << dendl; ret = build_linked_oids_for_bucket(dpp, eiter->first, oids); if (ret < 0) { - lderr(store->ctx()) << __func__ << ": ERROR: build_linked_oids_for_bucket() indexed entry=" << eiter->first + ldpp_dout(dpp, -1) << __func__ << ": ERROR: build_linked_oids_for_bucket() indexed entry=" << eiter->first << " returned ret=" << ret << dendl; return ret; } @@ -662,7 +662,7 @@ int RGWOrphanSearch::build_linked_oids_index(const DoutPrefixProvider *dpp) search_stage.marker.clear(); } - int ret = log_oids(linked_objs_index, oids); + int ret = log_oids(dpp, linked_objs_index, oids); if (ret < 0) { cerr << __func__ << ": ERROR: log_oids() returned ret=" << ret << std::endl; return ret; @@ -727,7 +727,7 @@ int OMAPReader::get_next(string *key, bufferlist *pbl, bool *done) return get_next(key, pbl, done); } -int RGWOrphanSearch::compare_oid_indexes() +int RGWOrphanSearch::compare_oid_indexes(const DoutPrefixProvider *dpp) { ceph_assert(linked_objs_index.size() == all_objs_index.size()); @@ -735,9 +735,9 @@ int RGWOrphanSearch::compare_oid_indexes() librados::IoCtx data_ioctx; - int ret = rgw_init_ioctx(static_cast(store)->getRados()->get_rados_handle(), search_info.pool, data_ioctx); + int ret = rgw_init_ioctx(dpp, static_cast(store)->getRados()->get_rados_handle(), search_info.pool, data_ioctx); if (ret < 0) { - lderr(store->ctx()) << __func__ << ": rgw_init_ioctx() returned ret=" << ret << dendl; + ldpp_dout(dpp, -1) << __func__ << ": rgw_init_ioctx() returned ret=" << ret << dendl; return ret; } @@ -776,7 +776,7 @@ int RGWOrphanSearch::compare_oid_indexes() } if (cur_linked == key_fp) { - ldout(store->ctx(), 20) << 
"linked: " << key << dendl; + ldpp_dout(dpp, 20) << "linked: " << key << dendl; continue; } @@ -784,15 +784,15 @@ int RGWOrphanSearch::compare_oid_indexes() r = data_ioctx.stat(key, NULL, &mtime); if (r < 0) { if (r != -ENOENT) { - lderr(store->ctx()) << "ERROR: ioctx.stat(" << key << ") returned ret=" << r << dendl; + ldpp_dout(dpp, -1) << "ERROR: ioctx.stat(" << key << ") returned ret=" << r << dendl; } continue; } if (stale_secs && (uint64_t)mtime >= time_threshold) { - ldout(store->ctx(), 20) << "skipping: " << key << " (mtime=" << mtime << " threshold=" << time_threshold << ")" << dendl; + ldpp_dout(dpp, 20) << "skipping: " << key << " (mtime=" << mtime << " threshold=" << time_threshold << ")" << dendl; continue; } - ldout(store->ctx(), 20) << "leaked: " << key << dendl; + ldpp_dout(dpp, 20) << "leaked: " << key << dendl; cout << "leaked: " << key << std::endl; } while (!done); } @@ -811,63 +811,63 @@ int RGWOrphanSearch::run(const DoutPrefixProvider *dpp) search_stage = RGWOrphanSearchStage(ORPHAN_SEARCH_STAGE_LSPOOL); r = save_state(); if (r < 0) { - lderr(store->ctx()) << __func__ << ": ERROR: failed to save state, ret=" << r << dendl; + ldpp_dout(dpp, -1) << __func__ << ": ERROR: failed to save state, ret=" << r << dendl; return r; } // fall through case ORPHAN_SEARCH_STAGE_LSPOOL: ldpp_dout(dpp, 0) << __func__ << "(): building index of all objects in pool" << dendl; - r = build_all_oids_index(); + r = build_all_oids_index(dpp); if (r < 0) { - lderr(store->ctx()) << __func__ << ": ERROR: build_all_objs_index returned ret=" << r << dendl; + ldpp_dout(dpp, -1) << __func__ << ": ERROR: build_all_objs_index returned ret=" << r << dendl; return r; } search_stage = RGWOrphanSearchStage(ORPHAN_SEARCH_STAGE_LSBUCKETS); r = save_state(); if (r < 0) { - lderr(store->ctx()) << __func__ << ": ERROR: failed to save state, ret=" << r << dendl; + ldpp_dout(dpp, -1) << __func__ << ": ERROR: failed to save state, ret=" << r << dendl; return r; } // fall through case 
ORPHAN_SEARCH_STAGE_LSBUCKETS: ldpp_dout(dpp, 0) << __func__ << "(): building index of all bucket indexes" << dendl; - r = build_buckets_instance_index(); + r = build_buckets_instance_index(dpp); if (r < 0) { - lderr(store->ctx()) << __func__ << ": ERROR: build_all_objs_index returned ret=" << r << dendl; + ldpp_dout(dpp, -1) << __func__ << ": ERROR: build_all_objs_index returned ret=" << r << dendl; return r; } search_stage = RGWOrphanSearchStage(ORPHAN_SEARCH_STAGE_ITERATE_BI); r = save_state(); if (r < 0) { - lderr(store->ctx()) << __func__ << ": ERROR: failed to save state, ret=" << r << dendl; + ldpp_dout(dpp, -1) << __func__ << ": ERROR: failed to save state, ret=" << r << dendl; return r; } // fall through case ORPHAN_SEARCH_STAGE_ITERATE_BI: - ldout(store->ctx(), 0) << __func__ << "(): building index of all linked objects" << dendl; + ldpp_dout(dpp, 0) << __func__ << "(): building index of all linked objects" << dendl; r = build_linked_oids_index(dpp); if (r < 0) { - lderr(store->ctx()) << __func__ << ": ERROR: build_all_objs_index returned ret=" << r << dendl; + ldpp_dout(dpp, -1) << __func__ << ": ERROR: build_all_objs_index returned ret=" << r << dendl; return r; } search_stage = RGWOrphanSearchStage(ORPHAN_SEARCH_STAGE_COMPARE); r = save_state(); if (r < 0) { - lderr(store->ctx()) << __func__ << ": ERROR: failed to save state, ret=" << r << dendl; + ldpp_dout(dpp, -1) << __func__ << ": ERROR: failed to save state, ret=" << r << dendl; return r; } // fall through case ORPHAN_SEARCH_STAGE_COMPARE: - r = compare_oid_indexes(); + r = compare_oid_indexes(dpp); if (r < 0) { - lderr(store->ctx()) << __func__ << ": ERROR: build_all_objs_index returned ret=" << r << dendl; + ldpp_dout(dpp, -1) << __func__ << ": ERROR: build_all_objs_index returned ret=" << r << dendl; return r; } @@ -920,7 +920,8 @@ int RGWOrphanSearch::finish() } -int RGWRadosList::handle_stat_result(rgw::sal::Object::StatOp::Result& result, +int RGWRadosList::handle_stat_result(const 
DoutPrefixProvider *dpp, + rgw::sal::Object::StatOp::Result& result, std::string& bucket_name, rgw_obj_key& obj_key, std::set& obj_oids) @@ -929,7 +930,7 @@ int RGWRadosList::handle_stat_result(rgw::sal::Object::StatOp::Result& result, rgw::sal::Bucket* bucket = result.obj->get_bucket(); - ldout(store->ctx(), 20) << "RGWRadosList::" << __func__ << + ldpp_dout(dpp, 20) << "RGWRadosList::" << __func__ << " bucket=" << bucket << ", has_manifest=" << !!result.manifest << dendl; @@ -937,11 +938,11 @@ int RGWRadosList::handle_stat_result(rgw::sal::Object::StatOp::Result& result, // iterator to store result of dlo/slo attribute find auto attr_it = result.obj->get_attrs().end(); const std::string oid = bucket->get_marker() + "_" + result.obj->get_oid(); - ldout(store->ctx(), 20) << "radoslist processing object=\"" << + ldpp_dout(dpp, 20) << "radoslist processing object=\"" << oid << "\"" << dendl; if (visited_oids.find(oid) != visited_oids.end()) { // apparently we hit a loop; don't continue with this oid - ldout(store->ctx(), 15) << + ldpp_dout(dpp, 15) << "radoslist stopped loop at already visited object=\"" << oid << "\"" << dendl; return 0; @@ -965,7 +966,7 @@ int RGWRadosList::handle_stat_result(rgw::sal::Object::StatOp::Result& result, obj_oids.insert(oid); visited_oids.insert(oid); // prevent dlo loops - ldout(store->ctx(), 15) << "radoslist added to visited list DLO=\"" << + ldpp_dout(dpp, 15) << "radoslist added to visited list DLO=\"" << oid << "\"" << dendl; char* prefix_path_c = attr_it->second.c_str(); @@ -980,7 +981,7 @@ int RGWRadosList::handle_stat_result(rgw::sal::Object::StatOp::Result& result, const std::string prefix = prefix_path.substr(sep_pos + 1); add_bucket_prefix(bucket_name, prefix); - ldout(store->ctx(), 25) << "radoslist DLO oid=\"" << oid << + ldpp_dout(dpp, 25) << "radoslist DLO oid=\"" << oid << "\" added bucket=\"" << bucket_name << "\" prefix=\"" << prefix << "\" to process list" << dendl; } else if ((attr_it = 
result.obj->get_attrs().find(RGW_ATTR_SLO_MANIFEST)) != @@ -989,7 +990,7 @@ int RGWRadosList::handle_stat_result(rgw::sal::Object::StatOp::Result& result, obj_oids.insert(oid); visited_oids.insert(oid); // prevent slo loops - ldout(store->ctx(), 15) << "radoslist added to visited list SLO=\"" << + ldpp_dout(dpp, 15) << "radoslist added to visited list SLO=\"" << oid << "\"" << dendl; RGWSLOInfo slo_info; @@ -997,7 +998,7 @@ int RGWRadosList::handle_stat_result(rgw::sal::Object::StatOp::Result& result, try { ::decode(slo_info, bliter); } catch (buffer::error& err) { - ldout(store->ctx(), 0) << + ldpp_dout(dpp, 0) << "ERROR: failed to decode slo manifest for " << oid << dendl; return -EIO; } @@ -1018,7 +1019,7 @@ int RGWRadosList::handle_stat_result(rgw::sal::Object::StatOp::Result& result, const rgw_obj_key obj_key(obj_name); add_bucket_filter(bucket_name, obj_key); - ldout(store->ctx(), 25) << "radoslist SLO oid=\"" << oid << + ldpp_dout(dpp, 25) << "radoslist SLO oid=\"" << oid << "\" added bucket=\"" << bucket_name << "\" obj_key=\"" << obj_key << "\" to process list" << dendl; } @@ -1029,13 +1030,13 @@ int RGWRadosList::handle_stat_result(rgw::sal::Object::StatOp::Result& result, // manifest AND empty objects have no manifest, but they're // realized as empty rados objects if (0 == manifest.get_max_head_size() || - manifest.obj_begin() == manifest.obj_end()) { + manifest.obj_begin(dpp) == manifest.obj_end(dpp)) { obj_oids.insert(oid); // first_insert = true; } RGWObjManifest::obj_iterator miter; - for (miter = manifest.obj_begin(); miter != manifest.obj_end(); ++miter) { + for (miter = manifest.obj_begin(dpp); miter != manifest.obj_end(dpp); ++miter) { const rgw_raw_obj& loc = miter.get_location().get_raw_obj(store); string s = loc.oid; @@ -1047,6 +1048,7 @@ int RGWRadosList::handle_stat_result(rgw::sal::Object::StatOp::Result& result, } // RGWRadosList::handle_stat_result int RGWRadosList::pop_and_handle_stat_op( + const DoutPrefixProvider *dpp, RGWObjectCtx& 
obj_ctx, std::deque>& ops) { @@ -1058,15 +1060,15 @@ int RGWRadosList::pop_and_handle_stat_op( int ret = front_op->wait(); if (ret < 0) { if (ret != -ENOENT) { - lderr(store->ctx()) << "ERROR: stat_async() returned error: " << + ldpp_dout(dpp, -1) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret) << dendl; } goto done; } - ret = handle_stat_result(front_op->result, bucket_name, obj_key, obj_oids); + ret = handle_stat_result(dpp, front_op->result, bucket_name, obj_key, obj_oids); if (ret < 0) { - lderr(store->ctx()) << "ERROR: handle_stat_result() returned error: " << + ldpp_dout(dpp, -1) << "ERROR: handle_stat_result() returned error: " << cpp_strerror(-ret) << dendl; } @@ -1241,18 +1243,18 @@ int RGWRadosList::process_bucket( objs.push_back(std::move(obj)); stat_ops.push_back(std::move(stat_op)); - ret = op->stat_async(); + ret = op->stat_async(dpp); if (ret < 0) { - lderr(store->ctx()) << "ERROR: stat_async() returned error: " << + ldpp_dout(dpp, -1) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret) << dendl; return ret; } if (stat_ops.size() >= max_concurrent_ios) { - ret = pop_and_handle_stat_op(obj_ctx, stat_ops); + ret = pop_and_handle_stat_op(dpp, obj_ctx, stat_ops); if (ret < 0) { if (ret != -ENOENT) { - lderr(store->ctx()) << + ldpp_dout(dpp, -1) << "ERROR: pop_and_handle_stat_op() returned error: " << cpp_strerror(-ret) << dendl; } @@ -1291,10 +1293,10 @@ int RGWRadosList::process_bucket( } while (results.is_truncated); while (!stat_ops.empty()) { - ret = pop_and_handle_stat_op(obj_ctx, stat_ops); + ret = pop_and_handle_stat_op(dpp, obj_ctx, stat_ops); if (ret < 0) { if (ret != -ENOENT) { - lderr(store->ctx()) << "ERROR: stat_async() returned error: " << + ldpp_dout(dpp, -1) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret) << dendl; } } @@ -1309,9 +1311,9 @@ int RGWRadosList::run(const DoutPrefixProvider *dpp) int ret; void* handle = nullptr; - ret = store->meta_list_keys_init("bucket", string(), &handle); + ret 
= store->meta_list_keys_init(dpp, "bucket", string(), &handle); if (ret < 0) { - lderr(store->ctx()) << "RGWRadosList::" << __func__ << + ldpp_dout(dpp, -1) << "RGWRadosList::" << __func__ << " ERROR: list_keys_init returned " << cpp_strerror(-ret) << dendl; return ret; @@ -1431,7 +1433,7 @@ int RGWRadosList::run(const DoutPrefixProvider *dpp, const std::string& start_bu ret = do_incomplete_multipart(dpp, bucket.get()); if (ret < 0) { - lderr(store->ctx()) << "RGWRadosList::" << __func__ << + ldpp_dout(dpp, -1) << "RGWRadosList::" << __func__ << ": ERROR: do_incomplete_multipart returned ret=" << ret << dendl; return ret; } @@ -1498,7 +1500,7 @@ int RGWRadosList::do_incomplete_multipart(const DoutPrefixProvider *dpp, do { // while (is_parts_truncated); std::map parts; - ret = list_multipart_parts(bucket, store->ctx(), + ret = list_multipart_parts(dpp, bucket, store->ctx(), mp.get_upload_id(), mp.get_meta(), max_parts, parts_marker, parts, &parts_marker, @@ -1509,7 +1511,7 @@ int RGWRadosList::do_incomplete_multipart(const DoutPrefixProvider *dpp, "for " << mp.get_upload_id() << ", moving on" << dendl; break; } else if (ret < 0) { - lderr(store->ctx()) << "RGWRadosList::" << __func__ << + ldpp_dout(dpp, -1) << "RGWRadosList::" << __func__ << ": ERROR: list_multipart_parts returned ret=" << ret << dendl; return ret; @@ -1517,8 +1519,8 @@ int RGWRadosList::do_incomplete_multipart(const DoutPrefixProvider *dpp, for (auto& p : parts) { RGWObjManifest& manifest = p.second.manifest; - for (auto obj_it = manifest.obj_begin(); - obj_it != manifest.obj_end(); + for (auto obj_it = manifest.obj_begin(dpp); + obj_it != manifest.obj_end(dpp); ++obj_it) { const rgw_raw_obj& loc = obj_it.get_location().get_raw_obj(store); diff --git a/src/rgw/rgw_orphan.h b/src/rgw/rgw_orphan.h index 0756d31a034..8dd03a47c1d 100644 --- a/src/rgw/rgw_orphan.h +++ b/src/rgw/rgw_orphan.h @@ -133,7 +133,7 @@ public: librados::IoCtx& get_ioctx() { return ioctx; } - int init(); + int init(const 
DoutPrefixProvider *dpp); int read_job(const string& job_name, RGWOrphanSearchState& state); int write_job(const string& job_name, const RGWOrphanSearchState& state); @@ -141,7 +141,7 @@ public: int list_jobs(map &job_list); - int store_entries(const string& oid, const map& entries); + int store_entries(const DoutPrefixProvider *dpp, const string& oid, const map& entries); int read_entries(const string& oid, const string& marker, map *entries, bool *truncated); }; @@ -172,16 +172,15 @@ class RGWOrphanSearch { list::iterator end; }; - int log_oids(map& log_shards, map >& oids); + int log_oids(const DoutPrefixProvider *dpp, map& log_shards, map >& oids); #define RGW_ORPHANSEARCH_HASH_PRIME 7877 int orphan_shard(const string& str) { return ceph_str_hash_linux(str.c_str(), str.size()) % RGW_ORPHANSEARCH_HASH_PRIME % search_info.num_shards; } - int handle_stat_result(map >& oids, rgw::sal::Object::StatOp::Result& result); - int pop_and_handle_stat_op(map >& oids, std::deque>& ops); - + int handle_stat_result(const DoutPrefixProvider *dpp, map >& oids, rgw::sal::Object::StatOp::Result& result); + int pop_and_handle_stat_op(const DoutPrefixProvider *dpp, map >& oids, std::deque>& ops); int remove_index(map& index); public: @@ -194,15 +193,15 @@ public: return orphan_store.write_job(search_info.job_name, state); } - int init(const string& job_name, RGWOrphanSearchInfo *info, bool _detailed_mode=false); + int init(const DoutPrefixProvider *dpp, const string& job_name, RGWOrphanSearchInfo *info, bool _detailed_mode=false); int create(const string& job_name, int num_shards); - int build_all_oids_index(); - int build_buckets_instance_index(); + int build_all_oids_index(const DoutPrefixProvider *dpp); + int build_buckets_instance_index(const DoutPrefixProvider *dpp); int build_linked_oids_for_bucket(const DoutPrefixProvider *dpp, const string& bucket_instance_id, map >& oids); int build_linked_oids_index(const DoutPrefixProvider *dpp); - int compare_oid_indexes(); + int 
compare_oid_indexes(const DoutPrefixProvider *dpp); int run(const DoutPrefixProvider *dpp); int finish(); @@ -260,11 +259,13 @@ class RGWRadosList { bool include_rgw_obj_name; std::string field_separator; - int handle_stat_result(rgw::sal::Object::StatOp::Result& result, + int handle_stat_result(const DoutPrefixProvider *dpp, + rgw::sal::Object::StatOp::Result& result, std::string& bucket_name, rgw_obj_key& obj_key, std::set& obj_oids); - int pop_and_handle_stat_op(RGWObjectCtx& obj_ctx, + int pop_and_handle_stat_op(const DoutPrefixProvider *dpp, + RGWObjectCtx& obj_ctx, std::deque>& ops); public: diff --git a/src/rgw/rgw_otp.cc b/src/rgw/rgw_otp.cc index 7417fe60804..07cc14f113b 100644 --- a/src/rgw/rgw_otp.cc +++ b/src/rgw/rgw_otp.cc @@ -126,7 +126,7 @@ class RGWOTPMetadataHandler : public RGWOTPMetadataHandlerBase { RGWSI_OTP_BE_Ctx be_ctx(op->ctx()); - int ret = svc.otp->store_all(be_ctx, + int ret = svc.otp->store_all(dpp, be_ctx, entry, obj->devices, obj->mtime, @@ -145,7 +145,7 @@ class RGWOTPMetadataHandler : public RGWOTPMetadataHandlerBase { RGWSI_OTP_BE_Ctx be_ctx(op->ctx()); - return svc.otp->remove_all(be_ctx, + return svc.otp->remove_all(dpp, be_ctx, entry, &objv_tracker, y); @@ -184,21 +184,23 @@ int RGWOTPCtl::read_all(const rgw_user& uid, }); } -int RGWOTPCtl::store_all(const RGWOTPInfo& info, +int RGWOTPCtl::store_all(const DoutPrefixProvider *dpp, + const RGWOTPInfo& info, optional_yield y, const PutParams& params) { return meta_handler->call([&](RGWSI_OTP_BE_Ctx& ctx) { - return svc.otp->store_all(ctx, info.uid, info.devices, params.mtime, params.objv_tracker, y); + return svc.otp->store_all(dpp, ctx, info.uid, info.devices, params.mtime, params.objv_tracker, y); }); } -int RGWOTPCtl::remove_all(const rgw_user& uid, +int RGWOTPCtl::remove_all(const DoutPrefixProvider *dpp, + const rgw_user& uid, optional_yield y, const RemoveParams& params) { return meta_handler->call([&](RGWSI_OTP_BE_Ctx& ctx) { - return svc.otp->remove_all(ctx, uid, 
params.objv_tracker, y); + return svc.otp->remove_all(dpp, ctx, uid, params.objv_tracker, y); }); } diff --git a/src/rgw/rgw_otp.h b/src/rgw/rgw_otp.h index e3ed49cab07..9bd17e6aff9 100644 --- a/src/rgw/rgw_otp.h +++ b/src/rgw/rgw_otp.h @@ -105,9 +105,11 @@ public: int read_all(const rgw_user& uid, RGWOTPInfo *info, optional_yield y, const DoutPrefixProvider *dpp, const GetParams& params = {}); - int store_all(const RGWOTPInfo& info, optional_yield y, + int store_all(const DoutPrefixProvider *dpp, + const RGWOTPInfo& info, optional_yield y, const PutParams& params = {}); - int remove_all(const rgw_user& user, optional_yield y, + int remove_all(const DoutPrefixProvider *dpp, + const rgw_user& user, optional_yield y, const RemoveParams& params = {}); }; diff --git a/src/rgw/rgw_period_history.cc b/src/rgw/rgw_period_history.cc index 67c63e7beaa..abbd998cfb9 100644 --- a/src/rgw/rgw_period_history.cc +++ b/src/rgw/rgw_period_history.cc @@ -85,7 +85,7 @@ class RGWPeriodHistory::Impl final { ~Impl(); Cursor get_current() const { return current_cursor; } - Cursor attach(RGWPeriod&& period, optional_yield y); + Cursor attach(const DoutPrefixProvider *dpp, RGWPeriod&& period, optional_yield y); Cursor insert(RGWPeriod&& period); Cursor lookup(epoch_t realm_epoch); @@ -148,7 +148,7 @@ RGWPeriodHistory::Impl::~Impl() histories.clear_and_dispose(std::default_delete{}); } -Cursor RGWPeriodHistory::Impl::attach(RGWPeriod&& period, optional_yield y) +Cursor RGWPeriodHistory::Impl::attach(const DoutPrefixProvider *dpp, RGWPeriod&& period, optional_yield y) { if (current_history == histories.end()) { return Cursor{-EINVAL}; @@ -179,12 +179,12 @@ Cursor RGWPeriodHistory::Impl::attach(RGWPeriod&& period, optional_yield y) } if (predecessor_id.empty()) { - lderr(cct) << "reached a period with an empty predecessor id" << dendl; + ldpp_dout(dpp, -1) << "reached a period with an empty predecessor id" << dendl; return Cursor{-EINVAL}; } // pull the period outside of the lock - int r = 
puller->pull(predecessor_id, period, y); + int r = puller->pull(dpp, predecessor_id, period, y); if (r < 0) { return Cursor{r}; } @@ -339,9 +339,9 @@ Cursor RGWPeriodHistory::get_current() const { return impl->get_current(); } -Cursor RGWPeriodHistory::attach(RGWPeriod&& period, optional_yield y) +Cursor RGWPeriodHistory::attach(const DoutPrefixProvider *dpp, RGWPeriod&& period, optional_yield y) { - return impl->attach(std::move(period), y); + return impl->attach(dpp, std::move(period), y); } Cursor RGWPeriodHistory::insert(RGWPeriod&& period) { diff --git a/src/rgw/rgw_period_history.h b/src/rgw/rgw_period_history.h index 6004db2efef..0d412c76a3b 100644 --- a/src/rgw/rgw_period_history.h +++ b/src/rgw/rgw_period_history.h @@ -11,6 +11,7 @@ #include "include/ceph_assert.h" #include "include/types.h" #include "common/async/yield_context.h" +#include "common/dout.h" namespace bi = boost::intrusive; @@ -42,7 +43,7 @@ class RGWPeriodHistory final { public: virtual ~Puller() = default; - virtual int pull(const std::string& period_id, RGWPeriod& period, + virtual int pull(const DoutPrefixProvider *dpp, const std::string& period_id, RGWPeriod& period, optional_yield y) = 0; }; @@ -100,7 +101,7 @@ class RGWPeriodHistory final { /// current_period and the given period, reading predecessor periods or /// fetching them from the master as necessary. returns a cursor at the /// given period that can be used to traverse the current_history - Cursor attach(RGWPeriod&& period, optional_yield y); + Cursor attach(const DoutPrefixProvider *dpp, RGWPeriod&& period, optional_yield y); /// insert the given period into an existing history, or create a new /// unconnected history. 
similar to attach(), but it doesn't try to fetch diff --git a/src/rgw/rgw_period_puller.cc b/src/rgw/rgw_period_puller.cc index 7f870cbab05..8e0df896957 100644 --- a/src/rgw/rgw_period_puller.cc +++ b/src/rgw/rgw_period_puller.cc @@ -24,7 +24,7 @@ RGWPeriodPuller::RGWPeriodPuller(RGWSI_Zone *zone_svc, RGWSI_SysObj *sysobj_svc) namespace { // pull the given period over the connection -int pull_period(RGWRESTConn* conn, const std::string& period_id, +int pull_period(const DoutPrefixProvider *dpp, RGWRESTConn* conn, const std::string& period_id, const std::string& realm_id, RGWPeriod& period, optional_yield y) { @@ -40,7 +40,7 @@ int pull_period(RGWRESTConn* conn, const std::string& period_id, bufferlist data; #define MAX_REST_RESPONSE (128 * 1024) - int r = conn->forward(user, info, nullptr, MAX_REST_RESPONSE, nullptr, &data, y); + int r = conn->forward(dpp, user, info, nullptr, MAX_REST_RESPONSE, nullptr, &data, y); if (r < 0) { return r; } @@ -64,59 +64,59 @@ int pull_period(RGWRESTConn* conn, const std::string& period_id, } // anonymous namespace -int RGWPeriodPuller::pull(const std::string& period_id, RGWPeriod& period, +int RGWPeriodPuller::pull(const DoutPrefixProvider *dpp, const std::string& period_id, RGWPeriod& period, optional_yield y) { // try to read the period from rados period.set_id(period_id); period.set_epoch(0); - int r = period.init(cct, svc.sysobj, y); + int r = period.init(dpp, cct, svc.sysobj, y); if (r < 0) { if (svc.zone->is_meta_master()) { // can't pull if we're the master - ldout(cct, 1) << "metadata master failed to read period " + ldpp_dout(dpp, 1) << "metadata master failed to read period " << period_id << " from local storage: " << cpp_strerror(r) << dendl; return r; } - ldout(cct, 14) << "pulling period " << period_id + ldpp_dout(dpp, 14) << "pulling period " << period_id << " from master" << dendl; // request the period from the master zone - r = pull_period(svc.zone->get_master_conn(), period_id, + r = pull_period(dpp, 
svc.zone->get_master_conn(), period_id, svc.zone->get_realm().get_id(), period, y); if (r < 0) { - lderr(cct) << "failed to pull period " << period_id << dendl; + ldpp_dout(dpp, -1) << "failed to pull period " << period_id << dendl; return r; } // write the period to rados - r = period.store_info(true, y); + r = period.store_info(dpp, true, y); if (r == -EEXIST) { r = 0; } else if (r < 0) { - lderr(cct) << "failed to store period " << period_id << dendl; + ldpp_dout(dpp, -1) << "failed to store period " << period_id << dendl; return r; } // update latest epoch - r = period.update_latest_epoch(period.get_epoch(), y); + r = period.update_latest_epoch(dpp, period.get_epoch(), y); if (r == -EEXIST) { // already have this epoch (or a more recent one) return 0; } if (r < 0) { - lderr(cct) << "failed to update latest_epoch for period " + ldpp_dout(dpp, -1) << "failed to update latest_epoch for period " << period_id << dendl; return r; } // reflect period objects if this is the latest version if (svc.zone->get_realm().get_current_period() == period_id) { - r = period.reflect(y); + r = period.reflect(dpp, y); if (r < 0) { return r; } } - ldout(cct, 14) << "period " << period_id + ldpp_dout(dpp, 14) << "period " << period_id << " pulled and written to local storage" << dendl; } else { - ldout(cct, 14) << "found period " << period_id + ldpp_dout(dpp, 14) << "found period " << period_id << " in local storage" << dendl; } return 0; diff --git a/src/rgw/rgw_period_puller.h b/src/rgw/rgw_period_puller.h index 7ac5cc8dce5..654029dd1c4 100644 --- a/src/rgw/rgw_period_puller.h +++ b/src/rgw/rgw_period_puller.h @@ -21,7 +21,7 @@ class RGWPeriodPuller : public RGWPeriodHistory::Puller { public: explicit RGWPeriodPuller(RGWSI_Zone *zone_svc, RGWSI_SysObj *sysobj_svc); - int pull(const std::string& period_id, RGWPeriod& period, optional_yield y) override; + int pull(const DoutPrefixProvider *dpp, const std::string& period_id, RGWPeriod& period, optional_yield y) override; }; #endif // 
CEPH_RGW_PERIOD_PULLER_H diff --git a/src/rgw/rgw_period_pusher.cc b/src/rgw/rgw_period_pusher.cc index b94b9cc243d..f9963ee485c 100644 --- a/src/rgw/rgw_period_pusher.cc +++ b/src/rgw/rgw_period_pusher.cc @@ -45,15 +45,15 @@ class PushAndRetryCR : public RGWCoroutine { counter(0) {} - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int PushAndRetryCR::operate() +int PushAndRetryCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { for (;;) { yield { - ldout(cct, 10) << "pushing period " << period.get_id() + ldpp_dout(dpp, 10) << "pushing period " << period.get_id() << " to " << zone << dendl; // initialize the http params rgw_http_param_pair params[] = { @@ -67,7 +67,7 @@ int PushAndRetryCR::operate() // stop on success if (get_ret_status() == 0) { - ldout(cct, 10) << "push to " << zone << " succeeded" << dendl; + ldpp_dout(dpp, 10) << "push to " << zone << " succeeded" << dendl; return set_cr_done(); } @@ -81,7 +81,7 @@ int PushAndRetryCR::operate() utime_t dur; dur.set_from_double(timeout); - ldout(cct, 10) << "waiting " << dur << "s for retry.." << dendl; + ldpp_dout(dpp, 10) << "waiting " << dur << "s for retry.." << dendl; wait(dur); timeout *= 2; @@ -110,15 +110,15 @@ class PushAllCR : public RGWCoroutine { conns(std::move(conns)) {} - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int PushAllCR::operate() +int PushAllCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { // spawn a coroutine to push the period over each connection yield { - ldout(cct, 4) << "sending " << conns.size() << " periods" << dendl; + ldpp_dout(dpp, 4) << "sending " << conns.size() << " periods" << dendl; for (auto& c : conns) spawn(new PushAndRetryCR(cct, c.first, &c.second, http, period), false); } @@ -130,7 +130,8 @@ int PushAllCR::operate() } /// A background thread to run the PushAllCR coroutine and exit. 
-class RGWPeriodPusher::CRThread { +class RGWPeriodPusher::CRThread : public DoutPrefixProvider { + CephContext* cct; RGWCoroutinesManager coroutines; RGWHTTPManager http; boost::intrusive_ptr push_all; @@ -139,13 +140,13 @@ class RGWPeriodPusher::CRThread { public: CRThread(CephContext* cct, RGWPeriod&& period, std::map&& conns) - : coroutines(cct, NULL), + : cct(cct), coroutines(cct, NULL), http(cct, coroutines.get_completion_mgr()), push_all(new PushAllCR(cct, &http, std::move(period), std::move(conns))) { http.start(); // must spawn the CR thread after start - thread = std::thread([this]() noexcept { coroutines.run(push_all.get()); }); + thread = std::thread([this]() noexcept { coroutines.run(this, push_all.get()); }); } ~CRThread() { @@ -155,10 +156,14 @@ class RGWPeriodPusher::CRThread { if (thread.joinable()) thread.join(); } + + CephContext *get_cct() const override { return cct; } + unsigned get_subsys() const override { return dout_subsys; } + std::ostream& gen_prefix(std::ostream& out) const override { return out << "rgw period pusher CR thread: "; } }; -RGWPeriodPusher::RGWPeriodPusher(rgw::sal::Store* store, +RGWPeriodPusher::RGWPeriodPusher(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y) : cct(store->ctx()), store(store) { @@ -170,9 +175,9 @@ RGWPeriodPusher::RGWPeriodPusher(rgw::sal::Store* store, // always send out the current period on startup RGWPeriod period; // XXX dang - int r = period.init(cct, static_cast(store)->svc()->sysobj, realm_id, y, realm.get_name()); + int r = period.init(dpp, cct, static_cast(store)->svc()->sysobj, realm_id, y, realm.get_name()); if (r < 0) { - lderr(cct) << "failed to load period for realm " << realm_id << dendl; + ldpp_dout(dpp, -1) << "failed to load period for realm " << realm_id << dendl; return; } diff --git a/src/rgw/rgw_period_pusher.h b/src/rgw/rgw_period_pusher.h index da00175d300..7f88e4e968f 100644 --- a/src/rgw/rgw_period_pusher.h +++ b/src/rgw/rgw_period_pusher.h @@ -29,7 +29,7 
@@ using RGWZonesNeedPeriod = RGWPeriod; class RGWPeriodPusher final : public RGWRealmWatcher::Watcher, public RGWRealmReloader::Pauser { public: - explicit RGWPeriodPusher(rgw::sal::Store* store, optional_yield y); + explicit RGWPeriodPusher(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y); ~RGWPeriodPusher() override; /// respond to realm notifications by pushing new periods to other zones diff --git a/src/rgw/rgw_process.cc b/src/rgw/rgw_process.cc index 3dea772e310..f7fc8dbbe65 100644 --- a/src/rgw/rgw_process.cc +++ b/src/rgw/rgw_process.cc @@ -82,7 +82,7 @@ RGWRequest* RGWProcess::RGWWQ::_dequeue() { void RGWProcess::RGWWQ::_process(RGWRequest *req, ThreadPool::TPHandle &) { perfcounter->inc(l_rgw_qactive); - process->handle_request(req); + process->handle_request(this, req); process->req_throttle.put(1); perfcounter->inc(l_rgw_qactive, -1); } diff --git a/src/rgw/rgw_process.h b/src/rgw/rgw_process.h index a6c040f37b6..f1b10d9fdde 100644 --- a/src/rgw/rgw_process.h +++ b/src/rgw/rgw_process.h @@ -57,7 +57,7 @@ protected: int sock_fd; std::string uri_prefix; - struct RGWWQ : public ThreadPool::WorkQueue { + struct RGWWQ : public DoutPrefixProvider, public ThreadPool::WorkQueue { RGWProcess* process; RGWWQ(RGWProcess* p, ceph::timespan timeout, ceph::timespan suicide_timeout, ThreadPool* tp) @@ -85,6 +85,11 @@ protected: void _clear() override { ceph_assert(process->m_req_queue.empty()); } + + CephContext *get_cct() const override { return process->cct; } + unsigned get_subsys() const { return ceph_subsys_rgw; } + std::ostream& gen_prefix(std::ostream& out) const { return out << "rgw request work queue: ";} + } req_wq; public: @@ -111,7 +116,7 @@ public: virtual ~RGWProcess() = default; virtual void run() = 0; - virtual void handle_request(RGWRequest *req) = 0; + virtual void handle_request(const DoutPrefixProvider *dpp, RGWRequest *req) = 0; void pause() { m_tp.pause(); @@ -147,7 +152,7 @@ public: } void run() override; - void 
handle_request(RGWRequest* req) override; + void handle_request(const DoutPrefixProvider *dpp, RGWRequest* req) override; }; class RGWProcessControlThread : public Thread { @@ -169,7 +174,7 @@ public: RGWProcess(cct, pe, num_threads, _conf) {} void run() override; void checkpoint(); - void handle_request(RGWRequest* req) override; + void handle_request(const DoutPrefixProvider *dpp, RGWRequest* req) override; void gen_request(const string& method, const string& resource, int content_length, std::atomic* fail_flag); diff --git a/src/rgw/rgw_pubsub.cc b/src/rgw/rgw_pubsub.cc index 96a52488639..a2597cbd16d 100644 --- a/src/rgw/rgw_pubsub.cc +++ b/src/rgw/rgw_pubsub.cc @@ -431,11 +431,12 @@ RGWPubSub::RGWPubSub(rgw::sal::RadosStore* _store, const std::string& _tenant) : get_meta_obj(&meta_obj); } -int RGWPubSub::remove(const rgw_raw_obj& obj, +int RGWPubSub::remove(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, RGWObjVersionTracker *objv_tracker, optional_yield y) { - int ret = rgw_delete_system_obj(store->svc()->sysobj, obj.pool, obj.oid, objv_tracker, y); + int ret = rgw_delete_system_obj(dpp, store->svc()->sysobj, obj.pool, obj.oid, objv_tracker, y); if (ret < 0) { return ret; } @@ -453,12 +454,12 @@ int RGWPubSub::read_topics(rgw_pubsub_topics *result, RGWObjVersionTracker *objv return 0; } -int RGWPubSub::write_topics(const rgw_pubsub_topics& topics, +int RGWPubSub::write_topics(const DoutPrefixProvider *dpp, const rgw_pubsub_topics& topics, RGWObjVersionTracker *objv_tracker, optional_yield y) { - int ret = write(meta_obj, topics, objv_tracker, y); + int ret = write(dpp, meta_obj, topics, objv_tracker, y); if (ret < 0 && ret != -ENOENT) { - ldout(store->ctx(), 1) << "ERROR: failed to write topics info: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to write topics info: ret=" << ret << dendl; return ret; } return 0; @@ -479,11 +480,11 @@ int RGWPubSub::Bucket::read_topics(rgw_pubsub_bucket_topics *result, RGWObjVersi return 0; } -int 
RGWPubSub::Bucket::write_topics(const rgw_pubsub_bucket_topics& topics, +int RGWPubSub::Bucket::write_topics(const DoutPrefixProvider *dpp, const rgw_pubsub_bucket_topics& topics, RGWObjVersionTracker *objv_tracker, optional_yield y) { - int ret = ps->write(bucket_meta_obj, topics, objv_tracker, y); + int ret = ps->write(dpp, bucket_meta_obj, topics, objv_tracker, y); if (ret < 0) { ldout(ps->store->ctx(), 1) << "ERROR: failed to write bucket topics info: ret=" << ret << dendl; return ret; @@ -535,31 +536,30 @@ int RGWPubSub::get_topic(const string& name, rgw_pubsub_topic *result) return 0; } -int RGWPubSub::Bucket::create_notification(const string& topic_name, const rgw::notify::EventTypeList& events, optional_yield y) { - return create_notification(topic_name, events, std::nullopt, "", y); +int RGWPubSub::Bucket::create_notification(const DoutPrefixProvider *dpp, const string& topic_name, const rgw::notify::EventTypeList& events, optional_yield y) { + return create_notification(dpp, topic_name, events, std::nullopt, "", y); } -int RGWPubSub::Bucket::create_notification(const string& topic_name,const rgw::notify::EventTypeList& events, OptionalFilter s3_filter, const std::string& notif_name, optional_yield y) { +int RGWPubSub::Bucket::create_notification(const DoutPrefixProvider *dpp, const string& topic_name,const rgw::notify::EventTypeList& events, OptionalFilter s3_filter, const std::string& notif_name, optional_yield y) { rgw_pubsub_topic_subs topic_info; - rgw::sal::RadosStore* store = ps->store; int ret = ps->get_topic(topic_name, &topic_info); if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: failed to read topic '" << topic_name << "' info: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to read topic '" << topic_name << "' info: ret=" << ret << dendl; return ret; } - ldout(store->ctx(), 20) << "successfully read topic '" << topic_name << "' info" << dendl; + ldpp_dout(dpp, 20) << "successfully read topic '" << topic_name << "' info" << 
dendl; RGWObjVersionTracker objv_tracker; rgw_pubsub_bucket_topics bucket_topics; ret = read_topics(&bucket_topics, &objv_tracker); if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: failed to read topics from bucket '" << + ldpp_dout(dpp, 1) << "ERROR: failed to read topics from bucket '" << bucket.name << "': ret=" << ret << dendl; return ret; } - ldout(store->ctx(), 20) << "successfully read " << bucket_topics.topics.size() << " topics from bucket '" << + ldpp_dout(dpp, 20) << "successfully read " << bucket_topics.topics.size() << " topics from bucket '" << bucket.name << "'" << dendl; auto& topic_filter = bucket_topics.topics[topic_name]; @@ -570,25 +570,24 @@ int RGWPubSub::Bucket::create_notification(const string& topic_name,const rgw::n topic_filter.s3_filter = *s3_filter; } - ret = write_topics(bucket_topics, &objv_tracker, y); + ret = write_topics(dpp, bucket_topics, &objv_tracker, y); if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: failed to write topics to bucket '" << bucket.name << "': ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to write topics to bucket '" << bucket.name << "': ret=" << ret << dendl; return ret; } - ldout(store->ctx(), 20) << "successfully wrote " << bucket_topics.topics.size() << " topics to bucket '" << bucket.name << "'" << dendl; + ldpp_dout(dpp, 20) << "successfully wrote " << bucket_topics.topics.size() << " topics to bucket '" << bucket.name << "'" << dendl; return 0; } -int RGWPubSub::Bucket::remove_notification(const string& topic_name, optional_yield y) +int RGWPubSub::Bucket::remove_notification(const DoutPrefixProvider *dpp, const string& topic_name, optional_yield y) { rgw_pubsub_topic_subs topic_info; - rgw::sal::RadosStore* store = ps->store; int ret = ps->get_topic(topic_name, &topic_info); if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: failed to read topic info: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to read topic info: ret=" << ret << dendl; return ret; } @@ -597,7 +596,7 
@@ int RGWPubSub::Bucket::remove_notification(const string& topic_name, optional_yi ret = read_topics(&bucket_topics, &objv_tracker); if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: failed to read bucket topics info: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to read bucket topics info: ret=" << ret << dendl; return ret; } @@ -605,65 +604,65 @@ int RGWPubSub::Bucket::remove_notification(const string& topic_name, optional_yi if (bucket_topics.topics.empty()) { // no more topics - delete the notification object of the bucket - ret = ps->remove(bucket_meta_obj, &objv_tracker, y); + ret = ps->remove(dpp, bucket_meta_obj, &objv_tracker, y); if (ret < 0 && ret != -ENOENT) { - ldout(ps->store->ctx(), 1) << "ERROR: failed to remove bucket topics: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to remove bucket topics: ret=" << ret << dendl; return ret; } return 0; } // write back the notifications without the deleted one - ret = write_topics(bucket_topics, &objv_tracker, y); + ret = write_topics(dpp, bucket_topics, &objv_tracker, y); if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: failed to write topics info: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to write topics info: ret=" << ret << dendl; return ret; } return 0; } -int RGWPubSub::Bucket::remove_notifications(optional_yield y) +int RGWPubSub::Bucket::remove_notifications(const DoutPrefixProvider *dpp, optional_yield y) { // get all topics on a bucket rgw_pubsub_bucket_topics bucket_topics; auto ret = get_topics(&bucket_topics); if (ret < 0 && ret != -ENOENT) { - ldout(ps->store->ctx(), 1) << "ERROR: failed to get list of topics from bucket '" << bucket.name << "', ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to get list of topics from bucket '" << bucket.name << "', ret=" << ret << dendl; return ret ; } // remove all auto-genrated topics for (const auto& topic : bucket_topics.topics) { const auto& topic_name = topic.first; - ret = 
ps->remove_topic(topic_name, y); + ret = ps->remove_topic(dpp, topic_name, y); if (ret < 0 && ret != -ENOENT) { - ldout(ps->store->ctx(), 5) << "WARNING: failed to remove auto-generated topic '" << topic_name << "', ret=" << ret << dendl; + ldpp_dout(dpp, 5) << "WARNING: failed to remove auto-generated topic '" << topic_name << "', ret=" << ret << dendl; } } // delete the notification object of the bucket - ret = ps->remove(bucket_meta_obj, nullptr, y); + ret = ps->remove(dpp, bucket_meta_obj, nullptr, y); if (ret < 0 && ret != -ENOENT) { - ldout(ps->store->ctx(), 1) << "ERROR: failed to remove bucket topics: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to remove bucket topics: ret=" << ret << dendl; return ret; } return 0; } -int RGWPubSub::create_topic(const string& name, optional_yield y) { - return create_topic(name, rgw_pubsub_sub_dest(), "", "", y); +int RGWPubSub::create_topic(const DoutPrefixProvider *dpp, const string& name, optional_yield y) { + return create_topic(dpp, name, rgw_pubsub_sub_dest(), "", "", y); } -int RGWPubSub::create_topic(const string& name, const rgw_pubsub_sub_dest& dest, const std::string& arn, const std::string& opaque_data, optional_yield y) { +int RGWPubSub::create_topic(const DoutPrefixProvider *dpp, const string& name, const rgw_pubsub_sub_dest& dest, const std::string& arn, const std::string& opaque_data, optional_yield y) { RGWObjVersionTracker objv_tracker; rgw_pubsub_topics topics; int ret = read_topics(&topics, &objv_tracker); if (ret < 0 && ret != -ENOENT) { // its not an error if not topics exist, we create one - ldout(store->ctx(), 1) << "ERROR: failed to read topics info: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to read topics info: ret=" << ret << dendl; return ret; } @@ -674,35 +673,35 @@ int RGWPubSub::create_topic(const string& name, const rgw_pubsub_sub_dest& dest, new_topic.topic.arn = arn; new_topic.topic.opaque_data = opaque_data; - ret = write_topics(topics, &objv_tracker, 
y); + ret = write_topics(dpp, topics, &objv_tracker, y); if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: failed to write topics info: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to write topics info: ret=" << ret << dendl; return ret; } return 0; } -int RGWPubSub::remove_topic(const string& name, optional_yield y) +int RGWPubSub::remove_topic(const DoutPrefixProvider *dpp, const string& name, optional_yield y) { RGWObjVersionTracker objv_tracker; rgw_pubsub_topics topics; int ret = read_topics(&topics, &objv_tracker); if (ret < 0 && ret != -ENOENT) { - ldout(store->ctx(), 1) << "ERROR: failed to read topics info: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to read topics info: ret=" << ret << dendl; return ret; } else if (ret == -ENOENT) { // its not an error if no topics exist, just a no-op - ldout(store->ctx(), 10) << "WARNING: failed to read topics info, deletion is a no-op: ret=" << ret << dendl; + ldpp_dout(dpp, 10) << "WARNING: failed to read topics info, deletion is a no-op: ret=" << ret << dendl; return 0; } topics.topics.erase(name); - ret = write_topics(topics, &objv_tracker, y); + ret = write_topics(dpp, topics, &objv_tracker, y); if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: failed to remove topics info: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to remove topics info: ret=" << ret << dendl; return ret; } @@ -719,25 +718,26 @@ int RGWPubSub::Sub::read_sub(rgw_pubsub_sub_config *result, RGWObjVersionTracker return 0; } -int RGWPubSub::Sub::write_sub(const rgw_pubsub_sub_config& sub_conf, +int RGWPubSub::Sub::write_sub(const DoutPrefixProvider *dpp, + const rgw_pubsub_sub_config& sub_conf, RGWObjVersionTracker *objv_tracker, optional_yield y) { - int ret = ps->write(sub_meta_obj, sub_conf, objv_tracker, y); + int ret = ps->write(dpp, sub_meta_obj, sub_conf, objv_tracker, y); if (ret < 0) { - ldout(ps->store->ctx(), 1) << "ERROR: failed to write subscription info: ret=" << ret << dendl; + 
ldpp_dout(dpp, 1) << "ERROR: failed to write subscription info: ret=" << ret << dendl; return ret; } return 0; } -int RGWPubSub::Sub::remove_sub(RGWObjVersionTracker *objv_tracker, +int RGWPubSub::Sub::remove_sub(const DoutPrefixProvider *dpp, RGWObjVersionTracker *objv_tracker, optional_yield y) { - int ret = ps->remove(sub_meta_obj, objv_tracker, y); + int ret = ps->remove(dpp, sub_meta_obj, objv_tracker, y); if (ret < 0) { - ldout(ps->store->ctx(), 1) << "ERROR: failed to remove subscription info: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to remove subscription info: ret=" << ret << dendl; return ret; } @@ -749,21 +749,20 @@ int RGWPubSub::Sub::get_conf(rgw_pubsub_sub_config *result) return read_sub(result, nullptr); } -int RGWPubSub::Sub::subscribe(const string& topic, const rgw_pubsub_sub_dest& dest, optional_yield y, const std::string& s3_id) +int RGWPubSub::Sub::subscribe(const DoutPrefixProvider *dpp, const string& topic, const rgw_pubsub_sub_dest& dest, optional_yield y, const std::string& s3_id) { RGWObjVersionTracker objv_tracker; rgw_pubsub_topics topics; - rgw::sal::RadosStore* store = ps->store; int ret = ps->read_topics(&topics, &objv_tracker); if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: failed to read topics info: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to read topics info: ret=" << ret << dendl; return ret != -ENOENT ? 
ret : -EINVAL; } auto iter = topics.topics.find(topic); if (iter == topics.topics.end()) { - ldout(store->ctx(), 1) << "ERROR: cannot add subscription to topic: topic not found" << dendl; + ldpp_dout(dpp, 1) << "ERROR: cannot add subscription to topic: topic not found" << dendl; return -EINVAL; } @@ -779,31 +778,30 @@ int RGWPubSub::Sub::subscribe(const string& topic, const rgw_pubsub_sub_dest& de t.subs.insert(sub); - ret = ps->write_topics(topics, &objv_tracker, y); + ret = ps->write_topics(dpp, topics, &objv_tracker, y); if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: failed to write topics info: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to write topics info: ret=" << ret << dendl; return ret; } - ret = write_sub(sub_conf, nullptr, y); + ret = write_sub(dpp, sub_conf, nullptr, y); if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: failed to write subscription info: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to write subscription info: ret=" << ret << dendl; return ret; } return 0; } -int RGWPubSub::Sub::unsubscribe(const string& _topic, optional_yield y) +int RGWPubSub::Sub::unsubscribe(const DoutPrefixProvider *dpp, const string& _topic, optional_yield y) { string topic = _topic; RGWObjVersionTracker sobjv_tracker; - rgw::sal::RadosStore* store = ps->store; if (topic.empty()) { rgw_pubsub_sub_config sub_conf; int ret = read_sub(&sub_conf, &sobjv_tracker); if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: failed to read subscription info: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to read subscription info: ret=" << ret << dendl; return ret; } topic = sub_conf.topic; @@ -815,7 +813,7 @@ int RGWPubSub::Sub::unsubscribe(const string& _topic, optional_yield y) int ret = ps->read_topics(&topics, &objv_tracker); if (ret < 0) { // not an error - could be that topic was already deleted - ldout(store->ctx(), 10) << "WARNING: failed to read topics info: ret=" << ret << dendl; + ldpp_dout(dpp, 10) << "WARNING: 
failed to read topics info: ret=" << ret << dendl; } else { auto iter = topics.topics.find(topic); if (iter != topics.topics.end()) { @@ -823,17 +821,17 @@ int RGWPubSub::Sub::unsubscribe(const string& _topic, optional_yield y) t.subs.erase(sub); - ret = ps->write_topics(topics, &objv_tracker, y); + ret = ps->write_topics(dpp, topics, &objv_tracker, y); if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: failed to write topics info: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to write topics info: ret=" << ret << dendl; return ret; } } } - ret = remove_sub(&sobjv_tracker, y); + ret = remove_sub(dpp, &sobjv_tracker, y); if (ret < 0) { - ldout(store->ctx(), 1) << "ERROR: failed to delete subscription info: ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to delete subscription info: ret=" << ret << dendl; return ret; } return 0; diff --git a/src/rgw/rgw_pubsub.h b/src/rgw/rgw_pubsub.h index 617b7e93d9d..f667acd9009 100644 --- a/src/rgw/rgw_pubsub.h +++ b/src/rgw/rgw_pubsub.h @@ -615,14 +615,14 @@ class RGWPubSub int read(const rgw_raw_obj& obj, T* data, RGWObjVersionTracker* objv_tracker); template - int write(const rgw_raw_obj& obj, const T& info, + int write(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, const T& info, RGWObjVersionTracker* obj_tracker, optional_yield y); - int remove(const rgw_raw_obj& obj, RGWObjVersionTracker* objv_tracker, + int remove(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, RGWObjVersionTracker* objv_tracker, optional_yield y); int read_topics(rgw_pubsub_topics *result, RGWObjVersionTracker* objv_tracker); - int write_topics(const rgw_pubsub_topics& topics, + int write_topics(const DoutPrefixProvider *dpp, const rgw_pubsub_topics& topics, RGWObjVersionTracker* objv_tracker, optional_yield y); public: @@ -641,7 +641,7 @@ public: // set the list of topics associated with a bucket // use version tacker to enforce atomicity between read/write // return 0 on success, error code otherwise - int 
write_topics(const rgw_pubsub_bucket_topics& topics, + int write_topics(const DoutPrefixProvider *dpp, const rgw_pubsub_bucket_topics& topics, RGWObjVersionTracker* objv_tracker, optional_yield y); public: Bucket(RGWPubSub *_ps, const rgw_bucket& _bucket) : ps(_ps), bucket(_bucket) { @@ -657,16 +657,16 @@ public: // for S3 compliant notifications the version with: s3_filter and notif_name should be used // return -ENOENT if the topic does not exists // return 0 on success, error code otherwise - int create_notification(const string& topic_name, const rgw::notify::EventTypeList& events, optional_yield y); - int create_notification(const string& topic_name, const rgw::notify::EventTypeList& events, OptionalFilter s3_filter, const std::string& notif_name, optional_yield y); + int create_notification(const DoutPrefixProvider *dpp, const string& topic_name, const rgw::notify::EventTypeList& events, optional_yield y); + int create_notification(const DoutPrefixProvider *dpp, const string& topic_name, const rgw::notify::EventTypeList& events, OptionalFilter s3_filter, const std::string& notif_name, optional_yield y); // remove a topic and filter from bucket // if the topic does not exists on the bucket it is a no-op (considered success) // return -ENOENT if the topic does not exists // return 0 on success, error code otherwise - int remove_notification(const string& topic_name, optional_yield y); + int remove_notification(const DoutPrefixProvider *dpp, const string& topic_name, optional_yield y); // remove all notifications (and autogenerated topics) associated with the bucket // return 0 on success or if no topic was associated with the bucket, error code otherwise - int remove_notifications(optional_yield y); + int remove_notifications(const DoutPrefixProvider *dpp, optional_yield y); }; // base class for subscription @@ -678,9 +678,9 @@ public: rgw_raw_obj sub_meta_obj; int read_sub(rgw_pubsub_sub_config *result, RGWObjVersionTracker* objv_tracker); - int 
write_sub(const rgw_pubsub_sub_config& sub_conf, + int write_sub(const DoutPrefixProvider *dpp, const rgw_pubsub_sub_config& sub_conf, RGWObjVersionTracker* objv_tracker, optional_yield y); - int remove_sub(RGWObjVersionTracker* objv_tracker, optional_yield y); + int remove_sub(const DoutPrefixProvider *dpp, RGWObjVersionTracker* objv_tracker, optional_yield y); public: Sub(RGWPubSub *_ps, const std::string& _sub) : ps(_ps), sub(_sub) { ps->get_sub_meta_obj(sub, &sub_meta_obj); @@ -688,9 +688,9 @@ public: virtual ~Sub() = default; - int subscribe(const string& topic_name, const rgw_pubsub_sub_dest& dest, optional_yield y, + int subscribe(const DoutPrefixProvider *dpp, const string& topic_name, const rgw_pubsub_sub_dest& dest, optional_yield y, const std::string& s3_id=""); - int unsubscribe(const string& topic_name, optional_yield y); + int unsubscribe(const DoutPrefixProvider *dpp, const string& topic_name, optional_yield y); int get_conf(rgw_pubsub_sub_config* result); static const int DEFAULT_MAX_EVENTS = 100; @@ -763,15 +763,15 @@ public: // create a topic with a name only // if the topic already exists it is a no-op (considered success) // return 0 on success, error code otherwise - int create_topic(const string& name, optional_yield y); + int create_topic(const DoutPrefixProvider *dpp, const string& name, optional_yield y); // create a topic with push destination information and ARN // if the topic already exists the destination and ARN values may be updated (considered succsess) // return 0 on success, error code otherwise - int create_topic(const string& name, const rgw_pubsub_sub_dest& dest, const std::string& arn, const std::string& opaque_data, optional_yield y); + int create_topic(const DoutPrefixProvider *dpp, const string& name, const rgw_pubsub_sub_dest& dest, const std::string& arn, const std::string& opaque_data, optional_yield y); // remove a topic according to its name // if the topic does not exists it is a no-op (considered success) // return 0 
on success, error code otherwise - int remove_topic(const string& name, optional_yield y); + int remove_topic(const DoutPrefixProvider *dpp, const string& name, optional_yield y); }; @@ -799,13 +799,13 @@ int RGWPubSub::read(const rgw_raw_obj& obj, T* result, RGWObjVersionTracker* obj } template -int RGWPubSub::write(const rgw_raw_obj& obj, const T& info, +int RGWPubSub::write(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, const T& info, RGWObjVersionTracker* objv_tracker, optional_yield y) { bufferlist bl; encode(info, bl); - int ret = rgw_put_system_obj(obj_ctx, obj.pool, obj.oid, + int ret = rgw_put_system_obj(dpp, obj_ctx, obj.pool, obj.oid, bl, false, objv_tracker, real_time(), y); if (ret < 0) { diff --git a/src/rgw/rgw_pubsub_push.cc b/src/rgw/rgw_pubsub_push.cc index 3b5b9266610..ca1b43b588d 100644 --- a/src/rgw/rgw_pubsub_push.cc +++ b/src/rgw/rgw_pubsub_push.cc @@ -73,7 +73,7 @@ private: } // send message to endpoint - int send_request() override { + int send_request(const DoutPrefixProvider *dpp) override { init_new_io(this); const auto rc = sync_env->http_manager->add_request(this); if (rc < 0) { @@ -232,7 +232,7 @@ private: topic(_topic), conn(_conn), message(_message) {} // send message to endpoint, without waiting for reply - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { const auto rc = amqp::publish(conn, topic, message); if (rc < 0) { @@ -262,7 +262,7 @@ private: topic(_topic), conn(_conn), message(_message) {} // send message to endpoint, waiting for reply - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { yield { init_new_io(this); @@ -504,7 +504,7 @@ private: topic(_topic), conn(_conn), message(_message) {} // send message to endpoint, without waiting for reply - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { const auto rc = kafka::publish(conn, topic, message); if (rc < 0) { @@ -534,7 
+534,7 @@ private: topic(_topic), conn(_conn), message(_message) {} // send message to endpoint, waiting for reply - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { yield { init_new_io(this); diff --git a/src/rgw/rgw_putobj_processor.cc b/src/rgw/rgw_putobj_processor.cc index 2f3a09f92b6..687583e3303 100644 --- a/src/rgw/rgw_putobj_processor.cc +++ b/src/rgw/rgw_putobj_processor.cc @@ -369,7 +369,7 @@ int MultipartObjectProcessor::complete(size_t accounted_size, bucket->get_object(rgw_obj_key(mp.get_meta(), std::string(), RGW_OBJ_NS_MULTIPART)); meta_obj->set_in_extra_data(true); - r = meta_obj->omap_set_val_by_key(p, bl, true, null_yield); + r = meta_obj->omap_set_val_by_key(dpp, p, bl, true, null_yield); if (r < 0) { return r == -ENOENT ? -ERR_NO_SUCH_UPLOAD : r; } @@ -504,7 +504,7 @@ int AppendObjectProcessor::complete(size_t accounted_size, const string &etag, c //For Append obj, disable versioning obj_op->params.versioning_disabled = true; if (cur_manifest) { - cur_manifest->append(manifest, store->get_zone()); + cur_manifest->append(dpp, manifest, store->get_zone()); obj_op->params.manifest = cur_manifest; } else { obj_op->params.manifest = &manifest; diff --git a/src/rgw/rgw_quota.cc b/src/rgw/rgw_quota.cc index e8cfa07d4ab..4759f43c6ff 100644 --- a/src/rgw/rgw_quota.cc +++ b/src/rgw/rgw_quota.cc @@ -203,7 +203,7 @@ int RGWQuotaCache::get_stats(const rgw_user& user, const rgw_bucket& bucket, if (qs.async_refresh_time.sec() > 0 && now >= qs.async_refresh_time) { int r = async_refresh(user, bucket, qs); if (r < 0) { - ldout(store->ctx(), 0) << "ERROR: quota async refresh returned ret=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: quota async refresh returned ret=" << r << dendl; /* continue processing, might be a transient error, async refresh is just optimization */ } @@ -298,15 +298,15 @@ int BucketAsyncRefreshHandler::init_fetch() const DoutPrefix dp(store->ctx(), dout_subsys, "rgw bucket async refresh 
handler: "); int r = store->get_bucket(&dp, nullptr, bucket, &rbucket, null_yield); if (r < 0) { - ldout(store->ctx(), 0) << "could not get bucket info for bucket=" << bucket << " r=" << r << dendl; + ldpp_dout(&dp, 0) << "could not get bucket info for bucket=" << bucket << " r=" << r << dendl; return r; } - ldout(store->ctx(), 20) << "initiating async quota refresh for bucket=" << bucket << dendl; + ldpp_dout(&dp, 20) << "initiating async quota refresh for bucket=" << bucket << dendl; - r = rbucket->get_bucket_stats_async(RGW_NO_SHARD, this); + r = rbucket->get_bucket_stats_async(&dp, RGW_NO_SHARD, this); if (r < 0) { - ldout(store->ctx(), 0) << "could not get bucket info for bucket=" << bucket.name << dendl; + ldpp_dout(&dp, 0) << "could not get bucket info for bucket=" << bucket.name << dendl; /* get_bucket_stats_async() dropped our reference already */ return r; @@ -368,7 +368,7 @@ int RGWBucketStatsCache::fetch_stats_from_storage(const rgw_user& _u, const rgw_ int r = store->get_bucket(dpp, user.get(), _b, &bucket, y); if (r < 0) { - ldout(store->ctx(), 0) << "could not get bucket info for bucket=" << _b << " r=" << r << dendl; + ldpp_dout(dpp, 0) << "could not get bucket info for bucket=" << _b << " r=" << r << dendl; return r; } @@ -376,9 +376,9 @@ int RGWBucketStatsCache::fetch_stats_from_storage(const rgw_user& _u, const rgw_ string master_ver; map bucket_stats; - r = bucket->get_bucket_stats(RGW_NO_SHARD, &bucket_ver, &master_ver, bucket_stats); + r = bucket->get_bucket_stats(dpp, RGW_NO_SHARD, &bucket_ver, &master_ver, bucket_stats); if (r < 0) { - ldout(store->ctx(), 0) << "could not get bucket stats for bucket=" + ldpp_dout(dpp, 0) << "could not get bucket stats for bucket=" << _b.name << dendl; return r; } @@ -398,12 +398,14 @@ int RGWBucketStatsCache::fetch_stats_from_storage(const rgw_user& _u, const rgw_ class UserAsyncRefreshHandler : public RGWQuotaCache::AsyncRefreshHandler, public RGWGetUserStats_CB { + const DoutPrefixProvider *dpp; rgw_bucket 
bucket; public: - UserAsyncRefreshHandler(rgw::sal::Store* _store, RGWQuotaCache *_cache, + UserAsyncRefreshHandler(const DoutPrefixProvider *_dpp, rgw::sal::Store* _store, RGWQuotaCache *_cache, const rgw_user& _user, const rgw_bucket& _bucket) : RGWQuotaCache::AsyncRefreshHandler(_store, _cache), RGWGetUserStats_CB(_user), + dpp(_dpp), bucket(_bucket) {} void drop_reference() override { put(); } @@ -415,10 +417,10 @@ int UserAsyncRefreshHandler::init_fetch() { std::unique_ptr ruser = store->get_user(user); - ldout(store->ctx(), 20) << "initiating async quota refresh for user=" << user << dendl; - int r = ruser->read_stats_async(this); + ldpp_dout(dpp, 20) << "initiating async quota refresh for user=" << user << dendl; + int r = ruser->read_stats_async(dpp, this); if (r < 0) { - ldout(store->ctx(), 0) << "could not get bucket info for user=" << user << dendl; + ldpp_dout(dpp, 0) << "could not get bucket info for user=" << user << dendl; /* get_bucket_stats_async() dropped our reference already */ return r; @@ -439,6 +441,7 @@ void UserAsyncRefreshHandler::handle_response(int r) } class RGWUserStatsCache : public RGWQuotaCache { + const DoutPrefixProvider *dpp; std::atomic down_flag = { false }; ceph::shared_mutex mutex = ceph::make_shared_mutex("RGWUserStatsCache"); map modified_buckets; @@ -574,8 +577,8 @@ protected: } public: - RGWUserStatsCache(rgw::sal::Store* _store, bool quota_threads) - : RGWQuotaCache(_store, _store->ctx()->_conf->rgw_bucket_quota_cache_size) + RGWUserStatsCache(const DoutPrefixProvider *dpp, rgw::sal::Store* _store, bool quota_threads) + : RGWQuotaCache(_store, _store->ctx()->_conf->rgw_bucket_quota_cache_size), dpp(dpp) { if (quota_threads) { buckets_sync_thread = new BucketsSyncThread(store->ctx(), this); @@ -592,7 +595,7 @@ public: } AsyncRefreshHandler *allocate_refresh_handler(const rgw_user& user, const rgw_bucket& bucket) override { - return new UserAsyncRefreshHandler(store, this, user, bucket); + return new 
UserAsyncRefreshHandler(dpp, store, this, user, bucket); } bool can_use_cached_stats(RGWQuotaInfo& quota, RGWStorageStats& stats) override { @@ -623,9 +626,9 @@ int RGWUserStatsCache::fetch_stats_from_storage(const rgw_user& _u, const DoutPrefixProvider *dpp) { std::unique_ptr user = store->get_user(_u); - int r = user->read_stats(y, &stats); + int r = user->read_stats(dpp, y, &stats); if (r < 0) { - ldout(store->ctx(), 0) << "could not get user stats for user=" << user << dendl; + ldpp_dout(dpp, 0) << "could not get user stats for user=" << user << dendl; return r; } @@ -639,13 +642,13 @@ int RGWUserStatsCache::sync_bucket(const rgw_user& _u, rgw_bucket& _b, optional_ int r = store->get_bucket(dpp, user.get(), _b, &bucket, y); if (r < 0) { - ldout(store->ctx(), 0) << "could not get bucket info for bucket=" << _b << " r=" << r << dendl; + ldpp_dout(dpp, 0) << "could not get bucket info for bucket=" << _b << " r=" << r << dendl; return r; } - r = bucket->sync_user_stats(y); + r = bucket->sync_user_stats(dpp, y); if (r < 0) { - ldout(store->ctx(), 0) << "ERROR: sync_user_stats() for user=" << _u << ", bucket=" << bucket << " returned " << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: sync_user_stats() for user=" << _u << ", bucket=" << bucket << " returned " << r << dendl; return r; } @@ -659,15 +662,15 @@ int RGWUserStatsCache::sync_user(const DoutPrefixProvider *dpp, const rgw_user& ceph::real_time last_stats_update; std::unique_ptr user = store->get_user(rgw_user(_u.to_str())); - int ret = user->read_stats(y, &stats, &last_stats_sync, &last_stats_update); + int ret = user->read_stats(dpp, y, &stats, &last_stats_sync, &last_stats_update); if (ret < 0) { - ldout(store->ctx(), 5) << "ERROR: can't read user header: ret=" << ret << dendl; + ldpp_dout(dpp, 5) << "ERROR: can't read user header: ret=" << ret << dendl; return ret; } if (!store->ctx()->_conf->rgw_user_quota_sync_idle_users && last_stats_update < last_stats_sync) { - ldout(store->ctx(), 20) << "user is idle, 
not doing a full sync (user=" << user << ")" << dendl; + ldpp_dout(dpp, 20) << "user is idle, not doing a full sync (user=" << user << ")" << dendl; return 0; } @@ -679,7 +682,7 @@ int RGWUserStatsCache::sync_user(const DoutPrefixProvider *dpp, const rgw_user& ret = rgw_user_sync_all_stats(dpp, store, user.get(), y); if (ret < 0) { - ldout(store->ctx(), 0) << "ERROR: failed user stats sync, ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed user stats sync, ret=" << ret << dendl; return ret; } @@ -691,9 +694,9 @@ int RGWUserStatsCache::sync_all_users(const DoutPrefixProvider *dpp, optional_yi string key = "user"; void *handle; - int ret = store->meta_list_keys_init(key, string(), &handle); + int ret = store->meta_list_keys_init(dpp, key, string(), &handle); if (ret < 0) { - ldout(store->ctx(), 10) << "ERROR: can't get key: ret=" << ret << dendl; + ldpp_dout(dpp, 10) << "ERROR: can't get key: ret=" << ret << dendl; return ret; } @@ -704,17 +707,17 @@ int RGWUserStatsCache::sync_all_users(const DoutPrefixProvider *dpp, optional_yi list keys; ret = store->meta_list_keys_next(handle, max, keys, &truncated); if (ret < 0) { - ldout(store->ctx(), 0) << "ERROR: lists_keys_next(): ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: lists_keys_next(): ret=" << ret << dendl; goto done; } for (list::iterator iter = keys.begin(); iter != keys.end() && !going_down(); ++iter) { rgw_user user(*iter); - ldout(store->ctx(), 20) << "RGWUserStatsCache: sync user=" << user << dendl; + ldpp_dout(dpp, 20) << "RGWUserStatsCache: sync user=" << user << dendl; int ret = sync_user(dpp, user, y); if (ret < 0) { - ldout(store->ctx(), 5) << "ERROR: sync_user() failed, user=" << user << " ret=" << ret << dendl; + ldpp_dout(dpp, 5) << "ERROR: sync_user() failed, user=" << user << " ret=" << ret << dendl; /* continuing to next user */ continue; @@ -925,9 +928,9 @@ class RGWQuotaHandlerImpl : public RGWQuotaHandler { return 0; } public: - RGWQuotaHandlerImpl(rgw::sal::Store* _store, 
bool quota_threads) : store(_store), + RGWQuotaHandlerImpl(const DoutPrefixProvider *dpp, rgw::sal::Store* _store, bool quota_threads) : store(_store), bucket_stats_cache(_store), - user_stats_cache(_store, quota_threads) {} + user_stats_cache(dpp, _store, quota_threads) {} int check_quota(const rgw_user& user, rgw_bucket& bucket, @@ -998,9 +1001,9 @@ public: }; -RGWQuotaHandler *RGWQuotaHandler::generate_handler(rgw::sal::Store* store, bool quota_threads) +RGWQuotaHandler *RGWQuotaHandler::generate_handler(const DoutPrefixProvider *dpp, rgw::sal::Store* store, bool quota_threads) { - return new RGWQuotaHandlerImpl(store, quota_threads); + return new RGWQuotaHandlerImpl(dpp, store, quota_threads); } void RGWQuotaHandler::free_handler(RGWQuotaHandler *handler) diff --git a/src/rgw/rgw_quota.h b/src/rgw/rgw_quota.h index 271cf1ead6a..b8b386bbdce 100644 --- a/src/rgw/rgw_quota.h +++ b/src/rgw/rgw_quota.h @@ -115,7 +115,7 @@ public: virtual void update_stats(const rgw_user& bucket_owner, rgw_bucket& bucket, int obj_delta, uint64_t added_bytes, uint64_t removed_bytes) = 0; - static RGWQuotaHandler *generate_handler(rgw::sal::Store* store, bool quota_threads); + static RGWQuotaHandler *generate_handler(const DoutPrefixProvider *dpp, rgw::sal::Store* store, bool quota_threads); static void free_handler(RGWQuotaHandler *handler); }; diff --git a/src/rgw/rgw_rados.cc b/src/rgw/rgw_rados.cc index 3d11536cdbf..f86ca698918 100644 --- a/src/rgw/rgw_rados.cc +++ b/src/rgw/rgw_rados.cc @@ -301,7 +301,7 @@ public: http_manager.start(); } - int notify_all(map& conn_map, set& shards) { + int notify_all(const DoutPrefixProvider *dpp, map& conn_map, set& shards) { rgw_http_param_pair pairs[] = { { "type", "metadata" }, { "notify", NULL }, { NULL, NULL } }; @@ -314,7 +314,7 @@ public: stacks.push_back(stack); } - return run(stacks); + return run(dpp, stacks); } }; @@ -328,7 +328,7 @@ public: http_manager.start(); } - int notify_all(map& conn_map, + int notify_all(const 
DoutPrefixProvider *dpp, map& conn_map, bc::flat_map >& shards) { rgw_http_param_pair pairs[] = { { "type", "data" }, { "notify", NULL }, @@ -343,7 +343,7 @@ public: stacks.push_back(stack); } - return run(stacks); + return run(dpp, stacks); } }; @@ -373,9 +373,9 @@ void *RGWRadosThread::Worker::entry() { do { auto start = ceph::real_clock::now(); - int r = processor->process(); + int r = processor->process(this); if (r < 0) { - dout(0) << "ERROR: processor->process() returned error r=" << r << dendl; + ldpp_dout(this, 0) << "ERROR: processor->process() returned error r=" << r << dendl; } if (processor->going_down()) @@ -417,10 +417,10 @@ public: RGWMetaNotifier(RGWRados *_store, RGWMetadataLog* log) : RGWRadosThread(_store, "meta-notifier"), notify_mgr(_store), log(log) {} - int process() override; + int process(const DoutPrefixProvider *dpp) override; }; -int RGWMetaNotifier::process() +int RGWMetaNotifier::process(const DoutPrefixProvider *dpp) { set shards; @@ -431,10 +431,10 @@ int RGWMetaNotifier::process() } for (set::iterator iter = shards.begin(); iter != shards.end(); ++iter) { - ldout(cct, 20) << __func__ << "(): notifying mdlog change, shard_id=" << *iter << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): notifying mdlog change, shard_id=" << *iter << dendl; } - notify_mgr.notify_all(store->svc.zone->get_zone_conn_map(), shards); + notify_mgr.notify_all(dpp, store->svc.zone->get_zone_conn_map(), shards); return 0; } @@ -451,10 +451,10 @@ class RGWDataNotifier : public RGWRadosThread { public: RGWDataNotifier(RGWRados *_store) : RGWRadosThread(_store, "data-notifier"), notify_mgr(_store) {} - int process() override; + int process(const DoutPrefixProvider *dpp) override; }; -int RGWDataNotifier::process() +int RGWDataNotifier::process(const DoutPrefixProvider *dpp) { auto data_log = store->svc.datalog_rados; if (!data_log) { @@ -468,11 +468,11 @@ int RGWDataNotifier::process() } for (const auto& [shard_id, keys] : shards) { - ldout(cct, 20) << __func__ << 
"(): notifying datalog change, shard_id=" + ldpp_dout(dpp, 20) << __func__ << "(): notifying datalog change, shard_id=" << shard_id << ": " << keys << dendl; } - notify_mgr.notify_all(store->svc.zone->get_zone_data_notify_to_map(), shards); + notify_mgr.notify_all(dpp, store->svc.zone->get_zone_data_notify_to_map(), shards); return 0; } @@ -482,8 +482,8 @@ public: RGWSyncProcessorThread(RGWRados *_store, const string& thread_name = "radosgw") : RGWRadosThread(_store, thread_name) {} RGWSyncProcessorThread(RGWRados *_store) : RGWRadosThread(_store) {} ~RGWSyncProcessorThread() override {} - int init() override = 0 ; - int process() override = 0; + int init(const DoutPrefixProvider *dpp) override = 0 ; + int process(const DoutPrefixProvider *dpp) override = 0; }; class RGWMetaSyncProcessorThread : public RGWSyncProcessorThread @@ -507,17 +507,17 @@ public: } RGWMetaSyncStatusManager* get_manager() { return &sync; } - int init() override { - int ret = sync.init(); + int init(const DoutPrefixProvider *dpp) override { + int ret = sync.init(dpp); if (ret < 0) { - ldout(store->ctx(), 0) << "ERROR: sync.init() returned " << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: sync.init() returned " << ret << dendl; return ret; } return 0; } - int process() override { - sync.run(null_yield); + int process(const DoutPrefixProvider *dpp) override { + sync.run(dpp, null_yield); return 0; } }; @@ -554,16 +554,16 @@ public: } RGWDataSyncStatusManager* get_manager() { return &sync; } - int init() override { + int init(const DoutPrefixProvider *dpp) override { return 0; } - int process() override { + int process(const DoutPrefixProvider *dpp) override { while (!initialized) { if (going_down()) { return 0; } - int ret = sync.init(); + int ret = sync.init(dpp); if (ret >= 0) { initialized = true; break; @@ -571,7 +571,7 @@ public: /* we'll be back! 
*/ return 0; } - sync.run(); + sync.run(dpp); return 0; } }; @@ -596,10 +596,10 @@ public: trim_interval(interval, 0) {} - int init() override { + int init(const DoutPrefixProvider *dpp) override { return http.start(); } - int process() override { + int process(const DoutPrefixProvider *dpp) override { list stacks; auto meta = new RGWCoroutinesStack(store->ctx(), &crs); meta->call(create_meta_log_trim_cr(this, static_cast(store), &http, @@ -609,7 +609,7 @@ public: if (store->svc()->zone->sync_module_exports_data()) { auto data = new RGWCoroutinesStack(store->ctx(), &crs); - data->call(create_data_log_trim_cr(static_cast(store), &http, + data->call(create_data_log_trim_cr(dpp, static_cast(store), &http, cct->_conf->rgw_data_log_num_shards, trim_interval)); stacks.push_back(data); @@ -619,7 +619,7 @@ public: stacks.push_back(bucket); } - crs.run(stacks); + crs.run(dpp, stacks); return 0; } @@ -679,10 +679,10 @@ RGWDataSyncStatusManager* RGWRados::get_data_sync_manager(const rgw_zone_id& sou return thread->second->get_manager(); } -int RGWRados::get_required_alignment(const rgw_pool& pool, uint64_t *alignment) +int RGWRados::get_required_alignment(const DoutPrefixProvider *dpp, const rgw_pool& pool, uint64_t *alignment) { IoCtx ioctx; - int r = open_pool_ctx(pool, ioctx, false); + int r = open_pool_ctx(dpp, pool, ioctx, false); if (r < 0) { ldout(cct, 0) << "ERROR: open_pool_ctx() returned " << r << dendl; return r; @@ -733,7 +733,7 @@ void RGWRados::get_max_aligned_size(uint64_t size, uint64_t alignment, uint64_t int RGWRados::get_max_chunk_size(const rgw_pool& pool, uint64_t *max_chunk_size, const DoutPrefixProvider *dpp, uint64_t *palignment) { uint64_t alignment; - int r = get_required_alignment(pool, &alignment); + int r = get_required_alignment(dpp, pool, &alignment); if (r < 0) { return r; } @@ -803,7 +803,7 @@ public: RGWIndexCompletionThread(RGWRados *_store) : RGWRadosThread(_store, "index-complete"), store(_store) {} - int process() override; + int 
process(const DoutPrefixProvider *dpp) override; void add_completion(complete_op_data *completion) { { @@ -819,7 +819,7 @@ public: std::ostream& gen_prefix(std::ostream& out) const { return out << "rgw index completion thread: "; } }; -int RGWIndexCompletionThread::process() +int RGWIndexCompletionThread::process(const DoutPrefixProvider *dpp) { list comps; @@ -834,14 +834,14 @@ int RGWIndexCompletionThread::process() if (going_down()) { continue; } - ldout(store->ctx(), 20) << __func__ << "(): handling completion for key=" << c->key << dendl; + ldpp_dout(this, 20) << __func__ << "(): handling completion for key=" << c->key << dendl; RGWRados::BucketShard bs(store); RGWBucketInfo bucket_info; int r = bs.init(c->obj.bucket, c->obj, &bucket_info, this); if (r < 0) { - ldout(cct, 0) << "ERROR: " << __func__ << "(): failed to initialize BucketShard, obj=" << c->obj << " r=" << r << dendl; + ldpp_dout(this, 0) << "ERROR: " << __func__ << "(): failed to initialize BucketShard, obj=" << c->obj << " r=" << r << dendl; /* not much to do */ continue; } @@ -852,7 +852,7 @@ int RGWIndexCompletionThread::process() cls_rgw_guard_bucket_resharding(o, -ERR_BUSY_RESHARDING); cls_rgw_bucket_complete_op(o, c->op, c->tag, c->ver, c->key, c->dir_meta, &c->remove_objs, c->log_op, c->bilog_op, &c->zones_trace); - return bs->bucket_obj.operate(&o, null_yield); + return bs->bucket_obj.operate(this, &o, null_yield); }); if (r < 0) { ldpp_dout(this, 0) << "ERROR: " << __func__ << "(): bucket index completion failed, obj=" << c->obj << " r=" << r << dendl; @@ -861,7 +861,7 @@ int RGWIndexCompletionThread::process() } r = store->svc.datalog_rados->add_entry(this, bucket_info, bs.shard_id); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed writing data log" << dendl; + ldpp_dout(this, -1) << "ERROR: failed writing data log" << dendl; } } @@ -914,9 +914,9 @@ public: complete_op_data **result); bool handle_completion(completion_t cb, complete_op_data *arg); - int start() { + int start(const 
DoutPrefixProvider *dpp) { completion_thread = new RGWIndexCompletionThread(store); - int ret = completion_thread->init(); + int ret = completion_thread->init(dpp); if (ret < 0) { return ret; } @@ -1173,27 +1173,27 @@ int RGWRados::init_complete(const DoutPrefixProvider *dpp) */ sync_module = svc.sync_modules->get_sync_module(); - ret = open_root_pool_ctx(); + ret = open_root_pool_ctx(dpp); if (ret < 0) return ret; - ret = open_gc_pool_ctx(); + ret = open_gc_pool_ctx(dpp); if (ret < 0) return ret; - ret = open_lc_pool_ctx(); + ret = open_lc_pool_ctx(dpp); if (ret < 0) return ret; - ret = open_objexp_pool_ctx(); + ret = open_objexp_pool_ctx(dpp); if (ret < 0) return ret; - ret = open_reshard_pool_ctx(); + ret = open_reshard_pool_ctx(dpp); if (ret < 0) return ret; - ret = open_notif_pool_ctx(); + ret = open_notif_pool_ctx(dpp); if (ret < 0) return ret; @@ -1245,7 +1245,7 @@ int RGWRados::init_complete(const DoutPrefixProvider *dpp) auto async_processor = svc.rados->get_async_processor(); std::lock_guard l{meta_sync_thread_lock}; meta_sync_processor_thread = new RGWMetaSyncProcessorThread(this->store, async_processor); - ret = meta_sync_processor_thread->init(); + ret = meta_sync_processor_thread->init(dpp); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: failed to initialize meta sync thread" << dendl; return ret; @@ -1266,9 +1266,9 @@ int RGWRados::init_complete(const DoutPrefixProvider *dpp) std::lock_guard dl{data_sync_thread_lock}; for (auto source_zone : svc.zone->get_data_sync_source_zones()) { - ldout(cct, 5) << "starting data sync thread for zone " << source_zone->name << dendl; + ldpp_dout(dpp, 5) << "starting data sync thread for zone " << source_zone->name << dendl; auto *thread = new RGWDataSyncProcessorThread(this->store, svc.rados->get_async_processor(), source_zone); - ret = thread->init(); + ret = thread->init(dpp); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: failed to initialize data sync thread" << dendl; return ret; @@ -1279,7 +1279,7 @@ int 
RGWRados::init_complete(const DoutPrefixProvider *dpp) auto interval = cct->_conf->rgw_sync_log_trim_interval; if (interval > 0) { sync_log_trimmer = new RGWSyncLogTrimThread(this->store, &*bucket_trim, interval); - ret = sync_log_trimmer->init(); + ret = sync_log_trimmer->init(dpp); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: failed to initialize sync log trim thread" << dendl; return ret; @@ -1299,7 +1299,7 @@ int RGWRados::init_complete(const DoutPrefixProvider *dpp) if (use_lc_thread) lc->start_processor(); - quota_handler = RGWQuotaHandler::generate_handler(this->store, quota_threads); + quota_handler = RGWQuotaHandler::generate_handler(dpp, this->store, quota_threads); bucket_index_max_shards = (cct->_conf->rgw_override_bucket_index_max_shards ? cct->_conf->rgw_override_bucket_index_max_shards : zone.bucket_index_max_shards); @@ -1327,7 +1327,7 @@ int RGWRados::init_complete(const DoutPrefixProvider *dpp) } index_completion_manager = new RGWIndexCompletionManager(this); - ret = index_completion_manager->start(); + ret = index_completion_manager->start(dpp); if (ret < 0) { return ret; } @@ -1390,41 +1390,41 @@ int RGWRados::initialize(const DoutPrefixProvider *dpp) * Open the pool used as root for this gateway * Returns: 0 on success, -ERR# otherwise. 
*/ -int RGWRados::open_root_pool_ctx() +int RGWRados::open_root_pool_ctx(const DoutPrefixProvider *dpp) { - return rgw_init_ioctx(get_rados_handle(), svc.zone->get_zone_params().domain_root, root_pool_ctx, true, true); + return rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().domain_root, root_pool_ctx, true, true); } -int RGWRados::open_gc_pool_ctx() +int RGWRados::open_gc_pool_ctx(const DoutPrefixProvider *dpp) { - return rgw_init_ioctx(get_rados_handle(), svc.zone->get_zone_params().gc_pool, gc_pool_ctx, true, true); + return rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().gc_pool, gc_pool_ctx, true, true); } -int RGWRados::open_lc_pool_ctx() +int RGWRados::open_lc_pool_ctx(const DoutPrefixProvider *dpp) { - return rgw_init_ioctx(get_rados_handle(), svc.zone->get_zone_params().lc_pool, lc_pool_ctx, true, true); + return rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().lc_pool, lc_pool_ctx, true, true); } -int RGWRados::open_objexp_pool_ctx() +int RGWRados::open_objexp_pool_ctx(const DoutPrefixProvider *dpp) { - return rgw_init_ioctx(get_rados_handle(), svc.zone->get_zone_params().log_pool, objexp_pool_ctx, true, true); + return rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().log_pool, objexp_pool_ctx, true, true); } -int RGWRados::open_reshard_pool_ctx() +int RGWRados::open_reshard_pool_ctx(const DoutPrefixProvider *dpp) { - return rgw_init_ioctx(get_rados_handle(), svc.zone->get_zone_params().reshard_pool, reshard_pool_ctx, true, true); + return rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().reshard_pool, reshard_pool_ctx, true, true); } -int RGWRados::open_notif_pool_ctx() +int RGWRados::open_notif_pool_ctx(const DoutPrefixProvider *dpp) { - return rgw_init_ioctx(get_rados_handle(), svc.zone->get_zone_params().notif_pool, notif_pool_ctx, true, true); + return rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().notif_pool, notif_pool_ctx, true, 
true); } -int RGWRados::open_pool_ctx(const rgw_pool& pool, librados::IoCtx& io_ctx, +int RGWRados::open_pool_ctx(const DoutPrefixProvider *dpp, const rgw_pool& pool, librados::IoCtx& io_ctx, bool mostly_omap) { constexpr bool create = true; // create the pool if it doesn't exist - return rgw_init_ioctx(get_rados_handle(), pool, io_ctx, create, mostly_omap); + return rgw_init_ioctx(dpp, get_rados_handle(), pool, io_ctx, create, mostly_omap); } /**** logs ****/ @@ -1435,10 +1435,10 @@ struct log_list_state { librados::NObjectIterator obit; }; -int RGWRados::log_list_init(const string& prefix, RGWAccessHandle *handle) +int RGWRados::log_list_init(const DoutPrefixProvider *dpp, const string& prefix, RGWAccessHandle *handle) { log_list_state *state = new log_list_state; - int r = rgw_init_ioctx(get_rados_handle(), svc.zone->get_zone_params().log_pool, state->io_ctx); + int r = rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().log_pool, state->io_ctx); if (r < 0) { delete state; return r; @@ -1469,10 +1469,10 @@ int RGWRados::log_list_next(RGWAccessHandle handle, string *name) return 0; } -int RGWRados::log_remove(const string& name) +int RGWRados::log_remove(const DoutPrefixProvider *dpp, const string& name) { librados::IoCtx io_ctx; - int r = rgw_init_ioctx(get_rados_handle(), svc.zone->get_zone_params().log_pool, io_ctx); + int r = rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().log_pool, io_ctx); if (r < 0) return r; return io_ctx.remove(name); @@ -1488,10 +1488,10 @@ struct log_show_state { log_show_state() : pos(0), eof(false) {} }; -int RGWRados::log_show_init(const string& name, RGWAccessHandle *handle) +int RGWRados::log_show_init(const DoutPrefixProvider *dpp, const string& name, RGWAccessHandle *handle) { log_show_state *state = new log_show_state; - int r = rgw_init_ioctx(get_rados_handle(), svc.zone->get_zone_params().log_pool, state->io_ctx); + int r = rgw_init_ioctx(dpp, get_rados_handle(), 
svc.zone->get_zone_params().log_pool, state->io_ctx); if (r < 0) { delete state; return r; @@ -1573,7 +1573,7 @@ static void usage_log_hash(CephContext *cct, const string& name, string& hash, u hash = buf; } -int RGWRados::log_usage(map& usage_info) +int RGWRados::log_usage(const DoutPrefixProvider *dpp, map& usage_info) { uint32_t index = 0; @@ -1589,7 +1589,7 @@ int RGWRados::log_usage(map& usage_info) RGWUsageBatch& info = iter->second; if (ub.user.empty()) { - ldout(cct, 0) << "WARNING: RGWRados::log_usage(): user name empty (bucket=" << ub.bucket << "), skipping" << dendl; + ldpp_dout(dpp, 0) << "WARNING: RGWRados::log_usage(): user name empty (bucket=" << ub.bucket << "), skipping" << dendl; continue; } @@ -1610,14 +1610,14 @@ int RGWRados::log_usage(map& usage_info) map::iterator liter; for (liter = log_objs.begin(); liter != log_objs.end(); ++liter) { - int r = cls_obj_usage_log_add(liter->first, liter->second); + int r = cls_obj_usage_log_add(dpp, liter->first, liter->second); if (r < 0) return r; } return 0; } -int RGWRados::read_usage(const rgw_user& user, const string& bucket_name, uint64_t start_epoch, uint64_t end_epoch, +int RGWRados::read_usage(const DoutPrefixProvider *dpp, const rgw_user& user, const string& bucket_name, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, bool *is_truncated, RGWUsageIter& usage_iter, map& usage) { @@ -1638,7 +1638,7 @@ int RGWRados::read_usage(const rgw_user& user, const string& bucket_name, uint64 map ret_usage; map::iterator iter; - int ret = cls_obj_usage_log_read(hash, user_str, bucket_name, start_epoch, end_epoch, num, + int ret = cls_obj_usage_log_read(dpp, hash, user_str, bucket_name, start_epoch, end_epoch, num, usage_iter.read_iter, ret_usage, is_truncated); if (ret == -ENOENT) goto next; @@ -1661,7 +1661,7 @@ next: return 0; } -int RGWRados::trim_usage(const rgw_user& user, const string& bucket_name, uint64_t start_epoch, uint64_t end_epoch) +int RGWRados::trim_usage(const DoutPrefixProvider 
*dpp, const rgw_user& user, const string& bucket_name, uint64_t start_epoch, uint64_t end_epoch) { uint32_t index = 0; string hash, first_hash; @@ -1670,7 +1670,7 @@ int RGWRados::trim_usage(const rgw_user& user, const string& bucket_name, uint64 hash = first_hash; do { - int ret = cls_obj_usage_log_trim(hash, user_str, bucket_name, start_epoch, end_epoch); + int ret = cls_obj_usage_log_trim(dpp, hash, user_str, bucket_name, start_epoch, end_epoch); if (ret < 0 && ret != -ENOENT) return ret; @@ -1682,15 +1682,15 @@ int RGWRados::trim_usage(const rgw_user& user, const string& bucket_name, uint64 } -int RGWRados::clear_usage() +int RGWRados::clear_usage(const DoutPrefixProvider *dpp) { auto max_shards = cct->_conf->rgw_usage_max_shards; int ret=0; for (unsigned i=0; i < max_shards; i++){ string oid = RGW_USAGE_OBJ_PREFIX + to_string(i); - ret = cls_obj_usage_log_clear(oid); + ret = cls_obj_usage_log_clear(dpp, oid); if (ret < 0){ - ldout(cct,0) << "usage clear on oid="<< oid << "failed with ret=" << ret << dendl; + ldpp_dout(dpp,0) << "usage clear on oid="<< oid << "failed with ret=" << ret << dendl; return ret; } } @@ -1711,7 +1711,7 @@ int RGWRados::decode_policy(bufferlist& bl, ACLOwner *owner) return 0; } -int rgw_policy_from_attrset(CephContext *cct, map& attrset, RGWAccessControlPolicy *policy) +int rgw_policy_from_attrset(const DoutPrefixProvider *dpp, CephContext *cct, map& attrset, RGWAccessControlPolicy *policy) { map::iterator aiter = attrset.find(RGW_ATTR_ACL); if (aiter == attrset.end()) @@ -1722,12 +1722,12 @@ int rgw_policy_from_attrset(CephContext *cct, map& attrset, try { policy->decode(iter); } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl; + ldpp_dout(dpp, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl; return -EIO; } if (cct->_conf->subsys.should_gather()) { RGWAccessControlPolicy_S3 *s3policy = static_cast(policy); - ldout(cct, 15) << __func__ << " Read 
AccessControlPolicy"; + ldpp_dout(dpp, 15) << __func__ << " Read AccessControlPolicy"; s3policy->to_xml(*_dout); *_dout << dendl; } @@ -1824,12 +1824,12 @@ int RGWRados::Bucket::List::list_objects_ordered( rgw_obj_index_key prev_marker; for (uint16_t attempt = 1; /* empty */; ++attempt) { - ldout(cct, 20) << "RGWRados::Bucket::List::" << __func__ << + ldpp_dout(dpp, 20) << "RGWRados::Bucket::List::" << __func__ << " starting attempt " << attempt << dendl; if (attempt > 1 && !(prev_marker < cur_marker)) { // we've failed to make forward progress - ldout(cct, 0) << "RGWRados::Bucket::List::" << __func__ << + ldpp_dout(dpp, 0) << "RGWRados::Bucket::List::" << __func__ << ": ERROR marker failed to make forward progress; attempt=" << attempt << ", prev_marker=" << prev_marker << ", cur_marker=" << cur_marker << dendl; @@ -1873,7 +1873,7 @@ int RGWRados::Bucket::List::list_objects_ordered( */ bool valid = rgw_obj_key::parse_raw_oid(index_key.name, &obj); if (!valid) { - ldout(cct, 0) << "ERROR: could not parse object name: " << + ldpp_dout(dpp, 0) << "ERROR: could not parse object name: " << obj.name << dendl; continue; } @@ -1927,7 +1927,7 @@ int RGWRados::Bucket::List::list_objects_ordered( // after the prefix if (delim_pos != int(obj.name.length() - params.delim.length())) { - ldout(cct, 0) << + ldpp_dout(dpp, 0) << "WARNING: found delimiter in place other than the end of " "the prefix; obj.name=" << obj.name << ", prefix=" << params.prefix << dendl; @@ -1976,7 +1976,7 @@ int RGWRados::Bucket::List::list_objects_ordered( goto done; } - ldout(cct, 20) << "RGWRados::Bucket::List::" << __func__ << + ldpp_dout(dpp, 20) << "RGWRados::Bucket::List::" << __func__ << " adding entry " << entry.key << " to result" << dendl; result->emplace_back(std::move(entry)); @@ -1995,11 +1995,11 @@ int RGWRados::Bucket::List::list_objects_ordered( cur_marker.name.substr(0, marker_delim_pos); skip_after_delim.append(after_delim_s); - ldout(cct, 20) << "skip_after_delim=" << skip_after_delim 
<< dendl; + ldpp_dout(dpp, 20) << "skip_after_delim=" << skip_after_delim << dendl; if (skip_after_delim > cur_marker.name) { cur_marker = skip_after_delim; - ldout(cct, 20) << "setting cur_marker=" + ldpp_dout(dpp, 20) << "setting cur_marker=" << cur_marker.name << "[" << cur_marker.instance << "]" << dendl; @@ -2007,7 +2007,7 @@ int RGWRados::Bucket::List::list_objects_ordered( } } // if older osd didn't do delimiter filtering - ldout(cct, 20) << "RGWRados::Bucket::List::" << __func__ << + ldpp_dout(dpp, 20) << "RGWRados::Bucket::List::" << __func__ << " INFO end of outer loop, truncated=" << truncated << ", count=" << count << ", attempt=" << attempt << dendl; @@ -2060,7 +2060,6 @@ int RGWRados::Bucket::List::list_objects_unordered(const DoutPrefixProvider *dpp optional_yield y) { RGWRados *store = target->get_store(); - CephContext *cct = store->ctx(); int shard_id = target->get_shard_id(); int count = 0; @@ -2136,7 +2135,7 @@ int RGWRados::Bucket::List::list_objects_unordered(const DoutPrefixProvider *dpp */ bool valid = rgw_obj_key::parse_raw_oid(index_key.name, &obj); if (!valid) { - ldout(cct, 0) << "ERROR: could not parse object name: " << + ldpp_dout(dpp, 0) << "ERROR: could not parse object name: " << obj.name << dendl; continue; } @@ -2186,11 +2185,11 @@ done: * create a rados pool, associated meta info * returns 0 on success, -ERR# otherwise. 
*/ -int RGWRados::create_pool(const rgw_pool& pool) +int RGWRados::create_pool(const DoutPrefixProvider *dpp, const rgw_pool& pool) { librados::IoCtx io_ctx; constexpr bool create = true; - return rgw_init_ioctx(get_rados_handle(), pool, io_ctx, create); + return rgw_init_ioctx(dpp, get_rados_handle(), pool, io_ctx, create); } void RGWRados::create_bucket_id(string *bucket_id) @@ -2225,7 +2224,7 @@ int RGWRados::create_bucket(const RGWUserInfo& owner, rgw_bucket& bucket, for (int i = 0; i < MAX_CREATE_RETRIES; i++) { int ret = 0; - ret = svc.zone->select_bucket_placement(owner, zonegroup_id, placement_rule, + ret = svc.zone->select_bucket_placement(dpp, owner, zonegroup_id, placement_rule, &selected_placement_rule, &rule_info, y); if (ret < 0) return ret; @@ -2271,7 +2270,7 @@ int RGWRados::create_bucket(const RGWUserInfo& owner, rgw_bucket& bucket, info.quota = *pquota_info; } - int r = svc.bi->init_index(info); + int r = svc.bi->init_index(dpp, info); if (r < 0) { return r; } @@ -2288,19 +2287,19 @@ int RGWRados::create_bucket(const RGWUserInfo& owner, rgw_bucket& bucket, if (r == -ENOENT) { continue; } - ldout(cct, 0) << "get_bucket_info returned " << r << dendl; + ldpp_dout(dpp, 0) << "get_bucket_info returned " << r << dendl; return r; } /* only remove it if it's a different bucket instance */ if (orig_info.bucket.bucket_id != bucket.bucket_id) { - int r = svc.bi->clean_index(info); + int r = svc.bi->clean_index(dpp, info); if (r < 0) { - ldout(cct, 0) << "WARNING: could not remove bucket index (r=" << r << ")" << dendl; + ldpp_dout(dpp, 0) << "WARNING: could not remove bucket index (r=" << r << ")" << dendl; } r = ctl.bucket->remove_bucket_instance_info(info.bucket, info, null_yield, dpp); if (r < 0) { - ldout(cct, 0) << "WARNING: " << __func__ << "(): failed to remove bucket instance info: bucket instance=" << info.bucket.get_key() << ": r=" << r << dendl; + ldpp_dout(dpp, 0) << "WARNING: " << __func__ << "(): failed to remove bucket instance info: bucket 
instance=" << info.bucket.get_key() << ": r=" << r << dendl; /* continue anyway */ } } @@ -2312,7 +2311,7 @@ int RGWRados::create_bucket(const RGWUserInfo& owner, rgw_bucket& bucket, } /* this is highly unlikely */ - ldout(cct, 0) << "ERROR: could not create bucket, continuously raced with bucket creation and removal" << dendl; + ldpp_dout(dpp, 0) << "ERROR: could not create bucket, continuously raced with bucket creation and removal" << dendl; return -ENOENT; } @@ -2328,18 +2327,18 @@ bool RGWRados::obj_to_raw(const rgw_placement_rule& placement_rule, const rgw_ob return get_obj_data_pool(placement_rule, obj, &raw_obj->pool); } -int RGWRados::get_obj_head_ioctx(const RGWBucketInfo& bucket_info, const rgw_obj& obj, librados::IoCtx *ioctx) +int RGWRados::get_obj_head_ioctx(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, librados::IoCtx *ioctx) { string oid, key; get_obj_bucket_and_oid_loc(obj, oid, key); rgw_pool pool; if (!get_obj_data_pool(bucket_info.placement_rule, obj, &pool)) { - ldout(cct, 0) << "ERROR: cannot get data pool for obj=" << obj << ", probably misconfiguration" << dendl; + ldpp_dout(dpp, 0) << "ERROR: cannot get data pool for obj=" << obj << ", probably misconfiguration" << dendl; return -EIO; } - int r = open_pool_ctx(pool, *ioctx, false); + int r = open_pool_ctx(dpp, pool, *ioctx, false); if (r < 0) { return r; } @@ -2349,22 +2348,22 @@ int RGWRados::get_obj_head_ioctx(const RGWBucketInfo& bucket_info, const rgw_obj return 0; } -int RGWRados::get_obj_head_ref(const RGWBucketInfo& bucket_info, const rgw_obj& obj, rgw_rados_ref *ref) +int RGWRados::get_obj_head_ref(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, rgw_rados_ref *ref) { get_obj_bucket_and_oid_loc(obj, ref->obj.oid, ref->obj.loc); rgw_pool pool; if (!get_obj_data_pool(bucket_info.placement_rule, obj, &pool)) { - ldout(cct, 0) << "ERROR: cannot get data pool for obj=" << obj << ", probably misconfiguration" << 
dendl; + ldpp_dout(dpp, 0) << "ERROR: cannot get data pool for obj=" << obj << ", probably misconfiguration" << dendl; return -EIO; } ref->pool = svc.rados->pool(pool); - int r = ref->pool.open(RGWSI_RADOS::OpenParams() + int r = ref->pool.open(dpp, RGWSI_RADOS::OpenParams() .set_mostly_omap(false)); if (r < 0) { - ldout(cct, 0) << "ERROR: failed opening data pool (pool=" << pool << "); r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed opening data pool (pool=" << pool << "); r=" << r << dendl; return r; } @@ -2373,7 +2372,7 @@ int RGWRados::get_obj_head_ref(const RGWBucketInfo& bucket_info, const rgw_obj& return 0; } -int RGWRados::get_raw_obj_ref(const rgw_raw_obj& obj, rgw_rados_ref *ref) +int RGWRados::get_raw_obj_ref(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, rgw_rados_ref *ref) { ref->obj = obj; @@ -2382,10 +2381,10 @@ int RGWRados::get_raw_obj_ref(const rgw_raw_obj& obj, rgw_rados_ref *ref) ref->obj.pool = svc.zone->get_zone_params().domain_root; } ref->pool = svc.rados->pool(obj.pool); - int r = ref->pool.open(RGWSI_RADOS::OpenParams() + int r = ref->pool.open(dpp, RGWSI_RADOS::OpenParams() .set_mostly_omap(false)); if (r < 0) { - ldout(cct, 0) << "ERROR: failed opening pool (pool=" << obj.pool << "); r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed opening pool (pool=" << obj.pool << "); r=" << r << dendl; return r; } @@ -2394,16 +2393,16 @@ int RGWRados::get_raw_obj_ref(const rgw_raw_obj& obj, rgw_rados_ref *ref) return 0; } -int RGWRados::get_system_obj_ref(const rgw_raw_obj& obj, rgw_rados_ref *ref) +int RGWRados::get_system_obj_ref(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, rgw_rados_ref *ref) { - return get_raw_obj_ref(obj, ref); + return get_raw_obj_ref(dpp, obj, ref); } /* * fixes an issue where head objects were supposed to have a locator created, but ended * up without one */ -int RGWRados::fix_head_obj_locator(const RGWBucketInfo& bucket_info, bool copy_obj, bool remove_bad, rgw_obj_key& key) +int 
RGWRados::fix_head_obj_locator(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, bool copy_obj, bool remove_bad, rgw_obj_key& key) { const rgw_bucket& bucket = bucket_info.bucket; string oid; @@ -2414,13 +2413,13 @@ int RGWRados::fix_head_obj_locator(const RGWBucketInfo& bucket_info, bool copy_o get_obj_bucket_and_oid_loc(obj, oid, locator); if (locator.empty()) { - ldout(cct, 20) << "object does not have a locator, nothing to fix" << dendl; + ldpp_dout(dpp, 20) << "object does not have a locator, nothing to fix" << dendl; return 0; } librados::IoCtx ioctx; - int ret = get_obj_head_ioctx(bucket_info, obj, &ioctx); + int ret = get_obj_head_ioctx(dpp, bucket_info, obj, &ioctx); if (ret < 0) { cerr << "ERROR: get_obj_head_ioctx() returned ret=" << ret << std::endl; return ret; @@ -2438,19 +2437,19 @@ int RGWRados::fix_head_obj_locator(const RGWBucketInfo& bucket_info, bool copy_o #define HEAD_SIZE 512 * 1024 op.read(0, HEAD_SIZE, &data, NULL); - ret = rgw_rados_operate(ioctx, oid, &op, &data, null_yield); + ret = rgw_rados_operate(dpp, ioctx, oid, &op, &data, null_yield); if (ret < 0) { - lderr(cct) << "ERROR: rgw_rados_operate(oid=" << oid << ") returned ret=" << ret << dendl; + ldpp_dout(dpp, -1) << "ERROR: rgw_rados_operate(oid=" << oid << ") returned ret=" << ret << dendl; return ret; } if (size > HEAD_SIZE) { - lderr(cct) << "ERROR: returned object size (" << size << ") > HEAD_SIZE (" << HEAD_SIZE << ")" << dendl; + ldpp_dout(dpp, -1) << "ERROR: returned object size (" << size << ") > HEAD_SIZE (" << HEAD_SIZE << ")" << dendl; return -EIO; } if (size != data.length()) { - lderr(cct) << "ERROR: returned object size (" << size << ") != data.length() (" << data.length() << ")" << dendl; + ldpp_dout(dpp, -1) << "ERROR: returned object size (" << size << ") != data.length() (" << data.length() << ")" << dendl; return -EIO; } @@ -2467,7 +2466,7 @@ int RGWRados::fix_head_obj_locator(const RGWBucketInfo& bucket_info, bool copy_o wop.write(0, data); 
ioctx.locator_set_key(locator); - rgw_rados_operate(ioctx, oid, &wop, null_yield); + rgw_rados_operate(dpp, ioctx, oid, &wop, null_yield); } if (remove_bad) { @@ -2475,7 +2474,7 @@ int RGWRados::fix_head_obj_locator(const RGWBucketInfo& bucket_info, bool copy_o ret = ioctx.remove(oid); if (ret < 0) { - lderr(cct) << "ERROR: failed to remove original bad object" << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to remove original bad object" << dendl; return ret; } } @@ -2483,7 +2482,8 @@ int RGWRados::fix_head_obj_locator(const RGWBucketInfo& bucket_info, bool copy_o return 0; } -int RGWRados::move_rados_obj(librados::IoCtx& src_ioctx, +int RGWRados::move_rados_obj(const DoutPrefixProvider *dpp, + librados::IoCtx& src_ioctx, const string& src_oid, const string& src_locator, librados::IoCtx& dst_ioctx, const string& dst_oid, const string& dst_locator) @@ -2515,7 +2515,7 @@ int RGWRados::move_rados_obj(librados::IoCtx& src_ioctx, mtime = real_clock::from_timespec(mtime_ts); } rop.read(ofs, chunk_size, &data, NULL); - ret = rgw_rados_operate(src_ioctx, src_oid, &rop, &data, null_yield); + ret = rgw_rados_operate(dpp, src_ioctx, src_oid, &rop, &data, null_yield); if (ret < 0) { goto done_err; } @@ -2530,7 +2530,7 @@ int RGWRados::move_rados_obj(librados::IoCtx& src_ioctx, mtime = real_clock::from_timespec(mtime_ts); } wop.write(ofs, data); - ret = rgw_rados_operate(dst_ioctx, dst_oid, &wop, null_yield); + ret = rgw_rados_operate(dpp, dst_ioctx, dst_oid, &wop, null_yield); if (ret < 0) { goto done_err; } @@ -2539,7 +2539,7 @@ int RGWRados::move_rados_obj(librados::IoCtx& src_ioctx, } while (!done); if (ofs != size) { - lderr(cct) << "ERROR: " << __func__ << ": copying " << src_oid << " -> " << dst_oid + ldpp_dout(dpp, -1) << "ERROR: " << __func__ << ": copying " << src_oid << " -> " << dst_oid << ": expected " << size << " bytes to copy, ended up with " << ofs << dendl; ret = -EIO; goto done_err; @@ -2551,7 +2551,7 @@ int RGWRados::move_rados_obj(librados::IoCtx& 
src_ioctx, done_err: // TODO: clean up dst_oid if we created it - lderr(cct) << "ERROR: failed to copy " << src_oid << " -> " << dst_oid << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to copy " << src_oid << " -> " << dst_oid << dendl; return ret; } @@ -2569,7 +2569,7 @@ int RGWRados::fix_tail_obj_locator(const DoutPrefixProvider *dpp, const RGWBucke } rgw_rados_ref ref; - int r = get_obj_head_ref(bucket_info, obj, &ref); + int r = get_obj_head_ref(dpp, bucket_info, obj, &ref); if (r < 0) { return r; } @@ -2583,7 +2583,7 @@ int RGWRados::fix_tail_obj_locator(const DoutPrefixProvider *dpp, const RGWBucke if (astate->manifest) { RGWObjManifest::obj_iterator miter; RGWObjManifest& manifest = *astate->manifest; - for (miter = manifest.obj_begin(); miter != manifest.obj_end(); ++miter) { + for (miter = manifest.obj_begin(dpp); miter != manifest.obj_end(dpp); ++miter) { rgw_raw_obj raw_loc = miter.get_location().get_raw_obj(store); rgw_obj loc; string oid; @@ -2626,9 +2626,9 @@ int RGWRados::fix_tail_obj_locator(const DoutPrefixProvider *dpp, const RGWBucke *need_fix = true; } if (fix) { - r = move_rados_obj(src_ioctx, oid, bad_loc, ioctx, oid, locator); + r = move_rados_obj(dpp, src_ioctx, oid, bad_loc, ioctx, oid, locator); if (r < 0) { - lderr(cct) << "ERROR: copy_rados_obj() on oid=" << oid << " returned r=" << r << dendl; + ldpp_dout(dpp, -1) << "ERROR: copy_rados_obj() on oid=" << oid << " returned r=" << r << dendl; } } } @@ -2657,9 +2657,9 @@ int RGWRados::BucketShard::init(const rgw_bucket& _bucket, string oid; - ret = store->svc.bi_rados->open_bucket_index_shard(*bucket_info_p, obj.get_hash_object(), &bucket_obj, &shard_id); + ret = store->svc.bi_rados->open_bucket_index_shard(dpp, *bucket_info_p, obj.get_hash_object(), &bucket_obj, &shard_id); if (ret < 0) { - ldout(store->ctx(), 0) << "ERROR: open_bucket_index_shard() returned ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: open_bucket_index_shard() returned ret=" << ret << dendl; return ret; } 
ldpp_dout(dpp, 20) << " bucket index object: " << bucket_obj.get_raw_obj() << dendl; @@ -2688,9 +2688,9 @@ int RGWRados::BucketShard::init(const rgw_bucket& _bucket, string oid; - ret = store->svc.bi_rados->open_bucket_index_shard(*bucket_info_p, shard_id, idx_layout, &bucket_obj); + ret = store->svc.bi_rados->open_bucket_index_shard(dpp, *bucket_info_p, shard_id, idx_layout, &bucket_obj); if (ret < 0) { - ldout(store->ctx(), 0) << "ERROR: open_bucket_index_shard() returned ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: open_bucket_index_shard() returned ret=" << ret << dendl; return ret; } ldpp_dout(dpp, 20) << " bucket index oid: " << bucket_obj.get_raw_obj() << dendl; @@ -2698,35 +2698,35 @@ int RGWRados::BucketShard::init(const rgw_bucket& _bucket, return 0; } -int RGWRados::BucketShard::init(const RGWBucketInfo& bucket_info, +int RGWRados::BucketShard::init(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj) { bucket = bucket_info.bucket; - int ret = store->svc.bi_rados->open_bucket_index_shard(bucket_info, + int ret = store->svc.bi_rados->open_bucket_index_shard(dpp, bucket_info, obj.get_hash_object(), &bucket_obj, &shard_id); if (ret < 0) { - ldout(store->ctx(), 0) << "ERROR: open_bucket_index_shard() returned ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: open_bucket_index_shard() returned ret=" << ret << dendl; return ret; } - ldout(store->ctx(), 20) << " bucket index object: " << bucket_obj << dendl; + ldpp_dout(dpp, 20) << " bucket index object: " << bucket_obj << dendl; return 0; } -int RGWRados::BucketShard::init(const RGWBucketInfo& bucket_info, const rgw::bucket_index_layout_generation& idx_layout, int sid) +int RGWRados::BucketShard::init(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw::bucket_index_layout_generation& idx_layout, int sid) { bucket = bucket_info.bucket; shard_id = sid; - int ret = store->svc.bi_rados->open_bucket_index_shard(bucket_info, shard_id, idx_layout, 
&bucket_obj); + int ret = store->svc.bi_rados->open_bucket_index_shard(dpp, bucket_info, shard_id, idx_layout, &bucket_obj); if (ret < 0) { - ldout(store->ctx(), 0) << "ERROR: open_bucket_index_shard() returned ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: open_bucket_index_shard() returned ret=" << ret << dendl; return ret; } - ldout(store->ctx(), 20) << " bucket index object: " << bucket_obj << dendl; + ldpp_dout(dpp, 20) << " bucket index object: " << bucket_obj << dendl; return 0; } @@ -2817,7 +2817,7 @@ int RGWRados::swift_versioning_copy(RGWObjectCtx& obj_ctx, r = get_bucket_info(&svc, bucket->get_tenant(), bucket->get_info().swift_ver_location, dest_bucket_info, NULL, null_yield, NULL); if (r < 0) { - ldout(cct, 10) << "failed to read dest bucket info: r=" << r << dendl; + ldpp_dout(dpp, 10) << "failed to read dest bucket info: r=" << r << dendl; if (r == -ENOENT) { return -ERR_PRECONDITION_FAILED; } @@ -3022,7 +3022,7 @@ int RGWRados::Object::Write::_do_write_meta(const DoutPrefixProvider *dpp, } rgw_rados_ref ref; - r = store->get_obj_head_ref(target->get_bucket_info(), obj, &ref); + r = store->get_obj_head_ref(dpp, target->get_bucket_info(), obj, &ref); if (r < 0) return r; @@ -3162,7 +3162,7 @@ int RGWRados::Object::Write::_do_write_meta(const DoutPrefixProvider *dpp, auto& ioctx = ref.pool.ioctx(); tracepoint(rgw_rados, operate_enter, req_id.c_str()); - r = rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield); + r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield); tracepoint(rgw_rados, operate_exit, req_id.c_str()); if (r < 0) { /* we can expect to get -ECANCELED if object was replaced under, or -ENOENT if was removed, or -EEXIST if it did not exist @@ -3177,9 +3177,9 @@ int RGWRados::Object::Write::_do_write_meta(const DoutPrefixProvider *dpp, epoch = ioctx.get_last_version(); poolid = ioctx.get_id(); - r = target->complete_atomic_modification(); + r = target->complete_atomic_modification(dpp); if (r < 0) { - 
ldout(store->ctx(), 0) << "ERROR: complete_atomic_modification returned r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: complete_atomic_modification returned r=" << r << dendl; } tracepoint(rgw_rados, complete_enter, req_id.c_str()); @@ -3210,10 +3210,10 @@ int RGWRados::Object::Write::_do_write_meta(const DoutPrefixProvider *dpp, rgw_obj_index_key obj_key; obj.key.get_index_key(&obj_key); - r = store->obj_expirer->hint_add(meta.delete_at, obj.bucket.tenant, obj.bucket.name, + r = store->obj_expirer->hint_add(dpp, meta.delete_at, obj.bucket.tenant, obj.bucket.name, obj.bucket.bucket_id, obj_key); if (r < 0) { - ldout(store->ctx(), 0) << "ERROR: objexp_hint_add() returned r=" << r << ", object will not get removed" << dendl; + ldpp_dout(dpp, 0) << "ERROR: objexp_hint_add() returned r=" << r << ", object will not get removed" << dendl; /* ignoring error, nothing we can do at this point */ } } @@ -3233,7 +3233,7 @@ int RGWRados::Object::Write::_do_write_meta(const DoutPrefixProvider *dpp, done_cancel: int ret = index_op->cancel(dpp); if (ret < 0) { - ldout(store->ctx(), 0) << "ERROR: index_op.cancel()() returned ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: index_op.cancel()() returned ret=" << ret << dendl; } meta.canceled = true; @@ -3300,6 +3300,7 @@ int RGWRados::Object::Write::write_meta(const DoutPrefixProvider *dpp, uint64_t class RGWRadosPutObj : public RGWHTTPStreamRWRequest::ReceiveCB { + const DoutPrefixProvider *dpp; CephContext* cct; rgw_obj obj; rgw::putobj::DataProcessor *filter; @@ -3321,13 +3322,15 @@ class RGWRadosPutObj : public RGWHTTPStreamRWRequest::ReceiveCB uint64_t lofs{0}; /* logical ofs */ std::function&)> attrs_handler; public: - RGWRadosPutObj(CephContext* cct, + RGWRadosPutObj(const DoutPrefixProvider *dpp, + CephContext* cct, CompressorRef& plugin, boost::optional& compressor, rgw::putobj::ObjectProcessor *p, void (*_progress_cb)(off_t, void *), void *_progress_data, std::function&)> _attrs_handler) : + dpp(dpp), cct(cct), 
filter(p), compressor(compressor), @@ -3342,7 +3345,7 @@ public: if (extra_data_bl.length()) { JSONParser jp; if (!jp.parse(extra_data_bl.c_str(), extra_data_bl.length())) { - ldout(cct, 0) << "failed to parse response extra data. len=" << extra_data_bl.length() << " data=" << extra_data_bl.c_str() << dendl; + ldpp_dout(dpp, 0) << "failed to parse response extra data. len=" << extra_data_bl.length() << " data=" << extra_data_bl.c_str() << dendl; return -EIO; } @@ -3360,7 +3363,7 @@ public: bool compressed = false; int r = rgw_compression_info_from_attr(bl, compressed, info); if (r < 0) { - ldout(cct, 4) << "failed to decode compression info, " + ldpp_dout(dpp, 4) << "failed to decode compression info, " "disabling etag verification" << dendl; try_etag_verify = false; } else if (compressed) { @@ -3407,11 +3410,11 @@ public: * to know the sequence in which the filters must be applied. */ if (try_etag_verify && src_attrs.find(RGW_ATTR_CRYPT_MODE) == src_attrs.end()) { - ret = rgw::putobj::create_etag_verifier(cct, filter, manifest_bl, + ret = rgw::putobj::create_etag_verifier(dpp, cct, filter, manifest_bl, compression_info, etag_verifier); if (ret < 0) { - ldout(cct, 4) << "failed to initial etag verifier, " + ldpp_dout(dpp, 4) << "failed to initial etag verifier, " "disabling etag verification" << dendl; } else { filter = etag_verifier.get(); @@ -3629,7 +3632,8 @@ public: } }; -int RGWRados::stat_remote_obj(RGWObjectCtx& obj_ctx, +int RGWRados::stat_remote_obj(const DoutPrefixProvider *dpp, + RGWObjectCtx& obj_ctx, const rgw_user& user_id, req_info *info, const rgw_zone_id& source_zone, @@ -3694,7 +3698,7 @@ int RGWRados::stat_remote_obj(RGWObjectCtx& obj_ctx, constexpr bool rgwx_stat = true; constexpr bool sync_manifest = true; constexpr bool skip_decrypt = true; - int ret = conn->get_obj(user_id, info, src_obj, pmod, unmod_ptr, + int ret = conn->get_obj(dpp, user_id, info, src_obj, pmod, unmod_ptr, dest_mtime_weight.zone_short_id, dest_mtime_weight.pg_ver, 
prepend_meta, get_op, rgwx_stat, sync_manifest, skip_decrypt, @@ -3823,7 +3827,7 @@ int RGWRados::fetch_remote_obj(RGWObjectCtx& obj_ctx, } else { map::iterator iter = zonegroup_conn_map.find(src_bucket->get_info().zonegroup); if (iter == zonegroup_conn_map.end()) { - ldout(cct, 0) << "could not find zonegroup connection to zonegroup: " << source_zone << dendl; + ldpp_dout(dpp, 0) << "could not find zonegroup connection to zonegroup: " << source_zone << dendl; return -ENOENT; } conn = iter->second; @@ -3831,7 +3835,7 @@ int RGWRados::fetch_remote_obj(RGWObjectCtx& obj_ctx, } else { auto iter = zone_conn_map.find(source_zone); if (iter == zone_conn_map.end()) { - ldout(cct, 0) << "could not find zone connection to zone: " << source_zone << dendl; + ldpp_dout(dpp, 0) << "could not find zone connection to zone: " << source_zone << dendl; return -ENOENT; } conn = iter->second; @@ -3847,7 +3851,7 @@ int RGWRados::fetch_remote_obj(RGWObjectCtx& obj_ctx, std::optional override_owner; - RGWRadosPutObj cb(cct, plugin, compressor, &processor, progress_cb, progress_data, + RGWRadosPutObj cb(dpp, cct, plugin, compressor, &processor, progress_cb, progress_data, [&](map& obj_attrs) { const rgw_placement_rule *ptail_rule; @@ -3859,7 +3863,7 @@ int RGWRados::fetch_remote_obj(RGWObjectCtx& obj_ctx, &override_owner, &ptail_rule); if (ret < 0) { - ldout(cct, 5) << "Aborting fetch: source object filter returned ret=" << ret << dendl; + ldpp_dout(dpp, 5) << "Aborting fetch: source object filter returned ret=" << ret << dendl; return ret; } @@ -3869,7 +3873,7 @@ int RGWRados::fetch_remote_obj(RGWObjectCtx& obj_ctx, if (compression_type != "none") { plugin = Compressor::create(cct, compression_type); if (!plugin) { - ldout(cct, 1) << "Cannot load plugin for compression type " + ldpp_dout(dpp, 1) << "Cannot load plugin for compression type " << compression_type << dendl; } } @@ -3908,7 +3912,7 @@ int RGWRados::fetch_remote_obj(RGWObjectCtx& obj_ctx, static constexpr bool rgwx_stat = 
false; static constexpr bool sync_manifest = true; static constexpr bool skip_decrypt = true; - ret = conn->get_obj(user_id, info, src_obj, pmod, unmod_ptr, + ret = conn->get_obj(dpp, user_id, info, src_obj, pmod, unmod_ptr, dest_mtime_weight.zone_short_id, dest_mtime_weight.pg_ver, prepend_meta, get_op, rgwx_stat, sync_manifest, skip_decrypt, @@ -3929,7 +3933,7 @@ int RGWRados::fetch_remote_obj(RGWObjectCtx& obj_ctx, } if (cb.get_data_len() != expected_size) { ret = -EIO; - ldout(cct, 0) << "ERROR: object truncated during fetching, expected " + ldpp_dout(dpp, 0) << "ERROR: object truncated during fetching, expected " << expected_size << " bytes but received " << cb.get_data_len() << dendl; goto set_err_state; } @@ -3951,7 +3955,7 @@ int RGWRados::fetch_remote_obj(RGWObjectCtx& obj_ctx, RGWUserInfo owner_info; if (ctl.user->get_info_by_uid(dpp, *override_owner, &owner_info, null_yield) < 0) { - ldout(cct, 10) << "owner info does not exist" << dendl; + ldpp_dout(dpp, 10) << "owner info does not exist" << dendl; return -EINVAL; } @@ -3959,14 +3963,14 @@ int RGWRados::fetch_remote_obj(RGWObjectCtx& obj_ctx, auto aiter = obj_attrs.find(RGW_ATTR_ACL); if (aiter == obj_attrs.end()) { - ldout(cct, 0) << "WARNING: " << __func__ << "(): object doesn't have ACL attribute, setting default ACLs" << dendl; + ldpp_dout(dpp, 0) << "WARNING: " << __func__ << "(): object doesn't have ACL attribute, setting default ACLs" << dendl; acl.create_default(owner_info.user_id, owner_info.display_name); } else { auto iter = aiter->second.cbegin(); try { acl.decode(iter); } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: " << __func__ << "(): could not decode policy, caught buffer::error" << dendl; + ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): could not decode policy, caught buffer::error" << dendl; return -EIO; } } @@ -3990,7 +3994,7 @@ int RGWRados::fetch_remote_obj(RGWObjectCtx& obj_ctx, try { decode(delete_at, iter->second); } catch (buffer::error& err) { - ldout(cct, 0) << 
"ERROR: failed to decode delete_at field in intra zone copy" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to decode delete_at field in intra zone copy" << dendl; } } } @@ -4023,7 +4027,7 @@ int RGWRados::fetch_remote_obj(RGWObjectCtx& obj_ctx, try { decode(pg_ver, iter); } catch (buffer::error& err) { - ldout(ctx(), 0) << "ERROR: failed to decode pg ver attribute, ignoring" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to decode pg ver attribute, ignoring" << dendl; /* non critical error */ } } @@ -4041,7 +4045,7 @@ int RGWRados::fetch_remote_obj(RGWObjectCtx& obj_ctx, if (verifier_etag != trimmed_etag) { ret = -EIO; - ldout(cct, 0) << "ERROR: source and destination objects don't match. Expected etag:" + ldpp_dout(dpp, 0) << "ERROR: source and destination objects don't match. Expected etag:" << trimmed_etag << " Computed etag:" << verifier_etag << dendl; goto set_err_state; } @@ -4058,28 +4062,28 @@ int RGWRados::fetch_remote_obj(RGWObjectCtx& obj_ctx, } if (copy_if_newer && canceled) { - ldout(cct, 20) << "raced with another write of obj: " << dest_obj << dendl; + ldpp_dout(dpp, 20) << "raced with another write of obj: " << dest_obj << dendl; obj_ctx.invalidate(dest_obj->get_obj()); /* object was overwritten */ ret = get_obj_state(dpp, &obj_ctx, dest_bucket->get_info(), dest_obj->get_obj(), &dest_state, false, null_yield); if (ret < 0) { - ldout(cct, 0) << "ERROR: " << __func__ << ": get_err_state() returned ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: " << __func__ << ": get_err_state() returned ret=" << ret << dendl; goto set_err_state; } dest_mtime_weight.init(dest_state); dest_mtime_weight.high_precision = high_precision_time; if (!dest_state->exists || dest_mtime_weight < set_mtime_weight) { - ldout(cct, 20) << "retrying writing object mtime=" << set_mtime << " dest_state->mtime=" << dest_state->mtime << " dest_state->exists=" << dest_state->exists << dendl; + ldpp_dout(dpp, 20) << "retrying writing object mtime=" << set_mtime << " 
dest_state->mtime=" << dest_state->mtime << " dest_state->exists=" << dest_state->exists << dendl; continue; } else { - ldout(cct, 20) << "not retrying writing object mtime=" << set_mtime << " dest_state->mtime=" << dest_state->mtime << " dest_state->exists=" << dest_state->exists << dendl; + ldpp_dout(dpp, 20) << "not retrying writing object mtime=" << set_mtime << " dest_state->mtime=" << dest_state->mtime << " dest_state->exists=" << dest_state->exists << dendl; } } break; } if (i == MAX_COMPLETE_RETRY) { - ldout(cct, 0) << "ERROR: retried object completion too many times, something is wrong!" << dendl; + ldpp_dout(dpp, 0) << "ERROR: retried object completion too many times, something is wrong!" << dendl; ret = -EIO; goto set_err_state; } @@ -4119,7 +4123,7 @@ int RGWRados::copy_obj_to_remote_dest(const DoutPrefixProvider *dpp, auto rest_master_conn = svc.zone->get_master_conn(); - int ret = rest_master_conn->put_obj_async_init(user_id, dest_obj, src_attrs, &out_stream_req); + int ret = rest_master_conn->put_obj_async_init(dpp, user_id, dest_obj, src_attrs, &out_stream_req); if (ret < 0) { return ret; } @@ -4344,14 +4348,14 @@ int RGWRados::copy_obj(RGWObjectCtx& obj_ctx, mtime, real_time(), attrs, olh_epoch, delete_at, petag, dpp, y); } - RGWObjManifest::obj_iterator miter = astate->manifest->obj_begin(); + RGWObjManifest::obj_iterator miter = astate->manifest->obj_begin(dpp); if (copy_first) { // we need to copy first chunk, not increase refcount ++miter; } rgw_rados_ref ref; - ret = get_raw_obj_ref(miter.get_location().get_raw_obj(store), &ref); + ret = get_raw_obj_ref(dpp, miter.get_location().get_raw_obj(store), &ref); if (ret < 0) { return ret; } @@ -4383,7 +4387,7 @@ int RGWRados::copy_obj(RGWObjectCtx& obj_ctx, manifest.set_tail_placement(tail_placement.placement_rule, src_obj->get_bucket()->get_key()); } string ref_tag; - for (; miter != astate->manifest->obj_end(); ++miter) { + for (; miter != astate->manifest->obj_end(dpp); ++miter) { 
ObjectWriteOperation op; ref_tag = tag + '\0'; cls_refcount_get(op, ref_tag, true); @@ -4392,7 +4396,7 @@ int RGWRados::copy_obj(RGWObjectCtx& obj_ctx, auto& ioctx = ref.pool.ioctx(); ioctx.locator_set_key(loc.loc); - ret = rgw_rados_operate(ioctx, loc.oid, &op, null_yield); + ret = rgw_rados_operate(dpp, ioctx, loc.oid, &op, null_yield); if (ret < 0) { goto done_ret; } @@ -4448,7 +4452,7 @@ done_ret: ref.pool.ioctx().locator_set_key(riter->loc); - int r = rgw_rados_operate(ref.pool.ioctx(), riter->oid, &op, null_yield); + int r = rgw_rados_operate(dpp, ref.pool.ioctx(), riter->oid, &op, null_yield); if (r < 0) { ldpp_dout(dpp, 0) << "ERROR: cleanup after error failed to drop reference on obj=" << *riter << dendl; } @@ -4644,7 +4648,7 @@ int RGWRados::delete_bucket(RGWBucketInfo& bucket_info, RGWObjVersionTracker& ob const rgw_bucket& bucket = bucket_info.bucket; RGWSI_RADOS::Pool index_pool; map bucket_objs; - int r = svc.bi_rados->open_bucket_index(bucket_info, std::nullopt, &index_pool, &bucket_objs, nullptr); + int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, std::nullopt, &index_pool, &bucket_objs, nullptr); if (r < 0) return r; @@ -4718,7 +4722,7 @@ int RGWRados::set_bucket_owner(rgw_bucket& bucket, ACLOwner& owner, const DoutPr r = get_bucket_instance_info(obj_ctx, bucket, info, nullptr, &attrs, null_yield, dpp); } if (r < 0) { - ldout(cct, 0) << "NOTICE: get_bucket_info on bucket=" << bucket.name << " returned err=" << r << dendl; + ldpp_dout(dpp, 0) << "NOTICE: get_bucket_info on bucket=" << bucket.name << " returned err=" << r << dendl; return r; } @@ -4726,7 +4730,7 @@ int RGWRados::set_bucket_owner(rgw_bucket& bucket, ACLOwner& owner, const DoutPr r = put_bucket_instance_info(info, false, real_time(), &attrs, dpp); if (r < 0) { - ldout(cct, 0) << "NOTICE: put_bucket_info on bucket=" << bucket.name << " returned err=" << r << dendl; + ldpp_dout(dpp, 0) << "NOTICE: put_bucket_info on bucket=" << bucket.name << " returned err=" << r << dendl; 
return r; } @@ -4752,7 +4756,7 @@ int RGWRados::set_buckets_enabled(vector& buckets, bool enabled, con map attrs; int r = get_bucket_info(&svc, bucket.tenant, bucket.name, info, NULL, null_yield, dpp, &attrs); if (r < 0) { - ldout(cct, 0) << "NOTICE: get_bucket_info on bucket=" << bucket.name << " returned err=" << r << ", skipping bucket" << dendl; + ldpp_dout(dpp, 0) << "NOTICE: get_bucket_info on bucket=" << bucket.name << " returned err=" << r << ", skipping bucket" << dendl; ret = r; continue; } @@ -4764,7 +4768,7 @@ int RGWRados::set_buckets_enabled(vector& buckets, bool enabled, con r = put_bucket_instance_info(info, false, real_time(), &attrs, dpp); if (r < 0) { - ldout(cct, 0) << "NOTICE: put_bucket_info on bucket=" << bucket.name << " returned err=" << r << ", skipping bucket" << dendl; + ldpp_dout(dpp, 0) << "NOTICE: put_bucket_info on bucket=" << bucket.name << " returned err=" << r << ", skipping bucket" << dendl; ret = r; continue; } @@ -4784,13 +4788,13 @@ int RGWRados::bucket_suspended(const DoutPrefixProvider *dpp, rgw_bucket& bucket return 0; } -int RGWRados::Object::complete_atomic_modification() +int RGWRados::Object::complete_atomic_modification(const DoutPrefixProvider *dpp) { if ((!state->manifest)|| state->keep_tail) return 0; cls_rgw_obj_chain chain; - store->update_gc_chain(obj, *state->manifest, &chain); + store->update_gc_chain(dpp, obj, *state->manifest, &chain); if (chain.empty()) { return 0; @@ -4800,17 +4804,17 @@ int RGWRados::Object::complete_atomic_modification() auto ret = store->gc->send_chain(chain, tag); // do it synchronously if (ret < 0) { //Delete objects inline if send chain to gc fails - store->delete_objs_inline(chain, tag); + store->delete_objs_inline(dpp, chain, tag); } return 0; } -void RGWRados::update_gc_chain(rgw_obj& head_obj, RGWObjManifest& manifest, cls_rgw_obj_chain *chain) +void RGWRados::update_gc_chain(const DoutPrefixProvider *dpp, rgw_obj& head_obj, RGWObjManifest& manifest, cls_rgw_obj_chain *chain) { 
RGWObjManifest::obj_iterator iter; rgw_raw_obj raw_head; obj_to_raw(manifest.get_head_placement_rule(), head_obj, &raw_head); - for (iter = manifest.obj_begin(); iter != manifest.obj_end(); ++iter) { + for (iter = manifest.obj_begin(dpp); iter != manifest.obj_end(dpp); ++iter) { const rgw_raw_obj& mobj = iter.get_location().get_raw_obj(store); if (mobj == raw_head) continue; @@ -4828,7 +4832,7 @@ int RGWRados::send_chain_to_gc(cls_rgw_obj_chain& chain, const string& tag) return gc->send_chain(chain, tag); } -void RGWRados::delete_objs_inline(cls_rgw_obj_chain& chain, const string& tag) +void RGWRados::delete_objs_inline(const DoutPrefixProvider *dpp, cls_rgw_obj_chain& chain, const string& tag) { string last_pool; std::unique_ptr ctx(new IoCtx); @@ -4837,10 +4841,10 @@ void RGWRados::delete_objs_inline(cls_rgw_obj_chain& chain, const string& tag) cls_rgw_obj& obj = *liter; if (obj.pool != last_pool) { ctx.reset(new IoCtx); - ret = rgw_init_ioctx(get_rados_handle(), obj.pool, *ctx); + ret = rgw_init_ioctx(dpp, get_rados_handle(), obj.pool, *ctx); if (ret < 0) { last_pool = ""; - ldout(cct, 0) << "ERROR: failed to create ioctx pool=" << + ldpp_dout(dpp, 0) << "ERROR: failed to create ioctx pool=" << obj.pool << dendl; continue; } @@ -4848,13 +4852,13 @@ void RGWRados::delete_objs_inline(cls_rgw_obj_chain& chain, const string& tag) } ctx->locator_set_key(obj.loc); const string& oid = obj.key.name; /* just stored raw oid there */ - ldout(cct, 5) << "delete_objs_inline: removing " << obj.pool << + ldpp_dout(dpp, 5) << "delete_objs_inline: removing " << obj.pool << ":" << obj.key.name << dendl; ObjectWriteOperation op; cls_refcount_put(op, tag, true); ret = ctx->operate(oid, &op); if (ret < 0) { - ldout(cct, 5) << "delete_objs_inline: refcount put returned error " << ret << dendl; + ldpp_dout(dpp, 5) << "delete_objs_inline: refcount put returned error " << ret << dendl; } } } @@ -4876,7 +4880,7 @@ static void accumulate_raw_stats(const rgw_bucket_dir_header& header, } } 
-int RGWRados::bucket_check_index(RGWBucketInfo& bucket_info, +int RGWRados::bucket_check_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, map *existing_stats, map *calculated_stats) { @@ -4886,7 +4890,7 @@ int RGWRados::bucket_check_index(RGWBucketInfo& bucket_info, map oids; map bucket_objs_ret; - int ret = svc.bi_rados->open_bucket_index(bucket_info, std::nullopt, &index_pool, &oids, nullptr); + int ret = svc.bi_rados->open_bucket_index(dpp, bucket_info, std::nullopt, &index_pool, &oids, nullptr); if (ret < 0) { return ret; } @@ -4910,12 +4914,12 @@ int RGWRados::bucket_check_index(RGWBucketInfo& bucket_info, return 0; } -int RGWRados::bucket_rebuild_index(RGWBucketInfo& bucket_info) +int RGWRados::bucket_rebuild_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info) { RGWSI_RADOS::Pool index_pool; map bucket_objs; - int r = svc.bi_rados->open_bucket_index(bucket_info, std::nullopt, &index_pool, &bucket_objs, nullptr); + int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, std::nullopt, &index_pool, &bucket_objs, nullptr); if (r < 0) { return r; } @@ -4923,12 +4927,12 @@ int RGWRados::bucket_rebuild_index(RGWBucketInfo& bucket_info) return CLSRGWIssueBucketRebuild(index_pool.ioctx(), bucket_objs, cct->_conf->rgw_bucket_index_max_aio)(); } -int RGWRados::bucket_set_reshard(const RGWBucketInfo& bucket_info, const cls_rgw_bucket_instance_entry& entry) +int RGWRados::bucket_set_reshard(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const cls_rgw_bucket_instance_entry& entry) { RGWSI_RADOS::Pool index_pool; map bucket_objs; - int r = svc.bi_rados->open_bucket_index(bucket_info, std::nullopt, &index_pool, &bucket_objs, nullptr); + int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, std::nullopt, &index_pool, &bucket_objs, nullptr); if (r < 0) { return r; } @@ -4969,7 +4973,7 @@ int RGWRados::defer_gc(const DoutPrefixProvider *dpp, void *ctx, const RGWBucket ldpp_dout(dpp, 0) << "defer chain tag=" << tag << dendl; 
cls_rgw_obj_chain chain; - update_gc_chain(state->obj, *state->manifest, &chain); + update_gc_chain(dpp, state->obj, *state->manifest, &chain); return gc->async_defer_chain(tag, chain); } @@ -5055,7 +5059,7 @@ int RGWRados::Object::Delete::delete_obj(optional_yield y, const DoutPrefixProvi } else { rgw_bucket_dir_entry dirent; - int r = store->bi_get_instance(target->get_bucket_info(), obj, &dirent); + int r = store->bi_get_instance(dpp, target->get_bucket_info(), obj, &dirent); if (r < 0) { return r; } @@ -5070,13 +5074,13 @@ int RGWRados::Object::Delete::delete_obj(optional_yield y, const DoutPrefixProvi BucketShard *bs; int r = target->get_bucket_shard(&bs, dpp); if (r < 0) { - ldout(store->ctx(), 5) << "failed to get BucketShard object: r=" << r << dendl; + ldpp_dout(dpp, 5) << "failed to get BucketShard object: r=" << r << dendl; return r; } r = store->svc.datalog_rados->add_entry(dpp, target->bucket_info, bs->shard_id); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed writing data log" << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed writing data log" << dendl; return r; } @@ -5084,7 +5088,7 @@ int RGWRados::Object::Delete::delete_obj(optional_yield y, const DoutPrefixProvi } rgw_rados_ref ref; - int r = store->get_obj_head_ref(target->get_bucket_info(), obj, &ref); + int r = store->get_obj_head_ref(dpp, target->get_bucket_info(), obj, &ref); if (r < 0) { return r; } @@ -5163,7 +5167,7 @@ int RGWRados::Object::Delete::delete_obj(optional_yield y, const DoutPrefixProvi store->remove_rgw_head_obj(op); auto& ioctx = ref.pool.ioctx(); - r = rgw_rados_operate(ioctx, ref.obj.oid, &op, null_yield); + r = rgw_rados_operate(dpp, ioctx, ref.obj.oid, &op, null_yield); /* raced with another operation, object state is indeterminate */ const bool need_invalidate = (r == -ECANCELED); @@ -5177,7 +5181,7 @@ int RGWRados::Object::Delete::delete_obj(optional_yield y, const DoutPrefixProvi } r = index_op.complete_del(dpp, poolid, ioctx.get_last_version(), state->mtime, 
params.remove_objs); - int ret = target->complete_atomic_modification(); + int ret = target->complete_atomic_modification(dpp); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: complete_atomic_modification returned ret=" << ret << dendl; } @@ -5185,7 +5189,7 @@ int RGWRados::Object::Delete::delete_obj(optional_yield y, const DoutPrefixProvi } else { int ret = index_op.cancel(dpp); if (ret < 0) { - ldout(store->ctx(), 0) << "ERROR: index_op.cancel() returned ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: index_op.cancel() returned ret=" << ret << dendl; } } @@ -5223,10 +5227,10 @@ int RGWRados::delete_obj(const DoutPrefixProvider *dpp, return del_op.delete_obj(null_yield, dpp); } -int RGWRados::delete_raw_obj(const rgw_raw_obj& obj) +int RGWRados::delete_raw_obj(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj) { rgw_rados_ref ref; - int r = get_raw_obj_ref(obj, &ref); + int r = get_raw_obj_ref(dpp, obj, &ref); if (r < 0) { return r; } @@ -5234,7 +5238,7 @@ int RGWRados::delete_raw_obj(const rgw_raw_obj& obj) ObjectWriteOperation op; op.remove(); - r = rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield); + r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield); if (r < 0) return r; @@ -5251,7 +5255,7 @@ int RGWRados::delete_obj_index(const rgw_obj& obj, ceph::real_time mtime, const RGWBucketInfo bucket_info; int ret = get_bucket_instance_info(obj_ctx, obj.bucket, bucket_info, NULL, NULL, null_yield, dpp); if (ret < 0) { - ldout(cct, 0) << "ERROR: " << __func__ << "() get_bucket_instance_info(bucket=" << obj.bucket << ") returned ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "() get_bucket_instance_info(bucket=" << obj.bucket << ") returned ret=" << ret << dendl; return ret; } @@ -5261,12 +5265,12 @@ int RGWRados::delete_obj_index(const rgw_obj& obj, ceph::real_time mtime, const return index_op.complete_del(dpp, -1 /* pool */, 0, mtime, NULL); } -static void generate_fake_tag(rgw::sal::Store* store, 
map& attrset, RGWObjManifest& manifest, bufferlist& manifest_bl, bufferlist& tag_bl) +static void generate_fake_tag(const DoutPrefixProvider *dpp, rgw::sal::Store* store, map& attrset, RGWObjManifest& manifest, bufferlist& manifest_bl, bufferlist& tag_bl) { string tag; - RGWObjManifest::obj_iterator mi = manifest.obj_begin(); - if (mi != manifest.obj_end()) { + RGWObjManifest::obj_iterator mi = manifest.obj_begin(dpp); + if (mi != manifest.obj_end(dpp)) { if (manifest.has_tail()) // first object usually points at the head, let's skip to a more unique part ++mi; tag = mi.get_location().get_raw_obj(store).oid; @@ -5350,7 +5354,7 @@ int RGWRados::get_obj_state_impl(const DoutPrefixProvider *dpp, RGWObjectCtx *rc int r = -ENOENT; if (!assume_noent) { - r = RGWRados::raw_obj_stat(raw_obj, &s->size, &s->mtime, &s->epoch, &s->attrset, (s->prefetch_data ? &s->data : NULL), NULL, y); + r = RGWRados::raw_obj_stat(dpp, raw_obj, &s->size, &s->mtime, &s->epoch, &s->attrset, (s->prefetch_data ? &s->data : NULL), NULL, y); } if (r == -ENOENT) { @@ -5433,7 +5437,7 @@ int RGWRados::get_obj_state_impl(const DoutPrefixProvider *dpp, RGWObjectCtx *rc if (cct->_conf->subsys.should_gather() && \ s->manifest->has_explicit_objs()) { RGWObjManifest::obj_iterator mi; - for (mi = s->manifest->obj_begin(); mi != s->manifest->obj_end(); ++mi) { + for (mi = s->manifest->obj_begin(dpp); mi != s->manifest->obj_end(dpp); ++mi) { ldpp_dout(dpp, 20) << "manifest: ofs=" << mi.get_ofs() << " loc=" << mi.get_location().get_raw_obj(store) << dendl; } } @@ -5443,7 +5447,7 @@ int RGWRados::get_obj_state_impl(const DoutPrefixProvider *dpp, RGWObjectCtx *rc * Uh oh, something's wrong, object with manifest should have tag. 
Let's * create one out of the manifest, would be unique */ - generate_fake_tag(store, s->attrset, *s->manifest, manifest_bl, s->obj_tag); + generate_fake_tag(dpp, store, s->attrset, *s->manifest, manifest_bl, s->obj_tag); s->fake_tag = true; } } @@ -5488,7 +5492,7 @@ int RGWRados::get_obj_state_impl(const DoutPrefixProvider *dpp, RGWObjectCtx *rc if (is_olh(s->attrset)) { s->is_olh = true; - ldout(cct, 20) << __func__ << ": setting s->olh_tag to " << string(s->olh_tag.c_str(), s->olh_tag.length()) << dendl; + ldpp_dout(dpp, 20) << __func__ << ": setting s->olh_tag to " << string(s->olh_tag.c_str(), s->olh_tag.length()) << dendl; if (need_follow_olh) { return get_olh_target_state(dpp, *rctx, bucket_info, obj, s, state, y); @@ -5541,7 +5545,7 @@ int RGWRados::Object::Read::get_attr(const DoutPrefixProvider *dpp, const char * return 0; } -int RGWRados::Object::Stat::stat_async() +int RGWRados::Object::Stat::stat_async(const DoutPrefixProvider *dpp) { RGWObjectCtx& ctx = source->get_ctx(); rgw_obj& obj = source->get_obj(); @@ -5562,7 +5566,7 @@ int RGWRados::Object::Stat::stat_async() string loc; get_obj_bucket_and_oid_loc(obj, oid, loc); - int r = store->get_obj_head_ioctx(source->get_bucket_info(), obj, &state.io_ctx); + int r = store->get_obj_head_ioctx(dpp, source->get_bucket_info(), obj, &state.io_ctx); if (r < 0) { return r; } @@ -5574,7 +5578,7 @@ int RGWRados::Object::Stat::stat_async() state.io_ctx.locator_set_key(loc); r = state.io_ctx.aio_operate(oid, state.completion, &op, NULL); if (r < 0) { - ldout(store->ctx(), 5) << __func__ + ldpp_dout(dpp, 5) << __func__ << ": ERROR: aio_operate() returned ret=" << r << dendl; return r; @@ -5631,21 +5635,22 @@ int RGWRados::append_atomic_test(const DoutPrefixProvider *dpp, RGWObjectCtx *rc if (r < 0) return r; - return append_atomic_test(*pstate, op); + return append_atomic_test(dpp, *pstate, op); } -int RGWRados::append_atomic_test(const RGWObjState* state, +int RGWRados::append_atomic_test(const DoutPrefixProvider 
*dpp, + const RGWObjState* state, librados::ObjectOperation& op) { if (!state->is_atomic) { - ldout(cct, 20) << "state for obj=" << state->obj << " is not atomic, not appending atomic test" << dendl; + ldpp_dout(dpp, 20) << "state for obj=" << state->obj << " is not atomic, not appending atomic test" << dendl; return 0; } if (state->obj_tag.length() > 0 && !state->fake_tag) {// check for backward compatibility op.cmpxattr(RGW_ATTR_ID_TAG, LIBRADOS_CMPXATTR_OP_EQ, state->obj_tag); } else { - ldout(cct, 20) << "state->obj_tag is empty, not appending atomic test" << dendl; + ldpp_dout(dpp, 20) << "state->obj_tag is empty, not appending atomic test" << dendl; } return 0; } @@ -5780,7 +5785,7 @@ int RGWRados::set_attrs(const DoutPrefixProvider *dpp, void *ctx, const RGWBucke } rgw_rados_ref ref; - int r = get_obj_head_ref(bucket_info, obj, &ref); + int r = get_obj_head_ref(dpp, bucket_info, obj, &ref); if (r < 0) { return r; } @@ -5825,7 +5830,7 @@ int RGWRados::set_attrs(const DoutPrefixProvider *dpp, void *ctx, const RGWBucke rgw_obj_index_key obj_key; obj.key.get_index_key(&obj_key); - obj_expirer->hint_add(ts, bucket.tenant, bucket.name, bucket.bucket_id, obj_key); + obj_expirer->hint_add(dpp, ts, bucket.tenant, bucket.name, bucket.bucket_id, obj_key); } catch (buffer::error& err) { ldpp_dout(dpp, 0) << "ERROR: failed to decode " RGW_ATTR_DELETE_AT << " attr" << dendl; } @@ -5859,7 +5864,7 @@ int RGWRados::set_attrs(const DoutPrefixProvider *dpp, void *ctx, const RGWBucke struct timespec mtime_ts = real_clock::to_timespec(mtime); op.mtime2(&mtime_ts); auto& ioctx = ref.pool.ioctx(); - r = rgw_rados_operate(ioctx, ref.obj.oid, &op, null_yield); + r = rgw_rados_operate(dpp, ioctx, ref.obj.oid, &op, null_yield); if (state) { if (r >= 0) { bufferlist acl_bl = attrs[RGW_ATTR_ACL]; @@ -5880,7 +5885,7 @@ int RGWRados::set_attrs(const DoutPrefixProvider *dpp, void *ctx, const RGWBucke } else { int ret = index_op.cancel(dpp); if (ret < 0) { - ldout(cct, 0) << "ERROR: 
complete_update_index_cancel() returned ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: complete_update_index_cancel() returned ret=" << ret << dendl; } } } @@ -5934,7 +5939,7 @@ int RGWRados::Object::Read::prepare(optional_yield y, const DoutPrefixProvider * state.cur_pool = state.head_obj.pool; state.cur_ioctx = &state.io_ctxs[state.cur_pool]; - r = store->get_obj_head_ioctx(bucket_info, state.obj, state.cur_ioctx); + r = store->get_obj_head_ioctx(dpp, bucket_info, state.obj, state.cur_ioctx); if (r < 0) { return r; } @@ -6037,14 +6042,14 @@ int RGWRados::Bucket::UpdateIndex::guard_reshard(const DoutPrefixProvider *dpp, for (int i = 0; i < NUM_RESHARD_RETRIES; ++i) { int ret = get_bucket_shard(&bs, dpp); if (ret < 0) { - ldout(store->ctx(), 5) << "failed to get BucketShard object: ret=" << ret << dendl; + ldpp_dout(dpp, 5) << "failed to get BucketShard object: ret=" << ret << dendl; return ret; } r = call(bs); if (r != -ERR_BUSY_RESHARDING) { break; } - ldout(store->ctx(), 0) << "NOTICE: resharding operation on bucket index detected, blocking" << dendl; + ldpp_dout(dpp, 0) << "NOTICE: resharding operation on bucket index detected, blocking" << dendl; string new_bucket_id; r = store->block_while_resharding(bs, &new_bucket_id, target->bucket_info, null_yield, dpp); @@ -6054,11 +6059,11 @@ int RGWRados::Bucket::UpdateIndex::guard_reshard(const DoutPrefixProvider *dpp, if (r < 0) { return r; } - ldout(store->ctx(), 20) << "reshard completion identified, new_bucket_id=" << new_bucket_id << dendl; + ldpp_dout(dpp, 20) << "reshard completion identified, new_bucket_id=" << new_bucket_id << dendl; i = 0; /* resharding is finished, make sure we can retry */ r = target->update_bucket_id(new_bucket_id, dpp); if (r < 0) { - ldout(store->ctx(), 0) << "ERROR: update_bucket_id() new_bucket_id=" << new_bucket_id << " returned r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: update_bucket_id() new_bucket_id=" << new_bucket_id << " returned r=" << r << dendl; return r; } 
invalidate_bs(); @@ -6091,7 +6096,7 @@ int RGWRados::Bucket::UpdateIndex::prepare(const DoutPrefixProvider *dpp, RGWMod } int r = guard_reshard(dpp, nullptr, [&](BucketShard *bs) -> int { - return store->cls_obj_prepare_op(*bs, op, optag, obj, bilog_flags, y, zones_trace); + return store->cls_obj_prepare_op(dpp, *bs, op, optag, obj, bilog_flags, y, zones_trace); }); if (r < 0) { @@ -6119,7 +6124,7 @@ int RGWRados::Bucket::UpdateIndex::complete(const DoutPrefixProvider *dpp, int64 int ret = get_bucket_shard(&bs, dpp); if (ret < 0) { - ldout(store->ctx(), 5) << "failed to get BucketShard object: ret=" << ret << dendl; + ldpp_dout(dpp, 5) << "failed to get BucketShard object: ret=" << ret << dendl; return ret; } @@ -6149,7 +6154,7 @@ int RGWRados::Bucket::UpdateIndex::complete(const DoutPrefixProvider *dpp, int64 int r = store->svc.datalog_rados->add_entry(dpp, target->bucket_info, bs->shard_id); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed writing data log" << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed writing data log" << dendl; } return ret; @@ -6168,7 +6173,7 @@ int RGWRados::Bucket::UpdateIndex::complete_del(const DoutPrefixProvider *dpp, int ret = get_bucket_shard(&bs, dpp); if (ret < 0) { - ldout(store->ctx(), 5) << "failed to get BucketShard object: ret=" << ret << dendl; + ldpp_dout(dpp, 5) << "failed to get BucketShard object: ret=" << ret << dendl; return ret; } @@ -6176,7 +6181,7 @@ int RGWRados::Bucket::UpdateIndex::complete_del(const DoutPrefixProvider *dpp, int r = store->svc.datalog_rados->add_entry(dpp, target->bucket_info, bs->shard_id); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed writing data log" << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed writing data log" << dendl; } return ret; @@ -6202,7 +6207,7 @@ int RGWRados::Bucket::UpdateIndex::cancel(const DoutPrefixProvider *dpp) */ int r = store->svc.datalog_rados->add_entry(dpp, target->bucket_info, bs->shard_id); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed writing 
data log" << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed writing data log" << dendl; } return ret; @@ -6211,7 +6216,6 @@ int RGWRados::Bucket::UpdateIndex::cancel(const DoutPrefixProvider *dpp) int RGWRados::Object::Read::read(int64_t ofs, int64_t end, bufferlist& bl, optional_yield y, const DoutPrefixProvider *dpp) { RGWRados *store = source->get_store(); - CephContext *cct = store->ctx(); rgw_raw_obj read_obj; uint64_t read_ofs = ofs; @@ -6242,7 +6246,7 @@ int RGWRados::Object::Read::read(int64_t ofs, int64_t end, bufferlist& bl, optio if (astate->manifest && astate->manifest->has_tail()) { /* now get the relevant object part */ - RGWObjManifest::obj_iterator iter = astate->manifest->obj_find(ofs); + RGWObjManifest::obj_iterator iter = astate->manifest->obj_find(dpp, ofs); uint64_t stripe_ofs = iter.get_stripe_ofs(); read_obj = iter.get_location().get_raw_obj(store->store); @@ -6298,9 +6302,9 @@ int RGWRados::Object::Read::read(int64_t ofs, int64_t end, bufferlist& bl, optio auto iter = state.io_ctxs.find(read_obj.pool); if (iter == state.io_ctxs.end()) { state.cur_ioctx = &state.io_ctxs[read_obj.pool]; - r = store->open_pool_ctx(read_obj.pool, *state.cur_ioctx, false); + r = store->open_pool_ctx(dpp, read_obj.pool, *state.cur_ioctx, false); if (r < 0) { - ldout(cct, 20) << "ERROR: failed to open pool context for pool=" << read_obj.pool << " r=" << r << dendl; + ldpp_dout(dpp, 20) << "ERROR: failed to open pool context for pool=" << read_obj.pool << " r=" << r << dendl; return r; } } else { @@ -6379,17 +6383,19 @@ struct get_obj_data { } }; -static int _get_obj_iterate_cb(const rgw_raw_obj& read_obj, off_t obj_ofs, +static int _get_obj_iterate_cb(const DoutPrefixProvider *dpp, + const rgw_raw_obj& read_obj, off_t obj_ofs, off_t read_ofs, off_t len, bool is_head_obj, RGWObjState *astate, void *arg) { struct get_obj_data *d = (struct get_obj_data *)arg; - return d->store->get_obj_iterate_cb(read_obj, obj_ofs, read_ofs, len, + return 
d->store->get_obj_iterate_cb(dpp, read_obj, obj_ofs, read_ofs, len, is_head_obj, astate, arg); } -int RGWRados::get_obj_iterate_cb(const rgw_raw_obj& read_obj, off_t obj_ofs, +int RGWRados::get_obj_iterate_cb(const DoutPrefixProvider *dpp, + const rgw_raw_obj& read_obj, off_t obj_ofs, off_t read_ofs, off_t len, bool is_head_obj, RGWObjState *astate, void *arg) { @@ -6399,7 +6405,7 @@ int RGWRados::get_obj_iterate_cb(const rgw_raw_obj& read_obj, off_t obj_ofs, if (is_head_obj) { /* only when reading from the head object do we need to do the atomic test */ - int r = append_atomic_test(astate, op); + int r = append_atomic_test(dpp, astate, op); if (r < 0) return r; @@ -6421,13 +6427,13 @@ int RGWRados::get_obj_iterate_cb(const rgw_raw_obj& read_obj, off_t obj_ofs, } auto obj = d->store->svc.rados->obj(read_obj); - int r = obj.open(); + int r = obj.open(dpp); if (r < 0) { - ldout(cct, 4) << "failed to open rados context for " << read_obj << dendl; + ldpp_dout(dpp, 4) << "failed to open rados context for " << read_obj << dendl; return r; } - ldout(cct, 20) << "rados->get_obj_iterate_cb oid=" << read_obj.oid << " obj-ofs=" << obj_ofs << " read_ofs=" << read_ofs << " len=" << len << dendl; + ldpp_dout(dpp, 20) << "rados->get_obj_iterate_cb oid=" << read_obj.oid << " obj-ofs=" << obj_ofs << " read_ofs=" << read_ofs << " len=" << len << dendl; op.read(read_ofs, len, nullptr, nullptr); const uint64_t cost = len; @@ -6453,7 +6459,7 @@ int RGWRados::Object::Read::iterate(const DoutPrefixProvider *dpp, int64_t ofs, int r = store->iterate_obj(dpp, obj_ctx, source->get_bucket_info(), state.obj, ofs, end, chunk_size, _get_obj_iterate_cb, &data, y); if (r < 0) { - ldout(cct, 0) << "iterate_obj() failed with " << r << dendl; + ldpp_dout(dpp, 0) << "iterate_obj() failed with " << r << dendl; data.cancel(); // drain completions without writing back to client return r; } @@ -6487,9 +6493,9 @@ int RGWRados::iterate_obj(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx, if 
(astate->manifest) { /* now get the relevant object stripe */ - RGWObjManifest::obj_iterator iter = astate->manifest->obj_find(ofs); + RGWObjManifest::obj_iterator iter = astate->manifest->obj_find(dpp, ofs); - RGWObjManifest::obj_iterator obj_end = astate->manifest->obj_end(); + RGWObjManifest::obj_iterator obj_end = astate->manifest->obj_end(dpp); for (; iter != obj_end && ofs <= end; ++iter) { off_t stripe_ofs = iter.get_stripe_ofs(); @@ -6505,7 +6511,7 @@ int RGWRados::iterate_obj(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx, } reading_from_head = (read_obj == head_obj); - r = cb(read_obj, ofs, read_ofs, read_len, reading_from_head, astate, arg); + r = cb(dpp, read_obj, ofs, read_ofs, read_len, reading_from_head, astate, arg); if (r < 0) { return r; } @@ -6519,7 +6525,7 @@ int RGWRados::iterate_obj(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx, read_obj = head_obj; uint64_t read_len = std::min(len, max_chunk_size); - r = cb(read_obj, ofs, ofs, read_len, reading_from_head, astate, arg); + r = cb(dpp, read_obj, ofs, ofs, read_len, reading_from_head, astate, arg); if (r < 0) { return r; } @@ -6532,31 +6538,31 @@ int RGWRados::iterate_obj(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx, return 0; } -int RGWRados::obj_operate(const RGWBucketInfo& bucket_info, const rgw_obj& obj, ObjectWriteOperation *op) +int RGWRados::obj_operate(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, ObjectWriteOperation *op) { rgw_rados_ref ref; - int r = get_obj_head_ref(bucket_info, obj, &ref); + int r = get_obj_head_ref(dpp, bucket_info, obj, &ref); if (r < 0) { return r; } - return rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, op, null_yield); + return rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, op, null_yield); } -int RGWRados::obj_operate(const RGWBucketInfo& bucket_info, const rgw_obj& obj, ObjectReadOperation *op) +int RGWRados::obj_operate(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const 
rgw_obj& obj, ObjectReadOperation *op) { rgw_rados_ref ref; - int r = get_obj_head_ref(bucket_info, obj, &ref); + int r = get_obj_head_ref(dpp, bucket_info, obj, &ref); if (r < 0) { return r; } bufferlist outbl; - return rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, op, &outbl, null_yield); + return rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, op, &outbl, null_yield); } -int RGWRados::olh_init_modification_impl(const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& olh_obj, string *op_tag) +int RGWRados::olh_init_modification_impl(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& olh_obj, string *op_tag) { ObjectWriteOperation op; @@ -6584,7 +6590,7 @@ int RGWRados::olh_init_modification_impl(const RGWBucketInfo& bucket_info, RGWOb */ if (has_tag) { /* guard against racing writes */ - bucket_index_guard_olh_op(state, op); + bucket_index_guard_olh_op(dpp, state, op); } if (!has_tag) { @@ -6634,7 +6640,7 @@ int RGWRados::olh_init_modification_impl(const RGWBucketInfo& bucket_info, RGWOb op.setxattr(attr_name.c_str(), bl); - int ret = obj_operate(bucket_info, olh_obj, &op); + int ret = obj_operate(dpp, bucket_info, olh_obj, &op); if (ret < 0) { return ret; } @@ -6645,11 +6651,11 @@ int RGWRados::olh_init_modification_impl(const RGWBucketInfo& bucket_info, RGWOb return 0; } -int RGWRados::olh_init_modification(const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& obj, string *op_tag) +int RGWRados::olh_init_modification(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& obj, string *op_tag) { int ret; - ret = olh_init_modification_impl(bucket_info, state, obj, op_tag); + ret = olh_init_modification_impl(dpp, bucket_info, state, obj, op_tag); if (ret == -EEXIST) { ret = -ECANCELED; } @@ -6670,14 +6676,14 @@ int RGWRados::guard_reshard(const DoutPrefixProvider *dpp, for (int i = 0; i < NUM_RESHARD_RETRIES; ++i) { r = 
bs->init(pobj->bucket, *pobj, nullptr /* no RGWBucketInfo */, dpp); if (r < 0) { - ldout(cct, 5) << "bs.init() returned ret=" << r << dendl; + ldpp_dout(dpp, 5) << "bs.init() returned ret=" << r << dendl; return r; } r = call(bs); if (r != -ERR_BUSY_RESHARDING) { break; } - ldout(cct, 0) << "NOTICE: resharding operation on bucket index detected, blocking" << dendl; + ldpp_dout(dpp, 0) << "NOTICE: resharding operation on bucket index detected, blocking" << dendl; string new_bucket_id; r = block_while_resharding(bs, &new_bucket_id, bucket_info, null_yield, dpp); if (r == -ERR_BUSY_RESHARDING) { @@ -6721,7 +6727,7 @@ int RGWRados::block_while_resharding(RGWRados::BucketShard *bs, RGWBucketInfo fresh_bucket_info = bucket_info; int ret = try_refresh_bucket_info(fresh_bucket_info, nullptr, dpp); if (ret < 0) { - ldout(cct, 0) << __func__ << + ldpp_dout(dpp, 0) << __func__ << " ERROR: failed to refresh bucket info after reshard at " << log_tag << ": " << cpp_strerror(-ret) << dendl; return ret; @@ -6776,7 +6782,7 @@ int RGWRados::block_while_resharding(RGWRados::BucketShard *bs, ldpp_dout(dpp, 10) << __func__ << " INFO: was able to take reshard lock for bucket " << bucket_id << dendl; - ret = RGWBucketReshard::clear_resharding(this->store, bucket_info); + ret = RGWBucketReshard::clear_resharding(dpp, this->store, bucket_info); if (ret < 0) { reshard_lock.unlock(); ldpp_dout(dpp, 0) << __func__ << @@ -6814,7 +6820,7 @@ int RGWRados::bucket_index_link_olh(const DoutPrefixProvider *dpp, const RGWBuck rgw_zone_set *_zones_trace, bool log_data_change) { rgw_rados_ref ref; - int r = get_obj_head_ref(bucket_info, obj_instance, &ref); + int r = get_obj_head_ref(dpp, bucket_info, obj_instance, &ref); if (r < 0) { return r; } @@ -6837,7 +6843,7 @@ int RGWRados::bucket_index_link_olh(const DoutPrefixProvider *dpp, const RGWBuck delete_marker, op_tag, meta, olh_epoch, unmod_since, high_precision_time, svc.zone->get_zone().log_data, zones_trace); - return 
rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield); + return rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield); }); if (r < 0) { ldpp_dout(dpp, 20) << "rgw_rados_operate() after cls_rgw_bucket_link_olh() returned r=" << r << dendl; @@ -6846,15 +6852,15 @@ int RGWRados::bucket_index_link_olh(const DoutPrefixProvider *dpp, const RGWBuck r = svc.datalog_rados->add_entry(dpp, bucket_info, bs.shard_id); if (r < 0) { - ldout(cct, 0) << "ERROR: failed writing data log" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed writing data log" << dendl; } return 0; } -void RGWRados::bucket_index_guard_olh_op(RGWObjState& olh_state, ObjectOperation& op) +void RGWRados::bucket_index_guard_olh_op(const DoutPrefixProvider *dpp, RGWObjState& olh_state, ObjectOperation& op) { - ldout(cct, 20) << __func__ << "(): olh_state.olh_tag=" << string(olh_state.olh_tag.c_str(), olh_state.olh_tag.length()) << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): olh_state.olh_tag=" << string(olh_state.olh_tag.c_str(), olh_state.olh_tag.length()) << dendl; op.cmpxattr(RGW_ATTR_OLH_ID_TAG, CEPH_OSD_CMPXATTR_OP_EQ, olh_state.olh_tag); } @@ -6862,7 +6868,7 @@ int RGWRados::bucket_index_unlink_instance(const DoutPrefixProvider *dpp, const const string& op_tag, const string& olh_tag, uint64_t olh_epoch, rgw_zone_set *_zones_trace) { rgw_rados_ref ref; - int r = get_obj_head_ref(bucket_info, obj_instance, &ref); + int r = get_obj_head_ref(dpp, bucket_info, obj_instance, &ref); if (r < 0) { return r; } @@ -6883,7 +6889,7 @@ int RGWRados::bucket_index_unlink_instance(const DoutPrefixProvider *dpp, const cls_rgw_guard_bucket_resharding(op, -ERR_BUSY_RESHARDING); cls_rgw_bucket_unlink_instance(op, key, op_tag, olh_tag, olh_epoch, svc.zone->get_zone().log_data, zones_trace); - return rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield); + return rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield); }); if (r < 0) { ldpp_dout(dpp, 20) << 
"rgw_rados_operate() after cls_rgw_bucket_link_instance() returned r=" << r << dendl; @@ -6900,7 +6906,7 @@ int RGWRados::bucket_index_read_olh_log(const DoutPrefixProvider *dpp, bool *is_truncated) { rgw_rados_ref ref; - int r = get_obj_head_ref(bucket_info, obj_instance, &ref); + int r = get_obj_head_ref(dpp, bucket_info, obj_instance, &ref); if (r < 0) { return r; } @@ -6909,7 +6915,7 @@ int RGWRados::bucket_index_read_olh_log(const DoutPrefixProvider *dpp, int ret = bs.init(obj_instance.bucket, obj_instance, nullptr /* no RGWBucketInfo */, dpp); if (ret < 0) { - ldout(cct, 5) << "bs.init() returned ret=" << ret << dendl; + ldpp_dout(dpp, 5) << "bs.init() returned ret=" << ret << dendl; return ret; } @@ -6927,7 +6933,7 @@ int RGWRados::bucket_index_read_olh_log(const DoutPrefixProvider *dpp, int op_ret = 0; cls_rgw_get_olh_log(op, key, ver_marker, olh_tag, log_ret, op_ret); bufferlist outbl; - int r = rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, &outbl, null_yield); + int r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, &outbl, null_yield); if (r < 0) { return r; } @@ -6951,27 +6957,27 @@ int RGWRados::bucket_index_read_olh_log(const DoutPrefixProvider *dpp, // the attributes from another zone, causing link_olh() to fail endlessly due to // olh_tag mismatch. this attempts to detect this case and reconstruct the OLH // attributes from the bucket index. 
see http://tracker.ceph.com/issues/37792 -int RGWRados::repair_olh(RGWObjState* state, const RGWBucketInfo& bucket_info, +int RGWRados::repair_olh(const DoutPrefixProvider *dpp, RGWObjState* state, const RGWBucketInfo& bucket_info, const rgw_obj& obj) { // fetch the current olh entry from the bucket index rgw_bucket_olh_entry olh; - int r = bi_get_olh(bucket_info, obj, &olh); + int r = bi_get_olh(dpp, bucket_info, obj, &olh); if (r < 0) { - ldout(cct, 0) << "repair_olh failed to read olh entry for " << obj << dendl; + ldpp_dout(dpp, 0) << "repair_olh failed to read olh entry for " << obj << dendl; return r; } if (olh.tag == rgw_bl_str(state->olh_tag)) { // mismatch already resolved? return 0; } - ldout(cct, 4) << "repair_olh setting olh_tag=" << olh.tag + ldpp_dout(dpp, 4) << "repair_olh setting olh_tag=" << olh.tag << " key=" << olh.key << " delete_marker=" << olh.delete_marker << dendl; // rewrite OLH_ID_TAG and OLH_INFO from current olh ObjectWriteOperation op; // assert this is the same olh tag we think we're fixing - bucket_index_guard_olh_op(*state, op); + bucket_index_guard_olh_op(dpp, *state, op); // preserve existing mtime struct timespec mtime_ts = ceph::real_clock::to_timespec(state->mtime); op.mtime2(&mtime_ts); @@ -6989,13 +6995,13 @@ int RGWRados::repair_olh(RGWObjState* state, const RGWBucketInfo& bucket_info, op.setxattr(RGW_ATTR_OLH_INFO, bl); } rgw_rados_ref ref; - r = get_obj_head_ref(bucket_info, obj, &ref); + r = get_obj_head_ref(dpp, bucket_info, obj, &ref); if (r < 0) { return r; } - r = rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield); + r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield); if (r < 0) { - ldout(cct, 0) << "repair_olh failed to write olh attributes with " + ldpp_dout(dpp, 0) << "repair_olh failed to write olh attributes with " << cpp_strerror(r) << dendl; return r; } @@ -7005,7 +7011,7 @@ int RGWRados::repair_olh(RGWObjState* state, const RGWBucketInfo& bucket_info, int 
RGWRados::bucket_index_trim_olh_log(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& obj_instance, uint64_t ver) { rgw_rados_ref ref; - int r = get_obj_head_ref(bucket_info, obj_instance, &ref); + int r = get_obj_head_ref(dpp, bucket_info, obj_instance, &ref); if (r < 0) { return r; } @@ -7014,7 +7020,7 @@ int RGWRados::bucket_index_trim_olh_log(const DoutPrefixProvider *dpp, const RGW int ret = bs.init(obj_instance.bucket, obj_instance, nullptr /* no RGWBucketInfo */, dpp); if (ret < 0) { - ldout(cct, 5) << "bs.init() returned ret=" << ret << dendl; + ldpp_dout(dpp, 5) << "bs.init() returned ret=" << ret << dendl; return ret; } @@ -7027,7 +7033,7 @@ int RGWRados::bucket_index_trim_olh_log(const DoutPrefixProvider *dpp, const RGW ObjectWriteOperation op; cls_rgw_guard_bucket_resharding(op, -ERR_BUSY_RESHARDING); cls_rgw_trim_olh_log(op, key, ver, olh_tag); - return pbs->bucket_obj.operate(&op, null_yield); + return pbs->bucket_obj.operate(dpp, &op, null_yield); }); if (ret < 0) { ldpp_dout(dpp, 20) << "cls_rgw_trim_olh_log() returned r=" << ret << dendl; @@ -7040,7 +7046,7 @@ int RGWRados::bucket_index_trim_olh_log(const DoutPrefixProvider *dpp, const RGW int RGWRados::bucket_index_clear_olh(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& obj_instance) { rgw_rados_ref ref; - int r = get_obj_head_ref(bucket_info, obj_instance, &ref); + int r = get_obj_head_ref(dpp, bucket_info, obj_instance, &ref); if (r < 0) { return r; } @@ -7057,7 +7063,7 @@ int RGWRados::bucket_index_clear_olh(const DoutPrefixProvider *dpp, const RGWBuc auto& ref = pbs->bucket_obj.get_ref(); cls_rgw_guard_bucket_resharding(op, -ERR_BUSY_RESHARDING); cls_rgw_clear_olh(op, key, olh_tag); - return rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield); + return rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield); }); if (ret < 0) { ldpp_dout(dpp, 5) << 
"rgw_rados_operate() after cls_rgw_clear_olh() returned ret=" << ret << dendl; @@ -7172,7 +7178,7 @@ int RGWRados::apply_olh_log(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx } rgw_rados_ref ref; - int r = get_obj_head_ref(bucket_info, obj, &ref); + int r = get_obj_head_ref(dpp, bucket_info, obj, &ref); if (r < 0) { return r; } @@ -7196,24 +7202,24 @@ int RGWRados::apply_olh_log(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx rgw_obj obj_instance(bucket, key); int ret = delete_obj(dpp, obj_ctx, bucket_info, obj_instance, 0, RGW_BILOG_FLAG_VERSIONED_OP, ceph::real_time(), zones_trace); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << "ERROR: delete_obj() returned " << ret << " obj_instance=" << obj_instance << dendl; + ldpp_dout(dpp, 0) << "ERROR: delete_obj() returned " << ret << " obj_instance=" << obj_instance << dendl; return ret; } } /* update olh object */ - r = rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield); + r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield); if (r == -ECANCELED) { r = 0; } if (r < 0) { - ldout(cct, 0) << "ERROR: could not apply olh update, r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: could not apply olh update, r=" << r << dendl; return r; } r = bucket_index_trim_olh_log(dpp, bucket_info, state, obj, last_ver); if (r < 0) { - ldout(cct, 0) << "ERROR: could not trim olh log, r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: could not trim olh log, r=" << r << dendl; return r; } @@ -7225,7 +7231,7 @@ int RGWRados::apply_olh_log(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx cls_obj_check_prefix_exist(rm_op, RGW_ATTR_OLH_PENDING_PREFIX, true); /* fail if found one of these, pending modification */ rm_op.remove(); - r = rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &rm_op, null_yield); + r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &rm_op, null_yield); if (r == -ECANCELED) { return 0; /* someone else won this race */ } else { @@ -7234,7 +7240,7 @@ int 
RGWRados::apply_olh_log(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx */ r = bucket_index_clear_olh(dpp, bucket_info, state, obj); if (r < 0) { - ldout(cct, 0) << "ERROR: could not clear bucket index olh entries r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: could not clear bucket index olh entries r=" << r << dendl; return r; } } @@ -7291,9 +7297,9 @@ int RGWRados::set_olh(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx, cons return ret; } - ret = olh_init_modification(bucket_info, *state, olh_obj, &op_tag); + ret = olh_init_modification(dpp, bucket_info, *state, olh_obj, &op_tag); if (ret < 0) { - ldout(cct, 20) << "olh_init_modification() target_obj=" << target_obj << " delete_marker=" << (int)delete_marker << " returned " << ret << dendl; + ldpp_dout(dpp, 20) << "olh_init_modification() target_obj=" << target_obj << " delete_marker=" << (int)delete_marker << " returned " << ret << dendl; if (ret == -ECANCELED) { continue; } @@ -7307,7 +7313,7 @@ int RGWRados::set_olh(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx, cons if (ret == -ECANCELED) { // the bucket index rejected the link_olh() due to olh tag mismatch; // attempt to reconstruct olh head attributes based on the bucket index - int r2 = repair_olh(state, bucket_info, olh_obj); + int r2 = repair_olh(dpp, state, bucket_info, olh_obj); if (r2 < 0 && r2 != -ECANCELED) { return r2; } @@ -7319,7 +7325,7 @@ int RGWRados::set_olh(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx, cons } if (i == MAX_ECANCELED_RETRY) { - ldout(cct, 0) << "ERROR: exceeded max ECANCELED retries, aborting (EIO)" << dendl; + ldpp_dout(dpp, 0) << "ERROR: exceeded max ECANCELED retries, aborting (EIO)" << dendl; return -EIO; } @@ -7357,9 +7363,9 @@ int RGWRados::unlink_obj_instance(const DoutPrefixProvider *dpp, RGWObjectCtx& o if (ret < 0) return ret; - ret = olh_init_modification(bucket_info, *state, olh_obj, &op_tag); + ret = olh_init_modification(dpp, bucket_info, *state, olh_obj, &op_tag); if (ret < 0) { - 
ldout(cct, 20) << "olh_init_modification() target_obj=" << target_obj << " returned " << ret << dendl; + ldpp_dout(dpp, 20) << "olh_init_modification() target_obj=" << target_obj << " returned " << ret << dendl; if (ret == -ECANCELED) { continue; } @@ -7370,7 +7376,7 @@ int RGWRados::unlink_obj_instance(const DoutPrefixProvider *dpp, RGWObjectCtx& o ret = bucket_index_unlink_instance(dpp, bucket_info, target_obj, op_tag, olh_tag, olh_epoch, zones_trace); if (ret < 0) { - ldout(cct, 20) << "bucket_index_unlink_instance() target_obj=" << target_obj << " returned " << ret << dendl; + ldpp_dout(dpp, 20) << "bucket_index_unlink_instance() target_obj=" << target_obj << " returned " << ret << dendl; if (ret == -ECANCELED) { continue; } @@ -7380,7 +7386,7 @@ int RGWRados::unlink_obj_instance(const DoutPrefixProvider *dpp, RGWObjectCtx& o } if (i == MAX_ECANCELED_RETRY) { - ldout(cct, 0) << "ERROR: exceeded max ECANCELED retries, aborting (EIO)" << dendl; + ldpp_dout(dpp, 0) << "ERROR: exceeded max ECANCELED retries, aborting (EIO)" << dendl; return -EIO; } @@ -7412,14 +7418,14 @@ void RGWRados::gen_rand_obj_instance_name(rgw_obj *target_obj) gen_rand_obj_instance_name(&target_obj->key); } -int RGWRados::get_olh(const RGWBucketInfo& bucket_info, const rgw_obj& obj, RGWOLHInfo *olh) +int RGWRados::get_olh(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, RGWOLHInfo *olh) { map attrset; ObjectReadOperation op; op.getxattrs(&attrset, NULL); - int r = obj_operate(bucket_info, obj, &op); + int r = obj_operate(dpp, bucket_info, obj, &op); if (r < 0) { return r; } @@ -7463,10 +7469,10 @@ void RGWRados::check_pending_olh_entries(map& pending_entrie } } -int RGWRados::remove_olh_pending_entries(const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& olh_obj, map& pending_attrs) +int RGWRados::remove_olh_pending_entries(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& olh_obj, map& 
pending_attrs) { rgw_rados_ref ref; - int r = get_obj_head_ref(bucket_info, olh_obj, &ref); + int r = get_obj_head_ref(dpp, bucket_info, olh_obj, &ref); if (r < 0) { return r; } @@ -7477,19 +7483,19 @@ int RGWRados::remove_olh_pending_entries(const RGWBucketInfo& bucket_info, RGWOb auto i = pending_attrs.begin(); while (i != pending_attrs.end()) { ObjectWriteOperation op; - bucket_index_guard_olh_op(state, op); + bucket_index_guard_olh_op(dpp, state, op); for (int n = 0; n < max_entries && i != pending_attrs.end(); ++n, ++i) { op.rmxattr(i->first.c_str()); } - r = rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield); + r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield); if (r == -ENOENT || r == -ECANCELED) { /* raced with some other change, shouldn't sweat about it */ return 0; } if (r < 0) { - ldout(cct, 0) << "ERROR: could not apply olh update, r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: could not apply olh update, r=" << r << dendl; return r; } } @@ -7505,14 +7511,14 @@ int RGWRados::follow_olh(const DoutPrefixProvider *dpp, const RGWBucketInfo& buc check_pending_olh_entries(pending_entries, &rm_pending_entries); if (!rm_pending_entries.empty()) { - int ret = remove_olh_pending_entries(bucket_info, *state, olh_obj, rm_pending_entries); + int ret = remove_olh_pending_entries(dpp, bucket_info, *state, olh_obj, rm_pending_entries); if (ret < 0) { - ldout(cct, 20) << "ERROR: rm_pending_entries returned ret=" << ret << dendl; + ldpp_dout(dpp, 20) << "ERROR: rm_pending_entries returned ret=" << ret << dendl; return ret; } } if (!pending_entries.empty()) { - ldout(cct, 20) << __func__ << "(): found pending entries, need to update_olh() on bucket=" << olh_obj.bucket << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): found pending entries, need to update_olh() on bucket=" << olh_obj.bucket << dendl; int ret = update_olh(dpp, obj_ctx, state, bucket_info, olh_obj); if (ret < 0) { @@ -7540,12 +7546,13 @@ int 
RGWRados::follow_olh(const DoutPrefixProvider *dpp, const RGWBucketInfo& buc return 0; } -int RGWRados::raw_obj_stat(rgw_raw_obj& obj, uint64_t *psize, real_time *pmtime, uint64_t *epoch, +int RGWRados::raw_obj_stat(const DoutPrefixProvider *dpp, + rgw_raw_obj& obj, uint64_t *psize, real_time *pmtime, uint64_t *epoch, map *attrs, bufferlist *first_chunk, RGWObjVersionTracker *objv_tracker, optional_yield y) { rgw_rados_ref ref; - int r = get_raw_obj_ref(obj, &ref); + int r = get_raw_obj_ref(dpp, obj, &ref); if (r < 0) { return r; } @@ -7568,7 +7575,7 @@ int RGWRados::raw_obj_stat(rgw_raw_obj& obj, uint64_t *psize, real_time *pmtime, op.read(0, cct->_conf->rgw_max_chunk_size, first_chunk, NULL); } bufferlist outbl; - r = rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, &outbl, null_yield); + r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, &outbl, null_yield); if (epoch) { *epoch = ref.pool.ioctx().get_last_version(); @@ -7588,12 +7595,12 @@ int RGWRados::raw_obj_stat(rgw_raw_obj& obj, uint64_t *psize, real_time *pmtime, return 0; } -int RGWRados::get_bucket_stats(RGWBucketInfo& bucket_info, int shard_id, string *bucket_ver, string *master_ver, +int RGWRados::get_bucket_stats(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, int shard_id, string *bucket_ver, string *master_ver, map& stats, string *max_marker, bool *syncstopped) { vector headers; map bucket_instance_ids; - int r = cls_bucket_head(bucket_info, shard_id, headers, &bucket_instance_ids); + int r = cls_bucket_head(dpp, bucket_info, shard_id, headers, &bucket_instance_ids); if (r < 0) { return r; } @@ -7667,12 +7674,12 @@ public: } }; -int RGWRados::get_bucket_stats_async(RGWBucketInfo& bucket_info, int shard_id, RGWGetBucketStats_CB *ctx) +int RGWRados::get_bucket_stats_async(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, int shard_id, RGWGetBucketStats_CB *ctx) { int num_aio = 0; RGWGetBucketStatsContext *get_ctx = new RGWGetBucketStatsContext(ctx, 
bucket_info.layout.current_index.layout.normal.num_shards ? : 1); ceph_assert(get_ctx); - int r = cls_bucket_head_async(bucket_info, shard_id, get_ctx, &num_aio); + int r = cls_bucket_head_async(dpp, bucket_info, shard_id, get_ctx, &num_aio); if (r < 0) { ctx->put(); if (num_aio) { @@ -7816,7 +7823,7 @@ int RGWRados::update_containers_stats(map& m, const DoutPr return ret; } - int r = cls_bucket_head(bucket_info, RGW_NO_SHARD, headers); + int r = cls_bucket_head(dpp, bucket_info, RGW_NO_SHARD, headers); if (r < 0) return r; @@ -7840,10 +7847,10 @@ int RGWRados::update_containers_stats(map& m, const DoutPr return m.size(); } -int RGWRados::append_async(rgw_raw_obj& obj, size_t size, bufferlist& bl) +int RGWRados::append_async(const DoutPrefixProvider *dpp, rgw_raw_obj& obj, size_t size, bufferlist& bl) { rgw_rados_ref ref; - int r = get_raw_obj_ref(obj, &ref); + int r = get_raw_obj_ref(dpp, obj, &ref); if (r < 0) { return r; } @@ -7855,12 +7862,12 @@ int RGWRados::append_async(rgw_raw_obj& obj, size_t size, bufferlist& bl) return r; } -int RGWRados::pool_iterate_begin(const rgw_pool& pool, RGWPoolIterCtx& ctx) +int RGWRados::pool_iterate_begin(const DoutPrefixProvider *dpp, const rgw_pool& pool, RGWPoolIterCtx& ctx) { librados::IoCtx& io_ctx = ctx.io_ctx; librados::NObjectIterator& iter = ctx.iter; - int r = open_pool_ctx(pool, io_ctx, false); + int r = open_pool_ctx(dpp, pool, io_ctx, false); if (r < 0) return r; @@ -7869,18 +7876,18 @@ int RGWRados::pool_iterate_begin(const rgw_pool& pool, RGWPoolIterCtx& ctx) return 0; } -int RGWRados::pool_iterate_begin(const rgw_pool& pool, const string& cursor, RGWPoolIterCtx& ctx) +int RGWRados::pool_iterate_begin(const DoutPrefixProvider *dpp, const rgw_pool& pool, const string& cursor, RGWPoolIterCtx& ctx) { librados::IoCtx& io_ctx = ctx.io_ctx; librados::NObjectIterator& iter = ctx.iter; - int r = open_pool_ctx(pool, io_ctx, false); + int r = open_pool_ctx(dpp, pool, io_ctx, false); if (r < 0) return r; 
librados::ObjectCursor oc; if (!oc.from_str(cursor)) { - ldout(cct, 10) << "failed to parse cursor: " << cursor << dendl; + ldpp_dout(dpp, 10) << "failed to parse cursor: " << cursor << dendl; return -EINVAL; } @@ -7889,11 +7896,11 @@ int RGWRados::pool_iterate_begin(const rgw_pool& pool, const string& cursor, RGW return 0; } catch (const std::system_error& e) { r = -e.code().value(); - ldout(cct, 10) << "nobjects_begin threw " << e.what() + ldpp_dout(dpp, 10) << "nobjects_begin threw " << e.what() << ", returning " << r << dendl; return r; } catch (const std::exception& e) { - ldout(cct, 10) << "nobjects_begin threw " << e.what() + ldpp_dout(dpp, 10) << "nobjects_begin threw " << e.what() << ", returning -5" << dendl; return -EIO; } @@ -7954,12 +7961,12 @@ int RGWRados::pool_iterate(RGWPoolIterCtx& ctx, uint32_t num, vectorinitialized) { - int r = pool_iterate_begin(pool, marker, ctx->iter_ctx); + int r = pool_iterate_begin(dpp, pool, marker, ctx->iter_ctx); if (r < 0) { - ldout(cct, 10) << "failed to list objects pool_iterate_begin() returned r=" << r << dendl; + ldpp_dout(dpp, 10) << "failed to list objects pool_iterate_begin() returned r=" << r << dendl; return r; } ctx->initialized = true; @@ -7967,7 +7974,7 @@ int RGWRados::list_raw_objects_init(const rgw_pool& pool, const string& marker, return 0; } -int RGWRados::list_raw_objects_next(const string& prefix_filter, int max, +int RGWRados::list_raw_objects_next(const DoutPrefixProvider *dpp, const string& prefix_filter, int max, RGWListRawObjsCtx& ctx, list& oids, bool *is_truncated) { @@ -7979,7 +7986,7 @@ int RGWRados::list_raw_objects_next(const string& prefix_filter, int max, int r = pool_iterate(ctx.iter_ctx, max, objs, is_truncated, &filter); if (r < 0) { if(r != -ENOENT) - ldout(cct, 10) << "failed to list objects pool_iterate returned r=" << r << dendl; + ldpp_dout(dpp, 10) << "failed to list objects pool_iterate returned r=" << r << dendl; return r; } @@ -7991,18 +7998,18 @@ int 
RGWRados::list_raw_objects_next(const string& prefix_filter, int max, return oids.size(); } -int RGWRados::list_raw_objects(const rgw_pool& pool, const string& prefix_filter, +int RGWRados::list_raw_objects(const DoutPrefixProvider *dpp, const rgw_pool& pool, const string& prefix_filter, int max, RGWListRawObjsCtx& ctx, list& oids, bool *is_truncated) { if (!ctx.initialized) { - int r = list_raw_objects_init(pool, string(), &ctx); + int r = list_raw_objects_init(dpp, pool, string(), &ctx); if (r < 0) { return r; } } - return list_raw_objects_next(prefix_filter, max, ctx, oids, is_truncated); + return list_raw_objects_next(dpp, prefix_filter, max, ctx, oids, is_truncated); } string RGWRados::list_raw_objs_get_cursor(RGWListRawObjsCtx& ctx) @@ -8010,13 +8017,13 @@ string RGWRados::list_raw_objs_get_cursor(RGWListRawObjsCtx& ctx) return pool_iterate_get_cursor(ctx.iter_ctx); } -int RGWRados::bi_get_instance(const RGWBucketInfo& bucket_info, const rgw_obj& obj, +int RGWRados::bi_get_instance(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, rgw_bucket_dir_entry *dirent) { rgw_cls_bi_entry bi_entry; - int r = bi_get(bucket_info, obj, BIIndexType::Instance, &bi_entry); + int r = bi_get(dpp, bucket_info, obj, BIIndexType::Instance, &bi_entry); if (r < 0 && r != -ENOENT) { - ldout(cct, 0) << "ERROR: bi_get() returned r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: bi_get() returned r=" << r << dendl; } if (r < 0) { return r; @@ -8025,20 +8032,20 @@ int RGWRados::bi_get_instance(const RGWBucketInfo& bucket_info, const rgw_obj& o try { decode(*dirent, iter); } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: failed to decode bi_entry()" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to decode bi_entry()" << dendl; return -EIO; } return 0; } -int RGWRados::bi_get_olh(const RGWBucketInfo& bucket_info, const rgw_obj& obj, +int RGWRados::bi_get_olh(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, 
rgw_bucket_olh_entry *olh) { rgw_cls_bi_entry bi_entry; - int r = bi_get(bucket_info, obj, BIIndexType::OLH, &bi_entry); + int r = bi_get(dpp, bucket_info, obj, BIIndexType::OLH, &bi_entry); if (r < 0 && r != -ENOENT) { - ldout(cct, 0) << "ERROR: bi_get() returned r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: bi_get() returned r=" << r << dendl; } if (r < 0) { return r; @@ -8047,20 +8054,20 @@ int RGWRados::bi_get_olh(const RGWBucketInfo& bucket_info, const rgw_obj& obj, try { decode(*olh, iter); } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: failed to decode bi_entry()" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to decode bi_entry()" << dendl; return -EIO; } return 0; } -int RGWRados::bi_get(const RGWBucketInfo& bucket_info, const rgw_obj& obj, +int RGWRados::bi_get(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, BIIndexType index_type, rgw_cls_bi_entry *entry) { BucketShard bs(this); - int ret = bs.init(bucket_info, obj); + int ret = bs.init(dpp, bucket_info, obj); if (ret < 0) { - ldout(cct, 5) << "bs.init() returned ret=" << ret << dendl; + ldpp_dout(dpp, 5) << "bs.init() returned ret=" << ret << dendl; return ret; } @@ -8092,7 +8099,7 @@ int RGWRados::bi_put(const DoutPrefixProvider *dpp, rgw_bucket& bucket, rgw_obj& BucketShard bs(this); int ret = bs.init(bucket, obj, nullptr /* no RGWBucketInfo */, dpp); if (ret < 0) { - ldout(cct, 5) << "bs.init() returned ret=" << ret << dendl; + ldpp_dout(dpp, 5) << "bs.init() returned ret=" << ret << dendl; return ret; } @@ -8105,7 +8112,7 @@ int RGWRados::bi_list(const DoutPrefixProvider *dpp, rgw_bucket& bucket, const s BucketShard bs(this); int ret = bs.init(bucket, obj, nullptr /* no RGWBucketInfo */, dpp); if (ret < 0) { - ldout(cct, 5) << "bs.init() returned ret=" << ret << dendl; + ldpp_dout(dpp, 5) << "bs.init() returned ret=" << ret << dendl; return ret; } @@ -8150,16 +8157,16 @@ int RGWRados::bi_list(const DoutPrefixProvider *dpp, const RGWBucketInfo& 
bucket BucketShard bs(this); int ret = bs.init(bucket_info.bucket, shard_id, bucket_info.layout.current_index, nullptr /* no RGWBucketInfo */, dpp); if (ret < 0) { - ldout(cct, 5) << "bs.init() returned ret=" << ret << dendl; + ldpp_dout(dpp, 5) << "bs.init() returned ret=" << ret << dendl; return ret; } return bi_list(bs, filter_obj, marker, max, entries, is_truncated); } -int RGWRados::gc_operate(string& oid, librados::ObjectWriteOperation *op) +int RGWRados::gc_operate(const DoutPrefixProvider *dpp, string& oid, librados::ObjectWriteOperation *op) { - return rgw_rados_operate(gc_pool_ctx, oid, op, null_yield); + return rgw_rados_operate(dpp, gc_pool_ctx, oid, op, null_yield); } int RGWRados::gc_aio_operate(const string& oid, librados::AioCompletion *c, @@ -8168,9 +8175,9 @@ int RGWRados::gc_aio_operate(const string& oid, librados::AioCompletion *c, return gc_pool_ctx.aio_operate(oid, c, op); } -int RGWRados::gc_operate(string& oid, librados::ObjectReadOperation *op, bufferlist *pbl) +int RGWRados::gc_operate(const DoutPrefixProvider *dpp, string& oid, librados::ObjectReadOperation *op, bufferlist *pbl) { - return rgw_rados_operate(gc_pool_ctx, oid, op, pbl, null_yield); + return rgw_rados_operate(dpp, gc_pool_ctx, oid, op, pbl, null_yield); } int RGWRados::list_gc_objs(int *index, string& marker, uint32_t max, bool expired_only, std::list& result, bool *truncated, bool& processing_queue) @@ -8205,7 +8212,7 @@ bool RGWRados::process_expire_objects(const DoutPrefixProvider *dpp) return obj_expirer->inspect_all_shards(dpp, utime_t(), ceph_clock_now()); } -int RGWRados::cls_obj_prepare_op(BucketShard& bs, RGWModifyOp op, string& tag, +int RGWRados::cls_obj_prepare_op(const DoutPrefixProvider *dpp, BucketShard& bs, RGWModifyOp op, string& tag, rgw_obj& obj, uint16_t bilog_flags, optional_yield y, rgw_zone_set *_zones_trace) { rgw_zone_set zones_trace; @@ -8218,7 +8225,7 @@ int RGWRados::cls_obj_prepare_op(BucketShard& bs, RGWModifyOp op, string& tag, cls_rgw_obj_key 
key(obj.key.get_index_key_name(), obj.key.instance); cls_rgw_guard_bucket_resharding(o, -ERR_BUSY_RESHARDING); cls_rgw_bucket_prepare_op(o, op, tag, key, obj.key.get_loc(), svc.zone->get_zone().log_data, bilog_flags, zones_trace); - return bs.bucket_obj.operate(&o, y); + return bs.bucket_obj.operate(dpp, &o, y); } int RGWRados::cls_obj_complete_op(BucketShard& bs, const rgw_obj& obj, RGWModifyOp op, string& tag, @@ -8287,11 +8294,11 @@ int RGWRados::cls_obj_complete_cancel(BucketShard& bs, string& tag, rgw_obj& obj zones_trace); } -int RGWRados::cls_obj_set_bucket_tag_timeout(RGWBucketInfo& bucket_info, uint64_t timeout) +int RGWRados::cls_obj_set_bucket_tag_timeout(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, uint64_t timeout) { RGWSI_RADOS::Pool index_pool; map bucket_objs; - int r = svc.bi_rados->open_bucket_index(bucket_info, std::nullopt, &index_pool, &bucket_objs, nullptr); + int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, std::nullopt, &index_pool, &bucket_objs, nullptr); if (r < 0) return r; @@ -8369,7 +8376,7 @@ int RGWRados::cls_bucket_list_ordered(const DoutPrefixProvider *dpp, // value - list result for the corresponding oid (shard), it is filled by // the AIO callback map shard_oids; - int r = svc.bi_rados->open_bucket_index(bucket_info, shard_id, + int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, shard_id, &index_pool, &shard_oids, nullptr); if (r < 0) { @@ -8631,7 +8638,7 @@ int RGWRados::cls_bucket_list_unordered(const DoutPrefixProvider *dpp, RGWSI_RADOS::Pool index_pool; map oids; - int r = svc.bi_rados->open_bucket_index(bucket_info, shard_id, &index_pool, &oids, nullptr); + int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, shard_id, &index_pool, &oids, nullptr); if (r < 0) { return r; } @@ -8664,7 +8671,7 @@ int RGWRados::cls_bucket_list_unordered(const DoutPrefixProvider *dpp, rgw_obj_key obj_key; bool parsed = rgw_obj_key::parse_raw_oid(key, &obj_key); if (!parsed) { - ldout(cct, 0) << + ldpp_dout(dpp, 
0) << "ERROR: RGWRados::cls_bucket_list_unordered received an invalid " "start marker: '" << start_after << "'" << dendl; return -EINVAL; @@ -8694,7 +8701,7 @@ int RGWRados::cls_bucket_list_unordered(const DoutPrefixProvider *dpp, cls_rgw_bucket_list_op(op, marker, prefix, empty_delimiter, num_entries, list_versions, &result); - r = rgw_rados_operate(ioctx, oid, &op, nullptr, null_yield); + r = rgw_rados_operate(dpp, ioctx, oid, &op, nullptr, null_yield); if (r < 0) { return r; } @@ -8769,13 +8776,13 @@ check_updates: } // RGWRados::cls_bucket_list_unordered -int RGWRados::cls_obj_usage_log_add(const string& oid, +int RGWRados::cls_obj_usage_log_add(const DoutPrefixProvider *dpp, const string& oid, rgw_usage_log_info& info) { rgw_raw_obj obj(svc.zone->get_zone_params().usage_log_pool, oid); rgw_rados_ref ref; - int r = get_raw_obj_ref(obj, &ref); + int r = get_raw_obj_ref(dpp, obj, &ref); if (r < 0) { return r; } @@ -8783,11 +8790,11 @@ int RGWRados::cls_obj_usage_log_add(const string& oid, ObjectWriteOperation op; cls_rgw_usage_log_add(op, info); - r = rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield); + r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield); return r; } -int RGWRados::cls_obj_usage_log_read(const string& oid, const string& user, const string& bucket, +int RGWRados::cls_obj_usage_log_read(const DoutPrefixProvider *dpp, const string& oid, const string& user, const string& bucket, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, string& read_iter, map& usage, bool *is_truncated) @@ -8795,7 +8802,7 @@ int RGWRados::cls_obj_usage_log_read(const string& oid, const string& user, cons rgw_raw_obj obj(svc.zone->get_zone_params().usage_log_pool, oid); rgw_rados_ref ref; - int r = get_raw_obj_ref(obj, &ref); + int r = get_raw_obj_ref(dpp, obj, &ref); if (r < 0) { return r; } @@ -8808,13 +8815,13 @@ int RGWRados::cls_obj_usage_log_read(const string& oid, const string& user, cons return r; } -static int 
cls_rgw_usage_log_trim_repeat(rgw_rados_ref ref, const string& user, const string& bucket, uint64_t start_epoch, uint64_t end_epoch) +static int cls_rgw_usage_log_trim_repeat(const DoutPrefixProvider *dpp, rgw_rados_ref ref, const string& user, const string& bucket, uint64_t start_epoch, uint64_t end_epoch) { bool done = false; do { librados::ObjectWriteOperation op; cls_rgw_usage_log_trim(op, user, bucket, start_epoch, end_epoch); - int r = rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield); + int r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield); if (r == -ENODATA) done = true; else if (r < 0) @@ -8824,45 +8831,45 @@ static int cls_rgw_usage_log_trim_repeat(rgw_rados_ref ref, const string& user, return 0; } -int RGWRados::cls_obj_usage_log_trim(const string& oid, const string& user, const string& bucket, +int RGWRados::cls_obj_usage_log_trim(const DoutPrefixProvider *dpp, const string& oid, const string& user, const string& bucket, uint64_t start_epoch, uint64_t end_epoch) { rgw_raw_obj obj(svc.zone->get_zone_params().usage_log_pool, oid); rgw_rados_ref ref; - int r = get_raw_obj_ref(obj, &ref); + int r = get_raw_obj_ref(dpp, obj, &ref); if (r < 0) { return r; } - r = cls_rgw_usage_log_trim_repeat(ref, user, bucket, start_epoch, end_epoch); + r = cls_rgw_usage_log_trim_repeat(dpp, ref, user, bucket, start_epoch, end_epoch); return r; } -int RGWRados::cls_obj_usage_log_clear(string& oid) +int RGWRados::cls_obj_usage_log_clear(const DoutPrefixProvider *dpp, string& oid) { rgw_raw_obj obj(svc.zone->get_zone_params().usage_log_pool, oid); rgw_rados_ref ref; - int r = get_raw_obj_ref(obj, &ref); + int r = get_raw_obj_ref(dpp, obj, &ref); if (r < 0) { return r; } librados::ObjectWriteOperation op; cls_rgw_usage_log_clear(op); - r = rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield); + r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield); return r; } -int 
RGWRados::remove_objs_from_index(RGWBucketInfo& bucket_info, list& oid_list) +int RGWRados::remove_objs_from_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, list& oid_list) { RGWSI_RADOS::Pool index_pool; string dir_oid; uint8_t suggest_flag = (svc.zone->get_zone().log_data ? CEPH_RGW_DIR_SUGGEST_LOG_OP : 0); - int r = svc.bi_rados->open_bucket_index(bucket_info, &index_pool, &dir_oid); + int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, &index_pool, &dir_oid); if (r < 0) return r; @@ -8871,7 +8878,7 @@ int RGWRados::remove_objs_from_index(RGWBucketInfo& bucket_info, listattrset.end()) { r = decode_policy(iter->second, &owner); if (r < 0) { - dout(0) << "WARNING: could not decode policy for object: " << obj << dendl; + ldpp_dout(dpp, 0) << "WARNING: could not decode policy for object: " << obj << dendl; } } if (astate->manifest) { RGWObjManifest::obj_iterator miter; RGWObjManifest& manifest = *astate->manifest; - for (miter = manifest.obj_begin(); miter != manifest.obj_end(); ++miter) { + for (miter = manifest.obj_begin(dpp); miter != manifest.obj_end(dpp); ++miter) { const rgw_raw_obj& raw_loc = miter.get_location().get_raw_obj(store); rgw_obj loc; RGWSI_Tier_RADOS::raw_obj_to_obj(manifest.get_obj().bucket, raw_loc, &loc); if (loc.key.ns == RGW_OBJ_NS_MULTIPART) { - dout(10) << "check_disk_state(): removing manifest part from index: " << loc << dendl; + ldpp_dout(dpp, 0) << "check_disk_state(): removing manifest part from index: " << loc << dendl; r = delete_obj_index(loc, astate->mtime, dpp); if (r < 0) { - dout(0) << "WARNING: delete_obj_index() returned r=" << r << dendl; + ldpp_dout(dpp, 0) << "WARNING: delete_obj_index() returned r=" << r << dendl; } } } @@ -8996,21 +9003,21 @@ int RGWRados::check_disk_state(const DoutPrefixProvider *dpp, return 0; } -int RGWRados::cls_bucket_head(const RGWBucketInfo& bucket_info, int shard_id, vector& headers, map *bucket_instance_ids) +int RGWRados::cls_bucket_head(const DoutPrefixProvider *dpp, 
const RGWBucketInfo& bucket_info, int shard_id, vector& headers, map *bucket_instance_ids) { RGWSI_RADOS::Pool index_pool; map oids; map list_results; - int r = svc.bi_rados->open_bucket_index(bucket_info, shard_id, &index_pool, &oids, bucket_instance_ids); + int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, shard_id, &index_pool, &oids, bucket_instance_ids); if (r < 0) { - ldout(cct, 20) << "cls_bucket_head: open_bucket_index() returned " + ldpp_dout(dpp, 20) << "cls_bucket_head: open_bucket_index() returned " << r << dendl; return r; } r = CLSRGWIssueGetDirHeader(index_pool.ioctx(), oids, list_results, cct->_conf->rgw_bucket_index_max_aio)(); if (r < 0) { - ldout(cct, 20) << "cls_bucket_head: CLSRGWIssueGetDirHeader() returned " + ldpp_dout(dpp, 20) << "cls_bucket_head: CLSRGWIssueGetDirHeader() returned " << r << dendl; return r; } @@ -9022,11 +9029,11 @@ int RGWRados::cls_bucket_head(const RGWBucketInfo& bucket_info, int shard_id, ve return 0; } -int RGWRados::cls_bucket_head_async(const RGWBucketInfo& bucket_info, int shard_id, RGWGetDirHeader_CB *ctx, int *num_aio) +int RGWRados::cls_bucket_head_async(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, int shard_id, RGWGetDirHeader_CB *ctx, int *num_aio) { RGWSI_RADOS::Pool index_pool; map bucket_objs; - int r = svc.bi_rados->open_bucket_index(bucket_info, shard_id, &index_pool, &bucket_objs, nullptr); + int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, shard_id, &index_pool, &bucket_objs, nullptr); if (r < 0) return r; @@ -9085,10 +9092,10 @@ int RGWRados::check_bucket_shards(const RGWBucketInfo& bucket_info, "; new num shards " << final_num_shards << " (suggested " << suggested_num_shards << ")" << dendl; - return add_bucket_to_reshard(bucket_info, final_num_shards, dpp); + return add_bucket_to_reshard(dpp, bucket_info, final_num_shards); } -int RGWRados::add_bucket_to_reshard(const RGWBucketInfo& bucket_info, uint32_t new_num_shards, const DoutPrefixProvider *dpp) +int 
RGWRados::add_bucket_to_reshard(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, uint32_t new_num_shards) { RGWReshard reshard(this->store, dpp); @@ -9108,7 +9115,7 @@ int RGWRados::add_bucket_to_reshard(const RGWBucketInfo& bucket_info, uint32_t n entry.old_num_shards = num_source_shards; entry.new_num_shards = new_num_shards; - return reshard.add(entry); + return reshard.add(dpp, entry); } int RGWRados::check_quota(const rgw_user& bucket_owner, rgw_bucket& bucket, @@ -9162,12 +9169,12 @@ librados::Rados* RGWRados::get_rados_handle() return &rados; } -int RGWRados::delete_raw_obj_aio(const rgw_raw_obj& obj, list& handles) +int RGWRados::delete_raw_obj_aio(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, list& handles) { rgw_rados_ref ref; - int ret = get_raw_obj_ref(obj, &ref); + int ret = get_raw_obj_ref(dpp, obj, &ref); if (ret < 0) { - lderr(cct) << "ERROR: failed to get obj ref with ret=" << ret << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to get obj ref with ret=" << ret << dendl; return ret; } @@ -9178,7 +9185,7 @@ int RGWRados::delete_raw_obj_aio(const rgw_raw_obj& obj, listpause(); - ldout(cct, 1) << "Frontends paused" << dendl; + ldpp_dout(&dp, 1) << "Frontends paused" << dendl; // TODO: make RGWRados responsible for rgw_log_usage lifetime rgw_log_usage_finalize(); @@ -93,7 +94,7 @@ void RGWRealmReloader::reload() StoreManager::close_storage(store); store = nullptr; - ldout(cct, 1) << "Store closed" << dendl; + ldpp_dout(&dp, 1) << "Store closed" << dendl; { // allow a new notify to reschedule us. 
it's important that we do this // before we start loading the new realm, or we could miss some updates @@ -101,7 +102,6 @@ void RGWRealmReloader::reload() reload_scheduled = nullptr; } - const DoutPrefix dp(cct, dout_subsys, "rgw realm reloader: "); while (!store) { // recreate and initialize a new store @@ -115,7 +115,7 @@ void RGWRealmReloader::reload() cct->_conf.get_val("rgw_dynamic_resharding"), cct->_conf->rgw_cache_enabled); - ldout(cct, 1) << "Creating new store" << dendl; + ldpp_dout(&dp, 1) << "Creating new store" << dendl; rgw::sal::Store* store_cleanup = nullptr; { @@ -126,7 +126,7 @@ void RGWRealmReloader::reload() // sleep until we get another notification, and retry until we get // a working configuration if (store == nullptr) { - lderr(cct) << "Failed to reinitialize RGWRados after a realm " + ldpp_dout(&dp, -1) << "Failed to reinitialize RGWRados after a realm " "configuration update. Waiting for a new update." << dendl; // sleep until another event is scheduled @@ -147,7 +147,7 @@ void RGWRealmReloader::reload() } if (store_cleanup) { - ldout(cct, 4) << "Got another notification, restarting RGWRados " + ldpp_dout(&dp, 4) << "Got another notification, restarting RGWRados " "initialization." 
<< dendl; StoreManager::close_storage(store_cleanup); @@ -156,19 +156,19 @@ void RGWRealmReloader::reload() int r = store->register_to_service_map("rgw", service_map_meta); if (r < 0) { - lderr(cct) << "ERROR: failed to register to service map: " << cpp_strerror(-r) << dendl; + ldpp_dout(&dp, -1) << "ERROR: failed to register to service map: " << cpp_strerror(-r) << dendl; /* ignore error */ } - ldout(cct, 1) << "Finishing initialization of new store" << dendl; + ldpp_dout(&dp, 1) << "Finishing initialization of new store" << dendl; // finish initializing the new store - ldout(cct, 1) << " - REST subsystem init" << dendl; + ldpp_dout(&dp, 1) << " - REST subsystem init" << dendl; rgw_rest_init(cct, store->get_zone()->get_zonegroup()); - ldout(cct, 1) << " - usage subsystem init" << dendl; + ldpp_dout(&dp, 1) << " - usage subsystem init" << dendl; rgw_log_usage_init(cct, store); - ldout(cct, 1) << "Resuming frontends with new realm configuration." << dendl; + ldpp_dout(&dp, 1) << "Resuming frontends with new realm configuration." << dendl; frontends->resume(store); } diff --git a/src/rgw/rgw_realm_watcher.cc b/src/rgw/rgw_realm_watcher.cc index aec48e76e02..f6cd3475985 100644 --- a/src/rgw/rgw_realm_watcher.cc +++ b/src/rgw/rgw_realm_watcher.cc @@ -13,19 +13,19 @@ #define dout_prefix (*_dout << "rgw realm watcher: ") -RGWRealmWatcher::RGWRealmWatcher(CephContext* cct, const RGWRealm& realm) +RGWRealmWatcher::RGWRealmWatcher(const DoutPrefixProvider *dpp, CephContext* cct, const RGWRealm& realm) : cct(cct) { // no default realm, nothing to watch if (realm.get_id().empty()) { - ldout(cct, 4) << "No realm, disabling dynamic reconfiguration." << dendl; + ldpp_dout(dpp, 4) << "No realm, disabling dynamic reconfiguration." 
<< dendl; return; } // establish the watch on RGWRealm - int r = watch_start(realm); + int r = watch_start(dpp, realm); if (r < 0) { - lderr(cct) << "Failed to establish a watch on RGWRealm, " + ldpp_dout(dpp, -1) << "Failed to establish a watch on RGWRealm, " "disabling dynamic reconfiguration." << dendl; return; } @@ -78,27 +78,27 @@ void RGWRealmWatcher::handle_error(uint64_t cookie, int err) watch_restart(); } -int RGWRealmWatcher::watch_start(const RGWRealm& realm) +int RGWRealmWatcher::watch_start(const DoutPrefixProvider *dpp, const RGWRealm& realm) { // initialize a Rados client int r = rados.init_with_context(cct); if (r < 0) { - lderr(cct) << "Rados client initialization failed with " + ldpp_dout(dpp, -1) << "Rados client initialization failed with " << cpp_strerror(-r) << dendl; return r; } r = rados.connect(); if (r < 0) { - lderr(cct) << "Rados client connection failed with " + ldpp_dout(dpp, -1) << "Rados client connection failed with " << cpp_strerror(-r) << dendl; return r; } // open an IoCtx for the realm's pool rgw_pool pool(realm.get_pool(cct)); - r = rgw_init_ioctx(&rados, pool, pool_ctx); + r = rgw_init_ioctx(dpp, &rados, pool, pool_ctx); if (r < 0) { - lderr(cct) << "Failed to open pool " << pool + ldpp_dout(dpp, -1) << "Failed to open pool " << pool << " with " << cpp_strerror(-r) << dendl; rados.shutdown(); return r; @@ -108,14 +108,14 @@ int RGWRealmWatcher::watch_start(const RGWRealm& realm) auto oid = realm.get_control_oid(); r = pool_ctx.watch2(oid, &watch_handle, this); if (r < 0) { - lderr(cct) << "Failed to watch " << oid + ldpp_dout(dpp, -1) << "Failed to watch " << oid << " with " << cpp_strerror(-r) << dendl; pool_ctx.close(); rados.shutdown(); return r; } - ldout(cct, 10) << "Watching " << oid << dendl; + ldpp_dout(dpp, 10) << "Watching " << oid << dendl; std::swap(watch_oid, oid); return 0; } diff --git a/src/rgw/rgw_realm_watcher.h b/src/rgw/rgw_realm_watcher.h index c6741ea96da..b2e3ac6b9d6 100644 --- 
a/src/rgw/rgw_realm_watcher.h +++ b/src/rgw/rgw_realm_watcher.h @@ -36,7 +36,7 @@ class RGWRealmWatcher : public librados::WatchCtx2 { bufferlist::const_iterator& p) = 0; }; - RGWRealmWatcher(CephContext* cct, const RGWRealm& realm); + RGWRealmWatcher(const DoutPrefixProvider *dpp, CephContext* cct, const RGWRealm& realm); ~RGWRealmWatcher() override; /// register a watcher for the given notification type @@ -59,7 +59,7 @@ class RGWRealmWatcher : public librados::WatchCtx2 { uint64_t watch_handle = 0; std::string watch_oid; - int watch_start(const RGWRealm& realm); + int watch_start(const DoutPrefixProvider *dpp, const RGWRealm& realm); int watch_restart(); void watch_stop(); diff --git a/src/rgw/rgw_reshard.cc b/src/rgw/rgw_reshard.cc index 4c8af6ab522..2ff08df78e3 100644 --- a/src/rgw/rgw_reshard.cc +++ b/src/rgw/rgw_reshard.cc @@ -259,23 +259,24 @@ RGWBucketReshard::RGWBucketReshard(rgw::sal::RadosStore* _store, outer_reshard_lock(_outer_reshard_lock) { } -int RGWBucketReshard::set_resharding_status(rgw::sal::RadosStore* store, +int RGWBucketReshard::set_resharding_status(const DoutPrefixProvider *dpp, + rgw::sal::RadosStore* store, const RGWBucketInfo& bucket_info, const string& new_instance_id, int32_t num_shards, cls_rgw_reshard_status status) { if (new_instance_id.empty()) { - ldout(store->ctx(), 0) << __func__ << " missing new bucket instance id" << dendl; + ldpp_dout(dpp, 0) << __func__ << " missing new bucket instance id" << dendl; return -EINVAL; } cls_rgw_bucket_instance_entry instance_entry; instance_entry.set_status(new_instance_id, num_shards, status); - int ret = store->getRados()->bucket_set_reshard(bucket_info, instance_entry); + int ret = store->getRados()->bucket_set_reshard(dpp, bucket_info, instance_entry); if (ret < 0) { - ldout(store->ctx(), 0) << "RGWReshard::" << __func__ << " ERROR: error setting bucket resharding flag on bucket index: " + ldpp_dout(dpp, 0) << "RGWReshard::" << __func__ << " ERROR: error setting bucket resharding flag on 
bucket index: " << cpp_strerror(-ret) << dendl; return ret; } @@ -283,21 +284,22 @@ int RGWBucketReshard::set_resharding_status(rgw::sal::RadosStore* store, } // reshard lock assumes lock is held -int RGWBucketReshard::clear_resharding(rgw::sal::RadosStore* store, +int RGWBucketReshard::clear_resharding(const DoutPrefixProvider *dpp, + rgw::sal::RadosStore* store, const RGWBucketInfo& bucket_info) { - int ret = clear_index_shard_reshard_status(store, bucket_info); + int ret = clear_index_shard_reshard_status(dpp, store, bucket_info); if (ret < 0) { - ldout(store->ctx(), 0) << "RGWBucketReshard::" << __func__ << + ldpp_dout(dpp, 0) << "RGWBucketReshard::" << __func__ << " ERROR: error clearing reshard status from index shard " << cpp_strerror(-ret) << dendl; return ret; } cls_rgw_bucket_instance_entry instance_entry; - ret = store->getRados()->bucket_set_reshard(bucket_info, instance_entry); + ret = store->getRados()->bucket_set_reshard(dpp, bucket_info, instance_entry); if (ret < 0) { - ldout(store->ctx(), 0) << "RGWReshard::" << __func__ << + ldpp_dout(dpp, 0) << "RGWReshard::" << __func__ << " ERROR: error setting bucket resharding flag on bucket index: " << cpp_strerror(-ret) << dendl; return ret; @@ -306,18 +308,19 @@ int RGWBucketReshard::clear_resharding(rgw::sal::RadosStore* store, return 0; } -int RGWBucketReshard::clear_index_shard_reshard_status(rgw::sal::RadosStore* store, +int RGWBucketReshard::clear_index_shard_reshard_status(const DoutPrefixProvider *dpp, + rgw::sal::RadosStore* store, const RGWBucketInfo& bucket_info) { uint32_t num_shards = bucket_info.layout.current_index.layout.normal.num_shards; if (num_shards < std::numeric_limits::max()) { - int ret = set_resharding_status(store, bucket_info, + int ret = set_resharding_status(dpp, store, bucket_info, bucket_info.bucket.bucket_id, (num_shards < 1 ? 
1 : num_shards), cls_rgw_reshard_status::NOT_RESHARDING); if (ret < 0) { - ldout(store->ctx(), 0) << "RGWBucketReshard::" << __func__ << + ldpp_dout(dpp, 0) << "RGWBucketReshard::" << __func__ << " ERROR: error clearing reshard status from index shard " << cpp_strerror(-ret) << dendl; return ret; @@ -344,7 +347,7 @@ static int create_new_bucket_instance(rgw::sal::RadosStore* store, new_bucket_info.new_bucket_instance_id.clear(); new_bucket_info.reshard_status = cls_rgw_reshard_status::NOT_RESHARDING; - int ret = store->svc()->bi->init_index(new_bucket_info); + int ret = store->svc()->bi->init_index(dpp, new_bucket_info); if (ret < 0) { cerr << "ERROR: failed to init new bucket indexes: " << cpp_strerror(-ret) << std::endl; return ret; @@ -367,14 +370,14 @@ int RGWBucketReshard::create_new_bucket_instance(int new_num_shards, bucket_info, bucket_attrs, new_bucket_info, dpp); } -int RGWBucketReshard::cancel() +int RGWBucketReshard::cancel(const DoutPrefixProvider *dpp) { int ret = reshard_lock.lock(); if (ret < 0) { return ret; } - ret = clear_resharding(); + ret = clear_resharding(dpp); reshard_lock.unlock(); return ret; @@ -417,9 +420,9 @@ public: if (in_progress) { // resharding must not have ended correctly, clean up int ret = - RGWBucketReshard::clear_index_shard_reshard_status(store, bucket_info); + RGWBucketReshard::clear_index_shard_reshard_status(dpp, store, bucket_info); if (ret < 0) { - lderr(store->ctx()) << "Error: " << __func__ << + ldpp_dout(dpp, -1) << "Error: " << __func__ << " clear_index_shard_status returned " << ret << dendl; } bucket_info.new_bucket_instance_id.clear(); @@ -630,7 +633,7 @@ int RGWBucketReshard::do_reshard(int num_shards, } int ret = store->getRados()->get_target_shard_id(new_bucket_info.layout.current_index.layout.normal, obj.get_hash_object(), &target_shard_id); if (ret < 0) { - lderr(store->ctx()) << "ERROR: get_target_shard_id() returned ret=" << ret << dendl; + ldpp_dout(dpp, -1) << "ERROR: get_target_shard_id() returned 
ret=" << ret << dendl; return ret; } @@ -654,7 +657,7 @@ int RGWBucketReshard::do_reshard(int num_shards, } ret = reshard_lock.renew(now); if (ret < 0) { - lderr(store->ctx()) << "Error renewing bucket lock: " << ret << dendl; + ldpp_dout(dpp, -1) << "Error renewing bucket lock: " << ret << dendl; return ret; } } @@ -677,7 +680,7 @@ int RGWBucketReshard::do_reshard(int num_shards, ret = target_shards_mgr.finish(); if (ret < 0) { - lderr(store->ctx()) << "ERROR: failed to reshard" << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to reshard" << dendl; return -EIO; } @@ -697,9 +700,9 @@ int RGWBucketReshard::do_reshard(int num_shards, // NB: some error clean-up is done by ~BucketInfoReshardUpdate } // RGWBucketReshard::do_reshard -int RGWBucketReshard::get_status(list *status) +int RGWBucketReshard::get_status(const DoutPrefixProvider *dpp, list *status) { - return store->svc()->bi_rados->get_reshard_status(bucket_info, status); + return store->svc()->bi_rados->get_reshard_status(dpp, bucket_info, status); } @@ -721,7 +724,7 @@ int RGWBucketReshard::execute(int num_shards, int max_op_entries, } if (reshard_log) { - ret = reshard_log->update(bucket_info, new_bucket_info); + ret = reshard_log->update(dpp, bucket_info, new_bucket_info); if (ret < 0) { goto error_out; } @@ -729,7 +732,7 @@ int RGWBucketReshard::execute(int num_shards, int max_op_entries, // set resharding status of current bucket_info & shards with // information about planned resharding - ret = set_resharding_status(new_bucket_info.bucket.bucket_id, + ret = set_resharding_status(dpp, new_bucket_info.bucket.bucket_id, num_shards, cls_rgw_reshard_status::IN_PROGRESS); if (ret < 0) { goto error_out; @@ -752,9 +755,9 @@ int RGWBucketReshard::execute(int num_shards, int max_op_entries, // best effort and don't report out an error; the lock isn't needed // at this point since all we're using a best effor to to remove old // shard objects - ret = store->svc()->bi->clean_index(bucket_info); + ret = 
store->svc()->bi->clean_index(dpp, bucket_info); if (ret < 0) { - lderr(store->ctx()) << "Error: " << __func__ << + ldpp_dout(dpp, -1) << "Error: " << __func__ << " failed to clean up old shards; " << "RGWRados::clean_bucket_index returned " << ret << dendl; } @@ -782,9 +785,9 @@ error_out: // since the real problem is the issue that led to this error code // path, we won't touch ret and instead use another variable to // temporarily error codes - int ret2 = store->svc()->bi->clean_index(new_bucket_info); + int ret2 = store->svc()->bi->clean_index(dpp, new_bucket_info); if (ret2 < 0) { - lderr(store->ctx()) << "Error: " << __func__ << + ldpp_dout(dpp, -1) << "Error: " << __func__ << " failed to clean up shards from failed incomplete resharding; " << "RGWRados::clean_bucket_index returned " << ret2 << dendl; } @@ -830,10 +833,10 @@ void RGWReshard::get_bucket_logshard_oid(const string& tenant, const string& buc get_logshard_oid(int(sid), oid); } -int RGWReshard::add(cls_rgw_reshard_entry& entry) +int RGWReshard::add(const DoutPrefixProvider *dpp, cls_rgw_reshard_entry& entry) { if (!store->svc()->zone->can_reshard()) { - ldout(store->ctx(), 20) << __func__ << " Resharding is disabled" << dendl; + ldpp_dout(dpp, 20) << __func__ << " Resharding is disabled" << dendl; return 0; } @@ -844,15 +847,15 @@ int RGWReshard::add(cls_rgw_reshard_entry& entry) librados::ObjectWriteOperation op; cls_rgw_reshard_add(op, entry); - int ret = rgw_rados_operate(store->getRados()->reshard_pool_ctx, logshard_oid, &op, null_yield); + int ret = rgw_rados_operate(dpp, store->getRados()->reshard_pool_ctx, logshard_oid, &op, null_yield); if (ret < 0) { - lderr(store->ctx()) << "ERROR: failed to add entry to reshard log, oid=" << logshard_oid << " tenant=" << entry.tenant << " bucket=" << entry.bucket_name << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to add entry to reshard log, oid=" << logshard_oid << " tenant=" << entry.tenant << " bucket=" << entry.bucket_name << dendl; return ret; } 
return 0; } -int RGWReshard::update(const RGWBucketInfo& bucket_info, const RGWBucketInfo& new_bucket_info) +int RGWReshard::update(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const RGWBucketInfo& new_bucket_info) { cls_rgw_reshard_entry entry; entry.bucket_name = bucket_info.bucket.name; @@ -866,9 +869,9 @@ int RGWReshard::update(const RGWBucketInfo& bucket_info, const RGWBucketInfo& ne entry.new_instance_id = new_bucket_info.bucket.name + ":" + new_bucket_info.bucket.bucket_id; - ret = add(entry); + ret = add(dpp, entry); if (ret < 0) { - ldout(store->ctx(), 0) << __func__ << ":Error in updating entry bucket " << entry.bucket_name << ": " << + ldpp_dout(dpp, 0) << __func__ << ":Error in updating entry bucket " << entry.bucket_name << ": " << cpp_strerror(-ret) << dendl; } @@ -919,7 +922,7 @@ int RGWReshard::get(cls_rgw_reshard_entry& entry) return 0; } -int RGWReshard::remove(cls_rgw_reshard_entry& entry) +int RGWReshard::remove(const DoutPrefixProvider *dpp, cls_rgw_reshard_entry& entry) { string logshard_oid; @@ -928,9 +931,9 @@ int RGWReshard::remove(cls_rgw_reshard_entry& entry) librados::ObjectWriteOperation op; cls_rgw_reshard_remove(op, entry); - int ret = rgw_rados_operate(store->getRados()->reshard_pool_ctx, logshard_oid, &op, null_yield); + int ret = rgw_rados_operate(dpp, store->getRados()->reshard_pool_ctx, logshard_oid, &op, null_yield); if (ret < 0) { - lderr(store->ctx()) << "ERROR: failed to remove entry from reshard log, oid=" << logshard_oid << " tenant=" << entry.tenant << " bucket=" << entry.bucket_name << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to remove entry from reshard log, oid=" << logshard_oid << " tenant=" << entry.tenant << " bucket=" << entry.bucket_name << dendl; return ret; } @@ -1057,7 +1060,7 @@ int RGWReshard::process_single_logshard(int logshard_num, const DoutPrefixProvid ": removing reshard queue entry for a resharded or non-existent bucket" << entry.bucket_name << dendl; - ret = remove(entry); + 
ret = remove(dpp, entry); if (ret < 0) { ldpp_dout(dpp, 0) << __func__ << ": Error removing non-existent bucket " << @@ -1084,7 +1087,7 @@ int RGWReshard::process_single_logshard(int logshard_num, const DoutPrefixProvid " removing reshard queue entry for bucket " << entry.bucket_name << dendl; - ret = remove(entry); + ret = remove(dpp, entry); if (ret < 0) { ldpp_dout(dpp, 0) << __func__ << ": Error removing bucket " << entry.bucket_name << " from resharding queue: " << diff --git a/src/rgw/rgw_reshard.h b/src/rgw/rgw_reshard.h index 3c4a984b417..23970c93ea2 100644 --- a/src/rgw/rgw_reshard.h +++ b/src/rgw/rgw_reshard.h @@ -107,27 +107,29 @@ public: bool verbose = false, ostream *out = nullptr, Formatter *formatter = nullptr, RGWReshard *reshard_log = nullptr); - int get_status(std::list *status); - int cancel(); - static int clear_resharding(rgw::sal::RadosStore* store, + int get_status(const DoutPrefixProvider *dpp, std::list *status); + int cancel(const DoutPrefixProvider *dpp); + static int clear_resharding(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, const RGWBucketInfo& bucket_info); - int clear_resharding() { - return clear_resharding(store, bucket_info); + int clear_resharding(const DoutPrefixProvider *dpp) { + return clear_resharding(dpp, store, bucket_info); } - static int clear_index_shard_reshard_status(rgw::sal::RadosStore* store, + static int clear_index_shard_reshard_status(const DoutPrefixProvider *dpp, + rgw::sal::RadosStore* store, const RGWBucketInfo& bucket_info); - int clear_index_shard_reshard_status() { - return clear_index_shard_reshard_status(store, bucket_info); + int clear_index_shard_reshard_status(const DoutPrefixProvider *dpp) { + return clear_index_shard_reshard_status(dpp, store, bucket_info); } - static int set_resharding_status(rgw::sal::RadosStore* store, + static int set_resharding_status(const DoutPrefixProvider *dpp, + rgw::sal::RadosStore* store, const RGWBucketInfo& bucket_info, const string& new_instance_id, 
int32_t num_shards, cls_rgw_reshard_status status); - int set_resharding_status(const string& new_instance_id, + int set_resharding_status(const DoutPrefixProvider *dpp, const string& new_instance_id, int32_t num_shards, cls_rgw_reshard_status status) { - return set_resharding_status(store, bucket_info, + return set_resharding_status(dpp, store, bucket_info, new_instance_id, num_shards, status); } @@ -233,10 +235,10 @@ protected: public: RGWReshard(rgw::sal::RadosStore* _store, bool _verbose = false, ostream *_out = nullptr, Formatter *_formatter = nullptr); - int add(cls_rgw_reshard_entry& entry); - int update(const RGWBucketInfo& bucket_info, const RGWBucketInfo& new_bucket_info); + int add(const DoutPrefixProvider *dpp, cls_rgw_reshard_entry& entry); + int update(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const RGWBucketInfo& new_bucket_info); int get(cls_rgw_reshard_entry& entry); - int remove(cls_rgw_reshard_entry& entry); + int remove(const DoutPrefixProvider *dpp, cls_rgw_reshard_entry& entry); int list(int logshard_num, string& marker, uint32_t max, std::list& entries, bool *is_truncated); int clear_bucket_resharding(const string& bucket_instance_oid, cls_rgw_reshard_entry& entry); diff --git a/src/rgw/rgw_rest.cc b/src/rgw/rgw_rest.cc index 2f579f4d2a3..e807c295ac4 100644 --- a/src/rgw/rgw_rest.cc +++ b/src/rgw/rgw_rest.cc @@ -1857,7 +1857,7 @@ int RGWHandler_REST::init_permissions(RGWOp* op, optional_yield y) } } catch (const std::exception& e) { - lderr(s->cct) << "Error reading IAM User Policy: " << e.what() << dendl; + ldpp_dout(op, -1) << "Error reading IAM User Policy: " << e.what() << dendl; } } rgw_build_iam_environment(store, s); diff --git a/src/rgw/rgw_rest_bucket.cc b/src/rgw/rgw_rest_bucket.cc index c52e6a7647a..8b2e68586c0 100644 --- a/src/rgw/rgw_rest_bucket.cc +++ b/src/rgw/rgw_rest_bucket.cc @@ -150,7 +150,7 @@ void RGWOp_Bucket_Link::execute(optional_yield y) op_state.set_new_bucket_name(new_bucket_name); bufferlist 
data; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -188,7 +188,7 @@ void RGWOp_Bucket_Unlink::execute(optional_yield y) op_state.set_bucket_name(bucket); bufferlist data; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; diff --git a/src/rgw/rgw_rest_client.cc b/src/rgw/rgw_rest_client.cc index 5e33204b28a..d7ae1cc3279 100644 --- a/src/rgw/rgw_rest_client.cc +++ b/src/rgw/rgw_rest_client.cc @@ -206,7 +206,7 @@ static int sign_request_v2(const DoutPrefixProvider *dpp, RGWAccessKey& key, } string canonical_header; - if (!rgw_create_s3_canonical_header(info, NULL, canonical_header, false)) { + if (!rgw_create_s3_canonical_header(dpp, info, NULL, canonical_header, false)) { ldpp_dout(dpp, 0) << "failed to create canonical s3 header" << dendl; return -EINVAL; } @@ -346,7 +346,7 @@ static void scope_from_api_name(CephContext *cct, } } -int RGWRESTSimpleRequest::forward_request(RGWAccessKey& key, req_info& info, size_t max_response, bufferlist *inbl, bufferlist *outbl, optional_yield y) +int RGWRESTSimpleRequest::forward_request(const DoutPrefixProvider *dpp, RGWAccessKey& key, req_info& info, size_t max_response, bufferlist *inbl, bufferlist *outbl, optional_yield y) { string date_str; @@ -386,9 +386,9 @@ int RGWRESTSimpleRequest::forward_request(RGWAccessKey& key, req_info& info, siz new_env.set("HTTP_X_AMZ_CONTENT_SHA256", maybe_payload_hash); } - int ret = sign_request(this, key, region, service, new_env, new_info, nullptr); + int ret = sign_request(dpp, key, 
region, service, new_env, new_info, nullptr); if (ret < 0) { - ldout(cct, 0) << "ERROR: failed to sign request" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to sign request" << dendl; return ret; } @@ -599,7 +599,7 @@ void RGWRESTGenerateHTTPHeaders::set_extra_headers(const map& ex } } -int RGWRESTGenerateHTTPHeaders::set_obj_attrs(map& rgw_attrs) +int RGWRESTGenerateHTTPHeaders::set_obj_attrs(const DoutPrefixProvider *dpp, map& rgw_attrs) { map new_attrs; @@ -616,9 +616,9 @@ int RGWRESTGenerateHTTPHeaders::set_obj_attrs(map& rgw_attrs } RGWAccessControlPolicy policy; - int ret = rgw_policy_from_attrset(cct, rgw_attrs, &policy); + int ret = rgw_policy_from_attrset(dpp, cct, rgw_attrs, &policy); if (ret < 0) { - ldout(cct, 0) << "ERROR: couldn't get policy ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: couldn't get policy ret=" << ret << dendl; return ret; } @@ -660,11 +660,11 @@ void RGWRESTGenerateHTTPHeaders::set_policy(RGWAccessControlPolicy& policy) add_grants_headers(grants_by_type, *new_env, new_info->x_meta_map); } -int RGWRESTGenerateHTTPHeaders::sign(RGWAccessKey& key, const bufferlist *opt_content) +int RGWRESTGenerateHTTPHeaders::sign(const DoutPrefixProvider *dpp, RGWAccessKey& key, const bufferlist *opt_content) { - int ret = sign_request(this, key, region, service, *new_env, *new_info, opt_content); + int ret = sign_request(dpp, key, region, service, *new_env, *new_info, opt_content); if (ret < 0) { - ldout(cct, 0) << "ERROR: failed to sign request" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to sign request" << dendl; return ret; } @@ -701,25 +701,25 @@ void RGWRESTStreamS3PutObj::send_init(rgw::sal::Object* obj) url = headers_gen.get_url(); } -void RGWRESTStreamS3PutObj::send_ready(RGWAccessKey& key, map& rgw_attrs) +void RGWRESTStreamS3PutObj::send_ready(const DoutPrefixProvider *dpp, RGWAccessKey& key, map& rgw_attrs) { - headers_gen.set_obj_attrs(rgw_attrs); + headers_gen.set_obj_attrs(dpp, rgw_attrs); - send_ready(key); + 
send_ready(dpp, key); } -void RGWRESTStreamS3PutObj::send_ready(RGWAccessKey& key, const map& http_attrs, +void RGWRESTStreamS3PutObj::send_ready(const DoutPrefixProvider *dpp, RGWAccessKey& key, const map& http_attrs, RGWAccessControlPolicy& policy) { headers_gen.set_http_attrs(http_attrs); headers_gen.set_policy(policy); - send_ready(key); + send_ready(dpp, key); } -void RGWRESTStreamS3PutObj::send_ready(RGWAccessKey& key) +void RGWRESTStreamS3PutObj::send_ready(const DoutPrefixProvider *dpp, RGWAccessKey& key) { - headers_gen.sign(key, nullptr); + headers_gen.sign(dpp, key, nullptr); for (const auto& kv: new_env.get_map()) { headers.emplace_back(kv); @@ -728,10 +728,10 @@ void RGWRESTStreamS3PutObj::send_ready(RGWAccessKey& key) out_cb = new RGWRESTStreamOutCB(this); } -void RGWRESTStreamS3PutObj::put_obj_init(RGWAccessKey& key, rgw::sal::Object* obj, map& attrs) +void RGWRESTStreamS3PutObj::put_obj_init(const DoutPrefixProvider *dpp, RGWAccessKey& key, rgw::sal::Object* obj, map& attrs) { send_init(obj); - send_ready(key, attrs); + send_ready(dpp, key, attrs); } void set_str_from_headers(map& out_headers, const string& header_name, string& str) @@ -783,33 +783,33 @@ static void send_prepare_convert(const rgw_obj& obj, string *resource) *resource = urlsafe_bucket + "/" + urlsafe_object; } -int RGWRESTStreamRWRequest::send_request(RGWAccessKey& key, map& extra_headers, const rgw_obj& obj, RGWHTTPManager *mgr) +int RGWRESTStreamRWRequest::send_request(const DoutPrefixProvider *dpp, RGWAccessKey& key, map& extra_headers, const rgw_obj& obj, RGWHTTPManager *mgr) { string resource; send_prepare_convert(obj, &resource); - return send_request(&key, extra_headers, resource, mgr); + return send_request(dpp, &key, extra_headers, resource, mgr); } -int RGWRESTStreamRWRequest::send_prepare(RGWAccessKey& key, map& extra_headers, const rgw_obj& obj) +int RGWRESTStreamRWRequest::send_prepare(const DoutPrefixProvider *dpp, RGWAccessKey& key, map& extra_headers, const rgw_obj& 
obj) { string resource; send_prepare_convert(obj, &resource); - return do_send_prepare(&key, extra_headers, resource); + return do_send_prepare(dpp, &key, extra_headers, resource); } -int RGWRESTStreamRWRequest::send_prepare(RGWAccessKey *key, map& extra_headers, const string& resource, +int RGWRESTStreamRWRequest::send_prepare(const DoutPrefixProvider *dpp, RGWAccessKey *key, map& extra_headers, const string& resource, bufferlist *send_data) { string new_resource; //do not encode slash url_encode(resource, new_resource, false); - return do_send_prepare(key, extra_headers, new_resource, send_data); + return do_send_prepare(dpp, key, extra_headers, new_resource, send_data); } -int RGWRESTStreamRWRequest::do_send_prepare(RGWAccessKey *key, map& extra_headers, const string& resource, +int RGWRESTStreamRWRequest::do_send_prepare(const DoutPrefixProvider *dpp, RGWAccessKey *key, map& extra_headers, const string& resource, bufferlist *send_data) { string new_url = url; @@ -865,10 +865,10 @@ int RGWRESTStreamRWRequest::do_send_prepare(RGWAccessKey *key, map& extra_headers, const string& resource, +int RGWRESTStreamRWRequest::send_request(const DoutPrefixProvider *dpp, RGWAccessKey *key, map& extra_headers, const string& resource, RGWHTTPManager *mgr, bufferlist *send_data) { - int ret = send_prepare(key, extra_headers, resource, send_data); + int ret = send_prepare(dpp, key, extra_headers, resource, send_data); if (ret < 0) { return ret; } @@ -880,7 +880,7 @@ int RGWRESTStreamRWRequest::send_request(RGWAccessKey *key, map& int RGWRESTStreamRWRequest::send(RGWHTTPManager *mgr) { if (!headers_gen) { - ldout(cct, 0) << "ERROR: " << __func__ << "(): send_prepare() was not called: likey a bug!" << dendl; + ldpp_dout(this, 0) << "ERROR: " << __func__ << "(): send_prepare() was not called: likely a bug!" 
<< dendl; return -EINVAL; } @@ -891,9 +891,9 @@ int RGWRESTStreamRWRequest::send(RGWHTTPManager *mgr) } if (sign_key) { - int r = headers_gen->sign(*sign_key, outblp); + int r = headers_gen->sign(this, *sign_key, outblp); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to sign request" << dendl; + ldpp_dout(this, 0) << "ERROR: failed to sign request" << dendl; return r; } } diff --git a/src/rgw/rgw_rest_client.h b/src/rgw/rgw_rest_client.h index 3e5ffc24b09..8412b2e283d 100644 --- a/src/rgw/rgw_rest_client.h +++ b/src/rgw/rgw_rest_client.h @@ -65,7 +65,7 @@ public: param_vec_t *_headers, param_vec_t *_params, std::optional _api_name) : RGWHTTPSimpleRequest(_cct, _method, _url, _headers, _params), api_name(_api_name) {} - int forward_request(RGWAccessKey& key, req_info& info, size_t max_response, bufferlist *inbl, bufferlist *outbl, optional_yield y); + int forward_request(const DoutPrefixProvider *dpp, RGWAccessKey& key, req_info& info, size_t max_response, bufferlist *inbl, bufferlist *outbl, optional_yield y); }; class RGWWriteDrainCB { @@ -92,10 +92,10 @@ public: const string& resource, const param_vec_t& params, std::optional api_name); void set_extra_headers(const map& extra_headers); - int set_obj_attrs(map& rgw_attrs); + int set_obj_attrs(const DoutPrefixProvider *dpp, map& rgw_attrs); void set_http_attrs(const map& http_attrs); void set_policy(RGWAccessControlPolicy& policy); - int sign(RGWAccessKey& key, const bufferlist *opt_content); + int sign(const DoutPrefixProvider *dpp, RGWAccessKey& key, const bufferlist *opt_content); const string& get_url() { return url; } }; @@ -193,17 +193,17 @@ public: } virtual ~RGWRESTStreamRWRequest() override {} - int send_prepare(RGWAccessKey *key, map& extra_headers, const string& resource, bufferlist *send_data = nullptr /* optional input data */); - int send_prepare(RGWAccessKey& key, map& extra_headers, const rgw_obj& obj); + int send_prepare(const DoutPrefixProvider *dpp, RGWAccessKey *key, map& extra_headers, const 
string& resource, bufferlist *send_data = nullptr /* optional input data */); + int send_prepare(const DoutPrefixProvider *dpp, RGWAccessKey& key, map& extra_headers, const rgw_obj& obj); int send(RGWHTTPManager *mgr); - int send_request(RGWAccessKey& key, map& extra_headers, const rgw_obj& obj, RGWHTTPManager *mgr); - int send_request(RGWAccessKey *key, map& extra_headers, const string& resource, RGWHTTPManager *mgr, bufferlist *send_data = nullptr /* optional input data */); + int send_request(const DoutPrefixProvider *dpp, RGWAccessKey& key, map& extra_headers, const rgw_obj& obj, RGWHTTPManager *mgr); + int send_request(const DoutPrefixProvider *dpp, RGWAccessKey *key, map& extra_headers, const string& resource, RGWHTTPManager *mgr, bufferlist *send_data = nullptr /* optional input data */); void add_params(param_vec_t *params); private: - int do_send_prepare(RGWAccessKey *key, map& extra_headers, const string& resource, bufferlist *send_data = nullptr /* optional input data */); + int do_send_prepare(const DoutPrefixProvider *dpp, RGWAccessKey *key, map& extra_headers, const string& resource, bufferlist *send_data = nullptr /* optional input data */); }; class RGWRESTStreamReadRequest : public RGWRESTStreamRWRequest { @@ -235,12 +235,12 @@ public: ~RGWRESTStreamS3PutObj() override; void send_init(rgw::sal::Object* obj); - void send_ready(RGWAccessKey& key, map& rgw_attrs); - void send_ready(RGWAccessKey& key, const map& http_attrs, + void send_ready(const DoutPrefixProvider *dpp, RGWAccessKey& key, map& rgw_attrs); + void send_ready(const DoutPrefixProvider *dpp, RGWAccessKey& key, const map& http_attrs, RGWAccessControlPolicy& policy); - void send_ready(RGWAccessKey& key); + void send_ready(const DoutPrefixProvider *dpp, RGWAccessKey& key); - void put_obj_init(RGWAccessKey& key, rgw::sal::Object* obj, map& attrs); + void put_obj_init(const DoutPrefixProvider *dpp, RGWAccessKey& key, rgw::sal::Object* obj, map& attrs); RGWGetDataCB *get_out_cb() { return 
out_cb; } }; diff --git a/src/rgw/rgw_rest_config.cc b/src/rgw/rgw_rest_config.cc index f1ff0921c96..299ed28eb6b 100644 --- a/src/rgw/rgw_rest_config.cc +++ b/src/rgw/rgw_rest_config.cc @@ -31,9 +31,9 @@ #define dout_subsys ceph_subsys_rgw void RGWOp_ZoneGroupMap_Get::execute(optional_yield y) { - op_ret = zonegroup_map.read(g_ceph_context, static_cast(store)->svc()->sysobj, y); + op_ret = zonegroup_map.read(this, g_ceph_context, static_cast(store)->svc()->sysobj, y); if (op_ret < 0) { - dout(5) << "failed to read zone_group map" << dendl; + ldpp_dout(this, 5) << "failed to read zone_group map" << dendl; } } diff --git a/src/rgw/rgw_rest_conn.cc b/src/rgw/rgw_rest_conn.cc index d81cefcacbe..04293d64c79 100644 --- a/src/rgw/rgw_rest_conn.cc +++ b/src/rgw/rgw_rest_conn.cc @@ -127,7 +127,7 @@ void RGWRESTConn::populate_params(param_vec_t& params, const rgw_user *uid, cons populate_zonegroup(params, zonegroup); } -int RGWRESTConn::forward(const rgw_user& uid, req_info& info, obj_version *objv, size_t max_response, bufferlist *inbl, bufferlist *outbl, optional_yield y) +int RGWRESTConn::forward(const DoutPrefixProvider *dpp, const rgw_user& uid, req_info& info, obj_version *objv, size_t max_response, bufferlist *inbl, bufferlist *outbl, optional_yield y) { string url; int ret = get_url(url); @@ -142,7 +142,7 @@ int RGWRESTConn::forward(const rgw_user& uid, req_info& info, obj_version *objv, params.push_back(param_pair_t(RGW_SYS_PARAM_PREFIX "ver", buf)); } RGWRESTSimpleRequest req(cct, info.method, url, NULL, ¶ms, api_name); - return req.forward_request(key, info, max_response, inbl, outbl, y); + return req.forward_request(dpp, key, info, max_response, inbl, outbl, y); } int RGWRESTConn::put_obj_send_init(rgw::sal::Object* obj, const rgw_http_param_pair *extra_params, RGWRESTStreamS3PutObj **req) @@ -166,7 +166,7 @@ int RGWRESTConn::put_obj_send_init(rgw::sal::Object* obj, const rgw_http_param_p return 0; } -int RGWRESTConn::put_obj_async_init(const rgw_user& uid, 
rgw::sal::Object* obj, +int RGWRESTConn::put_obj_async_init(const DoutPrefixProvider *dpp, const rgw_user& uid, rgw::sal::Object* obj, map& attrs, RGWRESTStreamS3PutObj **req) { @@ -178,7 +178,7 @@ int RGWRESTConn::put_obj_async_init(const rgw_user& uid, rgw::sal::Object* obj, param_vec_t params; populate_params(params, &uid, self_zone_group); RGWRESTStreamS3PutObj *wr = new RGWRESTStreamS3PutObj(cct, "PUT", url, NULL, ¶ms, api_name, host_style); - wr->put_obj_init(key, obj, attrs); + wr->put_obj_init(dpp, key, obj, attrs); *req = wr; return 0; } @@ -216,7 +216,7 @@ static void set_header(T val, map& headers, const string& header } -int RGWRESTConn::get_obj(const rgw_user& uid, req_info *info /* optional */, const rgw::sal::Object* obj, +int RGWRESTConn::get_obj(const DoutPrefixProvider *dpp, const rgw_user& uid, req_info *info /* optional */, const rgw::sal::Object* obj, const real_time *mod_ptr, const real_time *unmod_ptr, uint32_t mod_zone_id, uint64_t mod_pg_ver, bool prepend_metadata, bool get_op, bool rgwx_stat, @@ -234,10 +234,10 @@ int RGWRESTConn::get_obj(const rgw_user& uid, req_info *info /* optional */, con params.sync_manifest = sync_manifest; params.skip_decrypt = skip_decrypt; params.cb = cb; - return get_obj(obj, params, send, req); + return get_obj(dpp, obj, params, send, req); } -int RGWRESTConn::get_obj(const rgw::sal::Object* obj, const get_obj_params& in_params, bool send, RGWRESTStreamRWRequest **req) +int RGWRESTConn::get_obj(const DoutPrefixProvider *dpp, const rgw::sal::Object* obj, const get_obj_params& in_params, bool send, RGWRESTStreamRWRequest **req) { string url; int ret = get_url(url); @@ -300,7 +300,7 @@ int RGWRESTConn::get_obj(const rgw::sal::Object* obj, const get_obj_params& in_p set_header(buf, extra_headers, "RANGE"); } - int r = (*req)->send_prepare(key, extra_headers, obj->get_obj()); + int r = (*req)->send_prepare(dpp, key, extra_headers, obj->get_obj()); if (r < 0) { goto done_err; } @@ -334,7 +334,8 @@ int 
RGWRESTConn::complete_request(RGWRESTStreamRWRequest *req, return ret; } -int RGWRESTConn::get_resource(const string& resource, +int RGWRESTConn::get_resource(const DoutPrefixProvider *dpp, + const string& resource, param_vec_t *extra_params, map *extra_headers, bufferlist& bl, @@ -364,9 +365,9 @@ int RGWRESTConn::get_resource(const string& resource, headers.insert(extra_headers->begin(), extra_headers->end()); } - ret = req.send_request(&key, headers, resource, mgr, send_data); + ret = req.send_request(dpp, &key, headers, resource, mgr, send_data); if (ret < 0) { - ldout(cct, 5) << __func__ << ": send_request() resource=" << resource << " returned ret=" << ret << dendl; + ldpp_dout(dpp, 5) << __func__ << ": send_request() resource=" << resource << " returned ret=" << ret << dendl; return ret; } @@ -407,22 +408,22 @@ void RGWRESTReadResource::init_common(param_vec_t *extra_headers) req.set_params(¶ms); } -int RGWRESTReadResource::read(optional_yield y) +int RGWRESTReadResource::read(const DoutPrefixProvider *dpp, optional_yield y) { - int ret = req.send_request(&conn->get_key(), headers, resource, mgr); + int ret = req.send_request(dpp, &conn->get_key(), headers, resource, mgr); if (ret < 0) { - ldout(cct, 5) << __func__ << ": send_request() resource=" << resource << " returned ret=" << ret << dendl; + ldpp_dout(dpp, 5) << __func__ << ": send_request() resource=" << resource << " returned ret=" << ret << dendl; return ret; } return req.complete_request(y); } -int RGWRESTReadResource::aio_read() +int RGWRESTReadResource::aio_read(const DoutPrefixProvider *dpp) { - int ret = req.send_request(&conn->get_key(), headers, resource, mgr); + int ret = req.send_request(dpp, &conn->get_key(), headers, resource, mgr); if (ret < 0) { - ldout(cct, 5) << __func__ << ": send_request() resource=" << resource << " returned ret=" << ret << dendl; + ldpp_dout(dpp, 5) << __func__ << ": send_request() resource=" << resource << " returned ret=" << ret << dendl; return ret; } @@ -465,28 
+466,28 @@ void RGWRESTSendResource::init_common(param_vec_t *extra_headers) req.set_params(¶ms); } -int RGWRESTSendResource::send(bufferlist& outbl, optional_yield y) +int RGWRESTSendResource::send(const DoutPrefixProvider *dpp, bufferlist& outbl, optional_yield y) { req.set_send_length(outbl.length()); req.set_outbl(outbl); - int ret = req.send_request(&conn->get_key(), headers, resource, mgr); + int ret = req.send_request(dpp, &conn->get_key(), headers, resource, mgr); if (ret < 0) { - ldout(cct, 5) << __func__ << ": send_request() resource=" << resource << " returned ret=" << ret << dendl; + ldpp_dout(dpp, 5) << __func__ << ": send_request() resource=" << resource << " returned ret=" << ret << dendl; return ret; } return req.complete_request(y); } -int RGWRESTSendResource::aio_send(bufferlist& outbl) +int RGWRESTSendResource::aio_send(const DoutPrefixProvider *dpp, bufferlist& outbl) { req.set_send_length(outbl.length()); req.set_outbl(outbl); - int ret = req.send_request(&conn->get_key(), headers, resource, mgr); + int ret = req.send_request(dpp, &conn->get_key(), headers, resource, mgr); if (ret < 0) { - ldout(cct, 5) << __func__ << ": send_request() resource=" << resource << " returned ret=" << ret << dendl; + ldpp_dout(dpp, 5) << __func__ << ": send_request() resource=" << resource << " returned ret=" << ret << dendl; return ret; } diff --git a/src/rgw/rgw_rest_conn.h b/src/rgw/rgw_rest_conn.h index 4fbd4198bb4..fefa56ca026 100644 --- a/src/rgw/rgw_rest_conn.h +++ b/src/rgw/rgw_rest_conn.h @@ -141,12 +141,12 @@ public: virtual void populate_params(param_vec_t& params, const rgw_user *uid, const string& zonegroup); /* sync request */ - int forward(const rgw_user& uid, req_info& info, obj_version *objv, size_t max_response, bufferlist *inbl, bufferlist *outbl, optional_yield y); + int forward(const DoutPrefixProvider *dpp, const rgw_user& uid, req_info& info, obj_version *objv, size_t max_response, bufferlist *inbl, bufferlist *outbl, optional_yield y); /* 
async requests */ int put_obj_send_init(rgw::sal::Object* obj, const rgw_http_param_pair *extra_params, RGWRESTStreamS3PutObj **req); - int put_obj_async_init(const rgw_user& uid, rgw::sal::Object* obj, + int put_obj_async_init(const DoutPrefixProvider *dpp, const rgw_user& uid, rgw::sal::Object* obj, map& attrs, RGWRESTStreamS3PutObj **req); int complete_request(RGWRESTStreamS3PutObj *req, string& etag, ceph::real_time *mtime, optional_yield y); @@ -176,9 +176,9 @@ public: uint64_t range_end{0}; }; - int get_obj(const rgw::sal::Object* obj, const get_obj_params& params, bool send, RGWRESTStreamRWRequest **req); + int get_obj(const DoutPrefixProvider *dpp, const rgw::sal::Object* obj, const get_obj_params& params, bool send, RGWRESTStreamRWRequest **req); - int get_obj(const rgw_user& uid, req_info *info /* optional */, const rgw::sal::Object* obj, + int get_obj(const DoutPrefixProvider *dpp, const rgw_user& uid, req_info *info /* optional */, const rgw::sal::Object* obj, const ceph::real_time *mod_ptr, const ceph::real_time *unmod_ptr, uint32_t mod_zone_id, uint64_t mod_pg_ver, bool prepend_metadata, bool get_op, bool rgwx_stat, bool sync_manifest, @@ -191,7 +191,8 @@ public: map *pheaders, optional_yield y); - int get_resource(const string& resource, + int get_resource(const DoutPrefixProvider *dpp, + const string& resource, param_vec_t *extra_params, map* extra_headers, bufferlist& bl, @@ -200,13 +201,13 @@ public: optional_yield y); template - int get_json_resource(const string& resource, param_vec_t *params, + int get_json_resource(const DoutPrefixProvider *dpp, const string& resource, param_vec_t *params, bufferlist *in_data, optional_yield y, T& t); template - int get_json_resource(const string& resource, param_vec_t *params, + int get_json_resource(const DoutPrefixProvider *dpp, const string& resource, param_vec_t *params, optional_yield y, T& t); template - int get_json_resource(const string& resource, const rgw_http_param_pair *pp, + int 
get_json_resource(const DoutPrefixProvider *dpp, const string& resource, const rgw_http_param_pair *pp, optional_yield y, T& t); private: @@ -250,11 +251,11 @@ public: template -int RGWRESTConn::get_json_resource(const string& resource, param_vec_t *params, +int RGWRESTConn::get_json_resource(const DoutPrefixProvider *dpp, const string& resource, param_vec_t *params, bufferlist *in_data, optional_yield y, T& t) { bufferlist bl; - int ret = get_resource(resource, params, nullptr, bl, in_data, nullptr, y); + int ret = get_resource(dpp, resource, params, nullptr, bl, in_data, nullptr, y); if (ret < 0) { return ret; } @@ -268,18 +269,18 @@ int RGWRESTConn::get_json_resource(const string& resource, param_vec_t *params, } template -int RGWRESTConn::get_json_resource(const string& resource, param_vec_t *params, +int RGWRESTConn::get_json_resource(const DoutPrefixProvider *dpp, const string& resource, param_vec_t *params, optional_yield y, T& t) { - return get_json_resource(resource, params, nullptr, y, t); + return get_json_resource(dpp, resource, params, nullptr, y, t); } template -int RGWRESTConn::get_json_resource(const string& resource, const rgw_http_param_pair *pp, +int RGWRESTConn::get_json_resource(const DoutPrefixProvider *dpp, const string& resource, const rgw_http_param_pair *pp, optional_yield y, T& t) { param_vec_t params = make_param_list(pp); - return get_json_resource(resource, ¶ms, y, t); + return get_json_resource(dpp, resource, ¶ms, y, t); } class RGWStreamIntoBufferlist : public RGWHTTPStreamRWRequest::ReceiveCB { @@ -335,9 +336,9 @@ public: template int decode_resource(T *dest); - int read(optional_yield y); + int read(const DoutPrefixProvider *dpp, optional_yield y); - int aio_read(); + int aio_read(const DoutPrefixProvider *dpp); string to_str() { return req.to_str(); @@ -364,7 +365,7 @@ public: int wait(T *dest, optional_yield y); template - int fetch(T *dest, optional_yield y); + int fetch(const DoutPrefixProvider *dpp, T *dest, optional_yield y); 
}; @@ -383,9 +384,9 @@ int RGWRESTReadResource::decode_resource(T *dest) } template -int RGWRESTReadResource::fetch(T *dest, optional_yield y) +int RGWRESTReadResource::fetch(const DoutPrefixProvider *dpp, T *dest, optional_yield y) { - int ret = read(y); + int ret = read(dpp, y); if (ret < 0) { return ret; } @@ -456,9 +457,9 @@ public: return req.get_io_user_info(); } - int send(bufferlist& bl, optional_yield y); + int send(const DoutPrefixProvider *dpp, bufferlist& bl, optional_yield y); - int aio_send(bufferlist& bl); + int aio_send(const DoutPrefixProvider *dpp, bufferlist& bl); string to_str() { return req.to_str(); diff --git a/src/rgw/rgw_rest_iam.cc b/src/rgw/rgw_rest_iam.cc index 49ceb58fa03..9a95cdb254f 100644 --- a/src/rgw/rgw_rest_iam.cc +++ b/src/rgw/rgw_rest_iam.cc @@ -19,7 +19,7 @@ void RGWHandler_REST_IAM::rgw_iam_parse_input() { if (post_body.size() > 0) { - ldout(s->cct, 10) << "Content of POST: " << post_body << dendl; + ldpp_dout(s, 10) << "Content of POST: " << post_body << dendl; if (post_body.find("Action") != string::npos) { boost::char_separator sep("&"); @@ -89,7 +89,7 @@ int RGWHandler_REST_IAM::init(rgw::sal::Store* store, s->dialect = "iam"; if (int ret = RGWHandler_REST_IAM::init_from_header(s, RGW_FORMAT_XML, true); ret < 0) { - ldout(s->cct, 10) << "init_from_header returned err=" << ret << dendl; + ldpp_dout(s, 10) << "init_from_header returned err=" << ret << dendl; return ret; } diff --git a/src/rgw/rgw_rest_log.cc b/src/rgw/rgw_rest_log.cc index 9c4f32b1635..c0e46e4bac4 100644 --- a/src/rgw/rgw_rest_log.cc +++ b/src/rgw/rgw_rest_log.cc @@ -48,14 +48,14 @@ void RGWOp_MDLog_List::execute(optional_yield y) { if (s->info.args.exists("start-time") || s->info.args.exists("end-time")) { - dout(5) << "start-time and end-time are no longer accepted" << dendl; + ldpp_dout(this, 5) << "start-time and end-time are no longer accepted" << dendl; op_ret = -EINVAL; return; } shard_id = (unsigned)strict_strtol(shard.c_str(), 10, &err); if 
(!err.empty()) { - dout(5) << "Error parsing shard_id " << shard << dendl; + ldpp_dout(this, 5) << "Error parsing shard_id " << shard << dendl; op_ret = -EINVAL; return; } @@ -63,7 +63,7 @@ void RGWOp_MDLog_List::execute(optional_yield y) { if (!max_entries_str.empty()) { max_entries = (unsigned)strict_strtol(max_entries_str.c_str(), 10, &err); if (!err.empty()) { - dout(5) << "Error parsing max-entries " << max_entries_str << dendl; + ldpp_dout(this, 5) << "Error parsing max-entries " << max_entries_str << dendl; op_ret = -EINVAL; return; } @@ -73,10 +73,10 @@ void RGWOp_MDLog_List::execute(optional_yield y) { } if (period.empty()) { - ldout(s->cct, 5) << "Missing period id trying to use current" << dendl; + ldpp_dout(this, 5) << "Missing period id trying to use current" << dendl; period = store->get_zone()->get_current_period_id(); if (period.empty()) { - ldout(s->cct, 5) << "Missing period id" << dendl; + ldpp_dout(this, 5) << "Missing period id" << dendl; op_ret = -EINVAL; return; } @@ -86,7 +86,7 @@ void RGWOp_MDLog_List::execute(optional_yield y) { meta_log.init_list_entries(shard_id, {}, {}, marker, &handle); - op_ret = meta_log.list_entries(handle, max_entries, entries, + op_ret = meta_log.list_entries(this, handle, max_entries, entries, &last_marker, &truncated); meta_log.complete_list_entries(handle); @@ -145,24 +145,24 @@ void RGWOp_MDLog_ShardInfo::execute(optional_yield y) { unsigned shard_id = (unsigned)strict_strtol(shard.c_str(), 10, &err); if (!err.empty()) { - dout(5) << "Error parsing shard_id " << shard << dendl; + ldpp_dout(this, 5) << "Error parsing shard_id " << shard << dendl; op_ret = -EINVAL; return; } if (period.empty()) { - ldout(s->cct, 5) << "Missing period id trying to use current" << dendl; + ldpp_dout(this, 5) << "Missing period id trying to use current" << dendl; period = store->get_zone()->get_current_period_id(); if (period.empty()) { - ldout(s->cct, 5) << "Missing period id" << dendl; + ldpp_dout(this, 5) << "Missing period id" 
<< dendl; op_ret = -EINVAL; return; } } RGWMetadataLog meta_log{s->cct, static_cast(store)->svc()->zone, static_cast(store)->svc()->cls, period}; - op_ret = meta_log.get_info(shard_id, &info); + op_ret = meta_log.get_info(this, shard_id, &info); } void RGWOp_MDLog_ShardInfo::send_response() { @@ -184,12 +184,12 @@ void RGWOp_MDLog_Delete::execute(optional_yield y) { if (s->info.args.exists("start-time") || s->info.args.exists("end-time")) { - dout(5) << "start-time and end-time are no longer accepted" << dendl; + ldpp_dout(this, 5) << "start-time and end-time are no longer accepted" << dendl; op_ret = -EINVAL; } if (s->info.args.exists("start-marker")) { - dout(5) << "start-marker is no longer accepted" << dendl; + ldpp_dout(this, 5) << "start-marker is no longer accepted" << dendl; op_ret = -EINVAL; } @@ -197,7 +197,7 @@ void RGWOp_MDLog_Delete::execute(optional_yield y) { if (!s->info.args.exists("marker")) { marker = s->info.args.get("end-marker"); } else { - dout(5) << "end-marker and marker cannot both be provided" << dendl; + ldpp_dout(this, 5) << "end-marker and marker cannot both be provided" << dendl; op_ret = -EINVAL; } } @@ -206,7 +206,7 @@ void RGWOp_MDLog_Delete::execute(optional_yield y) { shard_id = (unsigned)strict_strtol(shard.c_str(), 10, &err); if (!err.empty()) { - dout(5) << "Error parsing shard_id " << shard << dendl; + ldpp_dout(this, 5) << "Error parsing shard_id " << shard << dendl; op_ret = -EINVAL; return; } @@ -217,18 +217,18 @@ void RGWOp_MDLog_Delete::execute(optional_yield y) { } if (period.empty()) { - ldout(s->cct, 5) << "Missing period id trying to use current" << dendl; + ldpp_dout(this, 5) << "Missing period id trying to use current" << dendl; period = store->get_zone()->get_current_period_id(); if (period.empty()) { - ldout(s->cct, 5) << "Missing period id" << dendl; + ldpp_dout(this, 5) << "Missing period id" << dendl; op_ret = -EINVAL; return; } } RGWMetadataLog meta_log{s->cct, static_cast(store)->svc()->zone, 
static_cast(store)->svc()->cls, period}; - op_ret = meta_log.trim(shard_id, {}, {}, {}, marker); + op_ret = meta_log.trim(this, shard_id, {}, {}, {}, marker); } void RGWOp_MDLog_Lock::execute(optional_yield y) { @@ -244,7 +244,7 @@ void RGWOp_MDLog_Lock::execute(optional_yield y) { zone_id = s->info.args.get("zone-id"); if (period.empty()) { - ldout(s->cct, 5) << "Missing period id trying to use current" << dendl; + ldpp_dout(this, 5) << "Missing period id trying to use current" << dendl; period = store->get_zone()->get_current_period_id(); } @@ -253,7 +253,7 @@ void RGWOp_MDLog_Lock::execute(optional_yield y) { (duration_str.empty()) || locker_id.empty() || zone_id.empty()) { - dout(5) << "Error invalid parameter list" << dendl; + ldpp_dout(this, 5) << "Error invalid parameter list" << dendl; op_ret = -EINVAL; return; } @@ -261,7 +261,7 @@ void RGWOp_MDLog_Lock::execute(optional_yield y) { string err; shard_id = (unsigned)strict_strtol(shard_id_str.c_str(), 10, &err); if (!err.empty()) { - dout(5) << "Error parsing shard_id param " << shard_id_str << dendl; + ldpp_dout(this, 5) << "Error parsing shard_id param " << shard_id_str << dendl; op_ret = -EINVAL; return; } @@ -270,11 +270,11 @@ void RGWOp_MDLog_Lock::execute(optional_yield y) { unsigned dur; dur = (unsigned)strict_strtol(duration_str.c_str(), 10, &err); if (!err.empty() || dur <= 0) { - dout(5) << "invalid length param " << duration_str << dendl; + ldpp_dout(this, 5) << "invalid length param " << duration_str << dendl; op_ret = -EINVAL; return; } - op_ret = meta_log.lock_exclusive(shard_id, make_timespan(dur), zone_id, + op_ret = meta_log.lock_exclusive(s, shard_id, make_timespan(dur), zone_id, locker_id); if (op_ret == -EBUSY) op_ret = -ERR_LOCKED; @@ -292,7 +292,7 @@ void RGWOp_MDLog_Unlock::execute(optional_yield y) { zone_id = s->info.args.get("zone-id"); if (period.empty()) { - ldout(s->cct, 5) << "Missing period id trying to use current" << dendl; + ldpp_dout(this, 5) << "Missing period id trying to 
use current" << dendl; period = store->get_zone()->get_current_period_id(); } @@ -300,7 +300,7 @@ void RGWOp_MDLog_Unlock::execute(optional_yield y) { shard_id_str.empty() || locker_id.empty() || zone_id.empty()) { - dout(5) << "Error invalid parameter list" << dendl; + ldpp_dout(this, 5) << "Error invalid parameter list" << dendl; op_ret = -EINVAL; return; } @@ -308,13 +308,13 @@ void RGWOp_MDLog_Unlock::execute(optional_yield y) { string err; shard_id = (unsigned)strict_strtol(shard_id_str.c_str(), 10, &err); if (!err.empty()) { - dout(5) << "Error parsing shard_id param " << shard_id_str << dendl; + ldpp_dout(this, 5) << "Error parsing shard_id param " << shard_id_str << dendl; op_ret = -EINVAL; return; } RGWMetadataLog meta_log{s->cct, static_cast(store)->svc()->zone, static_cast(store)->svc()->cls, period}; - op_ret = meta_log.unlock(shard_id, zone_id, locker_id); + op_ret = meta_log.unlock(s, shard_id, zone_id, locker_id); } void RGWOp_MDLog_Notify::execute(optional_yield y) { @@ -329,12 +329,12 @@ void RGWOp_MDLog_Notify::execute(optional_yield y) { } char* buf = data.c_str(); - ldout(s->cct, 20) << __func__ << "(): read data: " << buf << dendl; + ldpp_dout(this, 20) << __func__ << "(): read data: " << buf << dendl; JSONParser p; r = p.parse(buf, data.length()); if (r < 0) { - ldout(s->cct, 0) << "ERROR: failed to parse JSON" << dendl; + ldpp_dout(this, 0) << "ERROR: failed to parse JSON" << dendl; op_ret = r; return; } @@ -343,14 +343,14 @@ void RGWOp_MDLog_Notify::execute(optional_yield y) { try { decode_json_obj(updated_shards, &p); } catch (JSONDecoder::err& err) { - ldout(s->cct, 0) << "ERROR: failed to decode JSON" << dendl; + ldpp_dout(this, 0) << "ERROR: failed to decode JSON" << dendl; op_ret = -EINVAL; return; } if (store->ctx()->_conf->subsys.should_gather()) { for (set::iterator iter = updated_shards.begin(); iter != updated_shards.end(); ++iter) { - ldout(s->cct, 20) << __func__ << "(): updated shard=" << *iter << dendl; + ldpp_dout(this, 20) << 
__func__ << "(): updated shard=" << *iter << dendl; } } @@ -370,7 +370,7 @@ void RGWOp_BILog_List::execute(optional_yield y) { unsigned max_entries; if (bucket_name.empty() && bucket_instance.empty()) { - dout(5) << "ERROR: neither bucket nor bucket instance specified" << dendl; + ldpp_dout(this, 5) << "ERROR: neither bucket nor bucket instance specified" << dendl; op_ret = -EINVAL; return; } @@ -388,7 +388,7 @@ void RGWOp_BILog_List::execute(optional_yield y) { } op_ret = store->get_bucket(s, nullptr, b, &bucket, y); if (op_ret < 0) { - ldpp_dout(s, 5) << "could not get bucket info for bucket=" << bucket_name << dendl; + ldpp_dout(this, 5) << "could not get bucket info for bucket=" << bucket_name << dendl; return; } @@ -403,11 +403,11 @@ void RGWOp_BILog_List::execute(optional_yield y) { send_response(); do { list entries; - int ret = static_cast(store)->svc()->bilog_rados->log_list(bucket->get_info(), shard_id, + int ret = static_cast(store)->svc()->bilog_rados->log_list(s, bucket->get_info(), shard_id, marker, max_entries - count, entries, &truncated); if (ret < 0) { - ldpp_dout(s, 5) << "ERROR: list_bi_log_entries()" << dendl; + ldpp_dout(this, 5) << "ERROR: list_bi_log_entries()" << dendl; return; } @@ -459,7 +459,7 @@ void RGWOp_BILog_Info::execute(optional_yield y) { rgw_bucket b(rgw_bucket_key(tenant_name, bucket_name)); if (bucket_name.empty() && bucket_instance.empty()) { - ldpp_dout(s, 5) << "ERROR: neither bucket nor bucket instance specified" << dendl; + ldpp_dout(this, 5) << "ERROR: neither bucket nor bucket instance specified" << dendl; op_ret = -EINVAL; return; } @@ -477,12 +477,12 @@ void RGWOp_BILog_Info::execute(optional_yield y) { } op_ret = store->get_bucket(s, nullptr, b, &bucket, y); if (op_ret < 0) { - ldpp_dout(s, 5) << "could not get bucket info for bucket=" << bucket_name << dendl; + ldpp_dout(this, 5) << "could not get bucket info for bucket=" << bucket_name << dendl; return; } map stats; - int ret = bucket->get_bucket_stats(shard_id, 
&bucket_ver, &master_ver, stats, &max_marker, &syncstopped); + int ret = bucket->get_bucket_stats(s, shard_id, &bucket_ver, &master_ver, stats, &max_marker, &syncstopped); if (ret < 0 && ret != -ENOENT) { op_ret = ret; return; @@ -520,7 +520,7 @@ void RGWOp_BILog_Delete::execute(optional_yield y) { op_ret = 0; if ((bucket_name.empty() && bucket_instance.empty()) || end_marker.empty()) { - ldpp_dout(s, 5) << "ERROR: one of bucket and bucket instance, and also end-marker is mandatory" << dendl; + ldpp_dout(this, 5) << "ERROR: one of bucket and bucket instance, and also end-marker is mandatory" << dendl; op_ret = -EINVAL; return; } @@ -538,13 +538,13 @@ void RGWOp_BILog_Delete::execute(optional_yield y) { } op_ret = store->get_bucket(s, nullptr, b, &bucket, y); if (op_ret < 0) { - ldpp_dout(s, 5) << "could not get bucket info for bucket=" << bucket_name << dendl; + ldpp_dout(this, 5) << "could not get bucket info for bucket=" << bucket_name << dendl; return; } - op_ret = static_cast(store)->svc()->bilog_rados->log_trim(bucket->get_info(), shard_id, start_marker, end_marker); + op_ret = static_cast(store)->svc()->bilog_rados->log_trim(s, bucket->get_info(), shard_id, start_marker, end_marker); if (op_ret < 0) { - ldpp_dout(s, 5) << "ERROR: trim_bi_log_entries() " << dendl; + ldpp_dout(this, 5) << "ERROR: trim_bi_log_entries() " << dendl; } return; } @@ -559,7 +559,7 @@ void RGWOp_DATALog_List::execute(optional_yield y) { if (s->info.args.exists("start-time") || s->info.args.exists("end-time")) { - dout(5) << "start-time and end-time are no longer accepted" << dendl; + ldpp_dout(this, 5) << "start-time and end-time are no longer accepted" << dendl; op_ret = -EINVAL; } @@ -567,7 +567,7 @@ void RGWOp_DATALog_List::execute(optional_yield y) { shard_id = (unsigned)strict_strtol(shard.c_str(), 10, &err); if (!err.empty()) { - dout(5) << "Error parsing shard_id " << shard << dendl; + ldpp_dout(this, 5) << "Error parsing shard_id " << shard << dendl; op_ret = -EINVAL; return; 
} @@ -575,7 +575,7 @@ void RGWOp_DATALog_List::execute(optional_yield y) { if (!max_entries_str.empty()) { max_entries = (unsigned)strict_strtol(max_entries_str.c_str(), 10, &err); if (!err.empty()) { - dout(5) << "Error parsing max-entries " << max_entries_str << dendl; + ldpp_dout(this, 5) << "Error parsing max-entries " << max_entries_str << dendl; op_ret = -EINVAL; return; } @@ -586,7 +586,7 @@ void RGWOp_DATALog_List::execute(optional_yield y) { // Note that last_marker is updated to be the marker of the last // entry listed - op_ret = static_cast(store)->svc()->datalog_rados->list_entries(shard_id, + op_ret = static_cast(store)->svc()->datalog_rados->list_entries(this, shard_id, max_entries, entries, marker, &last_marker, &truncated); @@ -642,12 +642,12 @@ void RGWOp_DATALog_ShardInfo::execute(optional_yield y) { unsigned shard_id = (unsigned)strict_strtol(shard.c_str(), 10, &err); if (!err.empty()) { - dout(5) << "Error parsing shard_id " << shard << dendl; + ldpp_dout(this, 5) << "Error parsing shard_id " << shard << dendl; op_ret = -EINVAL; return; } - op_ret = static_cast(store)->svc()->datalog_rados->get_info(shard_id, &info); + op_ret = static_cast(store)->svc()->datalog_rados->get_info(this, shard_id, &info); } void RGWOp_DATALog_ShardInfo::send_response() { @@ -672,12 +672,12 @@ void RGWOp_DATALog_Notify::execute(optional_yield y) { } char* buf = data.c_str(); - ldout(s->cct, 20) << __func__ << "(): read data: " << buf << dendl; + ldpp_dout(this, 20) << __func__ << "(): read data: " << buf << dendl; JSONParser p; r = p.parse(buf, data.length()); if (r < 0) { - ldout(s->cct, 0) << "ERROR: failed to parse JSON" << dendl; + ldpp_dout(this, 0) << "ERROR: failed to parse JSON" << dendl; op_ret = r; return; } @@ -686,17 +686,17 @@ void RGWOp_DATALog_Notify::execute(optional_yield y) { try { decode_json_obj(updated_shards, &p); } catch (JSONDecoder::err& err) { - ldout(s->cct, 0) << "ERROR: failed to decode JSON" << dendl; + ldpp_dout(this, 0) << "ERROR: 
failed to decode JSON" << dendl; op_ret = -EINVAL; return; } if (store->ctx()->_conf->subsys.should_gather()) { for (map >::iterator iter = updated_shards.begin(); iter != updated_shards.end(); ++iter) { - ldout(s->cct, 20) << __func__ << "(): updated shard=" << iter->first << dendl; + ldpp_dout(this, 20) << __func__ << "(): updated shard=" << iter->first << dendl; set& keys = iter->second; for (set::iterator kiter = keys.begin(); kiter != keys.end(); ++kiter) { - ldout(s->cct, 20) << __func__ << "(): modified key=" << *kiter << dendl; + ldpp_dout(this, 20) << __func__ << "(): modified key=" << *kiter << dendl; } } } @@ -716,12 +716,12 @@ void RGWOp_DATALog_Delete::execute(optional_yield y) { if (s->info.args.exists("start-time") || s->info.args.exists("end-time")) { - dout(5) << "start-time and end-time are no longer accepted" << dendl; + ldpp_dout(this, 5) << "start-time and end-time are no longer accepted" << dendl; op_ret = -EINVAL; } if (s->info.args.exists("start-marker")) { - dout(5) << "start-marker is no longer accepted" << dendl; + ldpp_dout(this, 5) << "start-marker is no longer accepted" << dendl; op_ret = -EINVAL; } @@ -729,14 +729,14 @@ void RGWOp_DATALog_Delete::execute(optional_yield y) { if (!s->info.args.exists("marker")) { marker = s->info.args.get("end-marker"); } else { - dout(5) << "end-marker and marker cannot both be provided" << dendl; + ldpp_dout(this, 5) << "end-marker and marker cannot both be provided" << dendl; op_ret = -EINVAL; } } shard_id = (unsigned)strict_strtol(shard.c_str(), 10, &err); if (!err.empty()) { - dout(5) << "Error parsing shard_id " << shard << dendl; + ldpp_dout(this, 5) << "Error parsing shard_id " << shard << dendl; op_ret = -EINVAL; return; } @@ -745,7 +745,7 @@ void RGWOp_DATALog_Delete::execute(optional_yield y) { return; } - op_ret = static_cast(store)->svc()->datalog_rados->trim_entries(shard_id, marker); + op_ret = static_cast(store)->svc()->datalog_rados->trim_entries(this, shard_id, marker); } // not in 
header to avoid pulling in rgw_sync.h @@ -767,11 +767,11 @@ void RGWOp_MDLog_Status::execute(optional_yield y) { auto sync = static_cast(store)->getRados()->get_meta_sync_manager(); if (sync == nullptr) { - ldout(s->cct, 1) << "no sync manager" << dendl; + ldpp_dout(this, 1) << "no sync manager" << dendl; op_ret = -ENOENT; return; } - op_ret = sync->read_sync_status(&status); + op_ret = sync->read_sync_status(this, &status); } void RGWOp_MDLog_Status::send_response() @@ -812,7 +812,7 @@ void RGWOp_BILog_Status::execute(optional_yield y) key = source_key; } if (key.empty()) { - ldpp_dout(s, 4) << "no 'bucket' provided" << dendl; + ldpp_dout(this, 4) << "no 'bucket' provided" << dendl; op_ret = -EINVAL; return; } @@ -821,7 +821,7 @@ void RGWOp_BILog_Status::execute(optional_yield y) int shard_id{-1}; // unused op_ret = rgw_bucket_parse_bucket_key(s->cct, key, &b, &shard_id); if (op_ret < 0) { - ldpp_dout(s, 4) << "invalid 'bucket' provided" << dendl; + ldpp_dout(this, 4) << "invalid 'bucket' provided" << dendl; op_ret = -EINVAL; return; } @@ -830,7 +830,7 @@ void RGWOp_BILog_Status::execute(optional_yield y) std::unique_ptr bucket; op_ret = store->get_bucket(s, nullptr, b, &bucket, y); if (op_ret < 0) { - ldpp_dout(s, 4) << "failed to read bucket info: " << cpp_strerror(op_ret) << dendl; + ldpp_dout(this, 4) << "failed to read bucket info: " << cpp_strerror(op_ret) << dendl; return; } @@ -842,7 +842,7 @@ void RGWOp_BILog_Status::execute(optional_yield y) } else { op_ret = rgw_bucket_parse_bucket_key(s->cct, source_key, &source_bucket, nullptr); if (op_ret < 0) { - ldpp_dout(s, 4) << "invalid 'source-bucket' provided (key=" << source_key << ")" << dendl; + ldpp_dout(this, 4) << "invalid 'source-bucket' provided (key=" << source_key << ")" << dendl; return; } } @@ -856,12 +856,12 @@ void RGWOp_BILog_Status::execute(optional_yield y) pipe.dest.zone = local_zone_id; pipe.dest.bucket = bucket->get_key(); - ldout(s->cct, 20) << "RGWOp_BILog_Status::execute(optional_yield 
y): getting sync status for pipe=" << pipe << dendl; + ldpp_dout(this, 20) << "RGWOp_BILog_Status::execute(optional_yield y): getting sync status for pipe=" << pipe << dendl; op_ret = rgw_bucket_sync_status(this, static_cast(store), pipe, bucket->get_info(), nullptr, &status); if (op_ret < 0) { - lderr(s->cct) << "ERROR: rgw_bucket_sync_status() on pipe=" << pipe << " returned ret=" << op_ret << dendl; + ldpp_dout(this, -1) << "ERROR: rgw_bucket_sync_status() on pipe=" << pipe << " returned ret=" << op_ret << dendl; } return; } @@ -871,7 +871,7 @@ void RGWOp_BILog_Status::execute(optional_yield y) RGWBucketSyncPolicyHandlerRef source_handler; op_ret = store->get_sync_policy_handler(s, source_zone_id, source_bucket, &source_handler, y); if (op_ret < 0) { - lderr(s->cct) << "could not get bucket sync policy handler (r=" << op_ret << ")" << dendl; + ldpp_dout(this, -1) << "could not get bucket sync policy handler (r=" << op_ret << ")" << dendl; return; } @@ -881,14 +881,14 @@ void RGWOp_BILog_Status::execute(optional_yield y) for (auto& entry : local_dests) { auto pipe = entry.second; - ldout(s->cct, 20) << "RGWOp_BILog_Status::execute(optional_yield y): getting sync status for pipe=" << pipe << dendl; + ldpp_dout(this, 20) << "RGWOp_BILog_Status::execute(optional_yield y): getting sync status for pipe=" << pipe << dendl; RGWBucketInfo *pinfo = &bucket->get_info(); std::optional opt_dest_info; if (!pipe.dest.bucket) { /* Uh oh, something went wrong */ - ldout(s->cct, 20) << "ERROR: RGWOp_BILog_Status::execute(optional_yield y): BUG: pipe.dest.bucket was not initialized" << pipe << dendl; + ldpp_dout(this, 20) << "ERROR: RGWOp_BILog_Status::execute(optional_yield y): BUG: pipe.dest.bucket was not initialized" << pipe << dendl; op_ret = -EIO; return; } @@ -898,7 +898,7 @@ void RGWOp_BILog_Status::execute(optional_yield y) std::unique_ptr dest_bucket; op_ret = store->get_bucket(s, nullptr, *pipe.dest.bucket, &dest_bucket, y); if (op_ret < 0) { - ldpp_dout(s, 4) << 
"failed to read target bucket info (bucket=: " << cpp_strerror(op_ret) << dendl; + ldpp_dout(this, 4) << "failed to read target bucket info (bucket=: " << cpp_strerror(op_ret) << dendl; return; } @@ -909,7 +909,7 @@ void RGWOp_BILog_Status::execute(optional_yield y) int r = rgw_bucket_sync_status(this, static_cast(store), pipe, *pinfo, &bucket->get_info(), ¤t_status); if (r < 0) { - lderr(s->cct) << "ERROR: rgw_bucket_sync_status() on pipe=" << pipe << " returned ret=" << r << dendl; + ldpp_dout(this, -1) << "ERROR: rgw_bucket_sync_status() on pipe=" << pipe << " returned ret=" << r << dendl; op_ret = r; return; } @@ -920,7 +920,7 @@ void RGWOp_BILog_Status::execute(optional_yield y) if (current_status.size() != status.size()) { op_ret = -EINVAL; - lderr(s->cct) << "ERROR: different number of shards for sync status of buckets syncing from the same source: status.size()= " << status.size() << " current_status.size()=" << current_status.size() << dendl; + ldpp_dout(this, -1) << "ERROR: different number of shards for sync status of buckets syncing from the same source: status.size()= " << status.size() << " current_status.size()=" << current_status.size() << dendl; return; } auto m = status.begin(); @@ -967,11 +967,11 @@ void RGWOp_DATALog_Status::execute(optional_yield y) const auto source_zone = s->info.args.get("source-zone"); auto sync = store->get_data_sync_manager(source_zone); if (sync == nullptr) { - ldout(s->cct, 1) << "no sync manager for source-zone " << source_zone << dendl; + ldpp_dout(this, 1) << "no sync manager for source-zone " << source_zone << dendl; op_ret = -ENOENT; return; } - op_ret = sync->read_sync_status(&status); + op_ret = sync->read_sync_status(this, &status); } void RGWOp_DATALog_Status::send_response() diff --git a/src/rgw/rgw_rest_metadata.cc b/src/rgw/rgw_rest_metadata.cc index 18964c2fa81..1c8bfc6dfbb 100644 --- a/src/rgw/rgw_rest_metadata.cc +++ b/src/rgw/rgw_rest_metadata.cc @@ -77,7 +77,7 @@ void 
RGWOp_Metadata_Get_Myself::execute(optional_yield y) { void RGWOp_Metadata_List::execute(optional_yield y) { string marker; - ldout(s->cct, 16) << __func__ + ldpp_dout(this, 16) << __func__ << " raw marker " << s->info.args.get("marker") << dendl; @@ -86,7 +86,7 @@ void RGWOp_Metadata_List::execute(optional_yield y) { if (!marker.empty()) { marker = rgw::from_base64(marker); } - ldout(s->cct, 16) << __func__ + ldpp_dout(this, 16) << __func__ << " marker " << marker << dendl; } catch (...) { marker = std::string(""); @@ -104,7 +104,7 @@ void RGWOp_Metadata_List::execute(optional_yield y) { string err; max_entries = (unsigned)strict_strtol(max_entries_str.c_str(), 10, &err); if (!err.empty()) { - dout(5) << "Error parsing max-entries " << max_entries_str << dendl; + ldpp_dout(this, 5) << "Error parsing max-entries " << max_entries_str << dendl; op_ret = -EINVAL; return; } @@ -123,9 +123,9 @@ void RGWOp_Metadata_List::execute(optional_yield y) { marker = "3:bf885d8f:root::sorry_janefonda_665:head"; */ - op_ret = store->meta_list_keys_init(metadata_key, marker, &handle); + op_ret = store->meta_list_keys_init(this, metadata_key, marker, &handle); if (op_ret < 0) { - dout(5) << "ERROR: can't get key: " << cpp_strerror(op_ret) << dendl; + ldpp_dout(this, 5) << "ERROR: can't get key: " << cpp_strerror(op_ret) << dendl; return; } @@ -144,7 +144,7 @@ void RGWOp_Metadata_List::execute(optional_yield y) { left = (max_entries_specified ? 
max_entries - count : max); op_ret = store->meta_list_keys_next(handle, left, keys, &truncated); if (op_ret < 0) { - dout(5) << "ERROR: lists_keys_next(): " << cpp_strerror(op_ret) + ldpp_dout(this, 5) << "ERROR: lists_keys_next(): " << cpp_strerror(op_ret) << dendl; return; } @@ -188,7 +188,7 @@ int RGWOp_Metadata_Put::get_data(bufferlist& bl) { } read_len = recv_body(s, data, cl); if (cl != (size_t)read_len) { - dout(10) << "recv_body incomplete" << dendl; + ldpp_dout(this, 10) << "recv_body incomplete" << dendl; } if (read_len < 0) { free(data); diff --git a/src/rgw/rgw_rest_oidc_provider.cc b/src/rgw/rgw_rest_oidc_provider.cc index 519b92d4287..21d35e2c33d 100644 --- a/src/rgw/rgw_rest_oidc_provider.cc +++ b/src/rgw/rgw_rest_oidc_provider.cc @@ -28,7 +28,7 @@ int RGWRestOIDCProvider::verify_permission(optional_yield y) provider_arn = s->info.args.get("OpenIDConnectProviderArn"); if (provider_arn.empty()) { - ldout(s->cct, 20) << "ERROR: Provider ARN is empty"<< dendl; + ldpp_dout(this, 20) << "ERROR: Provider ARN is empty"<< dendl; return -EINVAL; } @@ -107,7 +107,7 @@ int RGWCreateOIDCProvider::get_params() } if (provider_url.empty() || thumbprints.empty()) { - ldout(s->cct, 20) << "ERROR: one of url or thumbprints is empty" << dendl; + ldpp_dout(this, 20) << "ERROR: one of url or thumbprints is empty" << dendl; return -EINVAL; } @@ -146,7 +146,7 @@ void RGWDeleteOIDCProvider::execute(optional_yield y) std::unique_ptr provider = store->get_oidc_provider(); provider->set_arn(provider_arn); provider->set_tenant(s->user->get_tenant()); - op_ret = provider->delete_obj(this, y); + op_ret = provider->delete_obj(s, y); if (op_ret < 0 && op_ret != -ENOENT && op_ret != -EINVAL) { op_ret = ERR_INTERNAL_ERROR; diff --git a/src/rgw/rgw_rest_pubsub.cc b/src/rgw/rgw_rest_pubsub.cc index 3b57ef34cdf..418f41c029c 100644 --- a/src/rgw/rgw_rest_pubsub.cc +++ b/src/rgw/rgw_rest_pubsub.cc @@ -31,7 +31,7 @@ public: int get_params() override { topic_name = s->info.args.get("Name"); 
if (topic_name.empty()) { - ldout(s->cct, 1) << "CreateTopic Action 'Name' argument is missing" << dendl; + ldpp_dout(this, 1) << "CreateTopic Action 'Name' argument is missing" << dendl; return -EINVAL; } @@ -57,7 +57,7 @@ public: if (!dest.push_endpoint.empty() && dest.persistent) { const auto ret = rgw::notify::add_persistent_topic(topic_name, s->yield); if (ret < 0) { - ldout(s->cct, 1) << "CreateTopic Action failed to create queue for persistent topics. error:" << ret << dendl; + ldpp_dout(this, 1) << "CreateTopic Action failed to create queue for persistent topics. error:" << ret << dendl; return ret; } } @@ -137,7 +137,7 @@ public: const auto topic_arn = rgw::ARN::parse((s->info.args.get("TopicArn"))); if (!topic_arn || topic_arn->resource.empty()) { - ldout(s->cct, 1) << "GetTopic Action 'TopicArn' argument is missing or invalid" << dendl; + ldpp_dout(this, 1) << "GetTopic Action 'TopicArn' argument is missing or invalid" << dendl; return -EINVAL; } @@ -178,7 +178,7 @@ public: const auto topic_arn = rgw::ARN::parse((s->info.args.get("TopicArn"))); if (!topic_arn || topic_arn->resource.empty()) { - ldout(s->cct, 1) << "GetTopicAttribute Action 'TopicArn' argument is missing or invalid" << dendl; + ldpp_dout(this, 1) << "GetTopicAttribute Action 'TopicArn' argument is missing or invalid" << dendl; return -EINVAL; } @@ -219,7 +219,7 @@ public: const auto topic_arn = rgw::ARN::parse((s->info.args.get("TopicArn"))); if (!topic_arn || topic_arn->resource.empty()) { - ldout(s->cct, 1) << "DeleteTopic Action 'TopicArn' argument is missing or invalid" << dendl; + ldpp_dout(this, 1) << "DeleteTopic Action 'TopicArn' argument is missing or invalid" << dendl; return -EINVAL; } @@ -233,7 +233,7 @@ public: return 0; } if (ret < 0) { - ldout(s->cct, 1) << "DeleteTopic Action failed to remove queue for persistent topics. error:" << ret << dendl; + ldpp_dout(this, 1) << "DeleteTopic Action failed to remove queue for persistent topics. 
error:" << ret << dendl; return ret; } @@ -343,7 +343,7 @@ void update_attribute_map(const std::string& input, AttributeMap& map) { void RGWHandler_REST_PSTopic_AWS::rgw_topic_parse_input() { if (post_body.size() > 0) { - ldout(s->cct, 10) << "Content of POST: " << post_body << dendl; + ldpp_dout(s, 10) << "Content of POST: " << post_body << dendl; if (post_body.find("Action") != string::npos) { const boost::char_separator sep("&"); @@ -434,29 +434,29 @@ class RGWPSCreateNotif_ObjStore_S3 : public RGWPSCreateNotifOp { std::tie(r, data) = read_all_input(s, max_size, false); if (r < 0) { - ldout(s->cct, 1) << "failed to read XML payload" << dendl; + ldpp_dout(this, 1) << "failed to read XML payload" << dendl; return r; } if (data.length() == 0) { - ldout(s->cct, 1) << "XML payload missing" << dendl; + ldpp_dout(this, 1) << "XML payload missing" << dendl; return -EINVAL; } RGWXMLDecoder::XMLParser parser; if (!parser.init()){ - ldout(s->cct, 1) << "failed to initialize XML parser" << dendl; + ldpp_dout(this, 1) << "failed to initialize XML parser" << dendl; return -EINVAL; } if (!parser.parse(data.c_str(), data.length(), 1)) { - ldout(s->cct, 1) << "failed to parse XML payload" << dendl; + ldpp_dout(this, 1) << "failed to parse XML payload" << dendl; return -ERR_MALFORMED_XML; } try { // NotificationConfigurations is mandatory RGWXMLDecoder::decode_xml("NotificationConfiguration", configurations, &parser, true); } catch (RGWXMLDecoder::err& err) { - ldout(s->cct, 1) << "failed to parse XML payload. error: " << err << dendl; + ldpp_dout(this, 1) << "failed to parse XML payload. 
error: " << err << dendl; return -ERR_MALFORMED_XML; } return 0; @@ -466,15 +466,15 @@ class RGWPSCreateNotif_ObjStore_S3 : public RGWPSCreateNotifOp { bool exists; const auto no_value = s->info.args.get("notification", &exists); if (!exists) { - ldout(s->cct, 1) << "missing required param 'notification'" << dendl; + ldpp_dout(this, 1) << "missing required param 'notification'" << dendl; return -EINVAL; } if (no_value.length() > 0) { - ldout(s->cct, 1) << "param 'notification' should not have any value" << dendl; + ldpp_dout(this, 1) << "param 'notification' should not have any value" << dendl; return -EINVAL; } if (s->bucket_name.empty()) { - ldout(s->cct, 1) << "request must be on a bucket" << dendl; + ldpp_dout(this, 1) << "request must be on a bucket" << dendl; return -EINVAL; } bucket_name = s->bucket_name; @@ -512,25 +512,25 @@ void RGWPSCreateNotif_ObjStore_S3::execute(optional_yield y) { for (const auto& c : configurations.list) { const auto& notif_name = c.id; if (notif_name.empty()) { - ldout(s->cct, 1) << "missing notification id" << dendl; + ldpp_dout(this, 1) << "missing notification id" << dendl; op_ret = -EINVAL; return; } if (c.topic_arn.empty()) { - ldout(s->cct, 1) << "missing topic ARN in notification: '" << notif_name << "'" << dendl; + ldpp_dout(this, 1) << "missing topic ARN in notification: '" << notif_name << "'" << dendl; op_ret = -EINVAL; return; } const auto arn = rgw::ARN::parse(c.topic_arn); if (!arn || arn->resource.empty()) { - ldout(s->cct, 1) << "topic ARN has invalid format: '" << c.topic_arn << "' in notification: '" << notif_name << "'" << dendl; + ldpp_dout(this, 1) << "topic ARN has invalid format: '" << c.topic_arn << "' in notification: '" << notif_name << "'" << dendl; op_ret = -EINVAL; return; } if (std::find(c.events.begin(), c.events.end(), rgw::notify::UnknownEvent) != c.events.end()) { - ldout(s->cct, 1) << "unknown event type in notification: '" << notif_name << "'" << dendl; + ldpp_dout(this, 1) << "unknown event type 
in notification: '" << notif_name << "'" << dendl; op_ret = -EINVAL; return; } @@ -541,7 +541,7 @@ void RGWPSCreateNotif_ObjStore_S3::execute(optional_yield y) { rgw_pubsub_topic topic_info; op_ret = ps->get_topic(topic_name, &topic_info); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to get topic '" << topic_name << "', ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to get topic '" << topic_name << "', ret=" << op_ret << dendl; return; } // make sure that full topic configuration match @@ -554,24 +554,24 @@ void RGWPSCreateNotif_ObjStore_S3::execute(optional_yield y) { // generate the internal topic. destination is stored here for the "push-only" case // when no subscription exists // ARN is cached to make the "GET" method faster - op_ret = ps->create_topic(unique_topic_name, topic_info.dest, topic_info.arn, topic_info.opaque_data, y); + op_ret = ps->create_topic(this, unique_topic_name, topic_info.dest, topic_info.arn, topic_info.opaque_data, y); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to auto-generate unique topic '" << unique_topic_name << + ldpp_dout(this, 1) << "failed to auto-generate unique topic '" << unique_topic_name << "', ret=" << op_ret << dendl; return; } - ldout(s->cct, 20) << "successfully auto-generated unique topic '" << unique_topic_name << "'" << dendl; + ldpp_dout(this, 20) << "successfully auto-generated unique topic '" << unique_topic_name << "'" << dendl; // generate the notification rgw::notify::EventTypeList events; - op_ret = b->create_notification(unique_topic_name, c.events, std::make_optional(c.filter), notif_name, y); + op_ret = b->create_notification(this, unique_topic_name, c.events, std::make_optional(c.filter), notif_name, y); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to auto-generate notification for unique topic '" << unique_topic_name << + ldpp_dout(this, 1) << "failed to auto-generate notification for unique topic '" << unique_topic_name << "', ret=" << op_ret << dendl; // rollback generated topic 
(ignore return value) - ps->remove_topic(unique_topic_name, y); + ps->remove_topic(this, unique_topic_name, y); return; } - ldout(s->cct, 20) << "successfully auto-generated notification for unique topic '" << unique_topic_name << "'" << dendl; + ldpp_dout(this, 20) << "successfully auto-generated notification for unique topic '" << unique_topic_name << "'" << dendl; if (!push_only) { // generate the subscription with destination information from the original topic @@ -579,16 +579,16 @@ void RGWPSCreateNotif_ObjStore_S3::execute(optional_yield y) { dest.bucket_name = data_bucket_prefix + s->owner.get_id().to_str() + "-" + unique_topic_name; dest.oid_prefix = data_oid_prefix + notif_name + "/"; auto sub = ps->get_sub(notif_name); - op_ret = sub->subscribe(unique_topic_name, dest, y, notif_name); + op_ret = sub->subscribe(this, unique_topic_name, dest, y, notif_name); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to auto-generate subscription '" << notif_name << "', ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to auto-generate subscription '" << notif_name << "', ret=" << op_ret << dendl; // rollback generated notification (ignore return value) - b->remove_notification(unique_topic_name, y); + b->remove_notification(this, unique_topic_name, y); // rollback generated topic (ignore return value) - ps->remove_topic(unique_topic_name, y); + ps->remove_topic(this, unique_topic_name, y); return; } - ldout(s->cct, 20) << "successfully auto-generated subscription '" << notif_name << "'" << dendl; + ldpp_dout(this, 20) << "successfully auto-generated subscription '" << notif_name << "'" << dendl; } } } @@ -602,11 +602,11 @@ private: bool exists; notif_name = s->info.args.get("notification", &exists); if (!exists) { - ldout(s->cct, 1) << "missing required param 'notification'" << dendl; + ldpp_dout(this, 1) << "missing required param 'notification'" << dendl; return -EINVAL; } if (s->bucket_name.empty()) { - ldout(s->cct, 1) << "request must be on a bucket" << 
dendl; + ldpp_dout(this, 1) << "request must be on a bucket" << dendl; return -EINVAL; } bucket_name = s->bucket_name; @@ -614,13 +614,13 @@ private: } void remove_notification_by_topic(const std::string& topic_name, const RGWPubSub::BucketRef& b, optional_yield y) { - op_ret = b->remove_notification(topic_name, y); + op_ret = b->remove_notification(this, topic_name, y); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to remove notification of topic '" << topic_name << "', ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to remove notification of topic '" << topic_name << "', ret=" << op_ret << dendl; } - op_ret = ps->remove_topic(topic_name, y); + op_ret = ps->remove_topic(this, topic_name, y); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to remove auto-generated topic '" << topic_name << "', ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to remove auto-generated topic '" << topic_name << "', ret=" << op_ret << dendl; } } @@ -643,7 +643,7 @@ void RGWPSDeleteNotif_ObjStore_S3::execute(optional_yield y) { rgw_pubsub_bucket_topics bucket_topics; op_ret = b->get_topics(&bucket_topics); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to get list of topics from bucket '" << bucket_info.bucket.name << "', ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to get list of topics from bucket '" << bucket_info.bucket.name << "', ret=" << op_ret << dendl; return; } @@ -654,16 +654,16 @@ void RGWPSDeleteNotif_ObjStore_S3::execute(optional_yield y) { // remove the auto generated subscription according to notification name (if exist) const auto unique_topic_name = unique_topic->get().topic.name; auto sub = ps->get_sub(notif_name); - op_ret = sub->unsubscribe(unique_topic_name, y); + op_ret = sub->unsubscribe(this, unique_topic_name, y); if (op_ret < 0 && op_ret != -ENOENT) { - ldout(s->cct, 1) << "failed to remove auto-generated subscription '" << notif_name << "', ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to remove auto-generated 
subscription '" << notif_name << "', ret=" << op_ret << dendl; return; } remove_notification_by_topic(unique_topic_name, b, y); return; } // notification to be removed is not found - considered success - ldout(s->cct, 20) << "notification '" << notif_name << "' already removed" << dendl; + ldpp_dout(this, 20) << "notification '" << notif_name << "' already removed" << dendl; return; } @@ -677,15 +677,15 @@ void RGWPSDeleteNotif_ObjStore_S3::execute(optional_yield y) { rgw_pubsub_sub_config sub_conf; op_ret = sub->get_conf(&sub_conf); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to get subscription '" << topic_sub_name << "' info, ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to get subscription '" << topic_sub_name << "' info, ret=" << op_ret << dendl; return; } if (!sub_conf.s3_id.empty()) { // S3 notification, has autogenerated subscription const auto& sub_topic_name = sub_conf.topic; - op_ret = sub->unsubscribe(sub_topic_name, y); + op_ret = sub->unsubscribe(this, sub_topic_name, y); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to remove auto-generated subscription '" << topic_sub_name << "', ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to remove auto-generated subscription '" << topic_sub_name << "', ret=" << op_ret << dendl; return; } } @@ -704,11 +704,11 @@ private: bool exists; notif_name = s->info.args.get("notification", &exists); if (!exists) { - ldout(s->cct, 1) << "missing required param 'notification'" << dendl; + ldpp_dout(this, 1) << "missing required param 'notification'" << dendl; return -EINVAL; } if (s->bucket_name.empty()) { - ldout(s->cct, 1) << "request must be on a bucket" << dendl; + ldpp_dout(this, 1) << "request must be on a bucket" << dendl; return -EINVAL; } bucket_name = s->bucket_name; @@ -742,7 +742,7 @@ void RGWPSListNotifs_ObjStore_S3::execute(optional_yield y) { rgw_pubsub_bucket_topics bucket_topics; op_ret = b->get_topics(&bucket_topics); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to get list 
of topics from bucket '" << bucket_info.bucket.name << "', ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to get list of topics from bucket '" << bucket_info.bucket.name << "', ret=" << op_ret << dendl; return; } if (!notif_name.empty()) { @@ -753,7 +753,7 @@ void RGWPSListNotifs_ObjStore_S3::execute(optional_yield y) { return; } op_ret = -ENOENT; - ldout(s->cct, 1) << "failed to get notification info for '" << notif_name << "', ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to get notification info for '" << notif_name << "', ret=" << op_ret << dendl; return; } // loop through all topics of the bucket diff --git a/src/rgw/rgw_rest_pubsub_common.cc b/src/rgw/rgw_rest_pubsub_common.cc index 0c03c4fbcf0..74a92637670 100644 --- a/src/rgw/rgw_rest_pubsub_common.cc +++ b/src/rgw/rgw_rest_pubsub_common.cc @@ -54,12 +54,12 @@ void RGWPSCreateTopicOp::execute(optional_yield y) { } ps.emplace(static_cast(store), s->owner.get_id().tenant); - op_ret = ps->create_topic(topic_name, dest, topic_arn, opaque_data, y); + op_ret = ps->create_topic(this, topic_name, dest, topic_arn, opaque_data, y); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to create topic '" << topic_name << "', ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to create topic '" << topic_name << "', ret=" << op_ret << dendl; return; } - ldout(s->cct, 20) << "successfully created topic '" << topic_name << "'" << dendl; + ldpp_dout(this, 20) << "successfully created topic '" << topic_name << "'" << dendl; } void RGWPSListTopicsOp::execute(optional_yield y) { @@ -68,15 +68,15 @@ void RGWPSListTopicsOp::execute(optional_yield y) { // if there are no topics it is not considered an error op_ret = op_ret == -ENOENT ? 
0 : op_ret; if (op_ret < 0) { - ldout(s->cct, 1) << "failed to get topics, ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to get topics, ret=" << op_ret << dendl; return; } if (topics_has_endpoint_secret(result) && !rgw_transport_is_secure(s->cct, *(s->info.env))) { - ldout(s->cct, 1) << "topics contain secret and cannot be sent over insecure transport" << dendl; + ldpp_dout(this, 1) << "topics contain secret and cannot be sent over insecure transport" << dendl; op_ret = -EPERM; return; } - ldout(s->cct, 20) << "successfully got topics" << dendl; + ldpp_dout(this, 20) << "successfully got topics" << dendl; } void RGWPSGetTopicOp::execute(optional_yield y) { @@ -87,15 +87,15 @@ void RGWPSGetTopicOp::execute(optional_yield y) { ps.emplace(static_cast(store), s->owner.get_id().tenant); op_ret = ps->get_topic(topic_name, &result); if (topic_has_endpoint_secret(result) && !rgw_transport_is_secure(s->cct, *(s->info.env))) { - ldout(s->cct, 1) << "topic '" << topic_name << "' contain secret and cannot be sent over insecure transport" << dendl; + ldpp_dout(this, 1) << "topic '" << topic_name << "' contain secret and cannot be sent over insecure transport" << dendl; op_ret = -EPERM; return; } if (op_ret < 0) { - ldout(s->cct, 1) << "failed to get topic '" << topic_name << "', ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to get topic '" << topic_name << "', ret=" << op_ret << dendl; return; } - ldout(s->cct, 1) << "successfully got topic '" << topic_name << "'" << dendl; + ldpp_dout(this, 1) << "successfully got topic '" << topic_name << "'" << dendl; } void RGWPSDeleteTopicOp::execute(optional_yield y) { @@ -104,12 +104,12 @@ void RGWPSDeleteTopicOp::execute(optional_yield y) { return; } ps.emplace(static_cast(store), s->owner.get_id().tenant); - op_ret = ps->remove_topic(topic_name, y); + op_ret = ps->remove_topic(this, topic_name, y); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to remove topic '" << topic_name << ", ret=" << op_ret << dendl; + 
ldpp_dout(this, 1) << "failed to remove topic '" << topic_name << ", ret=" << op_ret << dendl; return; } - ldout(s->cct, 1) << "successfully removed topic '" << topic_name << "'" << dendl; + ldpp_dout(this, 1) << "successfully removed topic '" << topic_name << "'" << dendl; } void RGWPSCreateSubOp::execute(optional_yield y) { @@ -119,12 +119,12 @@ void RGWPSCreateSubOp::execute(optional_yield y) { } ps.emplace(static_cast(store), s->owner.get_id().tenant); auto sub = ps->get_sub(sub_name); - op_ret = sub->subscribe(topic_name, dest, y); + op_ret = sub->subscribe(this, topic_name, dest, y); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to create subscription '" << sub_name << "', ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to create subscription '" << sub_name << "', ret=" << op_ret << dendl; return; } - ldout(s->cct, 20) << "successfully created subscription '" << sub_name << "'" << dendl; + ldpp_dout(this, 20) << "successfully created subscription '" << sub_name << "'" << dendl; } void RGWPSGetSubOp::execute(optional_yield y) { @@ -136,15 +136,15 @@ void RGWPSGetSubOp::execute(optional_yield y) { auto sub = ps->get_sub(sub_name); op_ret = sub->get_conf(&result); if (subscription_has_endpoint_secret(result) && !rgw_transport_is_secure(s->cct, *(s->info.env))) { - ldout(s->cct, 1) << "subscription '" << sub_name << "' contain secret and cannot be sent over insecure transport" << dendl; + ldpp_dout(this, 1) << "subscription '" << sub_name << "' contain secret and cannot be sent over insecure transport" << dendl; op_ret = -EPERM; return; } if (op_ret < 0) { - ldout(s->cct, 1) << "failed to get subscription '" << sub_name << "', ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to get subscription '" << sub_name << "', ret=" << op_ret << dendl; return; } - ldout(s->cct, 20) << "successfully got subscription '" << sub_name << "'" << dendl; + ldpp_dout(this, 20) << "successfully got subscription '" << sub_name << "'" << dendl; } void 
RGWPSDeleteSubOp::execute(optional_yield y) { @@ -154,12 +154,12 @@ void RGWPSDeleteSubOp::execute(optional_yield y) { } ps.emplace(static_cast(store), s->owner.get_id().tenant); auto sub = ps->get_sub(sub_name); - op_ret = sub->unsubscribe(topic_name, y); + op_ret = sub->unsubscribe(this, topic_name, y); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to remove subscription '" << sub_name << "', ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to remove subscription '" << sub_name << "', ret=" << op_ret << dendl; return; } - ldout(s->cct, 20) << "successfully removed subscription '" << sub_name << "'" << dendl; + ldpp_dout(this, 20) << "successfully removed subscription '" << sub_name << "'" << dendl; } void RGWPSAckSubEventOp::execute(optional_yield y) { @@ -186,7 +186,7 @@ void RGWPSPullSubEventsOp::execute(optional_yield y) { sub = ps->get_sub_with_events(sub_name); if (!sub) { op_ret = -ENOENT; - ldout(s->cct, 1) << "failed to get subscription '" << sub_name << "' for events, ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to get subscription '" << sub_name << "' for events, ret=" << op_ret << dendl; return; } op_ret = sub->list_events(s, marker, max_entries); @@ -208,13 +208,13 @@ int RGWPSCreateNotifOp::verify_permission(optional_yield y) { std::unique_ptr bucket; ret = store->get_bucket(this, user.get(), s->owner.get_id().tenant, bucket_name, &bucket, y); if (ret < 0) { - ldout(s->cct, 1) << "failed to get bucket info, cannot verify ownership" << dendl; + ldpp_dout(this, 1) << "failed to get bucket info, cannot verify ownership" << dendl; return ret; } bucket_info = bucket->get_info(); if (bucket_info.owner != s->owner.get_id()) { - ldout(s->cct, 1) << "user doesn't own bucket, not allowed to create notification" << dendl; + ldpp_dout(this, 1) << "user doesn't own bucket, not allowed to create notification" << dendl; return -EPERM; } return 0; @@ -235,7 +235,7 @@ int RGWPSDeleteNotifOp::verify_permission(optional_yield y) { bucket_info = 
bucket->get_info(); if (bucket_info.owner != s->owner.get_id()) { - ldout(s->cct, 1) << "user doesn't own bucket, cannot remove notification" << dendl; + ldpp_dout(this, 1) << "user doesn't own bucket, cannot remove notification" << dendl; return -EPERM; } return 0; @@ -256,7 +256,7 @@ int RGWPSListNotifsOp::verify_permission(optional_yield y) { bucket_info = bucket->get_info(); if (bucket_info.owner != s->owner.get_id()) { - ldout(s->cct, 1) << "user doesn't own bucket, cannot get notification list" << dendl; + ldpp_dout(this, 1) << "user doesn't own bucket, cannot get notification list" << dendl; return -EPERM; } diff --git a/src/rgw/rgw_rest_realm.cc b/src/rgw/rgw_rest_realm.cc index 469855df472..24925c55e89 100644 --- a/src/rgw/rgw_rest_realm.cc +++ b/src/rgw/rgw_rest_realm.cc @@ -36,7 +36,7 @@ void RGWOp_Period_Base::send_response() if (op_ret < 0) { if (!s->err.message.empty()) { - ldout(s->cct, 4) << "Request failed with " << op_ret + ldpp_dout(this, 4) << "Request failed with " << op_ret << ": " << s->err.message << dendl; } end_header(s); @@ -73,9 +73,9 @@ void RGWOp_Period_Get::execute(optional_yield y) period.set_id(period_id); period.set_epoch(epoch); - op_ret = period.init(store->ctx(), static_cast(store)->svc()->sysobj, realm_id, y, realm_name); + op_ret = period.init(this, store->ctx(), static_cast(store)->svc()->sysobj, realm_id, y, realm_name); if (op_ret < 0) - ldout(store->ctx(), 5) << "failed to read period" << dendl; + ldpp_dout(this, 5) << "failed to read period" << dendl; } // POST /admin/realm/period @@ -97,14 +97,14 @@ void RGWOp_Period_Post::execute(optional_yield y) auto cct = store->ctx(); // initialize the period without reading from rados - period.init(cct, static_cast(store)->svc()->sysobj, y, false); + period.init(this, cct, static_cast(store)->svc()->sysobj, y, false); // decode the period from input const auto max_size = cct->_conf->rgw_max_put_param_size; bool empty; op_ret = get_json_input(cct, s, period, max_size, &empty); if 
(op_ret < 0) { - lderr(cct) << "failed to decode period" << dendl; + ldpp_dout(this, -1) << "failed to decode period" << dendl; return; } @@ -120,17 +120,17 @@ void RGWOp_Period_Post::execute(optional_yield y) // period that we haven't restarted with yet. we also don't want to modify // the objects in use by RGWRados RGWRealm realm(period.get_realm()); - op_ret = realm.init(cct, static_cast(store)->svc()->sysobj, y); + op_ret = realm.init(this, cct, static_cast(store)->svc()->sysobj, y); if (op_ret < 0) { - lderr(cct) << "failed to read current realm: " + ldpp_dout(this, -1) << "failed to read current realm: " << cpp_strerror(-op_ret) << dendl; return; } RGWPeriod current_period; - op_ret = current_period.init(cct, static_cast(store)->svc()->sysobj, realm.get_id(), y); + op_ret = current_period.init(this, cct, static_cast(store)->svc()->sysobj, realm.get_id(), y); if (op_ret < 0) { - lderr(cct) << "failed to read current period: " + ldpp_dout(this, -1) << "failed to read current period: " << cpp_strerror(-op_ret) << dendl; return; } @@ -139,36 +139,36 @@ void RGWOp_Period_Post::execute(optional_yield y) if (period.get_id().empty()) { op_ret = period.commit(this, store, realm, current_period, error_stream, y); if (op_ret < 0) { - lderr(cct) << "master zone failed to commit period" << dendl; + ldpp_dout(this, -1) << "master zone failed to commit period" << dendl; } return; } // if it's not period commit, nobody is allowed to push to the master zone if (period.get_master_zone() == store->get_zone()->get_params().get_id()) { - ldout(cct, 10) << "master zone rejecting period id=" + ldpp_dout(this, 10) << "master zone rejecting period id=" << period.get_id() << " epoch=" << period.get_epoch() << dendl; op_ret = -EINVAL; // XXX: error code return; } // write the period to rados - op_ret = period.store_info(false, y); + op_ret = period.store_info(this, false, y); if (op_ret < 0) { - lderr(cct) << "failed to store period " << period.get_id() << dendl; + ldpp_dout(this, -1) 
<< "failed to store period " << period.get_id() << dendl; return; } // set as latest epoch - op_ret = period.update_latest_epoch(period.get_epoch(), y); + op_ret = period.update_latest_epoch(this, period.get_epoch(), y); if (op_ret == -EEXIST) { // already have this epoch (or a more recent one) - ldout(cct, 4) << "already have epoch >= " << period.get_epoch() + ldpp_dout(this, 4) << "already have epoch >= " << period.get_epoch() << " for period " << period.get_id() << dendl; op_ret = 0; return; } if (op_ret < 0) { - lderr(cct) << "failed to set latest epoch" << dendl; + ldpp_dout(this, -1) << "failed to set latest epoch" << dendl; return; } @@ -179,7 +179,7 @@ void RGWOp_Period_Post::execute(optional_yield y) auto current_epoch = current_period.get_realm_epoch(); // discard periods in the past if (period.get_realm_epoch() < current_epoch) { - ldout(cct, 10) << "discarding period " << period.get_id() + ldpp_dout(this, 10) << "discarding period " << period.get_id() << " with realm epoch " << period.get_realm_epoch() << " older than current epoch " << current_epoch << dendl; // return success to ack that we have this period @@ -187,18 +187,18 @@ void RGWOp_Period_Post::execute(optional_yield y) } // discard periods too far in the future if (period.get_realm_epoch() > current_epoch + PERIOD_HISTORY_FETCH_MAX) { - lderr(cct) << "discarding period " << period.get_id() + ldpp_dout(this, -1) << "discarding period " << period.get_id() << " with realm epoch " << period.get_realm_epoch() << " too far in " "the future from current epoch " << current_epoch << dendl; op_ret = -ENOENT; // XXX: error code return; } // attach a copy of the period into the period history - auto cursor = period_history->attach(RGWPeriod{period}, y); + auto cursor = period_history->attach(this, RGWPeriod{period}, y); if (!cursor) { // we're missing some history between the new period and current_period op_ret = cursor.get_error(); - lderr(cct) << "failed to collect the periods between current period " 
+ ldpp_dout(this, -1) << "failed to collect the periods between current period " << current_period.get_id() << " (realm epoch " << current_epoch << ") and the new period " << period.get_id() << " (realm epoch " << period.get_realm_epoch() @@ -207,33 +207,33 @@ void RGWOp_Period_Post::execute(optional_yield y) } if (cursor.has_next()) { // don't switch if we have a newer period in our history - ldout(cct, 4) << "attached period " << period.get_id() + ldpp_dout(this, 4) << "attached period " << period.get_id() << " to history, but the history contains newer periods" << dendl; return; } // set as current period - op_ret = realm.set_current_period(period, y); + op_ret = realm.set_current_period(this, period, y); if (op_ret < 0) { - lderr(cct) << "failed to update realm's current period" << dendl; + ldpp_dout(this, -1) << "failed to update realm's current period" << dendl; return; } - ldout(cct, 4) << "period " << period.get_id() + ldpp_dout(this, 4) << "period " << period.get_id() << " is newer than current period " << current_period.get_id() << ", updating realm's current period and notifying zone" << dendl; - realm.notify_new_period(period, y); + realm.notify_new_period(this, period, y); return; } // reflect the period into our local objects - op_ret = period.reflect(y); + op_ret = period.reflect(this, y); if (op_ret < 0) { - lderr(cct) << "failed to update local objects: " + ldpp_dout(this, -1) << "failed to update local objects: " << cpp_strerror(-op_ret) << dendl; return; } - ldout(cct, 4) << "period epoch " << period.get_epoch() + ldpp_dout(this, 4) << "period epoch " << period.get_epoch() << " is newer than current epoch " << current_period.get_epoch() << ", updating period's latest epoch and notifying zone" << dendl; - realm.notify_new_period(period, y); + realm.notify_new_period(this, period, y); // update the period history period_history->insert(RGWPeriod{period}); } @@ -281,9 +281,9 @@ void RGWOp_Realm_Get::execute(optional_yield y) // read realm 
realm.reset(new RGWRealm(id, name)); - op_ret = realm->init(g_ceph_context, static_cast(store)->svc()->sysobj, y); + op_ret = realm->init(this, g_ceph_context, static_cast(store)->svc()->sysobj, y); if (op_ret < 0) - lderr(store->ctx()) << "failed to read realm id=" << id + ldpp_dout(this, -1) << "failed to read realm id=" << id << " name=" << name << dendl; } @@ -323,11 +323,11 @@ void RGWOp_Realm_List::execute(optional_yield y) { // read default realm RGWRealm realm(store->ctx(), static_cast(store)->svc()->sysobj); - [[maybe_unused]] int ret = realm.read_default_id(default_id, y); + [[maybe_unused]] int ret = realm.read_default_id(this, default_id, y); } - op_ret = static_cast(store)->svc()->zone->list_realms(realms); + op_ret = static_cast(store)->svc()->zone->list_realms(this, realms); if (op_ret < 0) - lderr(store->ctx()) << "failed to list realms" << dendl; + ldpp_dout(this, -1) << "failed to list realms" << dendl; } void RGWOp_Realm_List::send_response() diff --git a/src/rgw/rgw_rest_role.cc b/src/rgw/rgw_rest_role.cc index c133f5fe908..a542804220d 100644 --- a/src/rgw/rgw_rest_role.cc +++ b/src/rgw/rgw_rest_role.cc @@ -108,7 +108,7 @@ int RGWCreateRole::get_params() max_session_duration = s->info.args.get("MaxSessionDuration"); if (role_name.empty() || trust_policy.empty()) { - ldout(s->cct, 20) << "ERROR: one of role name or assume role policy document is empty" + ldpp_dout(this, 20) << "ERROR: one of role name or assume role policy document is empty" << dendl; return -EINVAL; } @@ -118,7 +118,7 @@ int RGWCreateRole::get_params() const rgw::IAM::Policy p(s->cct, s->user->get_tenant(), bl); } catch (rgw::IAM::PolicyParseException& e) { - ldout(s->cct, 20) << "failed to parse policy: " << e.what() << dendl; + ldpp_dout(this, 20) << "failed to parse policy: " << e.what() << dendl; return -ERR_MALFORMED_DOC; } @@ -161,7 +161,7 @@ int RGWDeleteRole::get_params() role_name = s->info.args.get("RoleName"); if (role_name.empty()) { - ldout(s->cct, 20) << "ERROR: 
Role name is empty"<< dendl; + ldpp_dout(this, 20) << "ERROR: Role name is empty"<< dendl; return -EINVAL; } @@ -220,7 +220,7 @@ int RGWGetRole::get_params() role_name = s->info.args.get("RoleName"); if (role_name.empty()) { - ldout(s->cct, 20) << "ERROR: Role name is empty"<< dendl; + ldpp_dout(this, 20) << "ERROR: Role name is empty"<< dendl; return -EINVAL; } @@ -264,12 +264,12 @@ int RGWModifyRole::get_params() trust_policy = s->info.args.get("PolicyDocument"); if (role_name.empty() || trust_policy.empty()) { - ldout(s->cct, 20) << "ERROR: One of role name or trust policy is empty"<< dendl; + ldpp_dout(this, 20) << "ERROR: One of role name or trust policy is empty"<< dendl; return -EINVAL; } JSONParser p; if (!p.parse(trust_policy.c_str(), trust_policy.length())) { - ldout(s->cct, 20) << "ERROR: failed to parse assume role policy doc" << dendl; + ldpp_dout(this, 20) << "ERROR: failed to parse assume role policy doc" << dendl; return -ERR_MALFORMED_DOC; } @@ -354,7 +354,7 @@ int RGWPutRolePolicy::get_params() perm_policy = s->info.args.get("PolicyDocument"); if (role_name.empty() || policy_name.empty() || perm_policy.empty()) { - ldout(s->cct, 20) << "ERROR: One of role name, policy name or perm policy is empty"<< dendl; + ldpp_dout(this, 20) << "ERROR: One of role name, policy name or perm policy is empty"<< dendl; return -EINVAL; } bufferlist bl = bufferlist::static_from_string(perm_policy); @@ -362,7 +362,7 @@ int RGWPutRolePolicy::get_params() const rgw::IAM::Policy p(s->cct, s->user->get_tenant(), bl); } catch (rgw::IAM::PolicyParseException& e) { - ldout(s->cct, 20) << "failed to parse policy: " << e.what() << dendl; + ldpp_dout(this, 20) << "failed to parse policy: " << e.what() << dendl; return -ERR_MALFORMED_DOC; } return 0; @@ -393,7 +393,7 @@ int RGWGetRolePolicy::get_params() policy_name = s->info.args.get("PolicyName"); if (role_name.empty() || policy_name.empty()) { - ldout(s->cct, 20) << "ERROR: One of role name or policy name is empty"<< dendl; + 
ldpp_dout(this, 20) << "ERROR: One of role name or policy name is empty"<< dendl; return -EINVAL; } return 0; @@ -431,7 +431,7 @@ int RGWListRolePolicies::get_params() role_name = s->info.args.get("RoleName"); if (role_name.empty()) { - ldout(s->cct, 20) << "ERROR: Role name is empty"<< dendl; + ldpp_dout(this, 20) << "ERROR: Role name is empty"<< dendl; return -EINVAL; } return 0; @@ -465,7 +465,7 @@ int RGWDeleteRolePolicy::get_params() policy_name = s->info.args.get("PolicyName"); if (role_name.empty() || policy_name.empty()) { - ldout(s->cct, 20) << "ERROR: One of role name or policy name is empty"<< dendl; + ldpp_dout(this, 20) << "ERROR: One of role name or policy name is empty"<< dendl; return -EINVAL; } return 0; diff --git a/src/rgw/rgw_rest_s3.cc b/src/rgw/rgw_rest_s3.cc index 0af5245d0a6..2d2df3ee03f 100644 --- a/src/rgw/rgw_rest_s3.cc +++ b/src/rgw/rgw_rest_s3.cc @@ -452,7 +452,7 @@ int RGWGetObj_ObjStore_S3::get_decrypt_filter(std::unique_ptr if (block_crypt != nullptr) { auto f = std::make_unique(s->cct, cb, std::move(block_crypt)); if (manifest_bl != nullptr) { - res = f->read_manifest(*manifest_bl); + res = f->read_manifest(this, *manifest_bl); if (res == 0) { *filter = std::move(f); } @@ -609,7 +609,7 @@ void RGWGetBucketTags_ObjStore_S3::send_response_data(bufferlist& bl) try { tagset.decode(iter); } catch (buffer::error& err) { - ldout(s->cct,0) << "ERROR: caught buffer::error, couldn't decode TagSet" << dendl; + ldpp_dout(this,0) << "ERROR: caught buffer::error, couldn't decode TagSet" << dendl; op_ret= -EIO; return; } @@ -621,7 +621,7 @@ void RGWGetBucketTags_ObjStore_S3::send_response_data(bufferlist& bl) } } -int RGWPutBucketTags_ObjStore_S3::get_params(optional_yield y) +int RGWPutBucketTags_ObjStore_S3::get_params(const DoutPrefixProvider *dpp, optional_yield y) { RGWXMLParser parser; @@ -647,7 +647,7 @@ int RGWPutBucketTags_ObjStore_S3::get_params(optional_yield y) RGWXMLDecoder::decode_xml("Tagging", tagging, &parser); } catch 
(RGWXMLDecoder::err& err) { - ldout(s->cct, 5) << "Malformed tagging request: " << err << dendl; + ldpp_dout(dpp, 5) << "Malformed tagging request: " << err << dendl; return -ERR_MALFORMED_XML; } @@ -657,7 +657,7 @@ int RGWPutBucketTags_ObjStore_S3::get_params(optional_yield y) return r; obj_tags.encode(tags_bl); - ldout(s->cct, 20) << "Read " << obj_tags.count() << "tags" << dendl; + ldpp_dout(dpp, 20) << "Read " << obj_tags.count() << "tags" << dendl; // forward bucket tags requests to meta master zone if (!store->is_meta_master()) { @@ -1131,7 +1131,7 @@ struct ReplicationConfiguration { bool enabled; int r = rule.to_sync_policy_pipe(s, store, &pipe, &enabled); if (r < 0) { - ldout(s->cct, 5) << "NOTICE: failed to convert replication configuration into sync policy pipe (rule.id=" << rule.id << "): " << cpp_strerror(-r) << dendl; + ldpp_dout(s, 5) << "NOTICE: failed to convert replication configuration into sync policy pipe (rule.id=" << rule.id << "): " << cpp_strerror(-r) << dendl; return r; } @@ -1215,7 +1215,7 @@ int RGWPutBucketReplication_ObjStore_S3::get_params(optional_yield y) RGWXMLDecoder::decode_xml("ReplicationConfiguration", conf, &parser); } catch (RGWXMLDecoder::err& err) { - ldout(s->cct, 5) << "Malformed tagging request: " << err << dendl; + ldpp_dout(this, 5) << "Malformed tagging request: " << err << dendl; return -ERR_MALFORMED_XML; } @@ -1469,7 +1469,7 @@ int RGWListBucket_ObjStore_S3::get_common_params() string err; shard_id = strict_strtol(shard_id_str, 10, &err); if (!err.empty()) { - ldout(s->cct, 5) << "bad shard id specified: " << shard_id_str << dendl; + ldpp_dout(this, 5) << "bad shard id specified: " << shard_id_str << dendl; return -EINVAL; } } else { @@ -2066,16 +2066,16 @@ int RGWSetBucketWebsite_ObjStore_S3::get_params(optional_yield y) if (website_conf.is_redirect_all && website_conf.redirect_all.hostname.empty()) { s->err.message = "A host name must be provided to redirect all requests (e.g. 
\"example.com\")."; - ldout(s->cct, 5) << s->err.message << dendl; + ldpp_dout(this, 5) << s->err.message << dendl; return -EINVAL; } else if (!website_conf.is_redirect_all && !website_conf.is_set_index_doc) { s->err.message = "A value for IndexDocument Suffix must be provided if RedirectAllRequestsTo is empty"; - ldout(s->cct, 5) << s->err.message << dendl; + ldpp_dout(this, 5) << s->err.message << dendl; return -EINVAL; } else if (!website_conf.is_redirect_all && website_conf.is_set_index_doc && website_conf.index_doc_suffix.empty()) { s->err.message = "The IndexDocument Suffix is not well formed"; - ldout(s->cct, 5) << s->err.message << dendl; + ldpp_dout(this, 5) << s->err.message << dendl; return -EINVAL; } @@ -2557,7 +2557,7 @@ int RGWPutObj_ObjStore_S3::get_decrypt_filter( //RGWGetObj_BlockDecrypt* f = new RGWGetObj_BlockDecrypt(s->cct, cb, std::move(block_crypt)); if (f != nullptr) { if (manifest_bl != nullptr) { - res = f->read_manifest(*manifest_bl); + res = f->read_manifest(this, *manifest_bl); if (res == 0) { *filter = std::move(f); } @@ -3442,7 +3442,7 @@ void RGWGetCORS_ObjStore_S3::send_response() int RGWPutCORS_ObjStore_S3::get_params(optional_yield y) { - RGWCORSXMLParser_S3 parser(s->cct); + RGWCORSXMLParser_S3 parser(this, s->cct); RGWCORSConfiguration_S3 *cors_config; const auto max_size = s->cct->_conf->rgw_max_put_param_size; @@ -3956,7 +3956,7 @@ void RGWGetObjLayout_ObjStore_S3::send_response() ::encode_json("head", head_obj, &f); ::encode_json("manifest", *manifest, &f); f.open_array_section("data_location"); - for (auto miter = manifest->obj_begin(); miter != manifest->obj_end(); ++miter) { + for (auto miter = manifest->obj_begin(this); miter != manifest->obj_end(this); ++miter) { f.open_object_section("obj"); rgw_raw_obj raw_loc = miter.get_location().get_raw_obj(store); uint64_t ofs = miter.get_ofs(); @@ -4634,7 +4634,7 @@ static int verify_mfa(rgw::sal::Store* store, RGWUserInfo *user, return -EACCES; } - int ret = 
static_cast(store)->svc()->cls->mfa.check_mfa(user->user_id, serial, pin, y); + int ret = static_cast(store)->svc()->cls->mfa.check_mfa(dpp, user->user_id, serial, pin, y); if (ret < 0) { ldpp_dout(dpp, 20) << "NOTICE: failed to check MFA, serial=" << serial << dendl; return -EACCES; @@ -4986,7 +4986,7 @@ RGWOp* RGWHandler_REST_S3Website::op_head() return get_obj_op(false); } -int RGWHandler_REST_S3Website::serve_errordoc(int http_ret, const string& errordoc_key, optional_yield y) { +int RGWHandler_REST_S3Website::serve_errordoc(const DoutPrefixProvider *dpp, int http_ret, const string& errordoc_key, optional_yield y) { int ret = 0; s->formatter->reset(); /* Try to throw it all away */ @@ -5099,7 +5099,7 @@ int RGWHandler_REST_S3Website::error_handler(int err_no, On success, it will return zero, and no further content should be sent to the socket On failure, we need the double-error handler */ - new_err_no = RGWHandler_REST_S3Website::serve_errordoc(http_error_code, s->bucket->get_info().website_conf.error_doc, y); + new_err_no = RGWHandler_REST_S3Website::serve_errordoc(s, http_error_code, s->bucket->get_info().website_conf.error_doc, y); if (new_err_no != -1) { err_no = new_err_no; } @@ -5615,7 +5615,7 @@ AWSGeneralAbstractor::get_auth_data_v2(const req_state* const s) const /* Let's canonize the HTTP headers that are covered by the AWS auth v2. */ std::string string_to_sign; utime_t header_time; - if (! rgw_create_s3_canonical_header(s->info, &header_time, string_to_sign, + if (! 
rgw_create_s3_canonical_header(s, s->info, &header_time, string_to_sign, qsr)) { ldpp_dout(s, 10) << "failed to create the canonized auth header\n" << rgw::crypt_sanitize::auth{s,string_to_sign} << dendl; @@ -5946,7 +5946,7 @@ rgw::auth::s3::STSEngine::get_session_token(const DoutPrefixProvider* dpp, const auto iter = dec_output.cbegin(); decode(token, iter); } catch (const buffer::error& e) { - ldout(cct, 0) << "ERROR: decode SessionToken failed: " << error << dendl; + ldpp_dout(dpp, 0) << "ERROR: decode SessionToken failed: " << error << dendl; return -EINVAL; } } @@ -6105,16 +6105,16 @@ int RGWSelectObj_ObjStore_S3::get_params(optional_yield y) int max_size = 4096; std::tie(ret, data) = read_all_input(s, max_size, false); if (ret != 0) { - ldout(s->cct, 10) << "s3-select query: failed to retrieve query; ret = " << ret << dendl; + ldpp_dout(this, 10) << "s3-select query: failed to retrieve query; ret = " << ret << dendl; return ret; } m_s3select_query = data.to_str(); if (m_s3select_query.length() > 0) { - ldout(s->cct, 10) << "s3-select query: " << m_s3select_query << dendl; + ldpp_dout(this, 10) << "s3-select query: " << m_s3select_query << dendl; } else { - ldout(s->cct, 10) << "s3-select query: failed to retrieve query;" << dendl; + ldpp_dout(this, 10) << "s3-select query: failed to retrieve query;" << dendl; return -1; } @@ -6265,7 +6265,7 @@ int RGWSelectObj_ObjStore_S3::run_s3select(const char* query, const char* input, if (s3select_syntax->get_error_description().empty() == false) { m_result.append(s3select_syntax->get_error_description()); - ldout(s->cct, 10) << "s3-select query: failed to prase query; {" << s3select_syntax->get_error_description() << "}"<< dendl; + ldpp_dout(this, 10) << "s3-select query: failed to prase query; {" << s3select_syntax->get_error_description() << "}"<< dendl; status = -1; } else { @@ -6316,7 +6316,7 @@ int RGWSelectObj_ObjStore_S3::handle_aws_cli_parameters(std::string& sql_query) extract_by_tag("QuoteEscapeCharacter", 
m_escape_char); extract_by_tag("CompressionType", m_compression_type); if (m_compression_type.length()>0 && m_compression_type.compare("NONE") != 0) { - ldout(s->cct, 10) << "RGW supports currently only NONE option for compression type" << dendl; + ldpp_dout(this, 10) << "RGW supports currently only NONE option for compression type" << dendl; return -1; } @@ -6368,11 +6368,11 @@ int RGWSelectObj_ObjStore_S3::send_response_data(bufferlist& bl, off_t ofs, off_ for(auto& it : bl.buffers()) { - ldout(s->cct, 10) << "processing segment " << i << " out of " << bl_len << " off " << ofs + ldpp_dout(this, 10) << "processing segment " << i << " out of " << bl_len << " off " << ofs << " len " << len << " obj-size " << s->obj_size << dendl; if(it.length() == 0) { - ldout(s->cct, 10) << "s3select:it->_len is zero. segment " << i << " out of " << bl_len + ldpp_dout(this, 10) << "s3select:it->_len is zero. segment " << i << " out of " << bl_len << " obj-size " << s->obj_size << dendl; continue; } diff --git a/src/rgw/rgw_rest_s3.h b/src/rgw/rgw_rest_s3.h index 40f843b4ea5..c908a9f441d 100644 --- a/src/rgw/rgw_rest_s3.h +++ b/src/rgw/rgw_rest_s3.h @@ -94,7 +94,7 @@ public: class RGWPutBucketTags_ObjStore_S3 : public RGWPutBucketTags_ObjStore { public: - int get_params(optional_yield y) override; + int get_params(const DoutPrefixProvider *dpp, optional_yield y) override; void send_response() override; }; diff --git a/src/rgw/rgw_rest_s3website.h b/src/rgw/rgw_rest_s3website.h index 295f2b98cd2..36bf6f94a04 100644 --- a/src/rgw/rgw_rest_s3website.h +++ b/src/rgw/rgw_rest_s3website.h @@ -35,7 +35,7 @@ protected: RGWOp *op_copy() override { return NULL; } RGWOp *op_options() override { return NULL; } - int serve_errordoc(int http_ret, const string &errordoc_key, optional_yield y); + int serve_errordoc(const DoutPrefixProvider *dpp, int http_ret, const string &errordoc_key, optional_yield y); public: using RGWHandler_REST_S3::RGWHandler_REST_S3; ~RGWHandler_REST_S3Website() override = 
default; diff --git a/src/rgw/rgw_rest_sts.cc b/src/rgw/rgw_rest_sts.cc index 790d59c360c..25dd117450c 100644 --- a/src/rgw/rgw_rest_sts.cc +++ b/src/rgw/rgw_rest_sts.cc @@ -346,7 +346,7 @@ WebTokenEngine::authenticate( const DoutPrefixProvider* dpp, if (t) { string role_session = s->info.args.get("RoleSessionName"); if (role_session.empty()) { - ldout(s->cct, 0) << "Role Session Name is empty " << dendl; + ldpp_dout(dpp, 0) << "Role Session Name is empty " << dendl; return result_t::deny(-EACCES); } string role_arn = s->info.args.get("RoleArn"); @@ -381,16 +381,16 @@ int RGWREST_STS::verify_permission(optional_yield y) // If yes, then return 0, else -EPERM auto p_res = p.eval_principal(s->env, *s->auth.identity); if (p_res == rgw::IAM::Effect::Deny) { - ldout(s->cct, 0) << "evaluating principal returned deny" << dendl; + ldpp_dout(this, 0) << "evaluating principal returned deny" << dendl; return -EPERM; } auto c_res = p.eval_conditions(s->env); if (c_res == rgw::IAM::Effect::Deny) { - ldout(s->cct, 0) << "evaluating condition returned deny" << dendl; + ldpp_dout(this, 0) << "evaluating condition returned deny" << dendl; return -EPERM; } } catch (rgw::IAM::PolicyParseException& e) { - ldout(s->cct, 0) << "failed to parse policy: " << e.what() << dendl; + ldpp_dout(this, 0) << "failed to parse policy: " << e.what() << dendl; return -EPERM; } @@ -414,7 +414,7 @@ int RGWSTSGetSessionToken::verify_permission(optional_yield y) s, rgw::ARN(partition, service, "", s->user->get_tenant(), ""), rgw::IAM::stsGetSessionToken)) { - ldout(s->cct, 0) << "User does not have permssion to perform GetSessionToken" << dendl; + ldpp_dout(this, 0) << "User does not have permssion to perform GetSessionToken" << dendl; return -EACCES; } @@ -431,13 +431,13 @@ int RGWSTSGetSessionToken::get_params() string err; uint64_t duration_in_secs = strict_strtoll(duration.c_str(), 10, &err); if (!err.empty()) { - ldout(s->cct, 0) << "Invalid value of input duration: " << duration << dendl; + 
ldpp_dout(this, 0) << "Invalid value of input duration: " << duration << dendl; return -EINVAL; } if (duration_in_secs < STS::GetSessionTokenRequest::getMinDuration() || duration_in_secs > s->cct->_conf->rgw_sts_max_session_duration) { - ldout(s->cct, 0) << "Invalid duration in secs: " << duration_in_secs << dendl; + ldpp_dout(this, 0) << "Invalid duration in secs: " << duration_in_secs << dendl; return -EINVAL; } } @@ -480,7 +480,7 @@ int RGWSTSAssumeRoleWithWebIdentity::get_params() aud = s->info.args.get("aud"); if (roleArn.empty() || roleSessionName.empty() || sub.empty() || aud.empty()) { - ldout(s->cct, 0) << "ERROR: one of role arn or role session name or token is empty" << dendl; + ldpp_dout(this, 0) << "ERROR: one of role arn or role session name or token is empty" << dendl; return -EINVAL; } @@ -490,7 +490,7 @@ int RGWSTSAssumeRoleWithWebIdentity::get_params() const rgw::IAM::Policy p(s->cct, s->user->get_tenant(), bl); } catch (rgw::IAM::PolicyParseException& e) { - ldout(s->cct, 20) << "failed to parse policy: " << e.what() << "policy" << policy << dendl; + ldpp_dout(this, 20) << "failed to parse policy: " << e.what() << "policy" << policy << dendl; return -ERR_MALFORMED_DOC; } } @@ -539,7 +539,7 @@ int RGWSTSAssumeRole::get_params() tokenCode = s->info.args.get("TokenCode"); if (roleArn.empty() || roleSessionName.empty()) { - ldout(s->cct, 0) << "ERROR: one of role arn or role session name is empty" << dendl; + ldpp_dout(this, 0) << "ERROR: one of role arn or role session name is empty" << dendl; return -EINVAL; } @@ -549,7 +549,7 @@ int RGWSTSAssumeRole::get_params() const rgw::IAM::Policy p(s->cct, s->user->get_tenant(), bl); } catch (rgw::IAM::PolicyParseException& e) { - ldout(s->cct, 0) << "failed to parse policy: " << e.what() << "policy" << policy << dendl; + ldpp_dout(this, 0) << "failed to parse policy: " << e.what() << "policy" << policy << dendl; return -ERR_MALFORMED_DOC; } } @@ -594,7 +594,7 @@ int RGW_Auth_STS::authorize(const 
DoutPrefixProvider *dpp, void RGWHandler_REST_STS::rgw_sts_parse_input() { if (post_body.size() > 0) { - ldout(s->cct, 10) << "Content of POST: " << post_body << dendl; + ldpp_dout(s, 10) << "Content of POST: " << post_body << dendl; if (post_body.find("Action") != string::npos) { boost::char_separator sep("&"); @@ -637,7 +637,7 @@ int RGWHandler_REST_STS::init(rgw::sal::Store* store, s->dialect = "sts"; if (int ret = RGWHandler_REST_STS::init_from_header(s, RGW_FORMAT_XML, true); ret < 0) { - ldout(s->cct, 10) << "init_from_header returned err=" << ret << dendl; + ldpp_dout(s, 10) << "init_from_header returned err=" << ret << dendl; return ret; } diff --git a/src/rgw/rgw_rest_swift.cc b/src/rgw/rgw_rest_swift.cc index 7cd72fbb5dd..e2a2db57d4a 100644 --- a/src/rgw/rgw_rest_swift.cc +++ b/src/rgw/rgw_rest_swift.cc @@ -624,7 +624,7 @@ static int get_swift_container_settings(req_state * const s, RGWCORSConfiguration_SWIFT *swift_cors = new RGWCORSConfiguration_SWIFT; int r = swift_cors->create_update(allow_origins, allow_headers, expose_headers, max_age); if (r < 0) { - dout(0) << "Error creating/updating the cors configuration" << dendl; + ldpp_dout(s, 0) << "Error creating/updating the cors configuration" << dendl; delete swift_cors; return r; } @@ -3006,7 +3006,7 @@ int RGWHandler_REST_SWIFT::init_from_header(rgw::sal::Store* store, next_tok(req, first, '/'); - dout(10) << "ver=" << ver << " first=" << first << " req=" << req << dendl; + ldpp_dout(s, 10) << "ver=" << ver << " first=" << first << " req=" << req << dendl; if (first.size() == 0) return 0; diff --git a/src/rgw/rgw_rest_user.cc b/src/rgw/rgw_rest_user.cc index f373d0f1d5a..38ff4e1267e 100644 --- a/src/rgw/rgw_rest_user.cc +++ b/src/rgw/rgw_rest_user.cc @@ -42,7 +42,7 @@ void RGWOp_User_List::execute(optional_yield y) op_state.max_entries = max_entries; op_state.marker = marker; - op_ret = RGWUserAdminOp_User::list(store, op_state, flusher); + op_ret = RGWUserAdminOp_User::list(this, store, op_state, 
flusher); } class RGWOp_User_Info : public RGWRESTOp { @@ -151,7 +151,7 @@ void RGWOp_User_Create::execute(optional_yield y) RESTArgs::get_string(s, "placement-tags", placement_tags_str, &placement_tags_str); if (!s->user->get_info().system && system) { - ldout(s->cct, 0) << "cannot set system flag by non-system user" << dendl; + ldpp_dout(this, 0) << "cannot set system flag by non-system user" << dendl; op_ret = -EINVAL; return; } @@ -172,7 +172,7 @@ void RGWOp_User_Create::execute(optional_yield y) uint32_t op_mask; int ret = rgw_parse_op_type_list(op_mask_str, &op_mask); if (ret < 0) { - ldout(s->cct, 0) << "failed to parse op_mask: " << ret << dendl; + ldpp_dout(this, 0) << "failed to parse op_mask: " << ret << dendl; op_ret = -EINVAL; return; } @@ -211,7 +211,7 @@ void RGWOp_User_Create::execute(optional_yield y) rgw_placement_rule target_rule; target_rule.from_str(default_placement_str); if (!store->get_zone()->get_params().valid_placement(target_rule)) { - ldout(s->cct, 0) << "NOTICE: invalid dest placement: " << target_rule.to_str() << dendl; + ldpp_dout(this, 0) << "NOTICE: invalid dest placement: " << target_rule.to_str() << dendl; op_ret = -EINVAL; return; } @@ -225,7 +225,7 @@ void RGWOp_User_Create::execute(optional_yield y) } bufferlist data; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -288,7 +288,7 @@ void RGWOp_User_Modify::execute(optional_yield y) RESTArgs::get_string(s, "placement-tags", placement_tags_str, &placement_tags_str); if (!s->user->get_info().system && system) { - ldout(s->cct, 0) << "cannot set system flag by non-system user" << dendl; + ldpp_dout(this, 0) << "cannot set system flag by non-system user" << dendl; op_ret = -EINVAL; return; } @@ -325,7 +325,7 @@ void 
RGWOp_User_Modify::execute(optional_yield y) if (!op_mask_str.empty()) { uint32_t op_mask; if (rgw_parse_op_type_list(op_mask_str, &op_mask) < 0) { - ldout(s->cct, 0) << "failed to parse op_mask" << dendl; + ldpp_dout(this, 0) << "failed to parse op_mask" << dendl; op_ret = -EINVAL; return; } @@ -342,7 +342,7 @@ void RGWOp_User_Modify::execute(optional_yield y) uint32_t op_mask; int ret = rgw_parse_op_type_list(op_mask_str, &op_mask); if (ret < 0) { - ldout(s->cct, 0) << "failed to parse op_mask: " << ret << dendl; + ldpp_dout(this, 0) << "failed to parse op_mask: " << ret << dendl; op_ret = -EINVAL; return; } @@ -353,7 +353,7 @@ void RGWOp_User_Modify::execute(optional_yield y) rgw_placement_rule target_rule; target_rule.from_str(default_placement_str); if (!store->get_zone()->get_params().valid_placement(target_rule)) { - ldout(s->cct, 0) << "NOTICE: invalid dest placement: " << target_rule.to_str() << dendl; + ldpp_dout(this, 0) << "NOTICE: invalid dest placement: " << target_rule.to_str() << dendl; op_ret = -EINVAL; return; } @@ -367,7 +367,7 @@ void RGWOp_User_Modify::execute(optional_yield y) } bufferlist data; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -408,7 +408,7 @@ void RGWOp_User_Remove::execute(optional_yield y) op_state.set_purge_data(purge_data); bufferlist data; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -484,7 +484,7 @@ void RGWOp_Subuser_Create::execute(optional_yield y) op_state.set_key_type(key_type); bufferlist data; - 
op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -551,7 +551,7 @@ void RGWOp_Subuser_Modify::execute(optional_yield y) op_state.set_key_type(key_type); bufferlist data; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -594,7 +594,7 @@ void RGWOp_Subuser_Remove::execute(optional_yield y) op_state.set_purge_keys(); bufferlist data; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -735,7 +735,7 @@ void RGWOp_Caps_Add::execute(optional_yield y) op_state.set_caps(caps); bufferlist data; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -773,7 +773,7 @@ void RGWOp_Caps_Remove::execute(optional_yield y) op_state.set_caps(caps); bufferlist data; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -956,7 
+956,7 @@ void RGWOp_Quota_Set::execute(optional_yield y) bool set_user = set_all || (quota_type == "user"); if (!(set_all || set_bucket || set_user)) { - ldout(store->ctx(), 20) << "invalid quota type" << dendl; + ldpp_dout(this, 20) << "invalid quota type" << dendl; op_ret = -EINVAL; return; } @@ -971,7 +971,7 @@ void RGWOp_Quota_Set::execute(optional_yield y) } if (use_http_params && set_all) { - ldout(store->ctx(), 20) << "quota type was not specified, can't set all quotas via http headers" << dendl; + ldpp_dout(this, 20) << "quota type was not specified, can't set all quotas via http headers" << dendl; op_ret = -EINVAL; return; } diff --git a/src/rgw/rgw_rest_user_policy.cc b/src/rgw/rgw_rest_user_policy.cc index 2428e94689d..4e98406511c 100644 --- a/src/rgw/rgw_rest_user_policy.cc +++ b/src/rgw/rgw_rest_user_policy.cc @@ -62,13 +62,13 @@ int RGWRestUserPolicy::verify_permission(optional_yield y) bool RGWRestUserPolicy::validate_input() { if (policy_name.length() > MAX_POLICY_NAME_LEN) { - ldout(s->cct, 0) << "ERROR: Invalid policy name length " << dendl; + ldpp_dout(this, 0) << "ERROR: Invalid policy name length " << dendl; return false; } std::regex regex_policy_name("[A-Za-z0-9:=,.@-]+"); if (! 
std::regex_match(policy_name, regex_policy_name)) { - ldout(s->cct, 0) << "ERROR: Invalid chars in policy name " << dendl; + ldpp_dout(this, 0) << "ERROR: Invalid chars in policy name " << dendl; return false; } @@ -97,7 +97,7 @@ int RGWPutUserPolicy::get_params() policy = url_decode(s->info.args.get("PolicyDocument"), true); if (policy_name.empty() || user_name.empty() || policy.empty()) { - ldout(s->cct, 20) << "ERROR: one of policy name, user name or policy document is empty" + ldpp_dout(this, 20) << "ERROR: one of policy name, user name or policy document is empty" << dendl; return -EINVAL; } @@ -133,7 +133,7 @@ void RGWPutUserPolicy::execute(optional_yield y) } ceph::bufferlist in_data; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "ERROR: forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -156,7 +156,7 @@ void RGWPutUserPolicy::execute(optional_yield y) op_ret = -ERR_INTERNAL_ERROR; } } catch (rgw::IAM::PolicyParseException& e) { - ldout(s->cct, 20) << "failed to parse policy: " << e.what() << dendl; + ldpp_dout(this, 20) << "failed to parse policy: " << e.what() << dendl; op_ret = -ERR_MALFORMED_DOC; } @@ -180,7 +180,7 @@ int RGWGetUserPolicy::get_params() user_name = s->info.args.get("UserName"); if (policy_name.empty() || user_name.empty()) { - ldout(s->cct, 20) << "ERROR: one of policy name or user name is empty" + ldpp_dout(this, 20) << "ERROR: one of policy name or user name is empty" << dendl; return -EINVAL; } @@ -244,7 +244,7 @@ int RGWListUserPolicies::get_params() user_name = s->info.args.get("UserName"); if (user_name.empty()) { - ldout(s->cct, 20) << "ERROR: user name is empty" << dendl; + ldpp_dout(this, 20) << "ERROR: user name is empty" << dendl; return -EINVAL; } @@ -305,7 +305,7 @@ int RGWDeleteUserPolicy::get_params() 
user_name = s->info.args.get("UserName"); if (policy_name.empty() || user_name.empty()) { - ldout(s->cct, 20) << "ERROR: One of policy name or user name is empty"<< dendl; + ldpp_dout(this, 20) << "ERROR: One of policy name or user name is empty"<< dendl; return -EINVAL; } @@ -333,7 +333,7 @@ void RGWDeleteUserPolicy::execute(optional_yield y) } ceph::bufferlist in_data; - op_ret = store->forward_request_to_master(s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { // a policy might've been uploaded to this site when there was no sync // req. in earlier releases, proceed deletion diff --git a/src/rgw/rgw_sal.h b/src/rgw/rgw_sal.h index 31261bb8099..437d1d771af 100644 --- a/src/rgw/rgw_sal.h +++ b/src/rgw/rgw_sal.h @@ -159,7 +159,7 @@ class Store { std::unique_ptr* bucket, optional_yield y) = 0; virtual bool is_meta_master() = 0; - virtual int forward_request_to_master(User* user, obj_version* objv, + virtual int forward_request_to_master(const DoutPrefixProvider *dpp, User* user, obj_version* objv, bufferlist& in_data, JSONParser* jp, req_info& info, optional_yield y) = 0; virtual int defer_gc(const DoutPrefixProvider* dpp, RGWObjectCtx* rctx, Bucket* bucket, Object* obj, @@ -177,13 +177,13 @@ class Store { const DoutPrefixProvider* dpp, optional_yield y) = 0; virtual RGWLC* get_rgwlc(void) = 0; virtual RGWCoroutinesManagerRegistry* get_cr_registry() = 0; - virtual int delete_raw_obj(const rgw_raw_obj& obj) = 0; - virtual int delete_raw_obj_aio(const rgw_raw_obj& obj, Completions* aio) = 0; + virtual int delete_raw_obj(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj) = 0; + virtual int delete_raw_obj_aio(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, Completions* aio) = 0; virtual void get_raw_obj(const rgw_placement_rule& placement_rule, const rgw_obj& obj, rgw_raw_obj* raw_obj) = 0; virtual int get_raw_chunk_size(const 
DoutPrefixProvider* dpp, const rgw_raw_obj& obj, uint64_t* chunk_size) = 0; - virtual int log_usage(map& usage_info) = 0; - virtual int log_op(std::string& oid, bufferlist& bl) = 0; + virtual int log_usage(const DoutPrefixProvider *dpp, map& usage_info) = 0; + virtual int log_op(const DoutPrefixProvider *dpp, std::string& oid, bufferlist& bl) = 0; virtual int register_to_service_map(const std::string& daemon_type, const map& meta) = 0; virtual void get_quota(RGWQuotaInfo& bucket_quota, RGWQuotaInfo& user_quota) = 0; @@ -197,14 +197,14 @@ class Store { virtual RGWDataSyncStatusManager* get_data_sync_manager(const rgw_zone_id& source_zone) = 0; virtual void wakeup_meta_sync_shards(set& shard_ids) = 0; virtual void wakeup_data_sync_shards(const rgw_zone_id& source_zone, map >& shard_ids) = 0; - virtual int clear_usage() = 0; - virtual int read_all_usage(uint64_t start_epoch, uint64_t end_epoch, + virtual int clear_usage(const DoutPrefixProvider *dpp) = 0; + virtual int read_all_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, bool* is_truncated, RGWUsageIter& usage_iter, map& usage) = 0; - virtual int trim_all_usage(uint64_t start_epoch, uint64_t end_epoch) = 0; + virtual int trim_all_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch) = 0; virtual int get_config_key_val(std::string name, bufferlist* bl) = 0; - virtual int meta_list_keys_init(const std::string& section, const std::string& marker, void** phandle) = 0; + virtual int meta_list_keys_init(const DoutPrefixProvider *dpp, const std::string& section, const std::string& marker, void** phandle) = 0; virtual int meta_list_keys_next(void* handle, int max, list& keys, bool* truncated) = 0; virtual void meta_list_keys_complete(void* handle) = 0; virtual std::string meta_get_marker(void* handle) = 0; @@ -272,15 +272,16 @@ class User { static bool empty(User* u) { return (!u || u->info.user_id.id.empty()); } static bool 
empty(std::unique_ptr& u) { return (!u || u->info.user_id.id.empty()); } virtual int read_attrs(const DoutPrefixProvider* dpp, optional_yield y) = 0; - virtual int read_stats(optional_yield y, RGWStorageStats* stats, + virtual int read_stats(const DoutPrefixProvider *dpp, + optional_yield y, RGWStorageStats* stats, ceph::real_time* last_stats_sync = nullptr, ceph::real_time* last_stats_update = nullptr) = 0; - virtual int read_stats_async(RGWGetUserStats_CB* cb) = 0; - virtual int complete_flush_stats(optional_yield y) = 0; - virtual int read_usage(uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, + virtual int read_stats_async(const DoutPrefixProvider *dpp, RGWGetUserStats_CB* cb) = 0; + virtual int complete_flush_stats(const DoutPrefixProvider *dpp, optional_yield y) = 0; + virtual int read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, bool* is_truncated, RGWUsageIter& usage_iter, map& usage) = 0; - virtual int trim_usage(uint64_t start_epoch, uint64_t end_epoch) = 0; + virtual int trim_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch) = 0; virtual RGWObjVersionTracker& get_version_tracker() { return objv_tracker; } virtual Attrs& get_attrs() { return attrs; } virtual void set_attrs(Attrs& _attrs) { attrs = _attrs; } @@ -379,14 +380,14 @@ class Bucket { virtual RGWAccessControlPolicy& get_acl(void) = 0; virtual int set_acl(const DoutPrefixProvider* dpp, RGWAccessControlPolicy& acl, optional_yield y) = 0; virtual int get_bucket_info(const DoutPrefixProvider* dpp, optional_yield y) = 0; - virtual int get_bucket_stats(int shard_id, + virtual int get_bucket_stats(const DoutPrefixProvider *dpp, int shard_id, std::string* bucket_ver, std::string* master_ver, std::map& stats, std::string* max_marker = nullptr, bool* syncstopped = nullptr) = 0; - virtual int get_bucket_stats_async(int shard_id, RGWGetBucketStats_CB* ctx) = 0; + virtual int get_bucket_stats_async(const 
DoutPrefixProvider *dpp, int shard_id, RGWGetBucketStats_CB* ctx) = 0; virtual int read_bucket_stats(const DoutPrefixProvider* dpp, optional_yield y) = 0; - virtual int sync_user_stats(optional_yield y) = 0; + virtual int sync_user_stats(const DoutPrefixProvider *dpp, optional_yield y) = 0; virtual int update_container_stats(const DoutPrefixProvider* dpp) = 0; virtual int check_bucket_shards(const DoutPrefixProvider* dpp) = 0; virtual int link(const DoutPrefixProvider* dpp, User* new_user, optional_yield y, bool update_entrypoint = true, RGWObjVersionTracker* objv = nullptr) = 0; @@ -402,14 +403,14 @@ class Bucket { virtual int check_quota(RGWQuotaInfo& user_quota, RGWQuotaInfo& bucket_quota, uint64_t obj_size, optional_yield y, bool check_size_only = false) = 0; virtual int set_instance_attrs(const DoutPrefixProvider* dpp, Attrs& attrs, optional_yield y) = 0; virtual int try_refresh_info(const DoutPrefixProvider* dpp, ceph::real_time* pmtime) = 0; - virtual int read_usage(uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, + virtual int read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, bool* is_truncated, RGWUsageIter& usage_iter, map& usage) = 0; - virtual int trim_usage(uint64_t start_epoch, uint64_t end_epoch) = 0; - virtual int remove_objs_from_index(std::list& objs_to_unlink) = 0; - virtual int check_index(std::map& existing_stats, std::map& calculated_stats) = 0; - virtual int rebuild_index() = 0; - virtual int set_tag_timeout(uint64_t timeout) = 0; + virtual int trim_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch) = 0; + virtual int remove_objs_from_index(const DoutPrefixProvider *dpp, std::list& objs_to_unlink) = 0; + virtual int check_index(const DoutPrefixProvider *dpp, std::map& existing_stats, std::map& calculated_stats) = 0; + virtual int rebuild_index(const DoutPrefixProvider *dpp) = 0; + virtual int set_tag_timeout(const DoutPrefixProvider *dpp, 
uint64_t timeout) = 0; virtual int purge_instance(const DoutPrefixProvider* dpp) = 0; bool empty() const { return info.bucket.name.empty(); } @@ -616,7 +617,7 @@ class Object { virtual ~StatOp() = default; - virtual int stat_async() = 0; + virtual int stat_async(const DoutPrefixProvider *dpp) = 0; virtual int wait() = 0; }; @@ -681,7 +682,7 @@ class Object { virtual void gen_rand_obj_instance_name() = 0; virtual void raw_obj_to_obj(const rgw_raw_obj& raw_obj) = 0; virtual void get_raw_obj(rgw_raw_obj* raw_obj) = 0; - virtual MPSerializer* get_serializer(const std::string& lock_name) = 0; + virtual MPSerializer* get_serializer(const DoutPrefixProvider *dpp, const std::string& lock_name) = 0; virtual int transition(RGWObjectCtx& rctx, Bucket* bucket, const rgw_placement_rule& placement_rule, @@ -734,15 +735,15 @@ class Object { virtual std::unique_ptr get_stat_op(RGWObjectCtx*) = 0; /* OMAP */ - virtual int omap_get_vals(const std::string& marker, uint64_t count, + virtual int omap_get_vals(const DoutPrefixProvider *dpp, const std::string& marker, uint64_t count, std::map* m, bool* pmore, optional_yield y) = 0; - virtual int omap_get_all(std::map* m, + virtual int omap_get_all(const DoutPrefixProvider *dpp, std::map* m, optional_yield y) = 0; - virtual int omap_get_vals_by_keys(const std::string& oid, + virtual int omap_get_vals_by_keys(const DoutPrefixProvider *dpp, const std::string& oid, const std::set& keys, Attrs* vals) = 0; - virtual int omap_set_val_by_key(const std::string& key, bufferlist& val, + virtual int omap_set_val_by_key(const DoutPrefixProvider *dpp, const std::string& key, bufferlist& val, bool must_exist, optional_yield y) = 0; static bool empty(Object* o) { return (!o || o->empty()); } @@ -779,7 +780,7 @@ struct Serializer { Serializer() = default; virtual ~Serializer() = default; - virtual int try_lock(utime_t dur, optional_yield y) = 0; + virtual int try_lock(const DoutPrefixProvider *dpp, utime_t dur, optional_yield y) = 0; virtual int unlock() 
= 0; }; @@ -842,7 +843,7 @@ protected: Notification(Object* _obj, rgw::notify::EventType _type) : obj(_obj), event_type(_type) {} virtual ~Notification() = default; - virtual int publish_reserve(RGWObjTags* obj_tags = nullptr) = 0; + virtual int publish_reserve(const DoutPrefixProvider *dpp, RGWObjTags* obj_tags = nullptr) = 0; virtual int publish_commit(const DoutPrefixProvider* dpp, uint64_t size, const ceph::real_time& mtime, const std::string& etag) = 0; }; @@ -855,9 +856,9 @@ protected: GCChain(Object* _obj) : obj(_obj) {} virtual ~GCChain() = default; - virtual void update(RGWObjManifest* manifest) = 0; + virtual void update(const DoutPrefixProvider *dpp, RGWObjManifest* manifest) = 0; virtual int send(const std::string& tag) = 0; - virtual void delete_inline(const std::string& tag) = 0; + virtual void delete_inline(const DoutPrefixProvider *dpp, const std::string& tag) = 0; }; using RawObjSet = std::set; diff --git a/src/rgw/rgw_sal_rados.cc b/src/rgw/rgw_sal_rados.cc index 8c42ca2108a..8c1326fc697 100644 --- a/src/rgw/rgw_sal_rados.cc +++ b/src/rgw/rgw_sal_rados.cc @@ -153,39 +153,40 @@ int RadosUser::read_attrs(const DoutPrefixProvider* dpp, optional_yield y) return store->ctl()->user->get_attrs_by_uid(dpp, get_id(), &attrs, y, &objv_tracker); } -int RadosUser::read_stats(optional_yield y, RGWStorageStats* stats, +int RadosUser::read_stats(const DoutPrefixProvider *dpp, + optional_yield y, RGWStorageStats* stats, ceph::real_time* last_stats_sync, ceph::real_time* last_stats_update) { - return store->ctl()->user->read_stats(get_id(), stats, y, last_stats_sync, last_stats_update); + return store->ctl()->user->read_stats(dpp, get_id(), stats, y, last_stats_sync, last_stats_update); } -int RadosUser::read_stats_async(RGWGetUserStats_CB* cb) +int RadosUser::read_stats_async(const DoutPrefixProvider *dpp, RGWGetUserStats_CB* cb) { - return store->ctl()->user->read_stats_async(get_id(), cb); + return store->ctl()->user->read_stats_async(dpp, get_id(), cb); } -int 
RadosUser::complete_flush_stats(optional_yield y) +int RadosUser::complete_flush_stats(const DoutPrefixProvider *dpp, optional_yield y) { - return store->ctl()->user->complete_flush_stats(get_id(), y); + return store->ctl()->user->complete_flush_stats(dpp, get_id(), y); } -int RadosUser::read_usage(uint64_t start_epoch, uint64_t end_epoch, +int RadosUser::read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, bool* is_truncated, RGWUsageIter& usage_iter, map& usage) { std::string bucket_name; - return store->getRados()->read_usage(get_id(), bucket_name, start_epoch, + return store->getRados()->read_usage(dpp, get_id(), bucket_name, start_epoch, end_epoch, max_entries, is_truncated, usage_iter, usage); } -int RadosUser::trim_usage(uint64_t start_epoch, uint64_t end_epoch) +int RadosUser::trim_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch) { std::string bucket_name; - return store->getRados()->trim_usage(get_id(), bucket_name, start_epoch, end_epoch); + return store->getRados()->trim_usage(dpp, get_id(), bucket_name, start_epoch, end_epoch); } int RadosUser::load_user(const DoutPrefixProvider* dpp, optional_yield y) @@ -261,7 +262,7 @@ int RadosBucket::remove_bucket(const DoutPrefixProvider* dpp, bool delete_childr } } - ret = store->ctl()->bucket->sync_user_stats(info.owner, info, y); + ret = store->ctl()->bucket->sync_user_stats(dpp, info.owner, info, y); if (ret < 0) { ldout(store->ctx(), 1) << "WARNING: failed sync user stats before bucket delete. 
ret=" << ret << dendl; } @@ -281,7 +282,7 @@ int RadosBucket::remove_bucket(const DoutPrefixProvider* dpp, bool delete_childr // they should be removed (note that any pending notifications on the bucket are still going to be sent) RGWPubSub ps(store, info.owner.tenant); RGWPubSub::Bucket ps_bucket(&ps, info.bucket); - const auto ps_ret = ps_bucket.remove_notifications(y); + const auto ps_ret = ps_bucket.remove_notifications(dpp, y); if (ps_ret < 0 && ps_ret != -ENOENT) { lderr(store->ctx()) << "ERROR: unable to remove notifications from bucket. ret=" << ps_ret << dendl; } @@ -293,7 +294,7 @@ int RadosBucket::remove_bucket(const DoutPrefixProvider* dpp, bool delete_childr if (forward_to_master) { bufferlist in_data; - ret = store->forward_request_to_master(owner, &ot.read_version, in_data, nullptr, *req_info, y); + ret = store->forward_request_to_master(dpp, owner, &ot.read_version, in_data, nullptr, *req_info, y); if (ret < 0) { if (ret == -ENOENT) { /* adjust error, we want to return with NoSuchBucket and not @@ -335,17 +336,17 @@ int RadosBucket::get_bucket_info(const DoutPrefixProvider* dpp, optional_yield y return ret; } -int RadosBucket::get_bucket_stats(int shard_id, +int RadosBucket::get_bucket_stats(const DoutPrefixProvider *dpp, int shard_id, std::string* bucket_ver, std::string* master_ver, std::map& stats, std::string* max_marker, bool* syncstopped) { - return store->getRados()->get_bucket_stats(info, shard_id, bucket_ver, master_ver, stats, max_marker, syncstopped); + return store->getRados()->get_bucket_stats(dpp, info, shard_id, bucket_ver, master_ver, stats, max_marker, syncstopped); } -int RadosBucket::get_bucket_stats_async(int shard_id, RGWGetBucketStats_CB* ctx) +int RadosBucket::get_bucket_stats_async(const DoutPrefixProvider *dpp, int shard_id, RGWGetBucketStats_CB* ctx) { - return store->getRados()->get_bucket_stats_async(get_info(), shard_id, ctx); + return store->getRados()->get_bucket_stats_async(dpp, get_info(), shard_id, ctx); } int 
RadosBucket::read_bucket_stats(const DoutPrefixProvider* dpp, optional_yield y) @@ -355,9 +356,9 @@ int RadosBucket::read_bucket_stats(const DoutPrefixProvider* dpp, optional_yield return ret; } -int RadosBucket::sync_user_stats(optional_yield y) +int RadosBucket::sync_user_stats(const DoutPrefixProvider *dpp, optional_yield y) { - return store->ctl()->bucket->sync_user_stats(owner->get_id(), info, y); + return store->ctl()->bucket->sync_user_stats(dpp, owner->get_id(), info, y); } int RadosBucket::update_container_stats(const DoutPrefixProvider* dpp) @@ -480,39 +481,39 @@ int RadosBucket::try_refresh_info(const DoutPrefixProvider* dpp, ceph::real_time return store->getRados()->try_refresh_bucket_info(info, pmtime, dpp, &attrs); } -int RadosBucket::read_usage(uint64_t start_epoch, uint64_t end_epoch, +int RadosBucket::read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, bool* is_truncated, RGWUsageIter& usage_iter, map& usage) { - return store->getRados()->read_usage(owner->get_id(), get_name(), start_epoch, + return store->getRados()->read_usage(dpp, owner->get_id(), get_name(), start_epoch, end_epoch, max_entries, is_truncated, usage_iter, usage); } -int RadosBucket::trim_usage(uint64_t start_epoch, uint64_t end_epoch) +int RadosBucket::trim_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch) { - return store->getRados()->trim_usage(owner->get_id(), get_name(), start_epoch, end_epoch); + return store->getRados()->trim_usage(dpp, owner->get_id(), get_name(), start_epoch, end_epoch); } -int RadosBucket::remove_objs_from_index(std::list& objs_to_unlink) +int RadosBucket::remove_objs_from_index(const DoutPrefixProvider *dpp, std::list& objs_to_unlink) { - return store->getRados()->remove_objs_from_index(info, objs_to_unlink); + return store->getRados()->remove_objs_from_index(dpp, info, objs_to_unlink); } -int RadosBucket::check_index(std::map& existing_stats, std::map& calculated_stats) 
+int RadosBucket::check_index(const DoutPrefixProvider *dpp, std::map& existing_stats, std::map& calculated_stats) { - return store->getRados()->bucket_check_index(info, &existing_stats, &calculated_stats); + return store->getRados()->bucket_check_index(dpp, info, &existing_stats, &calculated_stats); } -int RadosBucket::rebuild_index() +int RadosBucket::rebuild_index(const DoutPrefixProvider *dpp) { - return store->getRados()->bucket_rebuild_index(info); + return store->getRados()->bucket_rebuild_index(dpp, info); } -int RadosBucket::set_tag_timeout(uint64_t timeout) +int RadosBucket::set_tag_timeout(const DoutPrefixProvider *dpp, uint64_t timeout) { - return store->getRados()->cls_obj_set_bucket_tag_timeout(info, timeout); + return store->getRados()->cls_obj_set_bucket_tag_timeout(dpp, info, timeout); } int RadosBucket::purge_instance(const DoutPrefixProvider* dpp) @@ -753,7 +754,7 @@ int RadosStore::create_bucket(const DoutPrefixProvider* dpp, if (!svc()->zone->is_meta_master()) { JSONParser jp; - ret = forward_request_to_master(&u, NULL, in_data, &jp, req_info, y); + ret = forward_request_to_master(dpp, &u, NULL, in_data, &jp, req_info, y); if (ret < 0) { return ret; } @@ -785,7 +786,7 @@ int RadosStore::create_bucket(const DoutPrefixProvider* dpp, if (*existed) { rgw_placement_rule selected_placement_rule; - ret = svc()->zone->select_bucket_placement(u.get_info(), + ret = svc()->zone->select_bucket_placement(dpp, u.get_info(), zid, placement_rule, &selected_placement_rule, nullptr, y); if (selected_placement_rule != info.placement_rule) { @@ -821,7 +822,7 @@ bool RadosStore::is_meta_master() return svc()->zone->is_meta_master(); } -int RadosStore::forward_request_to_master(User* user, obj_version* objv, +int RadosStore::forward_request_to_master(const DoutPrefixProvider *dpp, User* user, obj_version* objv, bufferlist& in_data, JSONParser* jp, req_info& info, optional_yield y) @@ -832,22 +833,22 @@ int RadosStore::forward_request_to_master(User* user, 
obj_version* objv, } if (!svc()->zone->get_master_conn()) { - ldout(ctx(), 0) << "rest connection is invalid" << dendl; + ldpp_dout(dpp, 0) << "rest connection is invalid" << dendl; return -EINVAL; } - ldout(ctx(), 0) << "sending request to master zonegroup" << dendl; + ldpp_dout(dpp, 0) << "sending request to master zonegroup" << dendl; bufferlist response; std::string uid_str = user->get_id().to_str(); #define MAX_REST_RESPONSE (128 * 1024) // we expect a very small response - int ret = svc()->zone->get_master_conn()->forward(rgw_user(uid_str), info, + int ret = svc()->zone->get_master_conn()->forward(dpp, rgw_user(uid_str), info, objv, MAX_REST_RESPONSE, &in_data, &response, y); if (ret < 0) return ret; - ldout(ctx(), 20) << "response: " << response.c_str() << dendl; + ldpp_dout(dpp, 20) << "response: " << response.c_str() << dendl; if (jp && !jp->parse(response.c_str(), response.length())) { - ldout(ctx(), 0) << "failed parsing response from master zonegroup" << dendl; + ldpp_dout(dpp, 0) << "failed parsing response from master zonegroup" << dendl; return -EINVAL; } @@ -900,7 +901,7 @@ std::unique_ptr RadosStore::get_notification(rgw::sal::Object* obj struct req_state* s, rgw::notify::EventType event_type) { - return std::unique_ptr(new RadosNotification(this, obj, s, event_type)); + return std::unique_ptr(new RadosNotification(s, this, obj, s, event_type)); } std::unique_ptr RadosStore::get_gc_chain(rgw::sal::Object* obj) @@ -915,16 +916,16 @@ std::unique_ptr RadosStore::get_writer(Aio* aio, rgw::sal::Bucket* bucke return std::unique_ptr(new RadosWriter(aio, this, bucket, obj_ctx, std::move(_head_obj), dpp, y)); } -int RadosStore::delete_raw_obj(const rgw_raw_obj& obj) +int RadosStore::delete_raw_obj(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj) { - return rados->delete_raw_obj(obj); + return rados->delete_raw_obj(dpp, obj); } -int RadosStore::delete_raw_obj_aio(const rgw_raw_obj& obj, Completions* aio) +int RadosStore::delete_raw_obj_aio(const 
DoutPrefixProvider *dpp, const rgw_raw_obj& obj, Completions* aio) { RadosCompletions* raio = static_cast(aio); - return rados->delete_raw_obj_aio(obj, raio->handles); + return rados->delete_raw_obj_aio(dpp, obj, raio->handles); } void RadosStore::get_raw_obj(const rgw_placement_rule& placement_rule, const rgw_obj& obj, rgw_raw_obj* raw_obj) @@ -937,22 +938,22 @@ int RadosStore::get_raw_chunk_size(const DoutPrefixProvider* dpp, const rgw_raw_ return rados->get_max_chunk_size(obj.pool, chunk_size, dpp); } -int RadosStore::log_usage(map& usage_info) +int RadosStore::log_usage(const DoutPrefixProvider *dpp, map& usage_info) { - return rados->log_usage(usage_info); + return rados->log_usage(dpp, usage_info); } -int RadosStore::log_op(std::string& oid, bufferlist& bl) +int RadosStore::log_op(const DoutPrefixProvider *dpp, std::string& oid, bufferlist& bl) { rgw_raw_obj obj(svc()->zone->get_zone_params().log_pool, oid); - int ret = rados->append_async(obj, bl.length(), bl); + int ret = rados->append_async(dpp, obj, bl.length(), bl); if (ret == -ENOENT) { - ret = rados->create_pool(svc()->zone->get_zone_params().log_pool); + ret = rados->create_pool(dpp, svc()->zone->get_zone_params().log_pool); if (ret < 0) return ret; // retry - ret = rados->append_async(obj, bl.length(), bl); + ret = rados->append_async(dpp, obj, bl.length(), bl); } return ret; @@ -989,7 +990,7 @@ RGWDataSyncStatusManager* RadosStore::get_data_sync_manager(const rgw_zone_id& s return rados->get_data_sync_manager(source_zone); } -int RadosStore::read_all_usage(uint64_t start_epoch, uint64_t end_epoch, +int RadosStore::read_all_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, bool* is_truncated, RGWUsageIter& usage_iter, map& usage) @@ -997,16 +998,16 @@ int RadosStore::read_all_usage(uint64_t start_epoch, uint64_t end_epoch, rgw_user uid; std::string bucket_name; - return rados->read_usage(uid, bucket_name, start_epoch, end_epoch, max_entries, + return 
rados->read_usage(dpp, uid, bucket_name, start_epoch, end_epoch, max_entries, is_truncated, usage_iter, usage); } -int RadosStore::trim_all_usage(uint64_t start_epoch, uint64_t end_epoch) +int RadosStore::trim_all_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch) { rgw_user uid; std::string bucket_name; - return rados->trim_usage(uid, bucket_name, start_epoch, end_epoch); + return rados->trim_usage(dpp, uid, bucket_name, start_epoch, end_epoch); } int RadosStore::get_config_key_val(std::string name, bufferlist* bl) @@ -1014,9 +1015,9 @@ int RadosStore::get_config_key_val(std::string name, bufferlist* bl) return svc()->config_key->get(name, true, bl); } -int RadosStore::meta_list_keys_init(const std::string& section, const std::string& marker, void** phandle) +int RadosStore::meta_list_keys_init(const DoutPrefixProvider *dpp, const std::string& section, const std::string& marker, void** phandle) { - return ctl()->meta.mgr->list_keys_init(section, marker, phandle); + return ctl()->meta.mgr->list_keys_init(dpp, section, marker, phandle); } int RadosStore::meta_list_keys_next(void* handle, int max, list& keys, bool* truncated) @@ -1086,7 +1087,7 @@ int RadosStore::get_roles(const DoutPrefixProvider *dpp, RGWListRawObjsCtx ctx; do { list oids; - int r = rados->list_raw_objects(pool, prefix, 1000, ctx, oids, &is_truncated); + int r = rados->list_raw_objects(dpp, pool, prefix, 1000, ctx, oids, &is_truncated); if (r < 0) { ldpp_dout(dpp, 0) << "ERROR: listing filtered objects failed: " << prefix << ": " << cpp_strerror(-r) << dendl; @@ -1143,7 +1144,7 @@ int RadosStore::get_oidc_providers(const DoutPrefixProvider *dpp, RGWListRawObjsCtx ctx; do { list oids; - int r = rados->list_raw_objects(pool, prefix, 1000, ctx, oids, &is_truncated); + int r = rados->list_raw_objects(dpp, pool, prefix, 1000, ctx, oids, &is_truncated); if (r < 0) { ldpp_dout(dpp, 0) << "ERROR: listing filtered objects failed: OIDC pool: " << pool.name << ": " << prefix << ": 
" << cpp_strerror(-r) << dendl; @@ -1175,9 +1176,9 @@ int RadosStore::get_oidc_providers(const DoutPrefixProvider *dpp, return 0; } -int RadosStore::get_obj_head_ioctx(const RGWBucketInfo& bucket_info, const rgw_obj& obj, librados::IoCtx* ioctx) +int RadosStore::get_obj_head_ioctx(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, librados::IoCtx* ioctx) { - return rados->get_obj_head_ioctx(bucket_info, obj, ioctx); + return rados->get_obj_head_ioctx(dpp, bucket_info, obj, ioctx); } int Object::range_to_ofs(uint64_t obj_size, int64_t &ofs, int64_t &end) @@ -1338,7 +1339,7 @@ void RadosObject::get_raw_obj(rgw_raw_obj* raw_obj) store->getRados()->obj_to_raw((bucket->get_info()).placement_rule, get_obj(), raw_obj); } -int RadosObject::omap_get_vals(const std::string& marker, uint64_t count, +int RadosObject::omap_get_vals(const DoutPrefixProvider *dpp, const std::string& marker, uint64_t count, std::map *m, bool* pmore, optional_yield y) { @@ -1347,10 +1348,10 @@ int RadosObject::omap_get_vals(const std::string& marker, uint64_t count, get_raw_obj(&raw_obj); auto sysobj = obj_ctx.get_obj(raw_obj); - return sysobj.omap().get_vals(marker, count, m, pmore, y); + return sysobj.omap().get_vals(dpp, marker, count, m, pmore, y); } -int RadosObject::omap_get_all(std::map *m, +int RadosObject::omap_get_all(const DoutPrefixProvider *dpp, std::map *m, optional_yield y) { auto obj_ctx = store->svc()->sysobj->init_obj_ctx(); @@ -1358,10 +1359,10 @@ int RadosObject::omap_get_all(std::map *m, get_raw_obj(&raw_obj); auto sysobj = obj_ctx.get_obj(raw_obj); - return sysobj.omap().get_all(m, y); + return sysobj.omap().get_all(dpp, m, y); } -int RadosObject::omap_get_vals_by_keys(const std::string& oid, +int RadosObject::omap_get_vals_by_keys(const DoutPrefixProvider *dpp, const std::string& oid, const std::set& keys, Attrs* vals) { @@ -1371,7 +1372,7 @@ int RadosObject::omap_get_vals_by_keys(const std::string& oid, rgw_obj obj = get_obj(); 
store->getRados()->obj_to_raw(bucket->get_placement_rule(), obj, &head_obj); - ret = store->get_obj_head_ioctx(bucket->get_info(), obj, &cur_ioctx); + ret = store->get_obj_head_ioctx(dpp, bucket->get_info(), obj, &cur_ioctx); if (ret < 0) { return ret; } @@ -1379,7 +1380,7 @@ int RadosObject::omap_get_vals_by_keys(const std::string& oid, return cur_ioctx.omap_get_vals_by_keys(oid, keys, vals); } -int RadosObject::omap_set_val_by_key(const std::string& key, bufferlist& val, +int RadosObject::omap_set_val_by_key(const DoutPrefixProvider *dpp, const std::string& key, bufferlist& val, bool must_exist, optional_yield y) { rgw_raw_obj raw_meta_obj; @@ -1390,12 +1391,12 @@ int RadosObject::omap_set_val_by_key(const std::string& key, bufferlist& val, auto obj_ctx = store->svc()->sysobj->init_obj_ctx(); auto sysobj = obj_ctx.get_obj(raw_meta_obj); - return sysobj.omap().set_must_exist(must_exist).set(key, val, y); + return sysobj.omap().set_must_exist(must_exist).set(dpp, key, val, y); } -MPSerializer* RadosObject::get_serializer(const std::string& lock_name) +MPSerializer* RadosObject::get_serializer(const DoutPrefixProvider *dpp, const std::string& lock_name) { - return new MPRadosSerializer(store, this, lock_name); + return new MPRadosSerializer(dpp, store, this, lock_name); } int RadosObject::transition(RGWObjectCtx& rctx, @@ -1576,9 +1577,9 @@ RadosObject::RadosStatOp::RadosStatOp(RadosObject *_source, RGWObjectCtx *_rctx) parent_op(&op_target) { } -int RadosObject::RadosStatOp::stat_async() +int RadosObject::RadosStatOp::stat_async(const DoutPrefixProvider *dpp) { - return parent_op.stat_async(); + return parent_op.stat_async(dpp); } int RadosObject::RadosStatOp::wait() @@ -1741,7 +1742,7 @@ int RadosObject::swift_versioning_copy(RGWObjectCtx* obj_ctx, y); } -MPRadosSerializer::MPRadosSerializer(RadosStore* store, RadosObject* obj, const std::string& lock_name) : +MPRadosSerializer::MPRadosSerializer(const DoutPrefixProvider *dpp, RadosStore* store, RadosObject* obj, 
const std::string& lock_name) : lock(lock_name) { rgw_pool meta_pool; @@ -1751,15 +1752,15 @@ MPRadosSerializer::MPRadosSerializer(RadosStore* store, RadosObject* obj, const oid = raw_obj.oid; store->getRados()->get_obj_data_pool(obj->get_bucket()->get_placement_rule(), obj->get_obj(), &meta_pool); - store->getRados()->open_pool_ctx(meta_pool, ioctx, true); + store->getRados()->open_pool_ctx(dpp, meta_pool, ioctx, true); } -int MPRadosSerializer::try_lock(utime_t dur, optional_yield y) +int MPRadosSerializer::try_lock(const DoutPrefixProvider *dpp, utime_t dur, optional_yield y) { op.assert_exists(); lock.set_duration(dur); lock.lock_exclusive(&op); - int ret = rgw_rados_operate(ioctx, oid, &op, y); + int ret = rgw_rados_operate(dpp, ioctx, oid, &op, y); if (! ret) { locked = true; } @@ -1773,7 +1774,7 @@ LCRadosSerializer::LCRadosSerializer(RadosStore* store, const std::string& _oid, lock.set_cookie(cookie); } -int LCRadosSerializer::try_lock(utime_t dur, optional_yield y) +int LCRadosSerializer::try_lock(const DoutPrefixProvider *dpp, utime_t dur, optional_yield y) { lock.set_duration(dur); return lock.lock_exclusive(ioctx, oid); @@ -1872,9 +1873,9 @@ LCSerializer* RadosLifecycle::get_serializer(const std::string& lock_name, const return new LCRadosSerializer(store, oid, lock_name, cookie); } -int RadosNotification::publish_reserve(RGWObjTags* obj_tags) +int RadosNotification::publish_reserve(const DoutPrefixProvider *dpp, RGWObjTags* obj_tags) { - return rgw::notify::publish_reserve(event_type, res, obj_tags); + return rgw::notify::publish_reserve(dpp, event_type, res, obj_tags); } int RadosNotification::publish_commit(const DoutPrefixProvider* dpp, uint64_t size, @@ -1883,10 +1884,10 @@ int RadosNotification::publish_commit(const DoutPrefixProvider* dpp, uint64_t si return rgw::notify::publish_commit(obj, size, mtime, etag, event_type, res, dpp); } -void RadosGCChain::update(RGWObjManifest* manifest) +void RadosGCChain::update(const DoutPrefixProvider *dpp, 
RGWObjManifest* manifest) { rgw_obj target = obj->get_obj(); - store->getRados()->update_gc_chain(target, *manifest, &chain); + store->getRados()->update_gc_chain(dpp, target, *manifest, &chain); } int RadosGCChain::send(const std::string& tag) @@ -1894,15 +1895,15 @@ int RadosGCChain::send(const std::string& tag) return store->getRados()->send_chain_to_gc(chain, tag); } -void RadosGCChain::delete_inline(const std::string& tag) +void RadosGCChain::delete_inline(const DoutPrefixProvider *dpp, const std::string& tag) { - store->getRados()->delete_objs_inline(chain, tag); + store->getRados()->delete_objs_inline(dpp, chain, tag); } int RadosWriter::set_stripe_obj(const rgw_raw_obj& raw_obj) { stripe_obj = store->svc()->rados->obj(raw_obj); - return stripe_obj.open(); + return stripe_obj.open(dpp); } int RadosWriter::process(bufferlist&& bl, uint64_t offset) @@ -1974,7 +1975,7 @@ RadosWriter::~RadosWriter() continue; } - int r = store->delete_raw_obj(obj); + int r = store->delete_raw_obj(dpp, obj); if (r < 0 && r != -ENOENT) { ldpp_dout(dpp, 0) << "WARNING: failed to remove obj (" << obj << "), leaked" << dendl; } @@ -2068,7 +2069,7 @@ int RadosLuaScriptManager::put(const DoutPrefixProvider* dpp, optional_yield y, bufferlist bl; ceph::encode(script, bl); - int r = rgw_put_system_obj(obj_ctx, pool, key, bl, false, nullptr, real_time(), y); + int r = rgw_put_system_obj(dpp, obj_ctx, pool, key, bl, false, nullptr, real_time(), y); if (r < 0) { return r; } @@ -2078,7 +2079,7 @@ int RadosLuaScriptManager::put(const DoutPrefixProvider* dpp, optional_yield y, int RadosLuaScriptManager::del(const DoutPrefixProvider* dpp, optional_yield y, const std::string& key) { - int r = rgw_delete_system_obj(store->svc()->sysobj, pool, key, nullptr, y); + int r = rgw_delete_system_obj(dpp, store->svc()->sysobj, pool, key, nullptr, y); if (r < 0 && r != -ENOENT) { return r; } @@ -2094,7 +2095,7 @@ int RadosOIDCProvider::store_url(const DoutPrefixProvider *dpp, const std::strin bufferlist bl; 
using ceph::encode; encode(*this, bl); - return rgw_put_system_obj(obj_ctx, store->get_zone()->get_params().oidc_pool, oid, bl, exclusive, nullptr, real_time(), y); + return rgw_put_system_obj(dpp, obj_ctx, store->get_zone()->get_params().oidc_pool, oid, bl, exclusive, nullptr, real_time(), y); } int RadosOIDCProvider::read_url(const DoutPrefixProvider *dpp, const std::string& url, const std::string& tenant) @@ -2141,7 +2142,7 @@ int RadosOIDCProvider::delete_obj(const DoutPrefixProvider *dpp, optional_yield // Delete url std::string oid = tenant + get_url_oid_prefix() + url; - ret = rgw_delete_system_obj(store->svc()->sysobj, pool, oid, nullptr, y); + ret = rgw_delete_system_obj(dpp, store->svc()->sysobj, pool, oid, nullptr, y); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: deleting oidc url from pool: " << pool.name << ": " << provider_url << ": " << cpp_strerror(-ret) << dendl; @@ -2159,7 +2160,7 @@ int RadosRole::store_info(const DoutPrefixProvider *dpp, bool exclusive, optiona bufferlist bl; encode(*this, bl); - return rgw_put_system_obj(obj_ctx, store->get_zone()->get_params().roles_pool, oid, bl, exclusive, nullptr, real_time(), y); + return rgw_put_system_obj(dpp, obj_ctx, store->get_zone()->get_params().roles_pool, oid, bl, exclusive, nullptr, real_time(), y); } int RadosRole::store_name(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y) @@ -2174,7 +2175,7 @@ int RadosRole::store_name(const DoutPrefixProvider *dpp, bool exclusive, optiona using ceph::encode; encode(nameToId, bl); - return rgw_put_system_obj(obj_ctx, store->get_zone()->get_params().roles_pool, oid, bl, exclusive, nullptr, real_time(), y); + return rgw_put_system_obj(dpp, obj_ctx, store->get_zone()->get_params().roles_pool, oid, bl, exclusive, nullptr, real_time(), y); } int RadosRole::store_path(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y) @@ -2184,7 +2185,7 @@ int RadosRole::store_path(const DoutPrefixProvider *dpp, bool exclusive, optiona bufferlist bl; - 
return rgw_put_system_obj(obj_ctx, store->get_zone()->get_params().roles_pool, oid, bl, exclusive, nullptr, real_time(), y); + return rgw_put_system_obj(dpp, obj_ctx, store->get_zone()->get_params().roles_pool, oid, bl, exclusive, nullptr, real_time(), y); } int RadosRole::read_id(const DoutPrefixProvider *dpp, const std::string& role_name, const std::string& tenant, std::string& role_id, optional_yield y) @@ -2319,7 +2320,7 @@ int RadosRole::create(const DoutPrefixProvider *dpp, bool exclusive, optional_yi //Delete the role info that was stored in the previous call std::string oid = get_info_oid_prefix() + id; - int info_ret = rgw_delete_system_obj(store->svc()->sysobj, pool, oid, nullptr, y); + int info_ret = rgw_delete_system_obj(dpp, store->svc()->sysobj, pool, oid, nullptr, y); if (info_ret < 0) { ldpp_dout(dpp, 0) << "ERROR: cleanup of role id from Role pool: " << id << ": " << cpp_strerror(-info_ret) << dendl; @@ -2333,14 +2334,14 @@ int RadosRole::create(const DoutPrefixProvider *dpp, bool exclusive, optional_yi << path << ": " << cpp_strerror(-ret) << dendl; //Delete the role info that was stored in the previous call std::string oid = get_info_oid_prefix() + id; - int info_ret = rgw_delete_system_obj(store->svc()->sysobj, pool, oid, nullptr, y); + int info_ret = rgw_delete_system_obj(dpp, store->svc()->sysobj, pool, oid, nullptr, y); if (info_ret < 0) { ldpp_dout(dpp, 0) << "ERROR: cleanup of role id from Role pool: " << id << ": " << cpp_strerror(-info_ret) << dendl; } //Delete role name that was stored in previous call oid = tenant + get_names_oid_prefix() + name; - int name_ret = rgw_delete_system_obj(store->svc()->sysobj, pool, oid, nullptr, y); + int name_ret = rgw_delete_system_obj(dpp, store->svc()->sysobj, pool, oid, nullptr, y); if (name_ret < 0) { ldpp_dout(dpp, 0) << "ERROR: cleanup of role name from Role pool: " << name << ": " << cpp_strerror(-name_ret) << dendl; @@ -2370,7 +2371,7 @@ int RadosRole::delete_obj(const DoutPrefixProvider *dpp, 
optional_yield y) // Delete id std::string oid = get_info_oid_prefix() + id; - ret = rgw_delete_system_obj(store->svc()->sysobj, pool, oid, nullptr, y); + ret = rgw_delete_system_obj(dpp, store->svc()->sysobj, pool, oid, nullptr, y); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: deleting role id from Role pool: " << id << ": " << cpp_strerror(-ret) << dendl; @@ -2378,7 +2379,7 @@ int RadosRole::delete_obj(const DoutPrefixProvider *dpp, optional_yield y) // Delete name oid = tenant + get_names_oid_prefix() + name; - ret = rgw_delete_system_obj(store->svc()->sysobj, pool, oid, nullptr, y); + ret = rgw_delete_system_obj(dpp, store->svc()->sysobj, pool, oid, nullptr, y); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: deleting role name from Role pool: " << name << ": " << cpp_strerror(-ret) << dendl; @@ -2386,7 +2387,7 @@ int RadosRole::delete_obj(const DoutPrefixProvider *dpp, optional_yield y) // Delete path oid = tenant + get_path_oid_prefix() + path + get_info_oid_prefix() + id; - ret = rgw_delete_system_obj(store->svc()->sysobj, pool, oid, nullptr, y); + ret = rgw_delete_system_obj(dpp, store->svc()->sysobj, pool, oid, nullptr, y); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: deleting role path from Role pool: " << path << ": " << cpp_strerror(-ret) << dendl; diff --git a/src/rgw/rgw_sal_rados.h b/src/rgw/rgw_sal_rados.h index 69cd9c50b5a..bc79ec83cbf 100644 --- a/src/rgw/rgw_sal_rados.h +++ b/src/rgw/rgw_sal_rados.h @@ -53,15 +53,16 @@ class RadosUser : public User { optional_yield y) override; virtual Bucket* create_bucket(rgw_bucket& bucket, ceph::real_time creation_time) override; virtual int read_attrs(const DoutPrefixProvider* dpp, optional_yield y) override; - virtual int read_stats(optional_yield y, RGWStorageStats* stats, + virtual int read_stats(const DoutPrefixProvider *dpp, + optional_yield y, RGWStorageStats* stats, ceph::real_time* last_stats_sync = nullptr, ceph::real_time* last_stats_update = nullptr) override; - virtual int 
read_stats_async(RGWGetUserStats_CB* cb) override; - virtual int complete_flush_stats(optional_yield y) override; - virtual int read_usage(uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, + virtual int read_stats_async(const DoutPrefixProvider *dpp, RGWGetUserStats_CB* cb) override; + virtual int complete_flush_stats(const DoutPrefixProvider *dpp, optional_yield y) override; + virtual int read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, bool* is_truncated, RGWUsageIter& usage_iter, map& usage) override; - virtual int trim_usage(uint64_t start_epoch, uint64_t end_epoch) override; + virtual int trim_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch) override; virtual int load_user(const DoutPrefixProvider* dpp, optional_yield y) override; virtual int store_user(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, RGWUserInfo* old_info = nullptr) override; @@ -132,7 +133,7 @@ class RadosObject : public Object { public: RadosStatOp(RadosObject* _source, RGWObjectCtx* _rctx); - virtual int stat_async() override; + virtual int stat_async(const DoutPrefixProvider *dpp) override; virtual int wait() override; }; @@ -186,7 +187,7 @@ class RadosObject : public Object { virtual std::unique_ptr clone() override { return std::unique_ptr(new RadosObject(*this)); } - virtual MPSerializer* get_serializer(const std::string& lock_name) override; + virtual MPSerializer* get_serializer(const DoutPrefixProvider *dpp, const std::string& lock_name) override; virtual int transition(RGWObjectCtx& rctx, Bucket* bucket, const rgw_placement_rule& placement_rule, @@ -216,15 +217,15 @@ class RadosObject : public Object { virtual std::unique_ptr get_stat_op(RGWObjectCtx*) override; /* OMAP */ - virtual int omap_get_vals(const std::string& marker, uint64_t count, + virtual int omap_get_vals(const DoutPrefixProvider *dpp, const std::string& marker, uint64_t count, std::map *m, bool* 
pmore, optional_yield y) override; - virtual int omap_get_all(std::map *m, + virtual int omap_get_all(const DoutPrefixProvider *dpp, std::map *m, optional_yield y) override; - virtual int omap_get_vals_by_keys(const std::string& oid, + virtual int omap_get_vals_by_keys(const DoutPrefixProvider *dpp, const std::string& oid, const std::set& keys, Attrs* vals) override; - virtual int omap_set_val_by_key(const std::string& key, bufferlist& val, + virtual int omap_set_val_by_key(const DoutPrefixProvider *dpp, const std::string& key, bufferlist& val, bool must_exist, optional_yield y) override; private: @@ -293,14 +294,14 @@ class RadosBucket : public Bucket { virtual RGWAccessControlPolicy& get_acl(void) override { return acls; } virtual int set_acl(const DoutPrefixProvider* dpp, RGWAccessControlPolicy& acl, optional_yield y) override; virtual int get_bucket_info(const DoutPrefixProvider* dpp, optional_yield y) override; - virtual int get_bucket_stats(int shard_id, + virtual int get_bucket_stats(const DoutPrefixProvider *dpp, int shard_id, std::string* bucket_ver, std::string* master_ver, std::map& stats, std::string* max_marker = nullptr, bool* syncstopped = nullptr) override; - virtual int get_bucket_stats_async(int shard_id, RGWGetBucketStats_CB* ctx) override; + virtual int get_bucket_stats_async(const DoutPrefixProvider *dpp, int shard_id, RGWGetBucketStats_CB* ctx) override; virtual int read_bucket_stats(const DoutPrefixProvider* dpp, optional_yield y) override; - virtual int sync_user_stats(optional_yield y) override; + virtual int sync_user_stats(const DoutPrefixProvider *dpp, optional_yield y) override; virtual int update_container_stats(const DoutPrefixProvider* dpp) override; virtual int check_bucket_shards(const DoutPrefixProvider* dpp) override; virtual int link(const DoutPrefixProvider* dpp, User* new_user, optional_yield y, bool update_entrypoint, RGWObjVersionTracker* objv) override; @@ -314,14 +315,14 @@ class RadosBucket : public Bucket { virtual int 
check_quota(RGWQuotaInfo& user_quota, RGWQuotaInfo& bucket_quota, uint64_t obj_size, optional_yield y, bool check_size_only = false) override; virtual int set_instance_attrs(const DoutPrefixProvider* dpp, Attrs& attrs, optional_yield y) override; virtual int try_refresh_info(const DoutPrefixProvider* dpp, ceph::real_time* pmtime) override; - virtual int read_usage(uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, + virtual int read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, bool* is_truncated, RGWUsageIter& usage_iter, map& usage) override; - virtual int trim_usage(uint64_t start_epoch, uint64_t end_epoch) override; - virtual int remove_objs_from_index(std::list& objs_to_unlink) override; - virtual int check_index(std::map& existing_stats, std::map& calculated_stats) override; - virtual int rebuild_index() override; - virtual int set_tag_timeout(uint64_t timeout) override; + virtual int trim_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch) override; + virtual int remove_objs_from_index(const DoutPrefixProvider *dpp, std::list& objs_to_unlink) override; + virtual int check_index(const DoutPrefixProvider *dpp, std::map& existing_stats, std::map& calculated_stats) override; + virtual int rebuild_index(const DoutPrefixProvider *dpp) override; + virtual int set_tag_timeout(const DoutPrefixProvider *dpp, uint64_t timeout) override; virtual int purge_instance(const DoutPrefixProvider* dpp) override; virtual std::unique_ptr clone() override { return std::make_unique(*this); @@ -389,7 +390,7 @@ class RadosStore : public Store { std::unique_ptr* bucket, optional_yield y) override; virtual bool is_meta_master() override; - virtual int forward_request_to_master(User* user, obj_version* objv, + virtual int forward_request_to_master(const DoutPrefixProvider *dpp, User* user, obj_version* objv, bufferlist& in_data, JSONParser* jp, req_info& info, optional_yield y) override; 
virtual int defer_gc(const DoutPrefixProvider* dpp, RGWObjectCtx* rctx, Bucket* bucket, Object* obj, @@ -407,13 +408,13 @@ class RadosStore : public Store { const DoutPrefixProvider* dpp, optional_yield y) override; virtual RGWLC* get_rgwlc(void) override { return rados->get_lc(); } virtual RGWCoroutinesManagerRegistry* get_cr_registry() override { return rados->get_cr_registry(); } - virtual int delete_raw_obj(const rgw_raw_obj& obj) override; - virtual int delete_raw_obj_aio(const rgw_raw_obj& obj, Completions* aio) override; + virtual int delete_raw_obj(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj) override; + virtual int delete_raw_obj_aio(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, Completions* aio) override; virtual void get_raw_obj(const rgw_placement_rule& placement_rule, const rgw_obj& obj, rgw_raw_obj* raw_obj) override; virtual int get_raw_chunk_size(const DoutPrefixProvider* dpp, const rgw_raw_obj& obj, uint64_t* chunk_size) override; - virtual int log_usage(map& usage_info) override; - virtual int log_op(std::string& oid, bufferlist& bl) override; + virtual int log_usage(const DoutPrefixProvider *dpp, map& usage_info) override; + virtual int log_op(const DoutPrefixProvider *dpp, std::string& oid, bufferlist& bl) override; virtual int register_to_service_map(const std::string& daemon_type, const map& meta) override; virtual void get_quota(RGWQuotaInfo& bucket_quota, RGWQuotaInfo& user_quota) override; @@ -427,14 +428,14 @@ class RadosStore : public Store { virtual RGWDataSyncStatusManager* get_data_sync_manager(const rgw_zone_id& source_zone) override; virtual void wakeup_meta_sync_shards(set& shard_ids) override { rados->wakeup_meta_sync_shards(shard_ids); } virtual void wakeup_data_sync_shards(const rgw_zone_id& source_zone, map >& shard_ids) override { rados->wakeup_data_sync_shards(source_zone, shard_ids); } - virtual int clear_usage() override { return rados->clear_usage(); } - virtual int read_all_usage(uint64_t start_epoch, 
uint64_t end_epoch, + virtual int clear_usage(const DoutPrefixProvider *dpp) override { return rados->clear_usage(dpp); } + virtual int read_all_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, bool* is_truncated, RGWUsageIter& usage_iter, map& usage) override; - virtual int trim_all_usage(uint64_t start_epoch, uint64_t end_epoch) override; + virtual int trim_all_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch) override; virtual int get_config_key_val(std::string name, bufferlist* bl) override; - virtual int meta_list_keys_init(const std::string& section, const std::string& marker, void** phandle) override; + virtual int meta_list_keys_init(const DoutPrefixProvider *dpp, const std::string& section, const std::string& marker, void** phandle) override; virtual int meta_list_keys_next(void* handle, int max, list& keys, bool* truncated) override; virtual void meta_list_keys_complete(void* handle) override; virtual std::string meta_get_marker(void* handle) override; @@ -471,7 +472,7 @@ class RadosStore : public Store { } /* Unique to RadosStore */ - int get_obj_head_ioctx(const RGWBucketInfo& bucket_info, const rgw_obj& obj, + int get_obj_head_ioctx(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, librados::IoCtx* ioctx); void setRados(RGWRados * st) { rados = st; } @@ -491,9 +492,9 @@ class MPRadosSerializer : public MPSerializer { librados::ObjectWriteOperation op; public: - MPRadosSerializer(RadosStore* store, RadosObject* obj, const std::string& lock_name); + MPRadosSerializer(const DoutPrefixProvider *dpp, RadosStore* store, RadosObject* obj, const std::string& lock_name); - virtual int try_lock(utime_t dur, optional_yield y) override; + virtual int try_lock(const DoutPrefixProvider *dpp, utime_t dur, optional_yield y) override; virtual int unlock() override { return lock.unlock(&ioctx, oid); } @@ -507,7 +508,7 @@ class LCRadosSerializer : public 
LCSerializer { public: LCRadosSerializer(RadosStore* store, const std::string& oid, const std::string& lock_name, const std::string& cookie); - virtual int try_lock(utime_t dur, optional_yield y) override; + virtual int try_lock(const DoutPrefixProvider *dpp, utime_t dur, optional_yield y) override; virtual int unlock() override { return lock.unlock(ioctx, oid); } @@ -535,10 +536,10 @@ class RadosNotification : public Notification { rgw::notify::reservation_t res; public: - RadosNotification(RadosStore* _store, Object* _obj, req_state* _s, rgw::notify::EventType _type) : Notification(_obj, _type), store(_store), res(_store, _s, _obj) { } + RadosNotification(const DoutPrefixProvider *_dpp, RadosStore* _store, Object* _obj, req_state* _s, rgw::notify::EventType _type) : Notification(_obj, _type), store(_store), res(_dpp, _store, _s, _obj) { } ~RadosNotification() = default; - virtual int publish_reserve(RGWObjTags* obj_tags = nullptr) override; + virtual int publish_reserve(const DoutPrefixProvider *dpp, RGWObjTags* obj_tags = nullptr) override; virtual int publish_commit(const DoutPrefixProvider* dpp, uint64_t size, const ceph::real_time& mtime, const std::string& etag) override; }; @@ -552,9 +553,9 @@ protected: RadosGCChain(RadosStore* _store, Object* _obj) : GCChain(_obj), store(_store) {} ~RadosGCChain() = default; - virtual void update(RGWObjManifest* manifest) override; + virtual void update(const DoutPrefixProvider *dpp, RGWObjManifest* manifest) override; virtual int send(const std::string& tag) override; - virtual void delete_inline(const std::string& tag) override; + virtual void delete_inline(const DoutPrefixProvider *dpp, const std::string& tag) override; }; class RadosWriter : public Writer { diff --git a/src/rgw/rgw_service.cc b/src/rgw/rgw_service.cc index 61599ad5634..7cb91e4771a 100644 --- a/src/rgw/rgw_service.cc +++ b/src/rgw/rgw_service.cc @@ -74,7 +74,7 @@ int RGWServices_Def::init(CephContext *cct, user_rados = std::make_unique(cct); if 
(have_cache) { - sysobj_cache = std::make_unique(cct); + sysobj_cache = std::make_unique(dpp, cct); } vector meta_bes{meta_be_sobj.get(), meta_be_otp.get()}; @@ -141,11 +141,11 @@ int RGWServices_Def::init(CephContext *cct, return r; } - r = datalog_rados->start(&zone->get_zone(), + r = datalog_rados->start(dpp, &zone->get_zone(), zone->get_zone_params(), rados->get_rados_handle()); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to start datalog_rados service (" << cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to start datalog_rados service (" << cpp_strerror(-r) << dendl; return r; } diff --git a/src/rgw/rgw_sync.cc b/src/rgw/rgw_sync.cc index 3a5c059a2da..5c652e07e8e 100644 --- a/src/rgw/rgw_sync.cc +++ b/src/rgw/rgw_sync.cc @@ -51,7 +51,7 @@ string RGWSyncErrorLogger::get_shard_oid(const string& oid_prefix, int shard_id) return string(buf); } -RGWCoroutine *RGWSyncErrorLogger::log_error_cr(const string& source_zone, const string& section, const string& name, uint32_t error_code, const string& message) { +RGWCoroutine *RGWSyncErrorLogger::log_error_cr(const DoutPrefixProvider *dpp, const string& source_zone, const string& section, const string& name, uint32_t error_code, const string& message) { cls_log_entry entry; rgw_sync_error_info info(source_zone, error_code, message); @@ -62,7 +62,7 @@ RGWCoroutine *RGWSyncErrorLogger::log_error_cr(const string& source_zone, const uint32_t shard_id = ++counter % num_shards; - return new RGWRadosTimelogAddCR(store, oids[shard_id], entry); + return new RGWRadosTimelogAddCR(dpp, store, oids[shard_id], entry); } void RGWSyncBackoff::update_wait_time() @@ -89,7 +89,7 @@ void RGWSyncBackoff::backoff(RGWCoroutine *op) op->wait(utime_t(cur_wait, 0)); } -int RGWBackoffControlCR::operate() { +int RGWBackoffControlCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { // retry the operation until it succeeds while (true) { @@ -152,7 +152,7 @@ void rgw_mdlog_shard_data::decode_json(JSONObj *obj) { 
JSONDecoder::decode_json("entries", entries, obj); }; -int RGWShardCollectCR::operate() { +int RGWShardCollectCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { while (spawn_next()) { current_running++; @@ -238,12 +238,12 @@ RGWRemoteMetaLog::~RGWRemoteMetaLog() delete error_logger; } -int RGWRemoteMetaLog::read_log_info(rgw_mdlog_info *log_info) +int RGWRemoteMetaLog::read_log_info(const DoutPrefixProvider *dpp, rgw_mdlog_info *log_info) { rgw_http_param_pair pairs[] = { { "type", "metadata" }, { NULL, NULL } }; - int ret = conn->get_json_resource("/admin/log", pairs, null_yield, *log_info); + int ret = conn->get_json_resource(dpp, "/admin/log", pairs, null_yield, *log_info); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: failed to fetch mdlog info" << dendl; return ret; @@ -254,28 +254,28 @@ int RGWRemoteMetaLog::read_log_info(rgw_mdlog_info *log_info) return 0; } -int RGWRemoteMetaLog::read_master_log_shards_info(const string &master_period, map *shards_info) +int RGWRemoteMetaLog::read_master_log_shards_info(const DoutPrefixProvider *dpp, const string &master_period, map *shards_info) { if (store->svc()->zone->is_meta_master()) { return 0; } rgw_mdlog_info log_info; - int ret = read_log_info(&log_info); + int ret = read_log_info(dpp, &log_info); if (ret < 0) { return ret; } - return run(new RGWReadRemoteMDLogInfoCR(&sync_env, master_period, log_info.num_shards, shards_info)); + return run(dpp, new RGWReadRemoteMDLogInfoCR(&sync_env, master_period, log_info.num_shards, shards_info)); } -int RGWRemoteMetaLog::read_master_log_shards_next(const string& period, map shard_markers, map *result) +int RGWRemoteMetaLog::read_master_log_shards_next(const DoutPrefixProvider *dpp, const string& period, map shard_markers, map *result) { if (store->svc()->zone->is_meta_master()) { return 0; } - return run(new RGWListRemoteMDLogCR(&sync_env, period, shard_markers, 1, result)); + return run(dpp, new RGWListRemoteMDLogCR(&sync_env, period, shard_markers, 1, result)); } 
int RGWRemoteMetaLog::init() @@ -305,35 +305,35 @@ void RGWRemoteMetaLog::finish() #define CLONE_MAX_ENTRIES 100 -int RGWMetaSyncStatusManager::init() +int RGWMetaSyncStatusManager::init(const DoutPrefixProvider *dpp) { if (store->svc()->zone->is_meta_master()) { return 0; } if (!store->svc()->zone->get_master_conn()) { - lderr(store->ctx()) << "no REST connection to master zone" << dendl; + ldpp_dout(dpp, -1) << "no REST connection to master zone" << dendl; return -EIO; } - int r = rgw_init_ioctx(store->getRados()->get_rados_handle(), store->svc()->zone->get_zone_params().log_pool, ioctx, true); + int r = rgw_init_ioctx(dpp, store->getRados()->get_rados_handle(), store->svc()->zone->get_zone_params().log_pool, ioctx, true); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed to open log pool (" << store->svc()->zone->get_zone_params().log_pool << " ret=" << r << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to open log pool (" << store->svc()->zone->get_zone_params().log_pool << " ret=" << r << dendl; return r; } r = master_log.init(); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed to init remote log, r=" << r << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to init remote log, r=" << r << dendl; return r; } RGWMetaSyncEnv& sync_env = master_log.get_sync_env(); rgw_meta_sync_status sync_status; - r = read_sync_status(&sync_status); + r = read_sync_status(dpp, &sync_status); if (r < 0 && r != -ENOENT) { - lderr(store->ctx()) << "ERROR: failed to read sync status, r=" << r << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed to read sync status, r=" << r << dendl; return r; } @@ -391,13 +391,14 @@ string RGWMetaSyncEnv::shard_obj_name(int shard_id) } class RGWAsyncReadMDLogEntries : public RGWAsyncRadosRequest { + const DoutPrefixProvider *dpp; rgw::sal::RadosStore* store; RGWMetadataLog *mdlog; int shard_id; int max_entries; protected: - int _send_request() override { + int _send_request(const DoutPrefixProvider *dpp) override { real_time from_time; real_time 
end_time; @@ -405,7 +406,7 @@ protected: mdlog->init_list_entries(shard_id, from_time, end_time, marker, &handle); - int ret = mdlog->list_entries(handle, max_entries, entries, &marker, &truncated); + int ret = mdlog->list_entries(dpp, handle, max_entries, entries, &marker, &truncated); mdlog->complete_list_entries(handle); @@ -416,10 +417,10 @@ public: list entries; bool truncated; - RGWAsyncReadMDLogEntries(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RadosStore* _store, + RGWAsyncReadMDLogEntries(const DoutPrefixProvider *dpp, RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RadosStore* _store, RGWMetadataLog* mdlog, int _shard_id, std::string _marker, int _max_entries) - : RGWAsyncRadosRequest(caller, cn), store(_store), mdlog(mdlog), + : RGWAsyncRadosRequest(caller, cn), dpp(dpp), store(_store), mdlog(mdlog), shard_id(_shard_id), max_entries(_max_entries), marker(std::move(_marker)) {} }; @@ -449,9 +450,9 @@ public: } } - int send_request() override { + int send_request(const DoutPrefixProvider *dpp) override { marker = *pmarker; - req = new RGWAsyncReadMDLogEntries(this, stack->create_completion_notifier(), + req = new RGWAsyncReadMDLogEntries(dpp, this, stack->create_completion_notifier(), sync_env->store, mdlog, shard_id, marker, max_entries); sync_env->async_rados->queue(req); @@ -481,7 +482,7 @@ public: : RGWCoroutine(env->store->ctx()), env(env), http_op(NULL), period(period), shard_id(_shard_id), shard_info(_shard_info) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { auto store = env->store; RGWRESTConn *conn = store->svc()->zone->get_master_conn(); reenter(this) { @@ -501,7 +502,7 @@ public: init_new_io(http_op); - int ret = http_op->aio_read(); + int ret = http_op->aio_read(dpp); if (ret < 0) { ldpp_dout(env->dpp, 0) << "ERROR: failed to read from " << p << dendl; log_error() << "failed to send http operation: " << http_op->to_str() << " ret=" << ret << std::endl; @@ -549,7 +550,7 @@ 
public: : RGWSimpleCoroutine(env->store->ctx()), sync_env(env), http_op(NULL), period(period), shard_id(_shard_id), marker(_marker), max_entries(_max_entries), result(_result) {} - int send_request() override { + int send_request(const DoutPrefixProvider *dpp) override { RGWRESTConn *conn = sync_env->conn; char buf[32]; @@ -572,9 +573,9 @@ public: http_op = new RGWRESTReadResource(conn, p, pairs, NULL, sync_env->http_manager); init_new_io(http_op); - int ret = http_op->aio_read(); + int ret = http_op->aio_read(dpp); if (ret < 0) { - ldpp_dout(sync_env->dpp, 0) << "ERROR: failed to read from " << p << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to read from " << p << dendl; log_error() << "failed to send http operation: " << http_op->to_str() << " ret=" << ret << std::endl; http_op->put(); return ret; @@ -644,7 +645,7 @@ public: } } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { int ret; reenter(this) { yield { @@ -659,7 +660,7 @@ public: } while (!lease_cr->is_locked()) { if (lease_cr->is_done()) { - ldpp_dout(sync_env->dpp, 5) << "lease cr failed, done early " << dendl; + ldpp_dout(dpp, 5) << "lease cr failed, done early " << dendl; set_status("lease lock failed, early abort"); return set_cr_error(lease_cr->get_ret_status()); } @@ -669,14 +670,14 @@ public: yield { set_status("writing sync status"); rgw::sal::RadosStore* store = sync_env->store; - call(new RGWSimpleRadosWriteCR(sync_env->async_rados, store->svc()->sysobj, + call(new RGWSimpleRadosWriteCR(dpp, sync_env->async_rados, store->svc()->sysobj, rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env->status_oid()), status)); } if (retcode < 0) { set_status("failed to write sync status"); - ldpp_dout(sync_env->dpp, 0) << "ERROR: failed to write sync status, retcode=" << retcode << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to write sync status, retcode=" << retcode << dendl; yield lease_cr->go_down(); return set_cr_error(retcode); } @@ -699,7 +700,8 @@ 
public: marker.next_step_marker = info.marker; marker.timestamp = info.last_update; rgw::sal::RadosStore* store = sync_env->store; - spawn(new RGWSimpleRadosWriteCR(sync_env->async_rados, + spawn(new RGWSimpleRadosWriteCR(dpp, + sync_env->async_rados, store->svc()->sysobj, rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env->shard_obj_name(i)), marker), true); @@ -709,7 +711,7 @@ public: set_status("changing sync state: build full sync maps"); status.state = rgw_meta_sync_info::StateBuildingFullSyncMaps; rgw::sal::RadosStore* store = sync_env->store; - call(new RGWSimpleRadosWriteCR(sync_env->async_rados, store->svc()->sysobj, + call(new RGWSimpleRadosWriteCR(dpp, sync_env->async_rados, store->svc()->sysobj, rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env->status_oid()), status)); } @@ -753,7 +755,7 @@ bool RGWReadSyncStatusMarkersCR::spawn_next() using CR = RGWSimpleRadosReadCR; rgw_raw_obj obj{env->store->svc()->zone->get_zone_params().log_pool, env->shard_obj_name(shard_id)}; - spawn(new CR(env->async_rados, env->store->svc()->sysobj, obj, &markers[shard_id]), false); + spawn(new CR(env->dpp, env->async_rados, env->store->svc()->sysobj, obj, &markers[shard_id]), false); shard_id++; return true; } @@ -767,10 +769,10 @@ public: rgw_meta_sync_status *_status) : RGWCoroutine(_sync_env->cct), sync_env(_sync_env), sync_status(_status) {} - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int RGWReadSyncStatusCoroutine::operate() +int RGWReadSyncStatusCoroutine::operate(const DoutPrefixProvider *dpp) { reenter(this) { // read sync info @@ -779,11 +781,11 @@ int RGWReadSyncStatusCoroutine::operate() bool empty_on_enoent = false; // fail on ENOENT rgw_raw_obj obj{sync_env->store->svc()->zone->get_zone_params().log_pool, sync_env->status_oid()}; - call(new ReadInfoCR(sync_env->async_rados, sync_env->store->svc()->sysobj, obj, + call(new ReadInfoCR(dpp, sync_env->async_rados, 
sync_env->store->svc()->sysobj, obj, &sync_status->sync_info, empty_on_enoent)); } if (retcode < 0) { - ldpp_dout(sync_env->dpp, 4) << "failed to read sync status info with " + ldpp_dout(dpp, 4) << "failed to read sync status info with " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); } @@ -792,7 +794,7 @@ int RGWReadSyncStatusCoroutine::operate() yield call(new ReadMarkersCR(sync_env, sync_status->sync_info.num_shards, sync_status->sync_markers)); if (retcode < 0) { - ldpp_dout(sync_env->dpp, 4) << "failed to read sync status markers with " + ldpp_dout(dpp, 4) << "failed to read sync status markers with " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); } @@ -878,7 +880,7 @@ public: std::back_inserter(sections)); } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { RGWRESTConn *conn = sync_env->conn; reenter(this) { @@ -894,7 +896,7 @@ public: } while (!lease_cr->is_locked()) { if (lease_cr->is_done()) { - ldpp_dout(sync_env->dpp, 5) << "lease cr failed, done early " << dendl; + ldpp_dout(dpp, 5) << "lease cr failed, done early " << dendl; set_status("failed acquiring lock"); return set_cr_error(lease_cr->get_ret_status()); } @@ -909,7 +911,7 @@ public: "/admin/metadata", NULL, §ions)); } if (get_ret_status() < 0) { - ldpp_dout(sync_env->dpp, 0) << "ERROR: failed to fetch metadata sections" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to fetch metadata sections" << dendl; yield entries_index->finish(); yield lease_cr->go_down(); drain_all(); @@ -975,7 +977,7 @@ public: int shard_id = (int)iter->first; rgw_meta_sync_marker& marker = iter->second; marker.total_entries = entries_index->get_total_entries(shard_id); - spawn(new RGWSimpleRadosWriteCR(sync_env->async_rados, sync_env->store->svc()->sysobj, + spawn(new RGWSimpleRadosWriteCR(dpp, sync_env->async_rados, sync_env->store->svc()->sysobj, rgw_raw_obj(sync_env->store->svc()->zone->get_zone_params().log_pool, sync_env->shard_obj_name(shard_id)), 
marker), true); } @@ -1041,7 +1043,7 @@ public: section + ":" + key); } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { RGWRESTConn *conn = sync_env->conn; reenter(this) { yield { @@ -1056,9 +1058,9 @@ public: init_new_io(http_op); - int ret = http_op->aio_read(); + int ret = http_op->aio_read(dpp); if (ret < 0) { - ldpp_dout(sync_env->dpp, 0) << "ERROR: failed to fetch mdlog data" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to fetch mdlog data" << dendl; log_error() << "failed to send http operation: " << http_op->to_str() << " ret=" << ret << std::endl; http_op->put(); return set_cr_error(ret); @@ -1085,7 +1087,7 @@ class RGWAsyncMetaStoreEntry : public RGWAsyncRadosRequest { bufferlist bl; const DoutPrefixProvider *dpp; protected: - int _send_request() override { + int _send_request(const DoutPrefixProvider *dpp) override { int ret = store->ctl()->meta.mgr->put(raw_key, bl, null_yield, dpp, RGWMDLogSyncType::APPLY_ALWAYS, true); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: can't store key: " << raw_key << " ret=" << ret << dendl; @@ -1122,9 +1124,9 @@ public: } } - int send_request() override { + int send_request(const DoutPrefixProvider *dpp) override { req = new RGWAsyncMetaStoreEntry(this, stack->create_completion_notifier(), - sync_env->store, raw_key, bl, sync_env->dpp); + sync_env->store, raw_key, bl, dpp); sync_env->async_rados->queue(req); return 0; } @@ -1139,7 +1141,7 @@ class RGWAsyncMetaRemoveEntry : public RGWAsyncRadosRequest { string raw_key; const DoutPrefixProvider *dpp; protected: - int _send_request() override { + int _send_request(const DoutPrefixProvider *dpp) override { int ret = store->ctl()->meta.mgr->remove(raw_key, null_yield, dpp); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: can't remove key: " << raw_key << " ret=" << ret << dendl; @@ -1172,9 +1174,9 @@ public: } } - int send_request() override { + int send_request(const DoutPrefixProvider *dpp) override { req = new RGWAsyncMetaRemoveEntry(this, 
stack->create_completion_notifier(), - sync_env->store, raw_key, sync_env->dpp); + sync_env->store, raw_key, dpp); sync_env->async_rados->queue(req); return 0; } @@ -1191,7 +1193,7 @@ public: #define META_SYNC_UPDATE_MARKER_WINDOW 10 -int RGWLastCallerWinsCR::operate() { +int RGWLastCallerWinsCR::operate(const DoutPrefixProvider *dpp) { RGWCoroutine *call_cr; reenter(this) { while (cr) { @@ -1236,7 +1238,7 @@ public: ldpp_dout(sync_env->dpp, 20) << __func__ << "(): updating marker marker_oid=" << marker_oid << " marker=" << new_marker << " realm_epoch=" << sync_marker.realm_epoch << dendl; tn->log(20, SSTR("new marker=" << new_marker)); rgw::sal::RadosStore* store = sync_env->store; - return new RGWSimpleRadosWriteCR(sync_env->async_rados, + return new RGWSimpleRadosWriteCR(sync_env->dpp, sync_env->async_rados, store->svc()->sysobj, rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, marker_oid), sync_marker); @@ -1260,7 +1262,7 @@ RGWMetaSyncSingleEntryCR::RGWMetaSyncSingleEntryCR(RGWMetaSyncEnv *_sync_env, tn = sync_env->sync_tracer->add_node(_tn_parent, "entry", raw_key); } -int RGWMetaSyncSingleEntryCR::operate() { +int RGWMetaSyncSingleEntryCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { #define NUM_TRANSIENT_ERROR_RETRIES 10 @@ -1295,14 +1297,14 @@ int RGWMetaSyncSingleEntryCR::operate() { } if ((sync_status == -EAGAIN || sync_status == -ECANCELED) && (tries < NUM_TRANSIENT_ERROR_RETRIES - 1)) { - ldpp_dout(sync_env->dpp, 20) << *this << ": failed to fetch remote metadata: " << section << ":" << key << ", will retry" << dendl; + ldpp_dout(dpp, 20) << *this << ": failed to fetch remote metadata: " << section << ":" << key << ", will retry" << dendl; continue; } if (sync_status < 0) { tn->log(10, SSTR("failed to send read remote metadata entry: section=" << section << " key=" << key << " status=" << sync_status)); log_error() << "failed to send read remote metadata entry: section=" << section << " key=" << key << " status=" << sync_status 
<< std::endl; - yield call(sync_env->error_logger->log_error_cr(sync_env->conn->get_remote_id(), section, key, -sync_status, + yield call(sync_env->error_logger->log_error_cr(dpp, sync_env->conn->get_remote_id(), section, key, -sync_status, string("failed to read remote metadata entry: ") + cpp_strerror(-sync_status))); return set_cr_error(sync_status); } @@ -1320,7 +1322,7 @@ int RGWMetaSyncSingleEntryCR::operate() { yield call(new RGWMetaRemoveEntryCR(sync_env, raw_key)); } if ((retcode == -EAGAIN || retcode == -ECANCELED) && (tries < NUM_TRANSIENT_ERROR_RETRIES - 1)) { - ldpp_dout(sync_env->dpp, 20) << *this << ": failed to store metadata: " << section << ":" << key << ", got retcode=" << retcode << dendl; + ldpp_dout(dpp, 20) << *this << ": failed to store metadata: " << section << ":" << key << ", got retcode=" << retcode << dendl; continue; } break; @@ -1380,12 +1382,12 @@ public: } } - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; int state_init(); int state_read_shard_status(); int state_read_shard_status_complete(); - int state_send_rest_request(); + int state_send_rest_request(const DoutPrefixProvider *dpp); int state_receive_rest_response(); int state_store_mdlog_entries(); int state_store_mdlog_entries_complete(); @@ -1472,21 +1474,21 @@ public: marker_tracker = mt; } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { int r; while (true) { switch (sync_marker.state) { case rgw_meta_sync_marker::FullSync: r = full_sync(); if (r < 0) { - ldpp_dout(sync_env->dpp, 10) << "sync: full_sync: shard_id=" << shard_id << " r=" << r << dendl; + ldpp_dout(dpp, 10) << "sync: full_sync: shard_id=" << shard_id << " r=" << r << dendl; return set_cr_error(r); } return 0; case rgw_meta_sync_marker::IncrementalSync: r = incremental_sync(); if (r < 0) { - ldpp_dout(sync_env->dpp, 10) << "sync: incremental_sync: shard_id=" << shard_id << " r=" << r << dendl; + ldpp_dout(dpp, 10) << "sync: incremental_sync: 
shard_id=" << shard_id << " r=" << r << dendl; return set_cr_error(r); } return 0; @@ -1648,7 +1650,7 @@ public: ldpp_dout(sync_env->dpp, 4) << *this << ": saving marker pos=" << temp_marker->marker << " realm_epoch=" << realm_epoch << dendl; using WriteMarkerCR = RGWSimpleRadosWriteCR; - yield call(new WriteMarkerCR(sync_env->async_rados, sync_env->store->svc()->sysobj, + yield call(new WriteMarkerCR(sync_env->dpp, sync_env->async_rados, sync_env->store->svc()->sysobj, rgw_raw_obj(pool, sync_env->shard_obj_name(shard_id)), *temp_marker)); } @@ -1892,7 +1894,7 @@ public: RGWCoroutine *alloc_finisher_cr() override { rgw::sal::RadosStore* store = sync_env->store; - return new RGWSimpleRadosReadCR(sync_env->async_rados, store->svc()->sysobj, + return new RGWSimpleRadosReadCR(sync_env->dpp, sync_env->async_rados, store->svc()->sysobj, rgw_raw_obj(pool, sync_env->shard_obj_name(shard_id)), &sync_marker); } @@ -1927,7 +1929,7 @@ public: ~RGWMetaSyncCR() { } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { // loop through one period at a time tn->log(1, "start"); @@ -1935,15 +1937,15 @@ public: if (cursor == sync_env->store->svc()->mdlog->get_period_history()->get_current()) { next = RGWPeriodHistory::Cursor{}; if (cursor) { - ldpp_dout(sync_env->dpp, 10) << "RGWMetaSyncCR on current period=" + ldpp_dout(dpp, 10) << "RGWMetaSyncCR on current period=" << cursor.get_period().get_id() << dendl; } else { - ldpp_dout(sync_env->dpp, 10) << "RGWMetaSyncCR with no period" << dendl; + ldpp_dout(dpp, 10) << "RGWMetaSyncCR with no period" << dendl; } } else { next = cursor; next.next(); - ldpp_dout(sync_env->dpp, 10) << "RGWMetaSyncCR on period=" + ldpp_dout(dpp, 10) << "RGWMetaSyncCR on period=" << cursor.get_period().get_id() << ", next=" << next.get_period().get_id() << dendl; } @@ -1970,7 +1972,7 @@ public: period_marker = next.get_period().get_sync_status()[shard_id]; if (period_marker.empty()) { // no metadata changes have 
occurred on this shard, skip it - ldpp_dout(sync_env->dpp, 10) << "RGWMetaSyncCR: skipping shard " << shard_id + ldpp_dout(dpp, 10) << "RGWMetaSyncCR: skipping shard " << shard_id << " with empty period marker" << dendl; continue; } @@ -2005,7 +2007,7 @@ public: // write the updated sync info sync_status.sync_info.period = cursor.get_period().get_id(); sync_status.sync_info.realm_epoch = cursor.get_epoch(); - yield call(new RGWSimpleRadosWriteCR(sync_env->async_rados, + yield call(new RGWSimpleRadosWriteCR(dpp, sync_env->async_rados, sync_env->store->svc()->sysobj, rgw_raw_obj(pool, sync_env->status_oid()), sync_status.sync_info)); @@ -2035,7 +2037,7 @@ void RGWRemoteMetaLog::init_sync_env(RGWMetaSyncEnv *env) { env->sync_tracer = store->getRados()->get_sync_tracer(); } -int RGWRemoteMetaLog::read_sync_status(rgw_meta_sync_status *sync_status) +int RGWRemoteMetaLog::read_sync_status(const DoutPrefixProvider *dpp, rgw_meta_sync_status *sync_status) { if (store->svc()->zone->is_meta_master()) { return 0; @@ -2051,21 +2053,21 @@ int RGWRemoteMetaLog::read_sync_status(rgw_meta_sync_status *sync_status) RGWMetaSyncEnv sync_env_local = sync_env; sync_env_local.http_manager = &http_manager; tn->log(20, "read sync status"); - ret = crs.run(new RGWReadSyncStatusCoroutine(&sync_env_local, sync_status)); + ret = crs.run(dpp, new RGWReadSyncStatusCoroutine(&sync_env_local, sync_status)); http_manager.stop(); return ret; } -int RGWRemoteMetaLog::init_sync_status() +int RGWRemoteMetaLog::init_sync_status(const DoutPrefixProvider *dpp) { if (store->svc()->zone->is_meta_master()) { return 0; } rgw_mdlog_info mdlog_info; - int r = read_log_info(&mdlog_info); + int r = read_log_info(dpp, &mdlog_info); if (r < 0) { - lderr(store->ctx()) << "ERROR: fail to fetch master log info (r=" << r << ")" << dendl; + ldpp_dout(dpp, -1) << "ERROR: fail to fetch master log info (r=" << r << ")" << dendl; return r; } @@ -2077,19 +2079,20 @@ int RGWRemoteMetaLog::init_sync_status() 
sync_info.realm_epoch = cursor.get_epoch(); } - return run(new RGWInitSyncStatusCoroutine(&sync_env, sync_info)); + return run(dpp, new RGWInitSyncStatusCoroutine(&sync_env, sync_info)); } -int RGWRemoteMetaLog::store_sync_info(const rgw_meta_sync_info& sync_info) +int RGWRemoteMetaLog::store_sync_info(const DoutPrefixProvider *dpp, const rgw_meta_sync_info& sync_info) { tn->log(20, "store sync info"); - return run(new RGWSimpleRadosWriteCR(async_rados, store->svc()->sysobj, + return run(dpp, new RGWSimpleRadosWriteCR(dpp, async_rados, store->svc()->sysobj, rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env.status_oid()), sync_info)); } // return a cursor to the period at our sync position -static RGWPeriodHistory::Cursor get_period_at(rgw::sal::RadosStore* store, +static RGWPeriodHistory::Cursor get_period_at(const DoutPrefixProvider *dpp, + rgw::sal::RadosStore* store, const rgw_meta_sync_info& info, optional_yield y) { @@ -2104,7 +2107,7 @@ static RGWPeriodHistory::Cursor get_period_at(rgw::sal::RadosStore* store, // verify that the period ids match auto& existing = cursor.get_period().get_id(); if (existing != info.period) { - lderr(store->ctx()) << "ERROR: sync status period=" << info.period + ldpp_dout(dpp, -1) << "ERROR: sync status period=" << info.period << " does not match period=" << existing << " in history at realm epoch=" << info.realm_epoch << dendl; return RGWPeriodHistory::Cursor{-EEXIST}; @@ -2114,23 +2117,23 @@ static RGWPeriodHistory::Cursor get_period_at(rgw::sal::RadosStore* store, // read the period from rados or pull it from the master RGWPeriod period; - int r = store->svc()->mdlog->pull_period(info.period, period, y); + int r = store->svc()->mdlog->pull_period(dpp, info.period, period, y); if (r < 0) { - lderr(store->ctx()) << "ERROR: failed to read period id " + ldpp_dout(dpp, -1) << "ERROR: failed to read period id " << info.period << ": " << cpp_strerror(r) << dendl; return RGWPeriodHistory::Cursor{r}; } // attach the 
period to our history - cursor = store->svc()->mdlog->get_period_history()->attach(std::move(period), y); + cursor = store->svc()->mdlog->get_period_history()->attach(dpp, std::move(period), y); if (!cursor) { r = cursor.get_error(); - lderr(store->ctx()) << "ERROR: failed to read period history back to " + ldpp_dout(dpp, -1) << "ERROR: failed to read period history back to " << info.period << ": " << cpp_strerror(r) << dendl; } return cursor; } -int RGWRemoteMetaLog::run_sync(optional_yield y) +int RGWRemoteMetaLog::run_sync(const DoutPrefixProvider *dpp, optional_yield y) { if (store->svc()->zone->is_meta_master()) { return 0; @@ -2145,7 +2148,7 @@ int RGWRemoteMetaLog::run_sync(optional_yield y) ldpp_dout(dpp, 1) << __func__ << "(): going down" << dendl; return 0; } - r = read_log_info(&mdlog_info); + r = read_log_info(dpp, &mdlog_info); if (r == -EIO || r == -ENOENT) { // keep retrying if master isn't alive or hasn't initialized the log ldpp_dout(dpp, 10) << __func__ << "(): waiting for master.." 
<< dendl; @@ -2154,7 +2157,7 @@ int RGWRemoteMetaLog::run_sync(optional_yield y) } backoff.reset(); if (r < 0) { - lderr(store->ctx()) << "ERROR: fail to fetch master log info (r=" << r << ")" << dendl; + ldpp_dout(dpp, -1) << "ERROR: fail to fetch master log info (r=" << r << ")" << dendl; return r; } break; @@ -2166,7 +2169,7 @@ int RGWRemoteMetaLog::run_sync(optional_yield y) ldpp_dout(dpp, 1) << __func__ << "(): going down" << dendl; return 0; } - r = run(new RGWReadSyncStatusCoroutine(&sync_env, &sync_status)); + r = run(dpp, new RGWReadSyncStatusCoroutine(&sync_env, &sync_status)); if (r < 0 && r != -ENOENT) { ldpp_dout(dpp, 0) << "ERROR: failed to fetch sync status r=" << r << dendl; return r; @@ -2201,7 +2204,7 @@ int RGWRemoteMetaLog::run_sync(optional_yield y) sync_status.sync_info.period = cursor.get_period().get_id(); sync_status.sync_info.realm_epoch = cursor.get_epoch(); } - r = run(new RGWInitSyncStatusCoroutine(&sync_env, sync_status.sync_info)); + r = run(dpp, new RGWInitSyncStatusCoroutine(&sync_env, sync_status.sync_info)); if (r == -EBUSY) { backoff.backoff_sleep(); continue; @@ -2216,13 +2219,13 @@ int RGWRemoteMetaLog::run_sync(optional_yield y) auto num_shards = sync_status.sync_info.num_shards; if (num_shards != mdlog_info.num_shards) { - lderr(store->ctx()) << "ERROR: can't sync, mismatch between num shards, master num_shards=" << mdlog_info.num_shards << " local num_shards=" << num_shards << dendl; + ldpp_dout(dpp, -1) << "ERROR: can't sync, mismatch between num shards, master num_shards=" << mdlog_info.num_shards << " local num_shards=" << num_shards << dendl; return -EINVAL; } RGWPeriodHistory::Cursor cursor; do { - r = run(new RGWReadSyncStatusCoroutine(&sync_env, &sync_status)); + r = run(dpp, new RGWReadSyncStatusCoroutine(&sync_env, &sync_status)); if (r < 0 && r != -ENOENT) { tn->log(0, SSTR("ERROR: failed to fetch sync status r=" << r)); return r; @@ -2231,7 +2234,7 @@ int RGWRemoteMetaLog::run_sync(optional_yield y) switch 
((rgw_meta_sync_info::SyncState)sync_status.sync_info.state) { case rgw_meta_sync_info::StateBuildingFullSyncMaps: tn->log(20, "building full sync maps"); - r = run(new RGWFetchAllMetaCR(&sync_env, num_shards, sync_status.sync_markers, tn)); + r = run(dpp, new RGWFetchAllMetaCR(&sync_env, num_shards, sync_status.sync_markers, tn)); if (r == -EBUSY || r == -EAGAIN) { backoff.backoff_sleep(); continue; @@ -2243,7 +2246,7 @@ int RGWRemoteMetaLog::run_sync(optional_yield y) } sync_status.sync_info.state = rgw_meta_sync_info::StateSync; - r = store_sync_info(sync_status.sync_info); + r = store_sync_info(dpp, sync_status.sync_info); if (r < 0) { tn->log(0, SSTR("ERROR: failed to update sync status (r=" << r << ")")); return r; @@ -2252,13 +2255,13 @@ int RGWRemoteMetaLog::run_sync(optional_yield y) case rgw_meta_sync_info::StateSync: tn->log(20, "sync"); // find our position in the period history (if any) - cursor = get_period_at(store, sync_status.sync_info, y); + cursor = get_period_at(dpp, store, sync_status.sync_info, y); r = cursor.get_error(); if (r < 0) { return r; } meta_sync_cr = new RGWMetaSyncCR(&sync_env, cursor, sync_status, tn); - r = run(meta_sync_cr); + r = run(dpp, meta_sync_cr); if (r < 0) { tn->log(0, "ERROR: failed to fetch all metadata keys"); return r; @@ -2281,37 +2284,37 @@ void RGWRemoteMetaLog::wakeup(int shard_id) meta_sync_cr->wakeup(shard_id); } -int RGWCloneMetaLogCoroutine::operate() +int RGWCloneMetaLogCoroutine::operate(const DoutPrefixProvider *dpp) { reenter(this) { do { yield { - ldpp_dout(sync_env->dpp, 20) << __func__ << ": shard_id=" << shard_id << ": init request" << dendl; + ldpp_dout(dpp, 20) << __func__ << ": shard_id=" << shard_id << ": init request" << dendl; return state_init(); } yield { - ldpp_dout(sync_env->dpp, 20) << __func__ << ": shard_id=" << shard_id << ": reading shard status" << dendl; + ldpp_dout(dpp, 20) << __func__ << ": shard_id=" << shard_id << ": reading shard status" << dendl; return 
state_read_shard_status(); } yield { - ldpp_dout(sync_env->dpp, 20) << __func__ << ": shard_id=" << shard_id << ": reading shard status complete" << dendl; + ldpp_dout(dpp, 20) << __func__ << ": shard_id=" << shard_id << ": reading shard status complete" << dendl; return state_read_shard_status_complete(); } yield { - ldpp_dout(sync_env->dpp, 20) << __func__ << ": shard_id=" << shard_id << ": sending rest request" << dendl; - return state_send_rest_request(); + ldpp_dout(dpp, 20) << __func__ << ": shard_id=" << shard_id << ": sending rest request" << dendl; + return state_send_rest_request(dpp); } yield { - ldpp_dout(sync_env->dpp, 20) << __func__ << ": shard_id=" << shard_id << ": receiving rest response" << dendl; + ldpp_dout(dpp, 20) << __func__ << ": shard_id=" << shard_id << ": receiving rest response" << dendl; return state_receive_rest_response(); } yield { - ldpp_dout(sync_env->dpp, 20) << __func__ << ": shard_id=" << shard_id << ": storing mdlog entries" << dendl; + ldpp_dout(dpp, 20) << __func__ << ": shard_id=" << shard_id << ": storing mdlog entries" << dendl; return state_store_mdlog_entries(); } } while (truncated); yield { - ldpp_dout(sync_env->dpp, 20) << __func__ << ": shard_id=" << shard_id << ": storing mdlog entries complete" << dendl; + ldpp_dout(dpp, 20) << __func__ << ": shard_id=" << shard_id << ": storing mdlog entries complete" << dendl; return state_store_mdlog_entries_complete(); } } @@ -2345,7 +2348,7 @@ int RGWCloneMetaLogCoroutine::state_read_shard_status() io_complete(); }), add_ref); - int ret = mdlog->get_info_async(shard_id, completion.get()); + int ret = mdlog->get_info_async(sync_env->dpp, shard_id, completion.get()); if (ret < 0) { ldpp_dout(sync_env->dpp, 0) << "ERROR: mdlog->get_info_async() returned ret=" << ret << dendl; return set_cr_error(ret); @@ -2365,7 +2368,7 @@ int RGWCloneMetaLogCoroutine::state_read_shard_status_complete() return 0; } -int RGWCloneMetaLogCoroutine::state_send_rest_request() +int 
RGWCloneMetaLogCoroutine::state_send_rest_request(const DoutPrefixProvider *dpp) { RGWRESTConn *conn = sync_env->conn; @@ -2388,9 +2391,9 @@ int RGWCloneMetaLogCoroutine::state_send_rest_request() init_new_io(http_op); - int ret = http_op->aio_read(); + int ret = http_op->aio_read(dpp); if (ret < 0) { - ldpp_dout(sync_env->dpp, 0) << "ERROR: failed to fetch mdlog data" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to fetch mdlog data" << dendl; log_error() << "failed to send http operation: " << http_op->to_str() << " ret=" << ret << std::endl; http_op->put(); http_op = NULL; @@ -2456,7 +2459,7 @@ int RGWCloneMetaLogCoroutine::state_store_mdlog_entries() RGWAioCompletionNotifier *cn = stack->create_completion_notifier(); - int ret = mdlog->store_entries_in_shard(dest_entries, shard_id, cn->completion()); + int ret = mdlog->store_entries_in_shard(sync_env->dpp, dest_entries, shard_id, cn->completion()); if (ret < 0) { cn->put(); ldpp_dout(sync_env->dpp, 10) << "failed to store md log entries shard_id=" << shard_id << " ret=" << ret << dendl; diff --git a/src/rgw/rgw_sync.h b/src/rgw/rgw_sync.h index 52badfdd06e..84831c0988b 100644 --- a/src/rgw/rgw_sync.h +++ b/src/rgw/rgw_sync.h @@ -78,7 +78,7 @@ class RGWSyncErrorLogger { std::atomic counter = { 0 }; public: RGWSyncErrorLogger(rgw::sal::RadosStore* _store, const string &oid_prefix, int _num_shards); - RGWCoroutine *log_error_cr(const string& source_zone, const string& section, const string& name, uint32_t error_code, const string& message); + RGWCoroutine *log_error_cr(const DoutPrefixProvider *dpp, const string& source_zone, const string& section, const string& name, uint32_t error_code, const string& message); static string get_shard_oid(const string& oid_prefix, int shard_id); }; @@ -169,7 +169,7 @@ public: virtual RGWCoroutine *alloc_cr() = 0; virtual RGWCoroutine *alloc_finisher_cr() { return NULL; } - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; struct RGWMetaSyncEnv { 
@@ -210,7 +210,7 @@ class RGWRemoteMetaLog : public RGWCoroutinesManager { RGWMetaSyncEnv sync_env; void init_sync_env(RGWMetaSyncEnv *env); - int store_sync_info(const rgw_meta_sync_info& sync_info); + int store_sync_info(const DoutPrefixProvider *dpp, const rgw_meta_sync_info& sync_info); std::atomic going_down = { false }; @@ -230,12 +230,12 @@ public: int init(); void finish(); - int read_log_info(rgw_mdlog_info *log_info); - int read_master_log_shards_info(const string& master_period, map *shards_info); - int read_master_log_shards_next(const string& period, map shard_markers, map *result); - int read_sync_status(rgw_meta_sync_status *sync_status); - int init_sync_status(); - int run_sync(optional_yield y); + int read_log_info(const DoutPrefixProvider *dpp, rgw_mdlog_info *log_info); + int read_master_log_shards_info(const DoutPrefixProvider *dpp, const string& master_period, map *shards_info); + int read_master_log_shards_next(const DoutPrefixProvider *dpp, const string& period, map shard_markers, map *result); + int read_sync_status(const DoutPrefixProvider *dpp, rgw_meta_sync_status *sync_status); + int init_sync_status(const DoutPrefixProvider *dpp); + int run_sync(const DoutPrefixProvider *dpp, optional_yield y); void wakeup(int shard_id); @@ -274,23 +274,23 @@ public: RGWMetaSyncStatusManager(rgw::sal::RadosStore* _store, RGWAsyncRadosProcessor *async_rados) : store(_store), master_log(this, store, async_rados, this) {} - int init(); + int init(const DoutPrefixProvider *dpp); - int read_sync_status(rgw_meta_sync_status *sync_status) { - return master_log.read_sync_status(sync_status); + int read_sync_status(const DoutPrefixProvider *dpp, rgw_meta_sync_status *sync_status) { + return master_log.read_sync_status(dpp, sync_status); } - int init_sync_status() { return master_log.init_sync_status(); } - int read_log_info(rgw_mdlog_info *log_info) { - return master_log.read_log_info(log_info); + int init_sync_status(const DoutPrefixProvider *dpp) { return 
master_log.init_sync_status(dpp); } + int read_log_info(const DoutPrefixProvider *dpp, rgw_mdlog_info *log_info) { + return master_log.read_log_info(dpp, log_info); } - int read_master_log_shards_info(const string& master_period, map *shards_info) { - return master_log.read_master_log_shards_info(master_period, shards_info); + int read_master_log_shards_info(const DoutPrefixProvider *dpp, const string& master_period, map *shards_info) { + return master_log.read_master_log_shards_info(dpp, master_period, shards_info); } - int read_master_log_shards_next(const string& period, map shard_markers, map *result) { - return master_log.read_master_log_shards_next(period, shard_markers, result); + int read_master_log_shards_next(const DoutPrefixProvider *dpp, const string& period, map shard_markers, map *result) { + return master_log.read_master_log_shards_next(dpp, period, shard_markers, result); } - int run(optional_yield y) { return master_log.run_sync(y); } + int run(const DoutPrefixProvider *dpp, optional_yield y) { return master_log.run_sync(dpp, y); } // implements DoutPrefixProvider @@ -324,7 +324,7 @@ public: } } - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; void call_cr(RGWCoroutine *_cr) override { if (cr) { @@ -506,7 +506,7 @@ public: const RGWMDLogStatus& _op_status, RGWMetaSyncShardMarkerTrack *_marker_tracker, const RGWSyncTraceNodeRef& _tn_parent); - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; class RGWShardCollectCR : public RGWCoroutine { @@ -522,7 +522,7 @@ public: status(0) {} virtual bool spawn_next() = 0; - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; // factory functions for meta sync coroutines needed in mdlog trimming diff --git a/src/rgw/rgw_sync_checkpoint.cc b/src/rgw/rgw_sync_checkpoint.cc index 58ff89c35d2..83dc68f44c4 100644 --- a/src/rgw/rgw_sync_checkpoint.cc +++ b/src/rgw/rgw_sync_checkpoint.cc @@ -126,7 +126,8 @@ int 
bucket_source_sync_checkpoint(const DoutPrefixProvider* dpp, return 0; } -int source_bilog_markers(RGWSI_Zone* zone_svc, +int source_bilog_markers(const DoutPrefixProvider *dpp, + RGWSI_Zone* zone_svc, const rgw_sync_bucket_pipe& pipe, BucketIndexShardsManager& remote_markers, optional_yield y) @@ -139,7 +140,7 @@ int source_bilog_markers(RGWSI_Zone* zone_svc, return -EINVAL; } - return rgw_read_remote_bilog_info(conn->second, *pipe.source.bucket, + return rgw_read_remote_bilog_info(dpp, conn->second, *pipe.source.bucket, remote_markers, y); } @@ -178,7 +179,7 @@ int rgw_bucket_sync_checkpoint(const DoutPrefixProvider* dpp, // fetch remote markers spawn::spawn(ioctx, [&] (spawn::yield_context yield) { auto y = optional_yield{ioctx, yield}; - int r = source_bilog_markers(store->svc()->zone, entry.pipe, + int r = source_bilog_markers(dpp, store->svc()->zone, entry.pipe, entry.remote_markers, y); if (r < 0) { ldpp_dout(dpp, 0) << "failed to fetch remote bilog markers: " diff --git a/src/rgw/rgw_sync_error_repo.cc b/src/rgw/rgw_sync_error_repo.cc index e952ce91230..1f332276d0d 100644 --- a/src/rgw/rgw_sync_error_repo.cc +++ b/src/rgw/rgw_sync_error_repo.cc @@ -65,13 +65,13 @@ class RGWErrorRepoWriteCR : public RGWSimpleCoroutine { key(key), timestamp(timestamp) {} - int send_request() override { + int send_request(const DoutPrefixProvider *dpp) override { librados::ObjectWriteOperation op; int r = rgw_error_repo_write(op, key, timestamp); if (r < 0) { return r; } - r = obj.open(); + r = obj.open(dpp); if (r < 0) { return r; } @@ -108,13 +108,13 @@ class RGWErrorRepoRemoveCR : public RGWSimpleCoroutine { key(key), timestamp(timestamp) {} - int send_request() override { + int send_request(const DoutPrefixProvider *dpp) override { librados::ObjectWriteOperation op; int r = rgw_error_repo_remove(op, key, timestamp); if (r < 0) { return r; } - r = obj.open(); + r = obj.open(dpp); if (r < 0) { return r; } diff --git a/src/rgw/rgw_sync_module.cc b/src/rgw/rgw_sync_module.cc 
index 7100646098f..9dd153c8660 100644 --- a/src/rgw/rgw_sync_module.cc +++ b/src/rgw/rgw_sync_module.cc @@ -39,7 +39,7 @@ RGWCallStatRemoteObjCR::RGWCallStatRemoteObjCR(RGWDataSyncCtx *_sc, src_bucket(_src_bucket), key(_key) { } -int RGWCallStatRemoteObjCR::operate() { +int RGWCallStatRemoteObjCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { yield { call(new RGWStatRemoteObjCR(sync_env->async_rados, sync_env->store, @@ -47,10 +47,10 @@ int RGWCallStatRemoteObjCR::operate() { src_bucket, key, &mtime, &size, &etag, &attrs, &headers)); } if (retcode < 0) { - ldout(sync_env->cct, 10) << "RGWStatRemoteObjCR() returned " << retcode << dendl; + ldpp_dout(dpp, 10) << "RGWStatRemoteObjCR() returned " << retcode << dendl; return set_cr_error(retcode); } - ldout(sync_env->cct, 20) << "stat of remote obj: z=" << sc->source_zone + ldpp_dout(dpp, 20) << "stat of remote obj: z=" << sc->source_zone << " b=" << src_bucket << " k=" << key << " size=" << size << " mtime=" << mtime << dendl; yield { @@ -61,7 +61,7 @@ int RGWCallStatRemoteObjCR::operate() { } } if (retcode < 0) { - ldout(sync_env->cct, 10) << "RGWStatRemoteObjCR() callback returned " << retcode << dendl; + ldpp_dout(dpp, 10) << "RGWStatRemoteObjCR() callback returned " << retcode << dendl; return set_cr_error(retcode); } return set_cr_done(); diff --git a/src/rgw/rgw_sync_module.h b/src/rgw/rgw_sync_module.h index c46d5fccae1..5a3f62f7413 100644 --- a/src/rgw/rgw_sync_module.h +++ b/src/rgw/rgw_sync_module.h @@ -190,7 +190,7 @@ public: ~RGWCallStatRemoteObjCR() override {} - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; virtual RGWStatRemoteObjCBCR *allocate_callback() { return nullptr; diff --git a/src/rgw/rgw_sync_module_aws.cc b/src/rgw/rgw_sync_module_aws.cc index 84ff5d15bff..69128dc1ad1 100644 --- a/src/rgw/rgw_sync_module_aws.cc +++ b/src/rgw/rgw_sync_module_aws.cc @@ -745,7 +745,7 @@ public: src_properties(_src_properties) { } - int init() override { + int init(const 
DoutPrefixProvider *dpp) override { /* init input connection */ @@ -764,15 +764,15 @@ public: } RGWRESTStreamRWRequest *in_req; - int ret = conn->get_obj(src_obj, req_params, false /* send */, &in_req); + int ret = conn->get_obj(dpp, src_obj, req_params, false /* send */, &in_req); if (ret < 0) { - ldout(sc->cct, 0) << "ERROR: " << __func__ << "(): conn->get_obj() returned ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): conn->get_obj() returned ret=" << ret << dendl; return ret; } set_req(in_req); - return RGWStreamReadHTTPResourceCRF::init(); + return RGWStreamReadHTTPResourceCRF::init(dpp); } int decode_rest_obj(map& headers, bufferlist& extra_data) override { @@ -967,7 +967,7 @@ public: } } - void send_ready(const rgw_rest_obj& rest_obj) override { + void send_ready(const DoutPrefixProvider *dpp, const rgw_rest_obj& rest_obj) override { RGWRESTStreamS3PutObj *r = static_cast(req); map new_attrs; @@ -979,7 +979,7 @@ public: RGWAccessControlPolicy policy; - r->send_ready(target->conn->get_key(), new_attrs, policy); + r->send_ready(dpp, target->conn->get_key(), new_attrs, policy); } void handle_headers(const map& headers) { @@ -1026,7 +1026,7 @@ public: dest_obj(_dest_obj), src_properties(_src_properties) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { /* init input */ in_crf.reset(new RGWRESTStreamGetCRF(cct, get_env(), this, sc, @@ -1087,7 +1087,7 @@ public: part_info(_part_info), petag(_petag) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { /* init input */ in_crf.reset(new RGWRESTStreamGetCRF(cct, get_env(), this, sc, @@ -1136,7 +1136,7 @@ public: dest_obj(_dest_obj), upload_id(_upload_id) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { yield { @@ -1147,7 +1147,7 @@ public: } if (retcode < 0) { - ldout(sc->cct, 0) << "ERROR: failed to abort multipart upload for dest object=" 
<< dest_obj << " (retcode=" << retcode << ")" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to abort multipart upload for dest object=" << dest_obj << " (retcode=" << retcode << ")" << dendl; return set_cr_error(retcode); } @@ -1196,7 +1196,7 @@ public: attrs(_attrs), upload_id(_upload_id) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { yield { @@ -1207,7 +1207,7 @@ public: } if (retcode < 0) { - ldout(sc->cct, 0) << "ERROR: failed to initialize multipart upload for dest object=" << dest_obj << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to initialize multipart upload for dest object=" << dest_obj << dendl; return set_cr_error(retcode); } { @@ -1237,7 +1237,7 @@ public: } } - ldout(sc->cct, 20) << "init multipart result: bucket=" << result.bucket << " key=" << result.key << " upload_id=" << result.upload_id << dendl; + ldpp_dout(dpp, 20) << "init multipart result: bucket=" << result.bucket << " key=" << result.key << " upload_id=" << result.upload_id << dendl; *upload_id = result.upload_id; @@ -1298,7 +1298,7 @@ public: upload_id(_upload_id), req_enc(_parts) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { yield { @@ -1318,7 +1318,7 @@ public: } if (retcode < 0) { - ldout(sc->cct, 0) << "ERROR: failed to initialize multipart upload for dest object=" << dest_obj << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to initialize multipart upload for dest object=" << dest_obj << dendl; return set_cr_error(retcode); } { @@ -1348,7 +1348,7 @@ public: } } - ldout(sc->cct, 20) << "complete multipart result: location=" << result.location << " bucket=" << result.bucket << " key=" << result.key << " etag=" << result.etag << dendl; + ldpp_dout(dpp, 20) << "complete multipart result: location=" << result.location << " bucket=" << result.bucket << " key=" << result.key << " etag=" << result.etag << dendl; return set_cr_done(); } @@ -1378,16 +1378,16 @@ public: 
status_obj(_status_obj), upload_id(_upload_id) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { yield call(new RGWAWSAbortMultipartCR(sc, dest_conn, dest_obj, upload_id)); if (retcode < 0) { - ldout(sc->cct, 0) << "ERROR: failed to abort multipart upload dest obj=" << dest_obj << " upload_id=" << upload_id << " retcode=" << retcode << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to abort multipart upload dest obj=" << dest_obj << " upload_id=" << upload_id << " retcode=" << retcode << dendl; /* ignore error, best effort */ } yield call(new RGWRadosRemoveCR(sc->env->store, status_obj)); if (retcode < 0) { - ldout(sc->cct, 0) << "ERROR: failed to remove sync status obj obj=" << status_obj << " retcode=" << retcode << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to remove sync status obj obj=" << status_obj << " retcode=" << retcode << dendl; /* ignore error, best effort */ } return set_cr_done(); @@ -1447,13 +1447,13 @@ public: } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { - yield call(new RGWSimpleRadosReadCR(sync_env->async_rados, sync_env->svc->sysobj, + yield call(new RGWSimpleRadosReadCR(dpp, sync_env->async_rados, sync_env->svc->sysobj, status_obj, &status, false)); if (retcode < 0 && retcode != -ENOENT) { - ldout(sc->cct, 0) << "ERROR: failed to read sync status of object " << src_obj << " retcode=" << retcode << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to read sync status of object " << src_obj << " retcode=" << retcode << dendl; return retcode; } @@ -1506,15 +1506,15 @@ public: } if (retcode < 0) { - ldout(sc->cct, 0) << "ERROR: failed to sync obj=" << src_obj << ", sync via multipart upload, upload_id=" << status.upload_id << " part number " << status.cur_part << " (error: " << cpp_strerror(-retcode) << ")" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to sync obj=" << src_obj << ", sync via multipart upload, upload_id=" << status.upload_id << " 
part number " << status.cur_part << " (error: " << cpp_strerror(-retcode) << ")" << dendl; ret_err = retcode; yield call(new RGWAWSStreamAbortMultipartUploadCR(sc, target->conn.get(), dest_obj, status_obj, status.upload_id)); return set_cr_error(ret_err); } - yield call(new RGWSimpleRadosWriteCR(sync_env->async_rados, sync_env->svc->sysobj, status_obj, status)); + yield call(new RGWSimpleRadosWriteCR(dpp, sync_env->async_rados, sync_env->svc->sysobj, status_obj, status)); if (retcode < 0) { - ldout(sc->cct, 0) << "ERROR: failed to store multipart upload state, retcode=" << retcode << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to store multipart upload state, retcode=" << retcode << dendl; /* continue with upload anyway */ } ldout(sc->cct, 20) << "sync of object=" << src_obj << " via multipart upload, finished sending part #" << status.cur_part << " etag=" << pcur_part_info->etag << dendl; @@ -1522,7 +1522,7 @@ public: yield call(new RGWAWSCompleteMultipartCR(sc, target->conn.get(), dest_obj, status.upload_id, status.parts)); if (retcode < 0) { - ldout(sc->cct, 0) << "ERROR: failed to complete multipart upload of obj=" << src_obj << " (error: " << cpp_strerror(-retcode) << ")" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to complete multipart upload of obj=" << src_obj << " (error: " << cpp_strerror(-retcode) << ")" << dendl; ret_err = retcode; yield call(new RGWAWSStreamAbortMultipartUploadCR(sc, target->conn.get(), dest_obj, status_obj, status.upload_id)); return set_cr_error(ret_err); @@ -1531,7 +1531,7 @@ public: /* remove status obj */ yield call(new RGWRadosRemoveCR(sync_env->store, status_obj)); if (retcode < 0) { - ldout(sc->cct, 0) << "ERROR: failed to abort multipart upload obj=" << src_obj << " upload_id=" << status.upload_id << " part number " << status.cur_part << " (" << cpp_strerror(-retcode) << ")" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to abort multipart upload obj=" << src_obj << " upload_id=" << status.upload_id << " part number " << 
status.cur_part << " (" << cpp_strerror(-retcode) << ")" << dendl; /* ignore error, best effort */ } return set_cr_done(); @@ -1604,7 +1604,7 @@ public: ~RGWAWSHandleRemoteObjCBCR(){ } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { ret = decode_attr(attrs, RGW_ATTR_PG_VER, &src_pg_ver, (uint64_t)0); if (ret < 0) { @@ -1616,7 +1616,7 @@ public: src_pg_ver = 0; /* all or nothing */ } } - ldout(sc->cct, 4) << "AWS: download begin: z=" << sc->source_zone + ldpp_dout(dpp, 4) << "AWS: download begin: z=" << sc->source_zone << " b=" << src_bucket << " k=" << key << " size=" << size << " mtime=" << mtime << " etag=" << etag << " zone_short_id=" << src_zone_short_id << " pg_ver=" << src_pg_ver @@ -1747,7 +1747,7 @@ public: AWSSyncInstanceEnv& _instance) : RGWCoroutine(_sc->cct), sc(_sc), sync_pipe(_sync_pipe), key(_key), mtime(_mtime), instance(_instance) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { ldout(sc->cct, 0) << ": remove remote obj: z=" << sc->source_zone << " b=" <source_zone << dendl; + ldpp_dout(dpp, 5) << conf->id << ": get elasticsearch info for zone: " << sc->source_zone << dendl; yield call(new RGWReadRESTResourceCR (sync_env->cct, conf->conn.get(), sync_env->http_manager, @@ -657,11 +657,11 @@ public: &(conf->default_headers), &(conf->es_info))); if (retcode < 0) { - ldout(sync_env->cct, 5) << conf->id << ": get elasticsearch failed: " << retcode << dendl; + ldpp_dout(dpp, 5) << conf->id << ": get elasticsearch failed: " << retcode << dendl; return set_cr_error(retcode); } - ldout(sync_env->cct, 5) << conf->id << ": got elastic version=" << conf->es_info.get_version_str() << dendl; + ldpp_dout(dpp, 5) << conf->id << ": got elastic version=" << conf->es_info.get_version_str() << dendl; return set_cr_done(); } return 0; @@ -678,9 +678,9 @@ public: ElasticConfigRef _conf) : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env), conf(_conf) {} - int 
operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { - ldout(sc->cct, 5) << conf->id << ": put elasticsearch index for zone: " << sc->source_zone << dendl; + ldpp_dout(dpp, 5) << conf->id << ": put elasticsearch index for zone: " << sc->source_zone << dendl; yield { string path = conf->get_index_path(); @@ -688,10 +688,10 @@ public: std::unique_ptr index_conf; if (conf->es_info.version >= ES_V5) { - ldout(sc->cct, 0) << "elasticsearch: index mapping: version >= 5" << dendl; + ldpp_dout(dpp, 0) << "elasticsearch: index mapping: version >= 5" << dendl; index_conf.reset(new es_index_config(settings, conf->es_info.version)); } else { - ldout(sc->cct, 0) << "elasticsearch: index mapping: version < 5" << dendl; + ldpp_dout(dpp, 0) << "elasticsearch: index mapping: version < 5" << dendl; index_conf.reset(new es_index_config(settings, conf->es_info.version)); } call(new RGWPutRESTResourceCR (sc->cct, @@ -705,11 +705,11 @@ public: if (err_response.error.type != "index_already_exists_exception" && err_response.error.type != "resource_already_exists_exception") { - ldout(sync_env->cct, 0) << "elasticsearch: failed to initialize index: response.type=" << err_response.error.type << " response.reason=" << err_response.error.reason << dendl; + ldpp_dout(dpp, 0) << "elasticsearch: failed to initialize index: response.type=" << err_response.error.type << " response.reason=" << err_response.error.reason << dendl; return set_cr_error(retcode); } - ldout(sync_env->cct, 0) << "elasticsearch: index already exists, assuming external initialization" << dendl; + ldpp_dout(dpp, 0) << "elasticsearch: index already exists, assuming external initialization" << dendl; } return set_cr_done(); } @@ -752,7 +752,7 @@ public: ElasticConfigRef _conf) : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env), conf(_conf) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { yield call(new RGWElasticGetESInfoCBCR(sc, 
conf)); @@ -782,9 +782,9 @@ public: ElasticConfigRef _conf, uint64_t _versioned_epoch) : RGWStatRemoteObjCBCR(_sc, _sync_pipe.info.source_bs.bucket, _key), sync_pipe(_sync_pipe), conf(_conf), versioned_epoch(_versioned_epoch) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { - ldout(sync_env->cct, 10) << ": stat of remote obj: z=" << sc->source_zone + ldpp_dout(dpp, 10) << ": stat of remote obj: z=" << sc->source_zone << " b=" << sync_pipe.info.source_bs.bucket << " k=" << key << " size=" << size << " mtime=" << mtime << dendl; @@ -840,9 +840,9 @@ public: ElasticConfigRef _conf) : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env), sync_pipe(_sync_pipe), key(_key), mtime(_mtime), conf(_conf) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { - ldout(sync_env->cct, 10) << ": remove remote obj: z=" << sc->source_zone + ldpp_dout(dpp, 10) << ": remove remote obj: z=" << sc->source_zone << " b=" << sync_pipe.info.source_bs.bucket << " k=" << key << " mtime=" << mtime << dendl; yield { string path = conf->get_obj_path(sync_pipe.dest_bucket_info, key); diff --git a/src/rgw/rgw_sync_module_es_rest.cc b/src/rgw/rgw_sync_module_es_rest.cc index b14c26986dc..5884322dba3 100644 --- a/src/rgw/rgw_sync_module_es_rest.cc +++ b/src/rgw/rgw_sync_module_es_rest.cc @@ -218,7 +218,7 @@ void RGWMetadataSearchOp::execute(optional_yield y) bool valid = es_query.compile(&err); if (!valid) { - ldout(s->cct, 10) << "invalid query, failed generating request json" << dendl; + ldpp_dout(this, 10) << "invalid query, failed generating request json" << dendl; op_ret = -EINVAL; return; } @@ -245,20 +245,20 @@ void RGWMetadataSearchOp::execute(optional_yield y) if (marker > 0) { params.push_back(param_pair_t("from", marker_str.c_str())); } - ldout(s->cct, 20) << "sending request to elasticsearch, payload=" << string(in.c_str(), in.length()) << dendl; + ldpp_dout(this, 20) << "sending request to 
elasticsearch, payload=" << string(in.c_str(), in.length()) << dendl; auto& extra_headers = es_module->get_request_headers(); - op_ret = conn->get_resource(resource, ¶ms, &extra_headers, + op_ret = conn->get_resource(s, resource, ¶ms, &extra_headers, out, &in, nullptr, y); if (op_ret < 0) { - ldout(s->cct, 0) << "ERROR: failed to fetch resource (r=" << resource << ", ret=" << op_ret << ")" << dendl; + ldpp_dout(this, 0) << "ERROR: failed to fetch resource (r=" << resource << ", ret=" << op_ret << ")" << dendl; return; } - ldout(s->cct, 20) << "response: " << string(out.c_str(), out.length()) << dendl; + ldpp_dout(this, 20) << "response: " << string(out.c_str(), out.length()) << dendl; JSONParser jparser; if (!jparser.parse(out.c_str(), out.length())) { - ldout(s->cct, 0) << "ERROR: failed to parse elasticsearch response" << dendl; + ldpp_dout(this, 0) << "ERROR: failed to parse elasticsearch response" << dendl; op_ret = -EINVAL; return; } @@ -266,7 +266,7 @@ void RGWMetadataSearchOp::execute(optional_yield y) try { decode_json_obj(response, &jparser); } catch (const JSONDecoder::err& e) { - ldout(s->cct, 0) << "ERROR: failed to decode JSON input: " << e.what() << dendl; + ldpp_dout(this, 0) << "ERROR: failed to decode JSON input: " << e.what() << dendl; op_ret = -EINVAL; return; } @@ -419,7 +419,7 @@ RGWHandler_REST* RGWRESTMgr_MDSearch_S3::get_handler(rgw::sal::Store* store, RGWHandler_REST *handler = new RGWHandler_REST_MDSearch_S3(auth_registry); - ldout(s->cct, 20) << __func__ << " handler=" << typeid(*handler).name() + ldpp_dout(s, 20) << __func__ << " handler=" << typeid(*handler).name() << dendl; return handler; } diff --git a/src/rgw/rgw_sync_module_log.cc b/src/rgw/rgw_sync_module_log.cc index d0475509dae..c85fd478f1c 100644 --- a/src/rgw/rgw_sync_module_log.cc +++ b/src/rgw/rgw_sync_module_log.cc @@ -14,8 +14,8 @@ class RGWLogStatRemoteObjCBCR : public RGWStatRemoteObjCBCR { public: RGWLogStatRemoteObjCBCR(RGWDataSyncCtx *_sc, rgw_bucket& _src_bucket, 
rgw_obj_key& _key) : RGWStatRemoteObjCBCR(_sc, _src_bucket, _key) {} - int operate() override { - ldout(sync_env->cct, 0) << "SYNC_LOG: stat of remote obj: z=" << sc->source_zone + int operate(const DoutPrefixProvider *dpp) override { + ldpp_dout(dpp, 0) << "SYNC_LOG: stat of remote obj: z=" << sc->source_zone << " b=" << src_bucket << " k=" << key << " size=" << size << " mtime=" << mtime << " attrs=" << attrs << dendl; return set_cr_done(); diff --git a/src/rgw/rgw_sync_module_pubsub.cc b/src/rgw/rgw_sync_module_pubsub.cc index 8c60c1bae40..460bd4149c5 100644 --- a/src/rgw/rgw_sync_module_pubsub.cc +++ b/src/rgw/rgw_sync_module_pubsub.cc @@ -366,20 +366,20 @@ class RGWSingletonCR : public RGWCoroutine { return true; } - int operate_wrapper() override { + int operate_wrapper(const DoutPrefixProvider *dpp) override { reenter(&wrapper_state) { while (!is_done()) { - ldout(cct, 20) << __func__ << "(): operate_wrapper() -> operate()" << dendl; - operate_ret = operate(); + ldpp_dout(dpp, 20) << __func__ << "(): operate_wrapper() -> operate()" << dendl; + operate_ret = operate(dpp); if (operate_ret < 0) { - ldout(cct, 20) << *this << ": operate() returned r=" << operate_ret << dendl; + ldpp_dout(dpp, 20) << *this << ": operate() returned r=" << operate_ret << dendl; } if (!is_done()) { yield; } } - ldout(cct, 20) << __func__ << "(): RGWSingletonCR: operate_wrapper() done, need to wake up " << waiters.size() << " waiters" << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): RGWSingletonCR: operate_wrapper() done, need to wake up " << waiters.size() << " waiters" << dendl; /* we're done, can't yield anymore */ WaiterInfoRef waiter; @@ -464,7 +464,7 @@ class PSSubscription { retention_days = conf->events_retention_days; } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { rule.init_simple_days_rule("Pubsub Expiration", "" /* all objects in bucket */, retention_days); @@ -478,7 +478,7 @@ class PSSubscription { try { 
old_config.decode(iter); } catch (const buffer::error& e) { - ldpp_dout(sync_env->dpp, 0) << __func__ << "(): decode life cycle config failed" << dendl; + ldpp_dout(dpp, 0) << __func__ << "(): decode life cycle config failed" << dendl; } } @@ -489,7 +489,7 @@ class PSSubscription { if (old_rule.get_prefix().empty() && old_rule.get_expiration().get_days() == retention_days && old_rule.is_enabled()) { - ldpp_dout(sync_env->dpp, 20) << "no need to set lifecycle rule on bucket, existing rule matches config" << dendl; + ldpp_dout(dpp, 20) << "no need to set lifecycle rule on bucket, existing rule matches config" << dendl; return set_cr_done(); } } @@ -499,9 +499,9 @@ class PSSubscription { yield call(new RGWBucketLifecycleConfigCR(sync_env->async_rados, sync_env->store, lc_config, - sync_env->dpp)); + dpp)); if (retcode < 0) { - ldpp_dout(sync_env->dpp, 1) << "ERROR: failed to set lifecycle on bucket: ret=" << retcode << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to set lifecycle on bucket: ret=" << retcode << dendl; return set_cr_error(retcode); } @@ -529,7 +529,7 @@ class PSSubscription { sub_conf(sub->sub_conf) { } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { get_bucket_info.tenant = conf->user.tenant; get_bucket_info.bucket_name = sub_conf->data_bucket_name; @@ -540,9 +540,9 @@ class PSSubscription { sync_env->store, get_bucket_info, sub->get_bucket_info_result, - sync_env->dpp)); + dpp)); if (retcode < 0 && retcode != -ENOENT) { - ldpp_dout(sync_env->dpp, 1) << "ERROR: failed to geting bucket info: " << "tenant=" + ldpp_dout(dpp, 1) << "ERROR: failed to geting bucket info: " << "tenant=" << get_bucket_info.tenant << " name=" << get_bucket_info.bucket_name << ": ret=" << retcode << dendl; } if (retcode == 0) { @@ -552,7 +552,7 @@ class PSSubscription { int ret = sub->data_access->get_bucket(result->bucket->get_info(), result->bucket->get_attrs(), &sub->bucket); if (ret < 0) { - ldpp_dout(sync_env->dpp, 1) << 
"ERROR: data_access.get_bucket() bucket=" << result->bucket << " failed, ret=" << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: data_access.get_bucket() bucket=" << result->bucket << " failed, ret=" << ret << dendl; return set_cr_error(ret); } } @@ -560,7 +560,7 @@ class PSSubscription { yield call(new InitBucketLifecycleCR(sc, conf, sub->get_bucket_info_result->bucket.get())); if (retcode < 0) { - ldpp_dout(sync_env->dpp, 1) << "ERROR: failed to init lifecycle on bucket (bucket=" << sub_conf->data_bucket_name << ") ret=" << retcode << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to init lifecycle on bucket (bucket=" << sub_conf->data_bucket_name << ") ret=" << retcode << dendl; return set_cr_error(retcode); } @@ -569,13 +569,13 @@ class PSSubscription { create_bucket.user_info = sub->env->data_user_info; create_bucket.bucket_name = sub_conf->data_bucket_name; - ldpp_dout(sync_env->dpp, 20) << "pubsub: bucket create: using user info: " << json_str("obj", *sub->env->data_user_info, true) << dendl; + ldpp_dout(dpp, 20) << "pubsub: bucket create: using user info: " << json_str("obj", *sub->env->data_user_info, true) << dendl; yield call(new RGWBucketCreateLocalCR(sync_env->async_rados, sync_env->store, create_bucket, - sync_env->dpp)); + dpp)); if (retcode < 0) { - ldpp_dout(sync_env->dpp, 1) << "ERROR: failed to create bucket: " << "tenant=" + ldpp_dout(dpp, 1) << "ERROR: failed to create bucket: " << "tenant=" << get_bucket_info.tenant << " name=" << get_bucket_info.bucket_name << ": ret=" << retcode << dendl; return set_cr_error(retcode); } @@ -584,7 +584,7 @@ class PSSubscription { } /* failed twice on -ENOENT, unexpected */ - ldpp_dout(sync_env->dpp, 1) << "ERROR: failed to create bucket " << "tenant=" << get_bucket_info.tenant + ldpp_dout(dpp, 1) << "ERROR: failed to create bucket " << "tenant=" << get_bucket_info.tenant << " name=" << get_bucket_info.bucket_name << dendl; return set_cr_error(-EIO); } @@ -610,7 +610,7 @@ class PSSubscription { 
oid_prefix(sub->sub_conf->data_oid_prefix) { } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { rgw_object_simple_put_params put_obj; reenter(this) { @@ -630,12 +630,12 @@ class PSSubscription { yield call(new RGWObjectSimplePutCR(sync_env->async_rados, sync_env->store, put_obj, - sync_env->dpp)); + dpp)); if (retcode < 0) { - ldpp_dout(sync_env->dpp, 10) << "failed to store event: " << put_obj.bucket << "/" << put_obj.key << " ret=" << retcode << dendl; + ldpp_dout(dpp, 10) << "failed to store event: " << put_obj.bucket << "/" << put_obj.key << " ret=" << retcode << dendl; return set_cr_error(retcode); } else { - ldpp_dout(sync_env->dpp, 20) << "event stored: " << put_obj.bucket << "/" << put_obj.key << dendl; + ldpp_dout(dpp, 20) << "event stored: " << put_obj.bucket << "/" << put_obj.key << dendl; } return set_cr_done(); @@ -660,18 +660,18 @@ class PSSubscription { sub_conf(_sub->sub_conf) { } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { ceph_assert(sub_conf->push_endpoint); yield call(sub_conf->push_endpoint->send_to_completion_async(*event.get(), sync_env)); if (retcode < 0) { - ldout(sync_env->cct, 10) << "failed to push event: " << event->id << + ldpp_dout(dpp, 10) << "failed to push event: " << event->id << " to endpoint: " << sub_conf->push_endpoint_name << " ret=" << retcode << dendl; return set_cr_error(retcode); } - ldout(sync_env->cct, 20) << "event: " << event->id << + ldpp_dout(dpp, 20) << "event: " << event->id << " pushed to endpoint: " << sub_conf->push_endpoint_name << dendl; return set_cr_done(); } @@ -764,10 +764,10 @@ class PSManager } ~GetSubCR() { } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { if (owner.empty()) { - ldout(sync_env->cct, 1) << "ERROR: missing user info when getting subscription: " << sub_name << dendl; + ldpp_dout(dpp, 1) << "ERROR: missing user info when getting subscription: " << 
sub_name << dendl; mgr->remove_get_sub(owner, sub_name); return set_cr_error(-EINVAL); } else { @@ -777,7 +777,7 @@ class PSManager rgw_raw_obj obj; ps.get_sub_meta_obj(sub_name, &obj); bool empty_on_enoent = false; - call(new ReadInfoCR(sync_env->async_rados, sync_env->store->svc()->sysobj, + call(new ReadInfoCR(dpp, sync_env->async_rados, sync_env->store->svc()->sysobj, obj, &user_sub_conf, empty_on_enoent)); } @@ -791,7 +791,7 @@ class PSManager yield (*ref)->call_init_cr(this); if (retcode < 0) { - ldout(sync_env->cct, 1) << "ERROR: failed to init subscription when getting subscription: " << sub_name << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to init subscription when getting subscription: " << sub_name << dendl; mgr->remove_get_sub(owner, sub_name); return set_cr_error(retcode); } @@ -885,29 +885,29 @@ public: PSEnvRef& _env) : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env), env(_env), conf(env->conf) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { - ldpp_dout(sync_env->dpp, 1) << ": init pubsub config zone=" << sc->source_zone << dendl; + ldpp_dout(dpp, 1) << ": init pubsub config zone=" << sc->source_zone << dendl; /* nothing to do here right now */ create_user.user = conf->user; create_user.max_buckets = 0; /* unlimited */ create_user.display_name = "pubsub"; create_user.generate_key = false; - yield call(new RGWUserCreateCR(sync_env->async_rados, sync_env->store, create_user, sync_env->dpp)); + yield call(new RGWUserCreateCR(sync_env->async_rados, sync_env->store, create_user, dpp)); if (retcode < 0 && retcode != -ERR_USER_EXIST) { - ldpp_dout(sync_env->dpp, 1) << "ERROR: failed to create rgw user: ret=" << retcode << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to create rgw user: ret=" << retcode << dendl; return set_cr_error(retcode); } get_user_info.user = conf->user; - yield call(new RGWGetUserInfoCR(sync_env->async_rados, sync_env->store, get_user_info, env->data_user_info, 
sync_env->dpp)); + yield call(new RGWGetUserInfoCR(sync_env->async_rados, sync_env->store, get_user_info, env->data_user_info, dpp)); if (retcode < 0) { - ldpp_dout(sync_env->dpp, 1) << "ERROR: failed to create rgw user: ret=" << retcode << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to create rgw user: ret=" << retcode << dendl; return set_cr_error(retcode); } - ldpp_dout(sync_env->dpp, 20) << "pubsub: get user info cr returned: " << json_str("obj", *env->data_user_info, true) << dendl; + ldpp_dout(dpp, 20) << "pubsub: get user info cr returned: " << json_str("obj", *env->data_user_info, true) << dendl; return set_cr_done(); @@ -960,7 +960,7 @@ public: topics(_topics) { *topics = std::make_shared >(); } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { ps.get_bucket_meta_obj(bucket, &bucket_obj); ps.get_meta_obj(&user_obj); @@ -968,7 +968,7 @@ public: using ReadInfoCR = RGWSimpleRadosReadCR; yield { bool empty_on_enoent = true; - call(new ReadInfoCR(sync_env->async_rados, sync_env->store->svc()->sysobj, + call(new ReadInfoCR(dpp, sync_env->async_rados, sync_env->store->svc()->sysobj, bucket_obj, &bucket_topics, empty_on_enoent)); } @@ -976,13 +976,13 @@ public: return set_cr_error(retcode); } - ldout(sync_env->cct, 20) << "RGWPSFindBucketTopicsCR(): found " << bucket_topics.topics.size() << " topics for bucket " << bucket << dendl; + ldpp_dout(dpp, 20) << "RGWPSFindBucketTopicsCR(): found " << bucket_topics.topics.size() << " topics for bucket " << bucket << dendl; if (!bucket_topics.topics.empty()) { using ReadUserTopicsInfoCR = RGWSimpleRadosReadCR; yield { bool empty_on_enoent = true; - call(new ReadUserTopicsInfoCR(sync_env->async_rados, sync_env->store->svc()->sysobj, + call(new ReadUserTopicsInfoCR(dpp, sync_env->async_rados, sync_env->store->svc()->sysobj, user_obj, &user_topics, empty_on_enoent)); } @@ -1039,13 +1039,13 @@ public: has_subscriptions(false), event_handled(false) {} - int operate() override { 
+ int operate(const DoutPrefixProvider *dpp) override { reenter(this) { - ldout(sc->cct, 20) << ": handle event: obj: z=" << sc->source_zone + ldpp_dout(dpp, 20) << ": handle event: obj: z=" << sc->source_zone << " event=" << json_str("event", *event, false) << " owner=" << owner << dendl; - ldout(sc->cct, 20) << "pubsub: " << topics->size() << " topics found for path" << dendl; + ldpp_dout(dpp, 20) << "pubsub: " << topics->size() << " topics found for path" << dendl; // outside caller should check that ceph_assert(!topics->empty()); @@ -1054,17 +1054,17 @@ public: // loop over all topics related to the bucket/object for (titer = topics->begin(); titer != topics->end(); ++titer) { - ldout(sc->cct, 20) << ": notification for " << event->source << ": topic=" << + ldpp_dout(dpp, 20) << ": notification for " << event->source << ": topic=" << (*titer)->name << ", has " << (*titer)->subs.size() << " subscriptions" << dendl; // loop over all subscriptions of the topic for (siter = (*titer)->subs.begin(); siter != (*titer)->subs.end(); ++siter) { - ldout(sc->cct, 20) << ": subscription: " << *siter << dendl; + ldpp_dout(dpp, 20) << ": subscription: " << *siter << dendl; has_subscriptions = true; // try to read subscription configuration yield PSManager::call_get_subscription_cr(sc, env->manager, this, owner, *siter, &sub); if (retcode < 0) { if (perfcounter) perfcounter->inc(l_rgw_pubsub_missing_conf); - ldout(sc->cct, 1) << "ERROR: failed to find subscription config for subscription=" << *siter + ldpp_dout(dpp, 1) << "ERROR: failed to find subscription config for subscription=" << *siter << " ret=" << retcode << dendl; if (retcode == -ENOENT) { // missing subscription info should be reflected back as invalid argument @@ -1076,21 +1076,21 @@ public: } if (sub->sub_conf->s3_id.empty()) { // subscription was not made by S3 compatible API - ldout(sc->cct, 20) << "storing event for subscription=" << *siter << " owner=" << owner << " ret=" << retcode << dendl; + ldpp_dout(dpp, 
20) << "storing event for subscription=" << *siter << " owner=" << owner << " ret=" << retcode << dendl; yield call(PSSubscription::store_event_cr(sc, sub, event)); if (retcode < 0) { if (perfcounter) perfcounter->inc(l_rgw_pubsub_store_fail); - ldout(sc->cct, 1) << "ERROR: failed to store event for subscription=" << *siter << " ret=" << retcode << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to store event for subscription=" << *siter << " ret=" << retcode << dendl; } else { if (perfcounter) perfcounter->inc(l_rgw_pubsub_store_ok); event_handled = true; } if (sub->sub_conf->push_endpoint) { - ldout(sc->cct, 20) << "push event for subscription=" << *siter << " owner=" << owner << " ret=" << retcode << dendl; + ldpp_dout(dpp, 20) << "push event for subscription=" << *siter << " owner=" << owner << " ret=" << retcode << dendl; yield call(PSSubscription::push_event_cr(sc, sub, event)); if (retcode < 0) { if (perfcounter) perfcounter->inc(l_rgw_pubsub_push_failed); - ldout(sc->cct, 1) << "ERROR: failed to push event for subscription=" << *siter << " ret=" << retcode << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to push event for subscription=" << *siter << " ret=" << retcode << dendl; } else { if (perfcounter) perfcounter->inc(l_rgw_pubsub_push_ok); event_handled = true; @@ -1098,23 +1098,23 @@ public: } } else { // subscription was made by S3 compatible API - ldout(sc->cct, 20) << "storing s3 event for subscription=" << *siter << " owner=" << owner << " ret=" << retcode << dendl; + ldpp_dout(dpp, 20) << "storing s3 event for subscription=" << *siter << " owner=" << owner << " ret=" << retcode << dendl; s3_event->configurationId = sub->sub_conf->s3_id; s3_event->opaque_data = (*titer)->opaque_data; yield call(PSSubscription::store_event_cr(sc, sub, s3_event)); if (retcode < 0) { if (perfcounter) perfcounter->inc(l_rgw_pubsub_store_fail); - ldout(sc->cct, 1) << "ERROR: failed to store s3 event for subscription=" << *siter << " ret=" << retcode << dendl; + 
ldpp_dout(dpp, 1) << "ERROR: failed to store s3 event for subscription=" << *siter << " ret=" << retcode << dendl; } else { if (perfcounter) perfcounter->inc(l_rgw_pubsub_store_ok); event_handled = true; } if (sub->sub_conf->push_endpoint) { - ldout(sc->cct, 20) << "push s3 event for subscription=" << *siter << " owner=" << owner << " ret=" << retcode << dendl; + ldpp_dout(dpp, 20) << "push s3 event for subscription=" << *siter << " owner=" << owner << " ret=" << retcode << dendl; yield call(PSSubscription::push_event_cr(sc, sub, s3_event)); if (retcode < 0) { if (perfcounter) perfcounter->inc(l_rgw_pubsub_push_failed); - ldout(sc->cct, 1) << "ERROR: failed to push s3 event for subscription=" << *siter << " ret=" << retcode << dendl; + ldpp_dout(dpp, 1) << "ERROR: failed to push s3 event for subscription=" << *siter << " ret=" << retcode << dendl; } else { if (perfcounter) perfcounter->inc(l_rgw_pubsub_push_ok); event_handled = true; @@ -1157,9 +1157,9 @@ public: versioned_epoch(_versioned_epoch), topics(_topics) { } - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { - ldout(sc->cct, 20) << ": stat of remote obj: z=" << sc->source_zone + ldpp_dout(dpp, 20) << ": stat of remote obj: z=" << sc->source_zone << " b=" << sync_pipe.info.source_bs.bucket << " k=" << key << " size=" << size << " mtime=" << mtime << " attrs=" << attrs << dendl; { @@ -1236,18 +1236,18 @@ public: ~RGWPSHandleObjCreateCR() override {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { yield call(new RGWPSFindBucketTopicsCR(sc, env, sync_pipe.dest_bucket_info.owner, sync_pipe.info.source_bs.bucket, key, rgw::notify::ObjectCreated, &topics)); if (retcode < 0) { - ldout(sc->cct, 1) << "ERROR: RGWPSFindBucketTopicsCR returned ret=" << retcode << dendl; + ldpp_dout(dpp, 1) << "ERROR: RGWPSFindBucketTopicsCR returned ret=" << retcode << dendl; return set_cr_error(retcode); } if (topics->empty()) { - 
ldout(sc->cct, 20) << "no topics found for " << sync_pipe.info.source_bs.bucket << "/" << key << dendl; + ldpp_dout(dpp, 20) << "no topics found for " << sync_pipe.info.source_bs.bucket << "/" << key << dendl; return set_cr_done(); } yield call(new RGWPSHandleRemoteObjCR(sc, sync_pipe, key, env, versioned_epoch, topics)); @@ -1283,17 +1283,17 @@ public: bucket(_sync_pipe.dest_bucket_info.bucket), key(_key), mtime(_mtime), event_type(_event_type) {} - int operate() override { + int operate(const DoutPrefixProvider *dpp) override { reenter(this) { - ldout(sc->cct, 20) << ": remove remote obj: z=" << sc->source_zone + ldpp_dout(dpp, 20) << ": remove remote obj: z=" << sc->source_zone << " b=" << bucket << " k=" << key << " mtime=" << mtime << dendl; yield call(new RGWPSFindBucketTopicsCR(sc, env, owner, bucket, key, event_type, &topics)); if (retcode < 0) { - ldout(sc->cct, 1) << "ERROR: RGWPSFindBucketTopicsCR returned ret=" << retcode << dendl; + ldpp_dout(dpp, 1) << "ERROR: RGWPSFindBucketTopicsCR returned ret=" << retcode << dendl; return set_cr_error(retcode); } if (topics->empty()) { - ldout(sc->cct, 20) << "no topics found for " << bucket << "/" << key << dendl; + ldpp_dout(dpp, 20) << "no topics found for " << bucket << "/" << key << dendl; return set_cr_done(); } // at this point we don't know whether we need the ceph event or S3 event diff --git a/src/rgw/rgw_sync_module_pubsub_rest.cc b/src/rgw/rgw_sync_module_pubsub_rest.cc index ec819fbacb1..b6afdc54aca 100644 --- a/src/rgw/rgw_sync_module_pubsub_rest.cc +++ b/src/rgw/rgw_sync_module_pubsub_rest.cc @@ -167,7 +167,7 @@ public: bool exists; topic_name = s->info.args.get("topic", &exists); if (!exists) { - ldout(s->cct, 1) << "missing required param 'topic'" << dendl; + ldpp_dout(this, 1) << "missing required param 'topic'" << dendl; return -EINVAL; } @@ -232,7 +232,7 @@ public: event_id = s->info.args.get("event-id", &exists); if (!exists) { - ldout(s->cct, 1) << "missing required param 'event-id'" << 
dendl; + ldpp_dout(this, 1) << "missing required param 'event-id'" << dendl; return -EINVAL; } return 0; @@ -248,7 +248,7 @@ public: const int ret = s->info.args.get_int("max-entries", &max_entries, RGWPubSub::Sub::DEFAULT_MAX_EVENTS); if (ret < 0) { - ldout(s->cct, 1) << "failed to parse 'max-entries' param" << dendl; + ldpp_dout(this, 1) << "failed to parse 'max-entries' param" << dendl; return -EINVAL; } return 0; @@ -350,7 +350,7 @@ private: bool exists; topic_name = s->info.args.get("topic", &exists); if (!exists) { - ldout(s->cct, 1) << "missing required param 'topic'" << dendl; + ldpp_dout(this, 1) << "missing required param 'topic'" << dendl; return -EINVAL; } @@ -361,7 +361,7 @@ private: } rgw::notify::from_string_list(events_str, events); if (std::find(events.begin(), events.end(), rgw::notify::UnknownEvent) != events.end()) { - ldout(s->cct, 1) << "invalid event type in list: " << events_str << dendl; + ldpp_dout(this, 1) << "invalid event type in list: " << events_str << dendl; return -EINVAL; } return notif_bucket_path(s->object->get_name(), bucket_name); @@ -377,12 +377,12 @@ void RGWPSCreateNotif_ObjStore::execute(optional_yield y) ps.emplace(static_cast(store), s->owner.get_id().tenant); auto b = ps->get_bucket(bucket_info.bucket); - op_ret = b->create_notification(topic_name, events, y); + op_ret = b->create_notification(this, topic_name, events, y); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to create notification for topic '" << topic_name << "', ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to create notification for topic '" << topic_name << "', ret=" << op_ret << dendl; return; } - ldout(s->cct, 20) << "successfully created notification for topic '" << topic_name << "'" << dendl; + ldpp_dout(this, 20) << "successfully created notification for topic '" << topic_name << "'" << dendl; } // command: DELETE /notifications/bucket/?topic= @@ -394,7 +394,7 @@ private: bool exists; topic_name = s->info.args.get("topic", &exists); if 
(!exists) { - ldout(s->cct, 1) << "missing required param 'topic'" << dendl; + ldpp_dout(this, 1) << "missing required param 'topic'" << dendl; return -EINVAL; } return notif_bucket_path(s->object->get_name(), bucket_name); } @@ -413,12 +413,12 @@ void RGWPSDeleteNotif_ObjStore::execute(optional_yield y) { ps.emplace(static_cast(store), s->owner.get_id().tenant); auto b = ps->get_bucket(bucket_info.bucket); - op_ret = b->remove_notification(topic_name, y); + op_ret = b->remove_notification(this, topic_name, y); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to remove notification from topic '" << topic_name << "', ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to remove notification from topic '" << topic_name << "', ret=" << op_ret << dendl; return; } - ldout(s->cct, 20) << "successfully removed notification from topic '" << topic_name << "'" << dendl; + ldpp_dout(this, 20) << "successfully removed notification from topic '" << topic_name << "'" << dendl; } // command: GET /notifications/bucket/ @@ -454,7 +454,7 @@ void RGWPSListNotifs_ObjStore::execute(optional_yield y) auto b = ps->get_bucket(bucket_info.bucket); op_ret = b->get_topics(&result); if (op_ret < 0) { - ldout(s->cct, 1) << "failed to get topics, ret=" << op_ret << dendl; + ldpp_dout(this, 1) << "failed to get topics, ret=" << op_ret << dendl; return; } } @@ -522,7 +522,7 @@ RGWHandler_REST* RGWRESTMgr_PubSub::get_handler(rgw::sal::Store* store, } } - ldout(s->cct, 20) << __func__ << " handler=" << (handler ? typeid(*handler).name() : "") << dendl; + ldpp_dout(s, 20) << __func__ << " handler=" << (handler ? 
typeid(*handler).name() : "") << dendl; return handler; } diff --git a/src/rgw/rgw_sync_trace.cc b/src/rgw/rgw_sync_trace.cc index ddcdea24952..e99fdcf5030 100644 --- a/src/rgw/rgw_sync_trace.cc +++ b/src/rgw/rgw_sync_trace.cc @@ -15,7 +15,6 @@ #define dout_context g_ceph_context -#define dout_subsys ceph_subsys_rgw_sync RGWSyncTraceNode::RGWSyncTraceNode(CephContext *_cct, uint64_t _handle, const RGWSyncTraceNodeRef& _parent, @@ -65,10 +64,10 @@ public: RGWSyncTraceServiceMapThread(RGWRados *_store, RGWSyncTraceManager *_manager) : RGWRadosThread(_store, "sync-trace"), store(_store), manager(_manager) {} - int process() override; + int process(const DoutPrefixProvider *dpp) override; }; -int RGWSyncTraceServiceMapThread::process() +int RGWSyncTraceServiceMapThread::process(const DoutPrefixProvider *dpp) { map status; status["current_sync"] = manager->get_active_names(); diff --git a/src/rgw/rgw_tools.cc b/src/rgw/rgw_tools.cc index 1fadedf4e4c..ad8382f6947 100644 --- a/src/rgw/rgw_tools.cc +++ b/src/rgw/rgw_tools.cc @@ -35,7 +35,8 @@ static std::map* ext_mime_map; -int rgw_init_ioctx(librados::Rados *rados, const rgw_pool& pool, +int rgw_init_ioctx(const DoutPrefixProvider *dpp, + librados::Rados *rados, const rgw_pool& pool, librados::IoCtx& ioctx, bool create, bool mostly_omap) { @@ -43,7 +44,7 @@ int rgw_init_ioctx(librados::Rados *rados, const rgw_pool& pool, if (r == -ENOENT && create) { r = rados->pool_create(pool.name.c_str()); if (r == -ERANGE) { - dout(0) + ldpp_dout(dpp, 0) << __func__ << " ERROR: librados::Rados::pool_create returned " << cpp_strerror(-r) << " (this can be due to a pool or placement group misconfiguration, e.g." 
@@ -74,7 +75,7 @@ int rgw_init_ioctx(librados::Rados *rados, const rgw_pool& pool, stringify(bias) + "\"}", inbl, NULL, NULL); if (r < 0) { - dout(10) << __func__ << " warning: failed to set pg_autoscale_bias on " + ldpp_dout(dpp, 10) << __func__ << " warning: failed to set pg_autoscale_bias on " << pool.name << dendl; } // set pg_num_min @@ -85,7 +86,7 @@ int rgw_init_ioctx(librados::Rados *rados, const rgw_pool& pool, stringify(min) + "\"}", inbl, NULL, NULL); if (r < 0) { - dout(10) << __func__ << " warning: failed to set pg_num_min on " + ldpp_dout(dpp, 10) << __func__ << " warning: failed to set pg_num_min on " << pool.name << dendl; } // set recovery_priority @@ -96,7 +97,7 @@ int rgw_init_ioctx(librados::Rados *rados, const rgw_pool& pool, stringify(p) + "\"}", inbl, NULL, NULL); if (r < 0) { - dout(10) << __func__ << " warning: failed to set recovery_priority on " + ldpp_dout(dpp, 10) << __func__ << " warning: failed to set recovery_priority on " << pool.name << dendl; } } @@ -155,7 +156,8 @@ int rgw_parse_list_of_flags(struct rgw_name_to_flag *mapping, return 0; } -int rgw_put_system_obj(RGWSysObjectCtx& obj_ctx, const rgw_pool& pool, const string& oid, bufferlist& data, bool exclusive, +int rgw_put_system_obj(const DoutPrefixProvider *dpp, + RGWSysObjectCtx& obj_ctx, const rgw_pool& pool, const string& oid, bufferlist& data, bool exclusive, RGWObjVersionTracker *objv_tracker, real_time set_mtime, optional_yield y, map *pattrs) { map no_attrs; @@ -171,7 +173,7 @@ int rgw_put_system_obj(RGWSysObjectCtx& obj_ctx, const rgw_pool& pool, const str .set_exclusive(exclusive) .set_mtime(set_mtime) .set_attrs(*pattrs) - .write(data, y); + .write(dpp, data, y); return ret; } @@ -203,7 +205,7 @@ int rgw_get_system_obj(RGWSysObjectCtx& obj_ctx, const rgw_pool& pool, const str ret = rop.set_cache_info(cache_info) .set_refresh_version(refresh_version) - .read(&bl, y); + .read(dpp, &bl, y); if (ret == -ECANCELED) { /* raced, restart */ if (!original_readv.empty()) { @@ 
-228,7 +230,8 @@ int rgw_get_system_obj(RGWSysObjectCtx& obj_ctx, const rgw_pool& pool, const str return 0; } -int rgw_delete_system_obj(RGWSI_SysObj *sysobj_svc, const rgw_pool& pool, const string& oid, +int rgw_delete_system_obj(const DoutPrefixProvider *dpp, + RGWSI_SysObj *sysobj_svc, const rgw_pool& pool, const string& oid, RGWObjVersionTracker *objv_tracker, optional_yield y) { auto obj_ctx = sysobj_svc->init_obj_ctx(); @@ -236,12 +239,12 @@ int rgw_delete_system_obj(RGWSI_SysObj *sysobj_svc, const rgw_pool& pool, const rgw_raw_obj obj(pool, oid); return sysobj.wop() .set_objv_tracker(objv_tracker) - .remove(y); + .remove(dpp, y); } thread_local bool is_asio_thread = false; -int rgw_rados_operate(librados::IoCtx& ioctx, const std::string& oid, +int rgw_rados_operate(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx, const std::string& oid, librados::ObjectReadOperation *op, bufferlist* pbl, optional_yield y, int flags) { @@ -260,12 +263,12 @@ int rgw_rados_operate(librados::IoCtx& ioctx, const std::string& oid, } // work on asio threads should be asynchronous, so warn when they block if (is_asio_thread) { - dout(20) << "WARNING: blocking librados call" << dendl; + ldpp_dout(dpp, 20) << "WARNING: blocking librados call" << dendl; } return ioctx.operate(oid, op, nullptr, flags); } -int rgw_rados_operate(librados::IoCtx& ioctx, const std::string& oid, +int rgw_rados_operate(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx, const std::string& oid, librados::ObjectWriteOperation *op, optional_yield y, int flags) { @@ -277,12 +280,12 @@ int rgw_rados_operate(librados::IoCtx& ioctx, const std::string& oid, return -ec.value(); } if (is_asio_thread) { - dout(20) << "WARNING: blocking librados call" << dendl; + ldpp_dout(dpp, 20) << "WARNING: blocking librados call" << dendl; } return ioctx.operate(oid, op, flags); } -int rgw_rados_notify(librados::IoCtx& ioctx, const std::string& oid, +int rgw_rados_notify(const DoutPrefixProvider *dpp, librados::IoCtx& 
ioctx, const std::string& oid, bufferlist& bl, uint64_t timeout_ms, bufferlist* pbl, optional_yield y) { @@ -298,7 +301,7 @@ int rgw_rados_notify(librados::IoCtx& ioctx, const std::string& oid, return -ec.value(); } if (is_asio_thread) { - dout(20) << "WARNING: blocking librados call" << dendl; + ldpp_dout(dpp, 20) << "WARNING: blocking librados call" << dendl; } return ioctx.notify2(oid, bl, timeout_ms, pbl); } diff --git a/src/rgw/rgw_tools.h b/src/rgw/rgw_tools.h index 8222c52008a..0d96d44edce 100644 --- a/src/rgw/rgw_tools.h +++ b/src/rgw/rgw_tools.h @@ -26,7 +26,8 @@ namespace rgw { namespace sal { struct obj_version; -int rgw_init_ioctx(librados::Rados *rados, const rgw_pool& pool, +int rgw_init_ioctx(const DoutPrefixProvider *dpp, + librados::Rados *rados, const rgw_pool& pool, librados::IoCtx& ioctx, bool create = false, bool mostly_omap = false); @@ -71,13 +72,14 @@ struct rgw_name_to_flag { int rgw_parse_list_of_flags(struct rgw_name_to_flag *mapping, const string& str, uint32_t *perm); -int rgw_put_system_obj(RGWSysObjectCtx& obj_ctx, const rgw_pool& pool, const string& oid, bufferlist& data, bool exclusive, +int rgw_put_system_obj(const DoutPrefixProvider *dpp, RGWSysObjectCtx& obj_ctx, const rgw_pool& pool, const string& oid, bufferlist& data, bool exclusive, RGWObjVersionTracker *objv_tracker, real_time set_mtime, optional_yield y, map *pattrs = NULL); int rgw_get_system_obj(RGWSysObjectCtx& obj_ctx, const rgw_pool& pool, const string& key, bufferlist& bl, RGWObjVersionTracker *objv_tracker, real_time *pmtime, optional_yield y, const DoutPrefixProvider *dpp, map *pattrs = NULL, rgw_cache_entry_info *cache_info = NULL, boost::optional refresh_version = boost::none); -int rgw_delete_system_obj(RGWSI_SysObj *sysobj_svc, const rgw_pool& pool, const string& oid, +int rgw_delete_system_obj(const DoutPrefixProvider *dpp, + RGWSI_SysObj *sysobj_svc, const rgw_pool& pool, const string& oid, RGWObjVersionTracker *objv_tracker, optional_yield y); const char 
*rgw_find_mime_by_ext(string& ext); @@ -90,13 +92,13 @@ void rgw_filter_attrset(map& unfiltered_attrset, const strin extern thread_local bool is_asio_thread; /// perform the rados operation, using the yield context when given -int rgw_rados_operate(librados::IoCtx& ioctx, const std::string& oid, +int rgw_rados_operate(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx, const std::string& oid, librados::ObjectReadOperation *op, bufferlist* pbl, optional_yield y, int flags = 0); -int rgw_rados_operate(librados::IoCtx& ioctx, const std::string& oid, +int rgw_rados_operate(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx, const std::string& oid, librados::ObjectWriteOperation *op, optional_yield y, int flags = 0); -int rgw_rados_notify(librados::IoCtx& ioctx, const std::string& oid, +int rgw_rados_notify(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx, const std::string& oid, bufferlist& bl, uint64_t timeout_ms, bufferlist* pbl, optional_yield y); diff --git a/src/rgw/rgw_torrent.cc b/src/rgw/rgw_torrent.cc index 7d6ea13272f..4fb54333a3f 100644 --- a/src/rgw/rgw_torrent.cc +++ b/src/rgw/rgw_torrent.cc @@ -66,17 +66,17 @@ int seed::get_torrent_file(rgw::sal::Object* object, string oid, key; get_obj_bucket_and_oid_loc(obj, oid, key); - ldout(s->cct, 20) << "NOTICE: head obj oid= " << oid << dendl; + ldpp_dout(s, 20) << "NOTICE: head obj oid= " << oid << dendl; const set obj_key{RGW_OBJ_TORRENT}; map m; - const int r = object->omap_get_vals_by_keys(oid, obj_key, &m); + const int r = object->omap_get_vals_by_keys(s, oid, obj_key, &m); if (r < 0) { - ldout(s->cct, 0) << "ERROR: omap_get_vals_by_keys failed: " << r << dendl; + ldpp_dout(s, 0) << "ERROR: omap_get_vals_by_keys failed: " << r << dendl; return r; } if (m.size() != 1) { - ldout(s->cct, 0) << "ERROR: omap key " RGW_OBJ_TORRENT " not found" << dendl; + ldpp_dout(s, 0) << "ERROR: omap key " RGW_OBJ_TORRENT " not found" << dendl; return -EINVAL; } bl.append(std::move(m.begin()->second)); @@ -116,7 
+116,7 @@ int seed::complete(optional_yield y) ret = save_torrent_file(y); if (0 != ret) { - ldout(s->cct, 0) << "ERROR: failed to save_torrent_file() ret= "<< ret << dendl; + ldpp_dout(s, 0) << "ERROR: failed to save_torrent_file() ret= "<< ret << dendl; return ret; } @@ -204,7 +204,7 @@ void seed::set_announce() if (announce_list.empty()) { - ldout(s->cct, 5) << "NOTICE: announce_list is empty " << dendl; + ldpp_dout(s, 5) << "NOTICE: announce_list is empty " << dendl; return; } @@ -250,10 +250,10 @@ int seed::save_torrent_file(optional_yield y) int op_ret = 0; string key = RGW_OBJ_TORRENT; - op_ret = s->object->omap_set_val_by_key(key, bl, false, y); + op_ret = s->object->omap_set_val_by_key(s, key, bl, false, y); if (op_ret < 0) { - ldout(s->cct, 0) << "ERROR: failed to omap_set() op_ret = " << op_ret << dendl; + ldpp_dout(s, 0) << "ERROR: failed to omap_set() op_ret = " << op_ret << dendl; return op_ret; } diff --git a/src/rgw/rgw_trim_bilog.cc b/src/rgw/rgw_trim_bilog.cc index b939f561daa..b31bd94a49f 100644 --- a/src/rgw/rgw_trim_bilog.cc +++ b/src/rgw/rgw_trim_bilog.cc @@ -258,8 +258,8 @@ class BucketTrimWatcher : public librados::WatchCtx2 { stop(); } - int start() { - int r = store->getRados()->get_raw_obj_ref(obj, &ref); + int start(const DoutPrefixProvider *dpp) { + int r = store->getRados()->get_raw_obj_ref(dpp, obj, &ref); if (r < 0) { return r; } @@ -274,13 +274,13 @@ class BucketTrimWatcher : public librados::WatchCtx2 { } } if (r < 0) { - lderr(store->ctx()) << "Failed to watch " << ref.obj + ldpp_dout(dpp, -1) << "Failed to watch " << ref.obj << " with " << cpp_strerror(-r) << dendl; ref.pool.ioctx().close(); return r; } - ldout(store->ctx(), 10) << "Watching " << ref.obj.oid << dendl; + ldpp_dout(dpp, 10) << "Watching " << ref.obj.oid << dendl; return 0; } @@ -381,15 +381,17 @@ int take_min_status(CephContext *cct, Iter first, Iter last, /// concurrent requests class BucketTrimShardCollectCR : public RGWShardCollectCR { static constexpr int 
MAX_CONCURRENT_SHARDS = 16; + const DoutPrefixProvider *dpp; rgw::sal::RadosStore* const store; const RGWBucketInfo& bucket_info; const std::vector& markers; //< shard markers to trim size_t i{0}; //< index of current shard marker public: - BucketTrimShardCollectCR(rgw::sal::RadosStore* store, const RGWBucketInfo& bucket_info, + BucketTrimShardCollectCR(const DoutPrefixProvider *dpp, + rgw::sal::RadosStore* store, const RGWBucketInfo& bucket_info, const std::vector& markers) : RGWShardCollectCR(store->ctx(), MAX_CONCURRENT_SHARDS), - store(store), bucket_info(bucket_info), markers(markers) + dpp(dpp), store(store), bucket_info(bucket_info), markers(markers) {} bool spawn_next() override; }; @@ -402,9 +404,9 @@ bool BucketTrimShardCollectCR::spawn_next() // skip empty markers if (!marker.empty()) { - ldout(cct, 10) << "trimming bilog shard " << shard_id + ldpp_dout(dpp, 10) << "trimming bilog shard " << shard_id << " of " << bucket_info.bucket << " at marker " << marker << dendl; - spawn(new RGWRadosBILogTrimCR(store, bucket_info, shard_id, + spawn(new RGWRadosBILogTrimCR(dpp, store, bucket_info, shard_id, std::string{}, marker), false); return true; @@ -446,13 +448,13 @@ class BucketTrimInstanceCR : public RGWCoroutine { source_policy = make_shared(); } - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int BucketTrimInstanceCR::operate() +int BucketTrimInstanceCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { - ldout(cct, 4) << "starting trim on bucket=" << bucket_instance << dendl; + ldpp_dout(dpp, 4) << "starting trim on bucket=" << bucket_instance << dendl; get_policy_params.zone = zone_id; get_policy_params.bucket = bucket; @@ -512,7 +514,7 @@ int BucketTrimInstanceCR::operate() auto ziter = zone_conn_map.find(zid); if (ziter == zone_conn_map.end()) { - ldout(cct, 0) << "WARNING: no connection to zone " << zid << ", can't trim bucket: " << bucket << dendl; + ldpp_dout(dpp, 0) << "WARNING: no connection to zone " << 
zid << ", can't trim bucket: " << bucket << dendl; return set_cr_error(-ECANCELED); } using StatusCR = RGWReadRESTResourceCR; @@ -540,21 +542,21 @@ int BucketTrimInstanceCR::operate() retcode = take_min_status(cct, peer_status.begin(), peer_status.end(), &min_markers); if (retcode < 0) { - ldout(cct, 4) << "failed to correlate bucket sync status from peers" << dendl; + ldpp_dout(dpp, 4) << "failed to correlate bucket sync status from peers" << dendl; return set_cr_error(retcode); } // trim shards with a ShardCollectCR - ldout(cct, 10) << "trimming bilogs for bucket=" << pbucket_info->bucket + ldpp_dout(dpp, 10) << "trimming bilogs for bucket=" << pbucket_info->bucket << " markers=" << min_markers << ", shards=" << min_markers.size() << dendl; set_status("trimming bilog shards"); - yield call(new BucketTrimShardCollectCR(store, *pbucket_info, min_markers)); + yield call(new BucketTrimShardCollectCR(dpp, store, *pbucket_info, min_markers)); // ENODATA just means there were no keys to trim if (retcode == -ENODATA) { retcode = 0; } if (retcode < 0) { - ldout(cct, 4) << "failed to trim bilog shards: " + ldpp_dout(dpp, 4) << "failed to trim bilog shards: " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); } @@ -637,7 +639,7 @@ class AsyncMetadataList : public RGWAsyncRadosRequest { const std::string start_marker; MetadataListCallback callback; - int _send_request() override; + int _send_request(const DoutPrefixProvider *dpp) override; public: AsyncMetadataList(CephContext *cct, RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWMetadataManager *mgr, @@ -648,7 +650,7 @@ class AsyncMetadataList : public RGWAsyncRadosRequest { {} }; -int AsyncMetadataList::_send_request() +int AsyncMetadataList::_send_request(const DoutPrefixProvider *dpp) { void* handle = nullptr; std::list keys; @@ -656,15 +658,15 @@ int AsyncMetadataList::_send_request() std::string marker; // start a listing at the given marker - int r = mgr->list_keys_init(section, start_marker, 
&handle); + int r = mgr->list_keys_init(dpp, section, start_marker, &handle); if (r == -EINVAL) { // restart with empty marker below } else if (r < 0) { - ldout(cct, 10) << "failed to init metadata listing: " + ldpp_dout(dpp, 10) << "failed to init metadata listing: " << cpp_strerror(r) << dendl; return r; } else { - ldout(cct, 20) << "starting metadata listing at " << start_marker << dendl; + ldpp_dout(dpp, 20) << "starting metadata listing at " << start_marker << dendl; // release the handle when scope exits auto g = make_scope_guard([=] { mgr->list_keys_complete(handle); }); @@ -673,7 +675,7 @@ int AsyncMetadataList::_send_request() // get the next key and marker r = mgr->list_keys_next(handle, 1, keys, &truncated); if (r < 0) { - ldout(cct, 10) << "failed to list metadata: " + ldpp_dout(dpp, 10) << "failed to list metadata: " << cpp_strerror(r) << dendl; return r; } @@ -697,13 +699,13 @@ int AsyncMetadataList::_send_request() // restart the listing from the beginning (empty marker) handle = nullptr; - r = mgr->list_keys_init(section, "", &handle); + r = mgr->list_keys_init(dpp, section, "", &handle); if (r < 0) { - ldout(cct, 10) << "failed to restart metadata listing: " + ldpp_dout(dpp, 10) << "failed to restart metadata listing: " << cpp_strerror(r) << dendl; return r; } - ldout(cct, 20) << "restarting metadata listing" << dendl; + ldpp_dout(dpp, 20) << "restarting metadata listing" << dendl; // release the handle when scope exits auto g = make_scope_guard([=] { mgr->list_keys_complete(handle); }); @@ -711,7 +713,7 @@ int AsyncMetadataList::_send_request() // get the next key and marker r = mgr->list_keys_next(handle, 1, keys, &truncated); if (r < 0) { - ldout(cct, 10) << "failed to list metadata: " + ldpp_dout(dpp, 10) << "failed to list metadata: " << cpp_strerror(r) << dendl; return r; } @@ -753,7 +755,7 @@ class MetadataListCR : public RGWSimpleCoroutine { request_cleanup(); } - int send_request() override { + int send_request(const DoutPrefixProvider 
*dpp) override { req = new AsyncMetadataList(cct, this, stack->create_completion_notifier(), mgr, section, start_marker, callback); async_rados->queue(req); @@ -794,19 +796,19 @@ class BucketTrimCR : public RGWCoroutine { observer(observer), obj(obj), counter(config.counter_size), dpp(dpp) {} - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; const std::string BucketTrimCR::section{"bucket.instance"}; -int BucketTrimCR::operate() +int BucketTrimCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { start_time = ceph::mono_clock::now(); if (config.buckets_per_interval) { // query watch/notify for hot buckets - ldout(cct, 10) << "fetching active bucket counters" << dendl; + ldpp_dout(dpp, 10) << "fetching active bucket counters" << dendl; set_status("fetching active bucket counters"); yield { // request the top bucket counters from each peer gateway @@ -819,7 +821,7 @@ int BucketTrimCR::operate() ¬ify_replies)); } if (retcode < 0) { - ldout(cct, 10) << "failed to fetch peer bucket counters" << dendl; + ldpp_dout(dpp, 10) << "failed to fetch peer bucket counters" << dendl; return set_cr_error(retcode); } @@ -843,17 +845,17 @@ int BucketTrimCR::operate() // read BucketTrimStatus for marker position set_status("reading trim status"); using ReadStatus = RGWSimpleRadosReadCR; - yield call(new ReadStatus(store->svc()->rados->get_async_processor(), store->svc()->sysobj, obj, + yield call(new ReadStatus(dpp, store->svc()->rados->get_async_processor(), store->svc()->sysobj, obj, &status, true, &objv)); if (retcode < 0) { - ldout(cct, 10) << "failed to read bilog trim status: " + ldpp_dout(dpp, 10) << "failed to read bilog trim status: " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); } if (status.marker == "MAX") { status.marker.clear(); // restart at the beginning } - ldout(cct, 10) << "listing cold buckets from marker=" + ldpp_dout(dpp, 10) << "listing cold buckets from marker=" << status.marker << dendl; 
set_status("listing cold buckets for trim"); @@ -891,7 +893,7 @@ int BucketTrimCR::operate() // trim bucket instances with limited concurrency set_status("trimming buckets"); - ldout(cct, 4) << "collected " << buckets.size() << " buckets for trim" << dendl; + ldpp_dout(dpp, 4) << "collected " << buckets.size() << " buckets for trim" << dendl; yield call(new BucketTrimInstanceCollectCR(store, http, observer, buckets, config.concurrent_buckets, dpp)); // ignore errors from individual buckets @@ -902,7 +904,7 @@ int BucketTrimCR::operate() status.marker = std::move(last_cold_marker); ldpp_dout(dpp, 20) << "writing bucket trim marker=" << status.marker << dendl; using WriteStatus = RGWSimpleRadosWriteCR; - yield call(new WriteStatus(store->svc()->rados->get_async_processor(), store->svc()->sysobj, obj, + yield call(new WriteStatus(dpp, store->svc()->rados->get_async_processor(), store->svc()->sysobj, obj, status, &objv)); if (retcode < 0) { ldpp_dout(dpp, 4) << "failed to write updated trim status: " @@ -927,7 +929,7 @@ int BucketTrimCR::operate() return set_cr_error(retcode); } - ldout(cct, 4) << "bucket index log processing completed in " + ldpp_dout(dpp, 4) << "bucket index log processing completed in " << ceph::mono_clock::now() - start_time << dendl; return set_cr_done(); } @@ -954,10 +956,10 @@ class BucketTrimPollCR : public RGWCoroutine { cookie(RGWSimpleRadosLockCR::gen_random_cookie(cct)), dpp(dpp) {} - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int BucketTrimPollCR::operate() +int BucketTrimPollCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { for (;;) { @@ -1126,7 +1128,7 @@ BucketTrimManager::~BucketTrimManager() = default; int BucketTrimManager::init() { - return impl->watcher.start(); + return impl->watcher.start(this); } void BucketTrimManager::on_bucket_changed(const std::string_view& bucket) diff --git a/src/rgw/rgw_trim_datalog.cc b/src/rgw/rgw_trim_datalog.cc index 5677b563337..72a160039cf 100644 
--- a/src/rgw/rgw_trim_datalog.cc +++ b/src/rgw/rgw_trim_datalog.cc @@ -26,6 +26,7 @@ namespace { class DatalogTrimImplCR : public RGWSimpleCoroutine { + const DoutPrefixProvider *dpp; rgw::sal::RadosStore* store; boost::intrusive_ptr cn; int shard; @@ -33,23 +34,23 @@ class DatalogTrimImplCR : public RGWSimpleCoroutine { std::string* last_trim_marker; public: - DatalogTrimImplCR(rgw::sal::RadosStore* store, int shard, + DatalogTrimImplCR(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, int shard, const std::string& marker, std::string* last_trim_marker) - : RGWSimpleCoroutine(store->ctx()), store(store), shard(shard), + : RGWSimpleCoroutine(store->ctx()), dpp(dpp), store(store), shard(shard), marker(marker), last_trim_marker(last_trim_marker) { set_description() << "Datalog trim shard=" << shard << " marker=" << marker; } - int send_request() override { + int send_request(const DoutPrefixProvider *dpp) override { set_status() << "sending request"; cn = stack->create_completion_notifier(); - return store->svc()->datalog_rados->trim_entries(shard, marker, + return store->svc()->datalog_rados->trim_entries(dpp, shard, marker, cn->completion()); } int request_complete() override { int r = cn->completion()->get_return_value(); - ldout(cct, 20) << __PRETTY_FUNCTION__ << "(): trim of shard=" << shard + ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << "(): trim of shard=" << shard << " marker=" << marker << " returned r=" << r << dendl; set_status() << "request complete; ret=" << r; @@ -95,6 +96,7 @@ void take_min_markers(IterIn first, IterIn last, IterOut dest) class DataLogTrimCR : public RGWCoroutine { using TrimCR = DatalogTrimImplCR; + const DoutPrefixProvider *dpp; rgw::sal::RadosStore* store; RGWHTTPManager *http; const int num_shards; @@ -105,9 +107,9 @@ class DataLogTrimCR : public RGWCoroutine { int ret{0}; public: - DataLogTrimCR(rgw::sal::RadosStore* store, RGWHTTPManager *http, + DataLogTrimCR(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, 
RGWHTTPManager *http, int num_shards, std::vector& last_trim) - : RGWCoroutine(store->ctx()), store(store), http(http), + : RGWCoroutine(store->ctx()), dpp(dpp), store(store), http(http), num_shards(num_shards), zone_id(store->svc()->zone->get_zone().id), peer_status(store->svc()->zone->get_zone_data_notify_to_map().size()), @@ -116,13 +118,13 @@ class DataLogTrimCR : public RGWCoroutine { last_trim(last_trim) {} - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int DataLogTrimCR::operate() +int DataLogTrimCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { - ldout(cct, 10) << "fetching sync status for zone " << zone_id << dendl; + ldpp_dout(dpp, 10) << "fetching sync status for zone " << zone_id << dendl; set_status("fetching sync status"); yield { // query data sync status from each sync peer @@ -135,7 +137,7 @@ int DataLogTrimCR::operate() auto p = peer_status.begin(); for (auto& c : store->svc()->zone->get_zone_data_notify_to_map()) { - ldout(cct, 20) << "query sync status from " << c.first << dendl; + ldpp_dout(dpp, 20) << "query sync status from " << c.first << dendl; using StatusCR = RGWReadRESTResourceCR; spawn(new StatusCR(cct, c.second, http, "/admin/log/", params, &*p), false); @@ -152,11 +154,11 @@ int DataLogTrimCR::operate() drain_all(); if (ret < 0) { - ldout(cct, 4) << "failed to fetch sync status from all peers" << dendl; + ldpp_dout(dpp, 4) << "failed to fetch sync status from all peers" << dendl; return set_cr_error(ret); } - ldout(cct, 10) << "trimming log shards" << dendl; + ldpp_dout(dpp, 10) << "trimming log shards" << dendl; set_status("trimming log shards"); yield { // determine the minimum marker for each shard @@ -168,10 +170,10 @@ int DataLogTrimCR::operate() if (m <= last_trim[i]) { continue; } - ldout(cct, 10) << "trimming log shard " << i + ldpp_dout(dpp, 10) << "trimming log shard " << i << " at marker=" << m << " last_trim=" << last_trim[i] << dendl; - spawn(new TrimCR(store, i, m, 
&last_trim[i]), + spawn(new TrimCR(dpp, store, i, m, &last_trim[i]), true); } } @@ -180,15 +182,16 @@ int DataLogTrimCR::operate() return 0; } -RGWCoroutine* create_admin_data_log_trim_cr(rgw::sal::RadosStore* store, +RGWCoroutine* create_admin_data_log_trim_cr(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, RGWHTTPManager *http, int num_shards, std::vector& markers) { - return new DataLogTrimCR(store, http, num_shards, markers); + return new DataLogTrimCR(dpp, store, http, num_shards, markers); } class DataLogTrimPollCR : public RGWCoroutine { + const DoutPrefixProvider *dpp; rgw::sal::RadosStore* store; RGWHTTPManager *http; const int num_shards; @@ -198,19 +201,19 @@ class DataLogTrimPollCR : public RGWCoroutine { std::vector last_trim; //< last trimmed marker per shard public: - DataLogTrimPollCR(rgw::sal::RadosStore* store, RGWHTTPManager *http, + DataLogTrimPollCR(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, RGWHTTPManager *http, int num_shards, utime_t interval) - : RGWCoroutine(store->ctx()), store(store), http(http), + : RGWCoroutine(store->ctx()), dpp(dpp), store(store), http(http), num_shards(num_shards), interval(interval), lock_oid(store->svc()->datalog_rados->get_oid(0, 0)), lock_cookie(RGWSimpleRadosLockCR::gen_random_cookie(cct)), last_trim(num_shards) {} - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int DataLogTrimPollCR::operate() +int DataLogTrimPollCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { for (;;) { @@ -226,13 +229,13 @@ int DataLogTrimPollCR::operate() interval.sec())); if (retcode < 0) { // if the lock is already held, go back to sleep and try again later - ldout(cct, 4) << "failed to lock " << lock_oid << ", trying again in " + ldpp_dout(dpp, 4) << "failed to lock " << lock_oid << ", trying again in " << interval.sec() << "s" << dendl; continue; } set_status("trimming"); - yield call(new DataLogTrimCR(store, http, num_shards, last_trim)); + yield call(new 
DataLogTrimCR(dpp, store, http, num_shards, last_trim)); // note that the lock is not released. this is intentional, as it avoids // duplicating this work in other gateways @@ -241,9 +244,9 @@ int DataLogTrimPollCR::operate() return 0; } -RGWCoroutine* create_data_log_trim_cr(rgw::sal::RadosStore* store, +RGWCoroutine* create_data_log_trim_cr(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, RGWHTTPManager *http, int num_shards, utime_t interval) { - return new DataLogTrimPollCR(store, http, num_shards, interval); + return new DataLogTrimPollCR(dpp, store, http, num_shards, interval); } diff --git a/src/rgw/rgw_trim_datalog.h b/src/rgw/rgw_trim_datalog.h index d0b6779a8f7..9f5bf7252fe 100644 --- a/src/rgw/rgw_trim_datalog.h +++ b/src/rgw/rgw_trim_datalog.h @@ -6,6 +6,8 @@ #include #include +#include "common/dout.h" + class RGWCoroutine; class RGWRados; class RGWHTTPManager; @@ -15,12 +17,12 @@ namespace rgw { namespace sal { } } // DataLogTrimCR factory function -extern RGWCoroutine* create_data_log_trim_cr(rgw::sal::RadosStore* store, +extern RGWCoroutine* create_data_log_trim_cr(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, RGWHTTPManager *http, int num_shards, utime_t interval); // factory function for datalog trim via radosgw-admin -RGWCoroutine* create_admin_data_log_trim_cr(rgw::sal::RadosStore* store, +RGWCoroutine* create_admin_data_log_trim_cr(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, RGWHTTPManager *http, int num_shards, std::vector& markers); diff --git a/src/rgw/rgw_trim_mdlog.cc b/src/rgw/rgw_trim_mdlog.cc index 61ce1d99d7a..cb4b28bb475 100644 --- a/src/rgw/rgw_trim_mdlog.cc +++ b/src/rgw/rgw_trim_mdlog.cc @@ -55,6 +55,7 @@ class PurgePeriodLogsCR : public RGWCoroutine { RGWSI_Zone *zone; RGWSI_MDLog *mdlog; } svc; + const DoutPrefixProvider *dpp; rgw::sal::RadosStore* const store; RGWMetadataManager *const metadata; RGWObjVersionTracker objv; @@ -63,31 +64,31 @@ class PurgePeriodLogsCR : public RGWCoroutine { 
epoch_t *last_trim_epoch; //< update last trim on success public: - PurgePeriodLogsCR(rgw::sal::RadosStore* store, epoch_t realm_epoch, epoch_t *last_trim) - : RGWCoroutine(store->ctx()), store(store), metadata(store->ctl()->meta.mgr), + PurgePeriodLogsCR(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, epoch_t realm_epoch, epoch_t *last_trim) + : RGWCoroutine(store->ctx()), dpp(dpp), store(store), metadata(store->ctl()->meta.mgr), realm_epoch(realm_epoch), last_trim_epoch(last_trim) { svc.zone = store->svc()->zone; svc.mdlog = store->svc()->mdlog; } - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int PurgePeriodLogsCR::operate() +int PurgePeriodLogsCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { // read our current oldest log period - yield call(svc.mdlog->read_oldest_log_period_cr(&cursor, &objv)); + yield call(svc.mdlog->read_oldest_log_period_cr(dpp, &cursor, &objv)); if (retcode < 0) { return set_cr_error(retcode); } ceph_assert(cursor); - ldout(cct, 20) << "oldest log realm_epoch=" << cursor.get_epoch() + ldpp_dout(dpp, 20) << "oldest log realm_epoch=" << cursor.get_epoch() << " period=" << cursor.get_period().get_id() << dendl; // trim -up to- the given realm_epoch while (cursor.get_epoch() < realm_epoch) { - ldout(cct, 4) << "purging log shards for realm_epoch=" << cursor.get_epoch() + ldpp_dout(dpp, 4) << "purging log shards for realm_epoch=" << cursor.get_epoch() << " period=" << cursor.get_period().get_id() << dendl; yield { const auto mdlog = svc.mdlog->get_log(cursor.get_period().get_id()); @@ -96,23 +97,23 @@ int PurgePeriodLogsCR::operate() call(new PurgeLogShardsCR(store, mdlog, pool, num_shards)); } if (retcode < 0) { - ldout(cct, 1) << "failed to remove log shards: " + ldpp_dout(dpp, 1) << "failed to remove log shards: " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); } - ldout(cct, 10) << "removed log shards for realm_epoch=" << cursor.get_epoch() + ldpp_dout(dpp, 10) 
<< "removed log shards for realm_epoch=" << cursor.get_epoch() << " period=" << cursor.get_period().get_id() << dendl; // update our mdlog history - yield call(svc.mdlog->trim_log_period_cr(cursor, &objv)); + yield call(svc.mdlog->trim_log_period_cr(dpp, cursor, &objv)); if (retcode == -ENOENT) { // must have raced to update mdlog history. return success and allow the // winner to continue purging - ldout(cct, 10) << "already removed log shards for realm_epoch=" << cursor.get_epoch() + ldpp_dout(dpp, 10) << "already removed log shards for realm_epoch=" << cursor.get_epoch() << " period=" << cursor.get_period().get_id() << dendl; return set_cr_done(); } else if (retcode < 0) { - ldout(cct, 1) << "failed to remove log shards for realm_epoch=" + ldpp_dout(dpp, 1) << "failed to remove log shards for realm_epoch=" << cursor.get_epoch() << " period=" << cursor.get_period().get_id() << " with: " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); @@ -286,7 +287,7 @@ bool MetaMasterTrimShardCollectCR::spawn_next() if (stable <= last_trim) { // already trimmed - ldout(cct, 20) << "skipping log shard " << shard_id + ldpp_dout(env.dpp, 20) << "skipping log shard " << shard_id << " at marker=" << stable << " last_trim=" << last_trim << " realm_epoch=" << sync_status.sync_info.realm_epoch << dendl; @@ -296,11 +297,11 @@ bool MetaMasterTrimShardCollectCR::spawn_next() mdlog->get_shard_oid(shard_id, oid); - ldout(cct, 10) << "trimming log shard " << shard_id + ldpp_dout(env.dpp, 10) << "trimming log shard " << shard_id << " at marker=" << stable << " last_trim=" << last_trim << " realm_epoch=" << sync_status.sync_info.realm_epoch << dendl; - spawn(new RGWSyncLogTrimCR(env.store, oid, stable, &last_trim), false); + spawn(new RGWSyncLogTrimCR(env.dpp, env.store, oid, stable, &last_trim), false); shard_id++; return true; } @@ -351,25 +352,25 @@ class MetaMasterTrimCR : public RGWCoroutine { : RGWCoroutine(env.store->ctx()), env(env) {} - int operate() override; + int 
operate(const DoutPrefixProvider *dpp) override; }; -int MetaMasterTrimCR::operate() +int MetaMasterTrimCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { // TODO: detect this and fail before we spawn the trim thread? if (env.connections.empty()) { - ldout(cct, 4) << "no peers, exiting" << dendl; + ldpp_dout(dpp, 4) << "no peers, exiting" << dendl; return set_cr_done(); } - ldout(cct, 10) << "fetching sync status for zone " << env.zone << dendl; + ldpp_dout(dpp, 10) << "fetching sync status for zone " << env.zone << dendl; // query mdlog sync status from peers yield call(new MetaMasterStatusCollectCR(env)); // must get a successful reply from all peers to consider trimming if (ret < 0) { - ldout(cct, 4) << "failed to fetch sync status from all peers" << dendl; + ldpp_dout(dpp, 4) << "failed to fetch sync status from all peers" << dendl; return set_cr_error(ret); } @@ -377,19 +378,19 @@ int MetaMasterTrimCR::operate() ret = take_min_status(env.store->ctx(), env.peer_status.begin(), env.peer_status.end(), &min_status); if (ret < 0) { - ldout(cct, 4) << "failed to calculate min sync status from peers" << dendl; + ldpp_dout(dpp, 4) << "failed to calculate min sync status from peers" << dendl; return set_cr_error(ret); } yield { auto store = env.store; auto epoch = min_status.sync_info.realm_epoch; - ldout(cct, 4) << "realm epoch min=" << epoch + ldpp_dout(dpp, 4) << "realm epoch min=" << epoch << " current=" << env.current.get_epoch()<< dendl; if (epoch > env.last_trim_epoch + 1) { // delete any prior mdlog periods - spawn(new PurgePeriodLogsCR(store, epoch, &env.last_trim_epoch), true); + spawn(new PurgePeriodLogsCR(dpp, store, epoch, &env.last_trim_epoch), true); } else { - ldout(cct, 10) << "mdlogs already purged up to realm_epoch " + ldpp_dout(dpp, 10) << "mdlogs already purged up to realm_epoch " << env.last_trim_epoch << dendl; } @@ -425,17 +426,17 @@ class MetaPeerTrimShardCR : public RGWCoroutine { period_id(period_id), shard_id(shard_id), 
last_trim(last_trim) {} - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int MetaPeerTrimShardCR::operate() +int MetaPeerTrimShardCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { // query master's first mdlog entry for this shard yield call(create_list_remote_mdlog_shard_cr(&env, period_id, shard_id, "", 1, &result)); if (retcode < 0) { - ldpp_dout(env.dpp, 5) << "failed to read first entry from master's mdlog shard " + ldpp_dout(dpp, 5) << "failed to read first entry from master's mdlog shard " << shard_id << " for period " << period_id << ": " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); @@ -446,12 +447,12 @@ int MetaPeerTrimShardCR::operate() // this empty reply. query the mdlog shard info to read its max timestamp, // then retry the listing to make sure it's still empty before trimming to // that - ldpp_dout(env.dpp, 10) << "empty master mdlog shard " << shard_id + ldpp_dout(dpp, 10) << "empty master mdlog shard " << shard_id << ", reading last timestamp from shard info" << dendl; // read the mdlog shard info for the last timestamp yield call(create_read_remote_mdlog_shard_info_cr(&env, period_id, shard_id, &info)); if (retcode < 0) { - ldpp_dout(env.dpp, 5) << "failed to read info from master's mdlog shard " + ldpp_dout(dpp, 5) << "failed to read info from master's mdlog shard " << shard_id << " for period " << period_id << ": " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); @@ -459,13 +460,13 @@ int MetaPeerTrimShardCR::operate() if (ceph::real_clock::is_zero(info.last_update)) { return set_cr_done(); // nothing to trim } - ldpp_dout(env.dpp, 10) << "got mdlog shard info with last update=" + ldpp_dout(dpp, 10) << "got mdlog shard info with last update=" << info.last_update << dendl; // re-read the master's first mdlog entry to make sure it hasn't changed yield call(create_list_remote_mdlog_shard_cr(&env, period_id, shard_id, "", 1, &result)); if (retcode < 0) { - 
ldpp_dout(env.dpp, 5) << "failed to read first entry from master's mdlog shard " + ldpp_dout(dpp, 5) << "failed to read first entry from master's mdlog shard " << shard_id << " for period " << period_id << ": " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); @@ -486,22 +487,22 @@ int MetaPeerTrimShardCR::operate() } if (stable <= *last_trim) { - ldpp_dout(env.dpp, 10) << "skipping log shard " << shard_id + ldpp_dout(dpp, 10) << "skipping log shard " << shard_id << " at timestamp=" << stable << " last_trim=" << *last_trim << dendl; return set_cr_done(); } - ldpp_dout(env.dpp, 10) << "trimming log shard " << shard_id + ldpp_dout(dpp, 10) << "trimming log shard " << shard_id << " at timestamp=" << stable << " last_trim=" << *last_trim << dendl; yield { std::string oid; mdlog->get_shard_oid(shard_id, oid); - call(new RGWRadosTimelogTrimCR(env.store, oid, real_time{}, stable, "", "")); + call(new RGWRadosTimelogTrimCR(dpp, env.store, oid, real_time{}, stable, "", "")); } if (retcode < 0 && retcode != -ENODATA) { - ldpp_dout(env.dpp, 1) << "failed to trim mdlog shard " << shard_id + ldpp_dout(dpp, 1) << "failed to trim mdlog shard " << shard_id << ": " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); } @@ -552,13 +553,13 @@ class MetaPeerTrimCR : public RGWCoroutine { public: explicit MetaPeerTrimCR(PeerTrimEnv& env) : RGWCoroutine(env.store->ctx()), env(env) {} - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int MetaPeerTrimCR::operate() +int MetaPeerTrimCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { - ldout(cct, 10) << "fetching master mdlog info" << dendl; + ldpp_dout(dpp, 10) << "fetching master mdlog info" << dendl; yield { // query mdlog_info from master for oldest_log_period rgw_http_param_pair params[] = { @@ -571,7 +572,7 @@ int MetaPeerTrimCR::operate() "/admin/log/", params, &mdlog_info)); } if (retcode < 0) { - ldout(cct, 4) << "failed to read mdlog info from master" << dendl; 
+ ldpp_dout(dpp, 4) << "failed to read mdlog info from master" << dendl; return set_cr_error(retcode); } // use master's shard count instead @@ -579,10 +580,10 @@ int MetaPeerTrimCR::operate() if (mdlog_info.realm_epoch > env.last_trim_epoch + 1) { // delete any prior mdlog periods - yield call(new PurgePeriodLogsCR(env.store, mdlog_info.realm_epoch, + yield call(new PurgePeriodLogsCR(dpp, env.store, mdlog_info.realm_epoch, &env.last_trim_epoch)); } else { - ldout(cct, 10) << "mdlogs already purged through realm_epoch " + ldpp_dout(dpp, 10) << "mdlogs already purged through realm_epoch " << env.last_trim_epoch << dendl; } @@ -617,10 +618,10 @@ class MetaTrimPollCR : public RGWCoroutine { cookie(RGWSimpleRadosLockCR::gen_random_cookie(cct)) {} - int operate() override; + int operate(const DoutPrefixProvider *dpp) override; }; -int MetaTrimPollCR::operate() +int MetaTrimPollCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { for (;;) { diff --git a/src/rgw/rgw_usage.cc b/src/rgw/rgw_usage.cc index f43d33ea182..e76995eb486 100644 --- a/src/rgw/rgw_usage.cc +++ b/src/rgw/rgw_usage.cc @@ -58,13 +58,13 @@ int RGWUsage::show(const DoutPrefixProvider *dpp, rgw::sal::Store* store, while (is_truncated) { if (bucket) { - ret = bucket->read_usage(start_epoch, end_epoch, max_entries, &is_truncated, + ret = bucket->read_usage(dpp, start_epoch, end_epoch, max_entries, &is_truncated, usage_iter, usage); } else if (user) { - ret = user->read_usage(start_epoch, end_epoch, max_entries, &is_truncated, + ret = user->read_usage(dpp, start_epoch, end_epoch, max_entries, &is_truncated, usage_iter, usage); } else { - ret = store->read_all_usage(start_epoch, end_epoch, max_entries, &is_truncated, + ret = store->read_all_usage(dpp, start_epoch, end_epoch, max_entries, &is_truncated, usage_iter, usage); } @@ -157,15 +157,15 @@ int RGWUsage::trim(const DoutPrefixProvider *dpp, rgw::sal::Store* store, uint64_t start_epoch, uint64_t end_epoch) { if (bucket) { - return 
bucket->trim_usage(start_epoch, end_epoch); + return bucket->trim_usage(dpp, start_epoch, end_epoch); } else if (user) { - return user->trim_usage(start_epoch, end_epoch); + return user->trim_usage(dpp, start_epoch, end_epoch); } else { - return store->trim_all_usage(start_epoch, end_epoch); + return store->trim_all_usage(dpp, start_epoch, end_epoch); } } -int RGWUsage::clear(rgw::sal::Store* store) +int RGWUsage::clear(const DoutPrefixProvider *dpp, rgw::sal::Store* store) { - return store->clear_usage(); + return store->clear_usage(dpp); } diff --git a/src/rgw/rgw_usage.h b/src/rgw/rgw_usage.h index 5ffec449db3..1faaf35ba32 100644 --- a/src/rgw/rgw_usage.h +++ b/src/rgw/rgw_usage.h @@ -8,6 +8,7 @@ #include #include "common/Formatter.h" +#include "common/dout.h" #include "rgw_formats.h" #include "rgw_user.h" @@ -27,7 +28,7 @@ public: rgw::sal::User* user , rgw::sal::Bucket* bucket, uint64_t start_epoch, uint64_t end_epoch); - static int clear(rgw::sal::Store* store); + static int clear(const DoutPrefixProvider *dpp, rgw::sal::Store* store); }; diff --git a/src/rgw/rgw_user.cc b/src/rgw/rgw_user.cc index de539fd2f81..8f5f1e1055f 100644 --- a/src/rgw/rgw_user.cc +++ b/src/rgw/rgw_user.cc @@ -73,7 +73,7 @@ int rgw_user_sync_all_stats(const DoutPrefixProvider *dpp, rgw::sal::Store* stor ldpp_dout(dpp, 0) << "ERROR: could not read bucket info: bucket=" << bucket << " ret=" << ret << dendl; continue; } - ret = bucket->sync_user_stats(y); + ret = bucket->sync_user_stats(dpp, y); if (ret < 0) { ldout(cct, 0) << "ERROR: could not sync bucket stats: ret=" << ret << dendl; return ret; @@ -85,7 +85,7 @@ int rgw_user_sync_all_stats(const DoutPrefixProvider *dpp, rgw::sal::Store* stor } } while (user_buckets.is_truncated()); - ret = user->complete_flush_stats(y); + ret = user->complete_flush_stats(dpp, y); if (ret < 0) { cerr << "ERROR: failed to complete syncing user stats: ret=" << ret << std::endl; return ret; @@ -2184,7 +2184,7 @@ int RGWUser::info(RGWUserInfo& 
fetched_info, std::string *err_msg) return 0; } -int RGWUser::list(RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher) +int RGWUser::list(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher) { Formatter *formatter = flusher.get_formatter(); void *handle = nullptr; @@ -2193,7 +2193,7 @@ int RGWUser::list(RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher) op_state.max_entries = 1000; } - int ret = store->meta_list_keys_init(metadata_key, op_state.marker, &handle); + int ret = store->meta_list_keys_init(dpp, metadata_key, op_state.marker, &handle); if (ret < 0) { return ret; } @@ -2239,7 +2239,7 @@ int RGWUser::list(RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher) return 0; } -int RGWUserAdminOp_User::list(rgw::sal::Store* store, RGWUserAdminOpState& op_state, +int RGWUserAdminOp_User::list(const DoutPrefixProvider *dpp, rgw::sal::Store* store, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher) { RGWUser user; @@ -2248,7 +2248,7 @@ int RGWUserAdminOp_User::list(rgw::sal::Store* store, RGWUserAdminOpState& op_st if (ret < 0) return ret; - ret = user.list(op_state, flusher); + ret = user.list(dpp, op_state, flusher); if (ret < 0) return ret; @@ -2289,7 +2289,7 @@ int RGWUserAdminOp_User::info(const DoutPrefixProvider *dpp, RGWStorageStats stats; RGWStorageStats *arg_stats = NULL; if (op_state.fetch_stats) { - int ret = ruser->read_stats(y, &stats); + int ret = ruser->read_stats(dpp, y, &stats); if (ret < 0 && ret != -ENOENT) { return ret; } @@ -2883,24 +2883,26 @@ int RGWUserCtl::remove_info(const DoutPrefixProvider *dpp, }); } -int RGWUserCtl::add_bucket(const rgw_user& user, +int RGWUserCtl::add_bucket(const DoutPrefixProvider *dpp, + const rgw_user& user, const rgw_bucket& bucket, ceph::real_time creation_time, optional_yield y) { return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) { - return svc.user->add_bucket(op->ctx(), user, bucket, creation_time, y); + return 
svc.user->add_bucket(dpp, op->ctx(), user, bucket, creation_time, y); }); } -int RGWUserCtl::remove_bucket(const rgw_user& user, +int RGWUserCtl::remove_bucket(const DoutPrefixProvider *dpp, + const rgw_user& user, const rgw_bucket& bucket, optional_yield y) { return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) { - return svc.user->remove_bucket(op->ctx(), user, bucket, y); + return svc.user->remove_bucket(dpp, op->ctx(), user, bucket, y); }); } @@ -2920,7 +2922,7 @@ int RGWUserCtl::list_buckets(const DoutPrefixProvider *dpp, } return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) { - int ret = svc.user->list_buckets(op->ctx(), user, marker, end_marker, + int ret = svc.user->list_buckets(dpp, op->ctx(), user, marker, end_marker, max, buckets, is_truncated, y); if (ret < 0) { return ret; @@ -2937,44 +2939,46 @@ int RGWUserCtl::list_buckets(const DoutPrefixProvider *dpp, }); } -int RGWUserCtl::flush_bucket_stats(const rgw_user& user, +int RGWUserCtl::flush_bucket_stats(const DoutPrefixProvider *dpp, + const rgw_user& user, const RGWBucketEnt& ent, optional_yield y) { return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) { - return svc.user->flush_bucket_stats(op->ctx(), user, ent, y); + return svc.user->flush_bucket_stats(dpp, op->ctx(), user, ent, y); }); } -int RGWUserCtl::complete_flush_stats(const rgw_user& user, optional_yield y) +int RGWUserCtl::complete_flush_stats(const DoutPrefixProvider *dpp, const rgw_user& user, optional_yield y) { return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) { - return svc.user->complete_flush_stats(op->ctx(), user, y); + return svc.user->complete_flush_stats(dpp, op->ctx(), user, y); }); } -int RGWUserCtl::reset_stats(const rgw_user& user, optional_yield y) +int RGWUserCtl::reset_stats(const DoutPrefixProvider *dpp, const rgw_user& user, optional_yield y) { return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) { - return svc.user->reset_bucket_stats(op->ctx(), user, y); + return 
svc.user->reset_bucket_stats(dpp, op->ctx(), user, y); }); } -int RGWUserCtl::read_stats(const rgw_user& user, RGWStorageStats *stats, +int RGWUserCtl::read_stats(const DoutPrefixProvider *dpp, + const rgw_user& user, RGWStorageStats *stats, optional_yield y, ceph::real_time *last_stats_sync, ceph::real_time *last_stats_update) { return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) { - return svc.user->read_stats(op->ctx(), user, stats, + return svc.user->read_stats(dpp, op->ctx(), user, stats, last_stats_sync, last_stats_update, y); }); } -int RGWUserCtl::read_stats_async(const rgw_user& user, RGWGetUserStats_CB *cb) +int RGWUserCtl::read_stats_async(const DoutPrefixProvider *dpp, const rgw_user& user, RGWGetUserStats_CB *cb) { return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) { - return svc.user->read_stats_async(op->ctx(), user, cb); + return svc.user->read_stats_async(dpp, op->ctx(), user, cb); }); } diff --git a/src/rgw/rgw_user.h b/src/rgw/rgw_user.h index 505c95b1413..542efc20925 100644 --- a/src/rgw/rgw_user.h +++ b/src/rgw/rgw_user.h @@ -589,7 +589,7 @@ public: int info (RGWUserInfo& fetched_info, std::string *err_msg = NULL); /* list the existing users */ - int list(RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher); + int list(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher); friend class RGWAccessKeyPool; friend class RGWSubUserPool; @@ -601,7 +601,7 @@ public: class RGWUserAdminOp_User { public: - static int list(rgw::sal::Store* store, + static int list(const DoutPrefixProvider *dpp, rgw::sal::Store* store, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher); static int info(const DoutPrefixProvider *dpp, @@ -833,11 +833,13 @@ public: const RGWUserInfo& info, optional_yield y, const RemoveParams& params = {}); - int add_bucket(const rgw_user& user, + int add_bucket(const DoutPrefixProvider *dpp, + const rgw_user& user, const rgw_bucket& bucket, ceph::real_time 
creation_time, optional_yield y); - int remove_bucket(const rgw_user& user, + int remove_bucket(const DoutPrefixProvider *dpp, + const rgw_user& user, const rgw_bucket& bucket, optional_yield y); int list_buckets(const DoutPrefixProvider *dpp, const rgw_user& user, @@ -850,16 +852,18 @@ public: optional_yield y, uint64_t default_max = 1000); - int flush_bucket_stats(const rgw_user& user, + int flush_bucket_stats(const DoutPrefixProvider *dpp, + const rgw_user& user, const RGWBucketEnt& ent, optional_yield y); - int complete_flush_stats(const rgw_user& user, optional_yield y); - int reset_stats(const rgw_user& user, optional_yield y); - int read_stats(const rgw_user& user, RGWStorageStats *stats, + int complete_flush_stats(const DoutPrefixProvider *dpp, const rgw_user& user, optional_yield y); + int reset_stats(const DoutPrefixProvider *dpp, const rgw_user& user, optional_yield y); + int read_stats(const DoutPrefixProvider *dpp, + const rgw_user& user, RGWStorageStats *stats, optional_yield y, ceph::real_time *last_stats_sync = nullptr, /* last time a full stats sync completed */ ceph::real_time *last_stats_update = nullptr); /* last time a stats update was done */ - int read_stats_async(const rgw_user& user, RGWGetUserStats_CB *ctx); + int read_stats_async(const DoutPrefixProvider *dpp, const rgw_user& user, RGWGetUserStats_CB *ctx); }; class RGWUserMetaHandlerAllocator { diff --git a/src/rgw/rgw_worker.h b/src/rgw/rgw_worker.h index 5df99dbecfb..f878ff8a6d1 100644 --- a/src/rgw/rgw_worker.h +++ b/src/rgw/rgw_worker.h @@ -24,10 +24,12 @@ #include "common/ceph_mutex.h" #include "include/common_fwd.h" +#define dout_subsys ceph_subsys_rgw + class RGWRados; class RGWRadosThread { - class Worker : public Thread { + class Worker : public Thread, public DoutPrefixProvider { CephContext *cct; RGWRadosThread *processor; ceph::mutex lock = ceph::make_mutex("RGWRadosThread::Worker"); @@ -50,6 +52,11 @@ class RGWRadosThread { std::lock_guard l{lock}; cond.notify_all(); } + + 
CephContext *get_cct() const { return cct; } + unsigned get_subsys() const { return dout_subsys; } + std::ostream& gen_prefix(std::ostream& out) const { return out << "rgw rados thread: "; } + }; Worker *worker; @@ -71,8 +78,8 @@ public: stop(); } - virtual int init() { return 0; } - virtual int process() = 0; + virtual int init(const DoutPrefixProvider *dpp) { return 0; } + virtual int process(const DoutPrefixProvider *dpp) = 0; bool going_down() { return down_flag; } diff --git a/src/rgw/rgw_zone.cc b/src/rgw/rgw_zone.cc index 54088c45d77..476973a04a6 100644 --- a/src/rgw/rgw_zone.cc +++ b/src/rgw/rgw_zone.cc @@ -80,7 +80,7 @@ int RGWZoneGroup::create_default(const DoutPrefixProvider *dpp, optional_yield y RGWZoneParams zone_params(default_zone_name); - int r = zone_params.init(cct, sysobj_svc, y, false); + int r = zone_params.init(dpp, cct, sysobj_svc, y, false); if (r < 0) { ldpp_dout(dpp, 0) << "create_default: error initializing zone params: " << cpp_strerror(-r) << dendl; return r; @@ -93,7 +93,7 @@ int RGWZoneGroup::create_default(const DoutPrefixProvider *dpp, optional_yield y } else if (r == -EEXIST) { ldpp_dout(dpp, 10) << "zone_params::create_default() returned -EEXIST, we raced with another default zone_params creation" << dendl; zone_params.clear_id(); - r = zone_params.init(cct, sysobj_svc, y); + r = zone_params.init(dpp, cct, sysobj_svc, y); if (r < 0) { ldpp_dout(dpp, 0) << "create_default: error in init existing zone params: " << cpp_strerror(-r) << dendl; return r; @@ -116,7 +116,7 @@ int RGWZoneGroup::create_default(const DoutPrefixProvider *dpp, optional_yield y if (r == -EEXIST) { ldpp_dout(dpp, 10) << "create_default() returned -EEXIST, we raced with another zonegroup creation" << dendl; id.clear(); - r = init(cct, sysobj_svc, y); + r = init(dpp, cct, sysobj_svc, y); if (r < 0) { return r; } @@ -126,7 +126,7 @@ int RGWZoneGroup::create_default(const DoutPrefixProvider *dpp, optional_yield y name = id; } - post_process_params(y); + 
post_process_params(dpp, y); return 0; } @@ -176,7 +176,8 @@ int RGWZoneGroup::equals(const string& other_zonegroup) const return (id == other_zonegroup); } -int RGWZoneGroup::add_zone(const RGWZoneParams& zone_params, bool *is_master, bool *read_only, +int RGWZoneGroup::add_zone(const DoutPrefixProvider *dpp, + const RGWZoneParams& zone_params, bool *is_master, bool *read_only, const list& endpoints, const string *ptier_type, bool *psync_from_all, list& sync_from, list& sync_from_rm, string *predirect_zone, std::optional bucket_index_max_shards, @@ -190,7 +191,7 @@ int RGWZoneGroup::add_zone(const RGWZoneParams& zone_params, bool *is_master, bo if (!zones.count(zone_id)) { for (const auto& zone : zones) { if (zone.second.name == zone_name) { - ldout(cct, 0) << "ERROR: found existing zone name " << zone_name + ldpp_dout(dpp, 0) << "ERROR: found existing zone name " << zone_name << " (" << zone.first << ") in zonegroup " << get_name() << dendl; return -EEXIST; } @@ -200,7 +201,7 @@ int RGWZoneGroup::add_zone(const RGWZoneParams& zone_params, bool *is_master, bo if (is_master) { if (*is_master) { if (!master_zone.empty() && master_zone != zone_id) { - ldout(cct, 0) << "NOTICE: overriding master zone: " << master_zone << dendl; + ldpp_dout(dpp, 0) << "NOTICE: overriding master zone: " << master_zone << dendl; } master_zone = zone_id; } else if (master_zone == zone_id) { @@ -220,7 +221,7 @@ int RGWZoneGroup::add_zone(const RGWZoneParams& zone_params, bool *is_master, bo if (ptier_type) { zone.tier_type = *ptier_type; if (!sync_mgr->get_module(*ptier_type, nullptr)) { - ldout(cct, 0) << "ERROR: could not found sync module: " << *ptier_type + ldpp_dout(dpp, 0) << "ERROR: could not found sync module: " << *ptier_type << ", valid sync modules: " << sync_mgr->get_registered_module_names() << dendl; @@ -248,22 +249,23 @@ int RGWZoneGroup::add_zone(const RGWZoneParams& zone_params, bool *is_master, bo zone.sync_from.erase(rm); } - post_process_params(y); + 
post_process_params(dpp, y); - return update(y); + return update(dpp,y); } -int RGWZoneGroup::rename_zone(const RGWZoneParams& zone_params, +int RGWZoneGroup::rename_zone(const DoutPrefixProvider *dpp, + const RGWZoneParams& zone_params, optional_yield y) { RGWZone& zone = zones[zone_params.get_id()]; zone.name = zone_params.get_name(); - return update(y); + return update(dpp, y); } -void RGWZoneGroup::post_process_params(optional_yield y) +void RGWZoneGroup::post_process_params(const DoutPrefixProvider *dpp, optional_yield y) { bool log_data = zones.size() > 1; @@ -279,9 +281,9 @@ void RGWZoneGroup::post_process_params(optional_yield y) zone.log_data = log_data; RGWZoneParams zone_params(zone.id, zone.name); - int ret = zone_params.init(cct, sysobj_svc, y); + int ret = zone_params.init(dpp, cct, sysobj_svc, y); if (ret < 0) { - ldout(cct, 0) << "WARNING: could not read zone params for zone id=" << zone.id << " name=" << zone.name << dendl; + ldpp_dout(dpp, 0) << "WARNING: could not read zone params for zone id=" << zone.id << " name=" << zone.name << dendl; continue; } @@ -300,53 +302,53 @@ void RGWZoneGroup::post_process_params(optional_yield y) } } -int RGWZoneGroup::remove_zone(const std::string& zone_id, optional_yield y) +int RGWZoneGroup::remove_zone(const DoutPrefixProvider *dpp, const std::string& zone_id, optional_yield y) { auto iter = zones.find(zone_id); if (iter == zones.end()) { - ldout(cct, 0) << "zone id " << zone_id << " is not a part of zonegroup " + ldpp_dout(dpp, 0) << "zone id " << zone_id << " is not a part of zonegroup " << name << dendl; return -ENOENT; } zones.erase(iter); - post_process_params(y); + post_process_params(dpp, y); - return update(y); + return update(dpp, y); } -int RGWZoneGroup::read_default_id(string& default_id, optional_yield y, +int RGWZoneGroup::read_default_id(const DoutPrefixProvider *dpp, string& default_id, optional_yield y, bool old_format) { if (realm_id.empty()) { /* try using default realm */ RGWRealm realm; - 
int ret = realm.init(cct, sysobj_svc, y); + int ret = realm.init(dpp, cct, sysobj_svc, y); // no default realm exist if (ret < 0) { - return read_id(default_zonegroup_name, default_id, y); + return read_id(dpp, default_zonegroup_name, default_id, y); } realm_id = realm.get_id(); } - return RGWSystemMetaObj::read_default_id(default_id, y, old_format); + return RGWSystemMetaObj::read_default_id(dpp, default_id, y, old_format); } -int RGWZoneGroup::set_as_default(optional_yield y, bool exclusive) +int RGWZoneGroup::set_as_default(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive) { if (realm_id.empty()) { /* try using default realm */ RGWRealm realm; - int ret = realm.init(cct, sysobj_svc, y); + int ret = realm.init(dpp, cct, sysobj_svc, y); if (ret < 0) { - ldout(cct, 10) << "could not read realm id: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 10) << "could not read realm id: " << cpp_strerror(-ret) << dendl; return -EINVAL; } realm_id = realm.get_id(); } - return RGWSystemMetaObj::set_as_default(y, exclusive); + return RGWSystemMetaObj::set_as_default(dpp, y, exclusive); } void RGWSystemMetaObj::reinit_instance(CephContext *_cct, RGWSI_SysObj *_sysobj_svc) @@ -356,7 +358,7 @@ void RGWSystemMetaObj::reinit_instance(CephContext *_cct, RGWSI_SysObj *_sysobj_ zone_svc = _sysobj_svc->get_zone_svc(); } -int RGWSystemMetaObj::init(CephContext *_cct, RGWSI_SysObj *_sysobj_svc, +int RGWSystemMetaObj::init(const DoutPrefixProvider *dpp, CephContext *_cct, RGWSI_SysObj *_sysobj_svc, optional_yield y, bool setup_obj, bool old_format) { @@ -375,25 +377,26 @@ int RGWSystemMetaObj::init(CephContext *_cct, RGWSI_SysObj *_sysobj_svc, name = get_predefined_name(cct); } if (name.empty()) { - r = use_default(y, old_format); + r = use_default(dpp, y, old_format); if (r < 0) { return r; } } else if (!old_format) { - r = read_id(name, id, y); + r = read_id(dpp, name, id, y); if (r < 0) { if (r != -ENOENT) { - ldout(cct, 0) << "error in read_id for object name: " << 
name << " : " << cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "error in read_id for object name: " << name << " : " << cpp_strerror(-r) << dendl; } return r; } } } - return read_info(id, y, old_format); + return read_info(dpp, id, y, old_format); } -int RGWSystemMetaObj::read_default(RGWDefaultSystemMetaObjInfo& default_info, +int RGWSystemMetaObj::read_default(const DoutPrefixProvider *dpp, + RGWDefaultSystemMetaObjInfo& default_info, const string& oid, optional_yield y) { using ceph::decode; @@ -402,7 +405,7 @@ int RGWSystemMetaObj::read_default(RGWDefaultSystemMetaObjInfo& default_info, auto obj_ctx = sysobj_svc->init_obj_ctx(); auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj(pool, oid)); - int ret = sysobj.rop().read(&bl, y); + int ret = sysobj.rop().read(dpp, &bl, y); if (ret < 0) return ret; @@ -410,19 +413,19 @@ int RGWSystemMetaObj::read_default(RGWDefaultSystemMetaObjInfo& default_info, auto iter = bl.cbegin(); decode(default_info, iter); } catch (buffer::error& err) { - ldout(cct, 0) << "error decoding data from " << pool << ":" << oid << dendl; + ldpp_dout(dpp, 0) << "error decoding data from " << pool << ":" << oid << dendl; return -EIO; } return 0; } -int RGWSystemMetaObj::read_default_id(string& default_id, optional_yield y, +int RGWSystemMetaObj::read_default_id(const DoutPrefixProvider *dpp, string& default_id, optional_yield y, bool old_format) { RGWDefaultSystemMetaObjInfo default_info; - int ret = read_default(default_info, get_default_oid(old_format), y); + int ret = read_default(dpp, default_info, get_default_oid(old_format), y); if (ret < 0) { return ret; } @@ -432,12 +435,12 @@ int RGWSystemMetaObj::read_default_id(string& default_id, optional_yield y, return 0; } -int RGWSystemMetaObj::use_default(optional_yield y, bool old_format) +int RGWSystemMetaObj::use_default(const DoutPrefixProvider *dpp, optional_yield y, bool old_format) { - return read_default_id(id, y, old_format); + return read_default_id(dpp, id, y, old_format); } 
-int RGWSystemMetaObj::set_as_default(optional_yield y, bool exclusive) +int RGWSystemMetaObj::set_as_default(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive) { using ceph::encode; string oid = get_default_oid(); @@ -454,14 +457,14 @@ int RGWSystemMetaObj::set_as_default(optional_yield y, bool exclusive) auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj(pool, oid)); int ret = sysobj.wop() .set_exclusive(exclusive) - .write(bl, y); + .write(dpp, bl, y); if (ret < 0) return ret; return 0; } -int RGWSystemMetaObj::read_id(const string& obj_name, string& object_id, +int RGWSystemMetaObj::read_id(const DoutPrefixProvider *dpp, const string& obj_name, string& object_id, optional_yield y) { using ceph::decode; @@ -472,7 +475,7 @@ int RGWSystemMetaObj::read_id(const string& obj_name, string& object_id, auto obj_ctx = sysobj_svc->init_obj_ctx(); auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj(pool, oid)); - int ret = sysobj.rop().read(&bl, y); + int ret = sysobj.rop().read(dpp, &bl, y); if (ret < 0) { return ret; } @@ -482,14 +485,14 @@ int RGWSystemMetaObj::read_id(const string& obj_name, string& object_id, auto iter = bl.cbegin(); decode(nameToId, iter); } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: failed to decode obj from " << pool << ":" << oid << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to decode obj from " << pool << ":" << oid << dendl; return -EIO; } object_id = nameToId.obj_id; return 0; } -int RGWSystemMetaObj::delete_obj(optional_yield y, bool old_format) +int RGWSystemMetaObj::delete_obj(const DoutPrefixProvider *dpp, optional_yield y, bool old_format) { rgw_pool pool(get_pool(cct)); @@ -497,16 +500,16 @@ int RGWSystemMetaObj::delete_obj(optional_yield y, bool old_format) /* check to see if obj is the default */ RGWDefaultSystemMetaObjInfo default_info; - int ret = read_default(default_info, get_default_oid(old_format), y); + int ret = read_default(dpp, default_info, get_default_oid(old_format), y); if (ret < 0 && 
ret != -ENOENT) return ret; if (default_info.default_id == id || (old_format && default_info.default_id == name)) { string oid = get_default_oid(old_format); rgw_raw_obj default_named_obj(pool, oid); auto sysobj = sysobj_svc->get_obj(obj_ctx, default_named_obj); - ret = sysobj.wop().remove(y); + ret = sysobj.wop().remove(dpp, y); if (ret < 0) { - ldout(cct, 0) << "Error delete default obj name " << name << ": " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "Error delete default obj name " << name << ": " << cpp_strerror(-ret) << dendl; return ret; } } @@ -514,9 +517,9 @@ int RGWSystemMetaObj::delete_obj(optional_yield y, bool old_format) string oid = get_names_oid_prefix() + name; rgw_raw_obj object_name(pool, oid); auto sysobj = sysobj_svc->get_obj(obj_ctx, object_name); - ret = sysobj.wop().remove(y); + ret = sysobj.wop().remove(dpp, y); if (ret < 0) { - ldout(cct, 0) << "Error delete obj name " << name << ": " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "Error delete obj name " << name << ": " << cpp_strerror(-ret) << dendl; return ret; } } @@ -530,15 +533,15 @@ int RGWSystemMetaObj::delete_obj(optional_yield y, bool old_format) rgw_raw_obj object_id(pool, oid); auto sysobj = sysobj_svc->get_obj(obj_ctx, object_id); - ret = sysobj.wop().remove(y); + ret = sysobj.wop().remove(dpp, y); if (ret < 0) { - ldout(cct, 0) << "Error delete object id " << id << ": " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "Error delete object id " << id << ": " << cpp_strerror(-ret) << dendl; } return ret; } -int RGWSystemMetaObj::store_name(bool exclusive, optional_yield y) +int RGWSystemMetaObj::store_name(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y) { rgw_pool pool(get_pool(cct)); string oid = get_names_oid_prefix() + name; @@ -553,30 +556,30 @@ int RGWSystemMetaObj::store_name(bool exclusive, optional_yield y) auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj(pool, oid)); return sysobj.wop() .set_exclusive(exclusive) - 
.write(bl, y); + .write(dpp, bl, y); } -int RGWSystemMetaObj::rename(const string& new_name, optional_yield y) +int RGWSystemMetaObj::rename(const DoutPrefixProvider *dpp, const string& new_name, optional_yield y) { string new_id; - int ret = read_id(new_name, new_id, y); + int ret = read_id(dpp, new_name, new_id, y); if (!ret) { return -EEXIST; } if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << "Error read_id " << new_name << ": " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "Error read_id " << new_name << ": " << cpp_strerror(-ret) << dendl; return ret; } string old_name = name; name = new_name; - ret = update(y); + ret = update(dpp, y); if (ret < 0) { - ldout(cct, 0) << "Error storing new obj info " << new_name << ": " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "Error storing new obj info " << new_name << ": " << cpp_strerror(-ret) << dendl; return ret; } - ret = store_name(true, y); + ret = store_name(dpp, true, y); if (ret < 0) { - ldout(cct, 0) << "Error storing new name " << new_name << ": " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "Error storing new name " << new_name << ": " << cpp_strerror(-ret) << dendl; return ret; } /* delete old name */ @@ -585,16 +588,16 @@ int RGWSystemMetaObj::rename(const string& new_name, optional_yield y) rgw_raw_obj old_name_obj(pool, oid); auto obj_ctx = sysobj_svc->init_obj_ctx(); auto sysobj = sysobj_svc->get_obj(obj_ctx, old_name_obj); - ret = sysobj.wop().remove(y); + ret = sysobj.wop().remove(dpp, y); if (ret < 0) { - ldout(cct, 0) << "Error delete old obj name " << old_name << ": " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "Error delete old obj name " << old_name << ": " << cpp_strerror(-ret) << dendl; return ret; } return ret; } -int RGWSystemMetaObj::read_info(const string& obj_id, optional_yield y, +int RGWSystemMetaObj::read_info(const DoutPrefixProvider *dpp, const string& obj_id, optional_yield y, bool old_format) { rgw_pool pool(get_pool(cct)); @@ -605,9 +608,9 
@@ int RGWSystemMetaObj::read_info(const string& obj_id, optional_yield y, auto obj_ctx = sysobj_svc->init_obj_ctx(); auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj{pool, oid}); - int ret = sysobj.rop().read(&bl, y); + int ret = sysobj.rop().read(dpp, &bl, y); if (ret < 0) { - ldout(cct, 0) << "failed reading obj info from " << pool << ":" << oid << ": " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "failed reading obj info from " << pool << ":" << oid << ": " << cpp_strerror(-ret) << dendl; return ret; } using ceph::decode; @@ -616,21 +619,21 @@ int RGWSystemMetaObj::read_info(const string& obj_id, optional_yield y, auto iter = bl.cbegin(); decode(*this, iter); } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: failed to decode obj from " << pool << ":" << oid << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to decode obj from " << pool << ":" << oid << dendl; return -EIO; } return 0; } -int RGWSystemMetaObj::read(optional_yield y) +int RGWSystemMetaObj::read(const DoutPrefixProvider *dpp, optional_yield y) { - int ret = read_id(name, id, y); + int ret = read_id(dpp, name, id, y); if (ret < 0) { return ret; } - return read_info(id, y); + return read_info(dpp, id, y); } int RGWSystemMetaObj::create(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive) @@ -638,7 +641,7 @@ int RGWSystemMetaObj::create(const DoutPrefixProvider *dpp, optional_yield y, bo int ret; /* check to see the name is not used */ - ret = read_id(name, id, y); + ret = read_id(dpp, name, id, y); if (exclusive && ret == 0) { ldpp_dout(dpp, 10) << "ERROR: name " << name << " already in use for obj id " << id << dendl; return -EEXIST; @@ -656,16 +659,16 @@ int RGWSystemMetaObj::create(const DoutPrefixProvider *dpp, optional_yield y, bo id = uuid_str; } - ret = store_info(exclusive, y); + ret = store_info(dpp, exclusive, y); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: storing info for " << id << ": " << cpp_strerror(-ret) << dendl; return ret; } - return 
store_name(exclusive, y); + return store_name(dpp, exclusive, y); } -int RGWSystemMetaObj::store_info(bool exclusive, optional_yield y) +int RGWSystemMetaObj::store_info(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y) { rgw_pool pool(get_pool(cct)); @@ -678,19 +681,19 @@ int RGWSystemMetaObj::store_info(bool exclusive, optional_yield y) auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj{pool, oid}); return sysobj.wop() .set_exclusive(exclusive) - .write(bl, y); + .write(dpp, bl, y); } -int RGWSystemMetaObj::write(bool exclusive, optional_yield y) +int RGWSystemMetaObj::write(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y) { - int ret = store_info(exclusive, y); + int ret = store_info(dpp, exclusive, y); if (ret < 0) { - ldout(cct, 20) << __func__ << "(): store_info() returned ret=" << ret << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): store_info() returned ret=" << ret << dendl; return ret; } - ret = store_name(exclusive, y); + ret = store_name(dpp, exclusive, y); if (ret < 0) { - ldout(cct, 20) << __func__ << "(): store_name() returned ret=" << ret << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): store_name() returned ret=" << ret << dendl; return ret; } return 0; @@ -709,7 +712,7 @@ int RGWRealm::create(const DoutPrefixProvider *dpp, optional_yield y, bool exclu return ret; } // create the control object for watch/notify - ret = create_control(exclusive, y); + ret = create_control(dpp, exclusive, y); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR creating control for new realm " << name << ": " << cpp_strerror(-ret) << dendl; return ret; @@ -717,7 +720,7 @@ int RGWRealm::create(const DoutPrefixProvider *dpp, optional_yield y, bool exclu RGWPeriod period; if (current_period.empty()) { /* create new period for the realm */ - ret = period.init(cct, sysobj_svc, id, y, name, false); + ret = period.init(dpp, cct, sysobj_svc, id, y, name, false); if (ret < 0 ) { return ret; } @@ -728,37 +731,37 @@ int RGWRealm::create(const 
DoutPrefixProvider *dpp, optional_yield y, bool exclu } } else { period = RGWPeriod(current_period, 0); - int ret = period.init(cct, sysobj_svc, id, y, name); + int ret = period.init(dpp, cct, sysobj_svc, id, y, name); if (ret < 0) { - ldout(cct, 0) << "ERROR: failed to init period " << current_period << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to init period " << current_period << dendl; return ret; } } - ret = set_current_period(period, y); + ret = set_current_period(dpp, period, y); if (ret < 0) { - ldout(cct, 0) << "ERROR: failed set current period " << current_period << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed set current period " << current_period << dendl; return ret; } // try to set as default. may race with another create, so pass exclusive=true // so we don't override an existing default - ret = set_as_default(y, true); + ret = set_as_default(dpp, y, true); if (ret < 0 && ret != -EEXIST) { - ldout(cct, 0) << "WARNING: failed to set realm as default realm, ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "WARNING: failed to set realm as default realm, ret=" << ret << dendl; } return 0; } -int RGWRealm::delete_obj(optional_yield y) +int RGWRealm::delete_obj(const DoutPrefixProvider *dpp, optional_yield y) { - int ret = RGWSystemMetaObj::delete_obj(y); + int ret = RGWSystemMetaObj::delete_obj(dpp, y); if (ret < 0) { return ret; } - return delete_control(y); + return delete_control(dpp, y); } -int RGWRealm::create_control(bool exclusive, optional_yield y) +int RGWRealm::create_control(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y) { auto pool = rgw_pool{get_pool(cct)}; auto oid = get_control_oid(); @@ -767,16 +770,16 @@ int RGWRealm::create_control(bool exclusive, optional_yield y) auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj{pool, oid}); return sysobj.wop() .set_exclusive(exclusive) - .write(bl, y); + .write(dpp, bl, y); } -int RGWRealm::delete_control(optional_yield y) +int RGWRealm::delete_control(const DoutPrefixProvider 
*dpp, optional_yield y) { auto pool = rgw_pool{get_pool(cct)}; auto obj = rgw_raw_obj{pool, get_control_oid()}; auto obj_ctx = sysobj_svc->init_obj_ctx(); auto sysobj = sysobj_svc->get_obj(obj_ctx, obj); - return sysobj.wop().remove(y); + return sysobj.wop().remove(dpp, y); } rgw_pool RGWRealm::get_pool(CephContext *cct) const @@ -805,16 +808,16 @@ const string& RGWRealm::get_info_oid_prefix(bool old_format) const return realm_info_oid_prefix; } -int RGWRealm::set_current_period(RGWPeriod& period, optional_yield y) +int RGWRealm::set_current_period(const DoutPrefixProvider *dpp, RGWPeriod& period, optional_yield y) { // update realm epoch to match the period's if (epoch > period.get_realm_epoch()) { - ldout(cct, 0) << "ERROR: set_current_period with old realm epoch " + ldpp_dout(dpp, 0) << "ERROR: set_current_period with old realm epoch " << period.get_realm_epoch() << ", current epoch=" << epoch << dendl; return -EINVAL; } if (epoch == period.get_realm_epoch() && current_period != period.get_id()) { - ldout(cct, 0) << "ERROR: set_current_period with same realm epoch " + ldpp_dout(dpp, 0) << "ERROR: set_current_period with same realm epoch " << period.get_realm_epoch() << ", but different period id " << period.get_id() << " != " << current_period << dendl; return -EINVAL; @@ -823,15 +826,15 @@ int RGWRealm::set_current_period(RGWPeriod& period, optional_yield y) epoch = period.get_realm_epoch(); current_period = period.get_id(); - int ret = update(y); + int ret = update(dpp, y); if (ret < 0) { - ldout(cct, 0) << "ERROR: period update: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "ERROR: period update: " << cpp_strerror(-ret) << dendl; return ret; } - ret = period.reflect(y); + ret = period.reflect(dpp, y); if (ret < 0) { - ldout(cct, 0) << "ERROR: period.reflect(): " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "ERROR: period.reflect(): " << cpp_strerror(-ret) << dendl; return ret; } @@ -843,19 +846,19 @@ string RGWRealm::get_control_oid() 
const return get_info_oid_prefix() + id + ".control"; } -int RGWRealm::notify_zone(bufferlist& bl, optional_yield y) +int RGWRealm::notify_zone(const DoutPrefixProvider *dpp, bufferlist& bl, optional_yield y) { rgw_pool pool{get_pool(cct)}; auto obj_ctx = sysobj_svc->init_obj_ctx(); auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj{pool, get_control_oid()}); - int ret = sysobj.wn().notify(bl, 0, nullptr, y); + int ret = sysobj.wn().notify(dpp, bl, 0, nullptr, y); if (ret < 0) { return ret; } return 0; } -int RGWRealm::notify_new_period(const RGWPeriod& period, optional_yield y) +int RGWRealm::notify_new_period(const DoutPrefixProvider *dpp, const RGWPeriod& period, optional_yield y) { bufferlist bl; using ceph::encode; @@ -865,7 +868,7 @@ int RGWRealm::notify_new_period(const RGWPeriod& period, optional_yield y) // reload the gateway with the new period encode(RGWRealmNotify::Reload, bl); - return notify_zone(bl, y); + return notify_zone(dpp, bl, y); } std::string RGWPeriodConfig::get_oid(const std::string& realm_id) @@ -885,7 +888,7 @@ rgw_pool RGWPeriodConfig::get_pool(CephContext *cct) return {pool_name}; } -int RGWPeriodConfig::read(RGWSI_SysObj *sysobj_svc, const std::string& realm_id, +int RGWPeriodConfig::read(const DoutPrefixProvider *dpp, RGWSI_SysObj *sysobj_svc, const std::string& realm_id, optional_yield y) { const auto& pool = get_pool(sysobj_svc->ctx()); @@ -894,7 +897,7 @@ int RGWPeriodConfig::read(RGWSI_SysObj *sysobj_svc, const std::string& realm_id, auto obj_ctx = sysobj_svc->init_obj_ctx(); auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj{pool, oid}); - int ret = sysobj.rop().read(&bl, y); + int ret = sysobj.rop().read(dpp, &bl, y); if (ret < 0) { return ret; } @@ -908,7 +911,8 @@ int RGWPeriodConfig::read(RGWSI_SysObj *sysobj_svc, const std::string& realm_id, return 0; } -int RGWPeriodConfig::write(RGWSI_SysObj *sysobj_svc, +int RGWPeriodConfig::write(const DoutPrefixProvider *dpp, + RGWSI_SysObj *sysobj_svc, const std::string& 
realm_id, optional_yield y) { const auto& pool = get_pool(sysobj_svc->ctx()); @@ -920,10 +924,10 @@ int RGWPeriodConfig::write(RGWSI_SysObj *sysobj_svc, auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj{pool, oid}); return sysobj.wop() .set_exclusive(false) - .write(bl, y); + .write(dpp, bl, y); } -int RGWPeriod::init(CephContext *_cct, RGWSI_SysObj *_sysobj_svc, +int RGWPeriod::init(const DoutPrefixProvider *dpp, CephContext *_cct, RGWSI_SysObj *_sysobj_svc, const string& period_realm_id, optional_yield y, const string& period_realm_name, bool setup_obj) { @@ -936,11 +940,12 @@ int RGWPeriod::init(CephContext *_cct, RGWSI_SysObj *_sysobj_svc, if (!setup_obj) return 0; - return init(_cct, _sysobj_svc, y, setup_obj); + return init(dpp, _cct, _sysobj_svc, y, setup_obj); } -int RGWPeriod::init(CephContext *_cct, RGWSI_SysObj *_sysobj_svc, +int RGWPeriod::init(const DoutPrefixProvider *dpp, + CephContext *_cct, RGWSI_SysObj *_sysobj_svc, optional_yield y, bool setup_obj) { cct = _cct; @@ -951,9 +956,9 @@ int RGWPeriod::init(CephContext *_cct, RGWSI_SysObj *_sysobj_svc, if (id.empty()) { RGWRealm realm(realm_id, realm_name); - int ret = realm.init(cct, sysobj_svc, y); + int ret = realm.init(dpp, cct, sysobj_svc, y); if (ret < 0) { - ldout(cct, 0) << "RGWPeriod::init failed to init realm " << realm_name << " id " << realm_id << " : " << + ldpp_dout(dpp, 0) << "RGWPeriod::init failed to init realm " << realm_name << " id " << realm_id << " : " << cpp_strerror(-ret) << dendl; return ret; } @@ -962,15 +967,15 @@ int RGWPeriod::init(CephContext *_cct, RGWSI_SysObj *_sysobj_svc, } if (!epoch) { - int ret = use_latest_epoch(y); + int ret = use_latest_epoch(dpp, y); if (ret < 0) { - ldout(cct, 0) << "failed to use_latest_epoch period id " << id << " realm " << realm_name << " id " << realm_id + ldpp_dout(dpp, 0) << "failed to use_latest_epoch period id " << id << " realm " << realm_name << " id " << realm_id << " : " << cpp_strerror(-ret) << dendl; return ret; } } - return 
read_info(y); + return read_info(dpp, y); } @@ -1019,7 +1024,8 @@ const string RGWPeriod::get_period_oid() const return oss.str(); } -int RGWPeriod::read_latest_epoch(RGWPeriodLatestEpochInfo& info, +int RGWPeriod::read_latest_epoch(const DoutPrefixProvider *dpp, + RGWPeriodLatestEpochInfo& info, optional_yield y, RGWObjVersionTracker *objv) { @@ -1029,9 +1035,9 @@ int RGWPeriod::read_latest_epoch(RGWPeriodLatestEpochInfo& info, bufferlist bl; auto obj_ctx = sysobj_svc->init_obj_ctx(); auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj{pool, oid}); - int ret = sysobj.rop().read(&bl, y); + int ret = sysobj.rop().read(dpp, &bl, y); if (ret < 0) { - ldout(cct, 1) << "error read_lastest_epoch " << pool << ":" << oid << dendl; + ldpp_dout(dpp, 1) << "error read_lastest_epoch " << pool << ":" << oid << dendl; return ret; } try { @@ -1039,18 +1045,18 @@ int RGWPeriod::read_latest_epoch(RGWPeriodLatestEpochInfo& info, using ceph::decode; decode(info, iter); } catch (buffer::error& err) { - ldout(cct, 0) << "error decoding data from " << pool << ":" << oid << dendl; + ldpp_dout(dpp, 0) << "error decoding data from " << pool << ":" << oid << dendl; return -EIO; } return 0; } -int RGWPeriod::get_latest_epoch(epoch_t& latest_epoch, optional_yield y) +int RGWPeriod::get_latest_epoch(const DoutPrefixProvider *dpp, epoch_t& latest_epoch, optional_yield y) { RGWPeriodLatestEpochInfo info; - int ret = read_latest_epoch(info, y); + int ret = read_latest_epoch(dpp, info, y); if (ret < 0) { return ret; } @@ -1060,10 +1066,10 @@ int RGWPeriod::get_latest_epoch(epoch_t& latest_epoch, optional_yield y) return 0; } -int RGWPeriod::use_latest_epoch(optional_yield y) +int RGWPeriod::use_latest_epoch(const DoutPrefixProvider *dpp, optional_yield y) { RGWPeriodLatestEpochInfo info; - int ret = read_latest_epoch(info, y); + int ret = read_latest_epoch(dpp, info, y); if (ret < 0) { return ret; } @@ -1073,7 +1079,8 @@ int RGWPeriod::use_latest_epoch(optional_yield y) return 0; } -int 
RGWPeriod::set_latest_epoch(optional_yield y, +int RGWPeriod::set_latest_epoch(const DoutPrefixProvider *dpp, + optional_yield y, epoch_t epoch, bool exclusive, RGWObjVersionTracker *objv) { @@ -1092,10 +1099,10 @@ int RGWPeriod::set_latest_epoch(optional_yield y, auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj(pool, oid)); return sysobj.wop() .set_exclusive(exclusive) - .write(bl, y); + .write(dpp, bl, y); } -int RGWPeriod::update_latest_epoch(epoch_t epoch, optional_yield y) +int RGWPeriod::update_latest_epoch(const DoutPrefixProvider *dpp, epoch_t epoch, optional_yield y) { static constexpr int MAX_RETRIES = 20; @@ -1105,33 +1112,33 @@ int RGWPeriod::update_latest_epoch(epoch_t epoch, optional_yield y) bool exclusive = false; // read existing epoch - int r = read_latest_epoch(info, y, &objv); + int r = read_latest_epoch(dpp, info, y, &objv); if (r == -ENOENT) { // use an exclusive create to set the epoch atomically exclusive = true; - ldout(cct, 20) << "creating initial latest_epoch=" << epoch + ldpp_dout(dpp, 20) << "creating initial latest_epoch=" << epoch << " for period=" << id << dendl; } else if (r < 0) { - ldout(cct, 0) << "ERROR: failed to read latest_epoch" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to read latest_epoch" << dendl; return r; } else if (epoch <= info.epoch) { r = -EEXIST; // fail with EEXIST if epoch is not newer - ldout(cct, 10) << "found existing latest_epoch " << info.epoch + ldpp_dout(dpp, 10) << "found existing latest_epoch " << info.epoch << " >= given epoch " << epoch << ", returning r=" << r << dendl; return r; } else { - ldout(cct, 20) << "updating latest_epoch from " << info.epoch + ldpp_dout(dpp, 20) << "updating latest_epoch from " << info.epoch << " -> " << epoch << " on period=" << id << dendl; } - r = set_latest_epoch(y, epoch, exclusive, &objv); + r = set_latest_epoch(dpp, y, epoch, exclusive, &objv); if (r == -EEXIST) { continue; // exclusive create raced with another update, retry } else if (r == -ECANCELED) 
{ continue; // write raced with a conflicting version, retry } if (r < 0) { - ldout(cct, 0) << "ERROR: failed to write latest_epoch" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to write latest_epoch" << dendl; return r; } return 0; // return success @@ -1140,7 +1147,7 @@ int RGWPeriod::update_latest_epoch(epoch_t epoch, optional_yield y) return -ECANCELED; // fail after max retries } -int RGWPeriod::delete_obj(optional_yield y) +int RGWPeriod::delete_obj(const DoutPrefixProvider *dpp, optional_yield y) { rgw_pool pool(get_pool(cct)); @@ -1150,9 +1157,9 @@ int RGWPeriod::delete_obj(optional_yield y) rgw_raw_obj oid{pool, p.get_period_oid()}; auto obj_ctx = sysobj_svc->init_obj_ctx(); auto sysobj = sysobj_svc->get_obj(obj_ctx, oid); - int ret = sysobj.wop().remove(y); + int ret = sysobj.wop().remove(dpp, y); if (ret < 0) { - ldout(cct, 0) << "WARNING: failed to delete period object " << oid + ldpp_dout(dpp, 0) << "WARNING: failed to delete period object " << oid << ": " << cpp_strerror(-ret) << dendl; } } @@ -1161,15 +1168,15 @@ int RGWPeriod::delete_obj(optional_yield y) rgw_raw_obj oid{pool, get_period_oid_prefix() + get_latest_epoch_oid()}; auto obj_ctx = sysobj_svc->init_obj_ctx(); auto sysobj = sysobj_svc->get_obj(obj_ctx, oid); - int ret = sysobj.wop().remove(y); + int ret = sysobj.wop().remove(dpp, y); if (ret < 0) { - ldout(cct, 0) << "WARNING: failed to delete period object " << oid + ldpp_dout(dpp, 0) << "WARNING: failed to delete period object " << oid << ": " << cpp_strerror(-ret) << dendl; } return ret; } -int RGWPeriod::read_info(optional_yield y) +int RGWPeriod::read_info(const DoutPrefixProvider *dpp, optional_yield y) { rgw_pool pool(get_pool(cct)); @@ -1177,9 +1184,9 @@ int RGWPeriod::read_info(optional_yield y) auto obj_ctx = sysobj_svc->init_obj_ctx(); auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj{pool, get_period_oid()}); - int ret = sysobj.rop().read(&bl, y); + int ret = sysobj.rop().read(dpp, &bl, y); if (ret < 0) { - ldout(cct, 0) 
<< "failed reading obj info from " << pool << ":" << get_period_oid() << ": " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "failed reading obj info from " << pool << ":" << get_period_oid() << ": " << cpp_strerror(-ret) << dendl; return ret; } @@ -1188,7 +1195,7 @@ int RGWPeriod::read_info(optional_yield y) auto iter = bl.cbegin(); decode(*this, iter); } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: failed to decode obj from " << pool << ":" << get_period_oid() << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to decode obj from " << pool << ":" << get_period_oid() << dendl; return -EIO; } @@ -1210,13 +1217,13 @@ int RGWPeriod::create(const DoutPrefixProvider *dpp, optional_yield y, bool excl period_map.id = id; - ret = store_info(exclusive, y); + ret = store_info(dpp, exclusive, y); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: storing info for " << id << ": " << cpp_strerror(-ret) << dendl; return ret; } - ret = set_latest_epoch(y, epoch); + ret = set_latest_epoch(dpp, y, epoch); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: setting latest epoch " << id << ": " << cpp_strerror(-ret) << dendl; } @@ -1224,7 +1231,7 @@ int RGWPeriod::create(const DoutPrefixProvider *dpp, optional_yield y, bool excl return ret; } -int RGWPeriod::store_info(bool exclusive, optional_yield y) +int RGWPeriod::store_info(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y) { rgw_pool pool(get_pool(cct)); @@ -1237,7 +1244,7 @@ int RGWPeriod::store_info(bool exclusive, optional_yield y) auto sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj(pool, oid)); return sysobj.wop() .set_exclusive(exclusive) - .write(bl, y); + .write(dpp, bl, y); } rgw_pool RGWPeriod::get_pool(CephContext *cct) const @@ -1248,28 +1255,28 @@ rgw_pool RGWPeriod::get_pool(CephContext *cct) const return rgw_pool(cct->_conf->rgw_period_root_pool); } -int RGWPeriod::add_zonegroup(const RGWZoneGroup& zonegroup, optional_yield y) +int RGWPeriod::add_zonegroup(const DoutPrefixProvider *dpp, const 
RGWZoneGroup& zonegroup, optional_yield y) { if (zonegroup.realm_id != realm_id) { return 0; } int ret = period_map.update(zonegroup, cct); if (ret < 0) { - ldout(cct, 0) << "ERROR: updating period map: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "ERROR: updating period map: " << cpp_strerror(-ret) << dendl; return ret; } - return store_info(false, y); + return store_info(dpp, false, y); } -int RGWPeriod::update(optional_yield y) +int RGWPeriod::update(const DoutPrefixProvider *dpp, optional_yield y) { auto zone_svc = sysobj_svc->get_zone_svc(); - ldout(cct, 20) << __func__ << " realm " << realm_id << " period " << get_id() << dendl; + ldpp_dout(dpp, 20) << __func__ << " realm " << realm_id << " period " << get_id() << dendl; list zonegroups; - int ret = zone_svc->list_zonegroups(zonegroups); + int ret = zone_svc->list_zonegroups(dpp, zonegroups); if (ret < 0) { - ldout(cct, 0) << "ERROR: failed to list zonegroups: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to list zonegroups: " << cpp_strerror(-ret) << dendl; return ret; } @@ -1279,24 +1286,24 @@ int RGWPeriod::update(optional_yield y) for (auto& iter : zonegroups) { RGWZoneGroup zg(string(), iter); - ret = zg.init(cct, sysobj_svc, y); + ret = zg.init(dpp, cct, sysobj_svc, y); if (ret < 0) { - ldout(cct, 0) << "WARNING: zg.init() failed: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "WARNING: zg.init() failed: " << cpp_strerror(-ret) << dendl; continue; } if (zg.realm_id != realm_id) { - ldout(cct, 20) << "skipping zonegroup " << zg.get_name() << " zone realm id " << zg.realm_id << ", not on our realm " << realm_id << dendl; + ldpp_dout(dpp, 20) << "skipping zonegroup " << zg.get_name() << " zone realm id " << zg.realm_id << ", not on our realm " << realm_id << dendl; continue; } if (zg.master_zone.empty()) { - ldout(cct, 0) << "ERROR: zonegroup " << zg.get_name() << " should have a master zone " << dendl; + ldpp_dout(dpp, 0) << "ERROR: zonegroup " << 
zg.get_name() << " should have a master zone " << dendl; return -EINVAL; } if (zg.zones.find(zg.master_zone) == zg.zones.end()) { - ldout(cct,0) << "ERROR: zonegroup " << zg.get_name() + ldpp_dout(dpp, 0) << "ERROR: zonegroup " << zg.get_name() << " has a non existent master zone "<< dendl; return -EINVAL; } @@ -1312,38 +1319,38 @@ int RGWPeriod::update(optional_yield y) } } - ret = period_config.read(sysobj_svc, realm_id, y); + ret = period_config.read(dpp, sysobj_svc, realm_id, y); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << "ERROR: failed to read period config: " + ldpp_dout(dpp, 0) << "ERROR: failed to read period config: " << cpp_strerror(ret) << dendl; return ret; } return 0; } -int RGWPeriod::reflect(optional_yield y) +int RGWPeriod::reflect(const DoutPrefixProvider *dpp, optional_yield y) { for (auto& iter : period_map.zonegroups) { RGWZoneGroup& zg = iter.second; zg.reinit_instance(cct, sysobj_svc); - int r = zg.write(false, y); + int r = zg.write(dpp, false, y); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to store zonegroup info for zonegroup=" << iter.first << ": " << cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to store zonegroup info for zonegroup=" << iter.first << ": " << cpp_strerror(-r) << dendl; return r; } if (zg.is_master_zonegroup()) { // set master as default if no default exists - r = zg.set_as_default(y, true); + r = zg.set_as_default(dpp, y, true); if (r == 0) { - ldout(cct, 1) << "Set the period's master zonegroup " << zg.get_id() + ldpp_dout(dpp, 1) << "Set the period's master zonegroup " << zg.get_id() << " as the default" << dendl; } } } - int r = period_config.write(sysobj_svc, realm_id, y); + int r = period_config.write(dpp, sysobj_svc, realm_id, y); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to store period config: " + ldpp_dout(dpp, 0) << "ERROR: failed to store period config: " << cpp_strerror(-r) << dendl; return r; } @@ -1359,28 +1366,29 @@ void RGWPeriod::fork() realm_epoch++; } -static int 
read_sync_status(rgw::sal::RadosStore* store, rgw_meta_sync_status *sync_status) +static int read_sync_status(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, rgw_meta_sync_status *sync_status) { // initialize a sync status manager to read the status RGWMetaSyncStatusManager mgr(store, store->svc()->rados->get_async_processor()); - int r = mgr.init(); + int r = mgr.init(dpp); if (r < 0) { return r; } - r = mgr.read_sync_status(sync_status); + r = mgr.read_sync_status(dpp, sync_status); mgr.stop(); return r; } -int RGWPeriod::update_sync_status(rgw::sal::Store* store, /* for now */ +int RGWPeriod::update_sync_status(const DoutPrefixProvider *dpp, + rgw::sal::Store* store, /* for now */ const RGWPeriod ¤t_period, std::ostream& error_stream, bool force_if_stale) { rgw_meta_sync_status status; - int r = read_sync_status(static_cast(store), &status); + int r = read_sync_status(dpp, static_cast(store), &status); if (r < 0) { - ldout(cct, 0) << "period failed to read sync status: " + ldpp_dout(dpp, 0) << "period failed to read sync status: " << cpp_strerror(-r) << dendl; return r; } @@ -1457,7 +1465,7 @@ int RGWPeriod::commit(const DoutPrefixProvider *dpp, // did the master zone change? 
if (master_zone != current_period.get_master_zone()) { // store the current metadata sync status in the period - int r = update_sync_status(store, current_period, error_stream, force_if_stale); + int r = update_sync_status(dpp, store, current_period, error_stream, force_if_stale); if (r < 0) { ldpp_dout(dpp, 0) << "failed to update metadata sync status: " << cpp_strerror(-r) << dendl; @@ -1470,7 +1478,7 @@ int RGWPeriod::commit(const DoutPrefixProvider *dpp, return r; } // set as current period - r = realm.set_current_period(*this, y); + r = realm.set_current_period(dpp, *this, y); if (r < 0) { ldpp_dout(dpp, 0) << "failed to update realm's current period: " << cpp_strerror(-r) << dendl; @@ -1478,7 +1486,7 @@ int RGWPeriod::commit(const DoutPrefixProvider *dpp, } ldpp_dout(dpp, 4) << "Promoted to master zone and committed new period " << id << dendl; - realm.notify_new_period(*this, y); + realm.notify_new_period(dpp, *this, y); return 0; } // period must be based on current epoch @@ -1495,13 +1503,13 @@ int RGWPeriod::commit(const DoutPrefixProvider *dpp, set_predecessor(current_period.get_predecessor()); realm_epoch = current_period.get_realm_epoch(); // write the period to rados - int r = store_info(false, y); + int r = store_info(dpp, false, y); if (r < 0) { ldpp_dout(dpp, 0) << "failed to store period: " << cpp_strerror(-r) << dendl; return r; } // set as latest epoch - r = update_latest_epoch(epoch, y); + r = update_latest_epoch(dpp, epoch, y); if (r == -EEXIST) { // already have this epoch (or a more recent one) return 0; @@ -1510,14 +1518,14 @@ int RGWPeriod::commit(const DoutPrefixProvider *dpp, ldpp_dout(dpp, 0) << "failed to set latest epoch: " << cpp_strerror(-r) << dendl; return r; } - r = reflect(y); + r = reflect(dpp, y); if (r < 0) { ldpp_dout(dpp, 0) << "failed to update local objects: " << cpp_strerror(-r) << dendl; return r; } ldpp_dout(dpp, 4) << "Committed new epoch " << epoch << " for period " << id << dendl; - realm.notify_new_period(*this, 
y); + realm.notify_new_period(dpp, *this, y); return 0; } @@ -1539,7 +1547,8 @@ int RGWZoneParams::create_default(const DoutPrefixProvider *dpp, optional_yield namespace { -int get_zones_pool_set(CephContext* cct, +int get_zones_pool_set(const DoutPrefixProvider *dpp, + CephContext* cct, RGWSI_SysObj* sysobj_svc, const list& zones, const string& my_zone_id, @@ -1548,9 +1557,9 @@ int get_zones_pool_set(CephContext* cct, { for(auto const& iter : zones) { RGWZoneParams zone(iter); - int r = zone.init(cct, sysobj_svc, y); + int r = zone.init(dpp, cct, sysobj_svc, y); if (r < 0) { - ldout(cct, 0) << "Error: init zone " << iter << ":" << cpp_strerror(-r) << dendl; + ldpp_dout(dpp, 0) << "Error: init zone " << iter << ":" << cpp_strerror(-r) << dendl; return r; } if (zone.get_id() != my_zone_id) { @@ -1613,19 +1622,19 @@ rgw_pool fix_zone_pool_dup(set pools, } } -int RGWZoneParams::fix_pool_names(optional_yield y) +int RGWZoneParams::fix_pool_names(const DoutPrefixProvider *dpp, optional_yield y) { list zones; - int r = zone_svc->list_zones(zones); + int r = zone_svc->list_zones(dpp, zones); if (r < 0) { - ldout(cct, 10) << "WARNING: store->list_zones() returned r=" << r << dendl; + ldpp_dout(dpp, 10) << "WARNING: store->list_zones() returned r=" << r << dendl; } set pools; - r = get_zones_pool_set(cct, sysobj_svc, zones, id, pools, y); + r = get_zones_pool_set(dpp, cct, sysobj_svc, zones, id, pools, y); if (r < 0) { - ldout(cct, 0) << "Error: get_zones_pool_names" << r << dendl; + ldpp_dout(dpp, 0) << "Error: get_zones_pool_names" << r << dendl; return r; } @@ -1681,7 +1690,7 @@ int RGWZoneParams::create(const DoutPrefixProvider *dpp, optional_yield y, bool placement_pools["default-placement"] = default_placement; } - r = fix_pool_names(y); + r = fix_pool_names(dpp, y); if (r < 0) { ldpp_dout(dpp, 0) << "ERROR: fix_pool_names returned r=" << r << dendl; return r; @@ -1694,7 +1703,7 @@ int RGWZoneParams::create(const DoutPrefixProvider *dpp, optional_yield y, bool // try 
to set as default. may race with another create, so pass exclusive=true // so we don't override an existing default - r = set_as_default(y, true); + r = set_as_default(dpp, y, true); if (r < 0 && r != -EEXIST) { ldpp_dout(dpp, 10) << "WARNING: failed to set zone as default, r=" << r << dendl; } @@ -1734,48 +1743,49 @@ const string& RGWZoneParams::get_predefined_name(CephContext *cct) const { return cct->_conf->rgw_zone; } -int RGWZoneParams::init(CephContext *cct, RGWSI_SysObj *sysobj_svc, +int RGWZoneParams::init(const DoutPrefixProvider *dpp, + CephContext *cct, RGWSI_SysObj *sysobj_svc, optional_yield y, bool setup_obj, bool old_format) { if (name.empty()) { name = cct->_conf->rgw_zone; } - return RGWSystemMetaObj::init(cct, sysobj_svc, y, setup_obj, old_format); + return RGWSystemMetaObj::init(dpp, cct, sysobj_svc, y, setup_obj, old_format); } -int RGWZoneParams::read_default_id(string& default_id, optional_yield y, +int RGWZoneParams::read_default_id(const DoutPrefixProvider *dpp, string& default_id, optional_yield y, bool old_format) { if (realm_id.empty()) { /* try using default realm */ RGWRealm realm; - int ret = realm.init(cct, sysobj_svc, y); + int ret = realm.init(dpp, cct, sysobj_svc, y); //no default realm exist if (ret < 0) { - return read_id(default_zone_name, default_id, y); + return read_id(dpp, default_zone_name, default_id, y); } realm_id = realm.get_id(); } - return RGWSystemMetaObj::read_default_id(default_id, y, old_format); + return RGWSystemMetaObj::read_default_id(dpp, default_id, y, old_format); } -int RGWZoneParams::set_as_default(optional_yield y, bool exclusive) +int RGWZoneParams::set_as_default(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive) { if (realm_id.empty()) { /* try using default realm */ RGWRealm realm; - int ret = realm.init(cct, sysobj_svc, y); + int ret = realm.init(dpp, cct, sysobj_svc, y); if (ret < 0) { - ldout(cct, 10) << "could not read realm id: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 10) 
<< "could not read realm id: " << cpp_strerror(-ret) << dendl; return -EINVAL; } realm_id = realm.get_id(); } - return RGWSystemMetaObj::set_as_default(y, exclusive); + return RGWSystemMetaObj::set_as_default(dpp, y, exclusive); } const string& RGWZoneParams::get_compression_type(const rgw_placement_rule& placement_rule) const @@ -1891,11 +1901,11 @@ uint32_t RGWPeriodMap::get_zone_short_id(const string& zone_id) const return i->second; } -int RGWZoneGroupMap::read(CephContext *cct, RGWSI_SysObj *sysobj_svc, optional_yield y) +int RGWZoneGroupMap::read(const DoutPrefixProvider *dpp, CephContext *cct, RGWSI_SysObj *sysobj_svc, optional_yield y) { RGWPeriod period; - int ret = period.init(cct, sysobj_svc, y); + int ret = period.init(dpp, cct, sysobj_svc, y); if (ret < 0) { cerr << "failed to read current period info: " << cpp_strerror(ret); return ret; diff --git a/src/rgw/rgw_zone.h b/src/rgw/rgw_zone.h index 8f80c054a01..4f6e1f9b10c 100644 --- a/src/rgw/rgw_zone.h +++ b/src/rgw/rgw_zone.h @@ -86,15 +86,16 @@ protected: RGWSI_SysObj *sysobj_svc{nullptr}; RGWSI_Zone *zone_svc{nullptr}; - int store_name(bool exclusive, optional_yield y); - int store_info(bool exclusive, optional_yield y); - int read_info(const std::string& obj_id, optional_yield y, bool old_format = false); - int read_id(const std::string& obj_name, std::string& obj_id, optional_yield y); - int read_default(RGWDefaultSystemMetaObjInfo& default_info, + int store_name(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y); + int store_info(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y); + int read_info(const DoutPrefixProvider *dpp, const std::string& obj_id, optional_yield y, bool old_format = false); + int read_id(const DoutPrefixProvider *dpp, const std::string& obj_name, std::string& obj_id, optional_yield y); + int read_default(const DoutPrefixProvider *dpp, + RGWDefaultSystemMetaObjInfo& default_info, const std::string& oid, optional_yield y); /* read and use default id 
*/ - int use_default(optional_yield y, bool old_format = false); + int use_default(const DoutPrefixProvider *dpp, optional_yield y, bool old_format = false); public: RGWSystemMetaObj() {} @@ -131,20 +132,20 @@ public: } void reinit_instance(CephContext *_cct, RGWSI_SysObj *_sysobj_svc); - int init(CephContext *_cct, RGWSI_SysObj *_sysobj_svc, + int init(const DoutPrefixProvider *dpp, CephContext *_cct, RGWSI_SysObj *_sysobj_svc, optional_yield y, bool setup_obj = true, bool old_format = false); - virtual int read_default_id(std::string& default_id, optional_yield y, + virtual int read_default_id(const DoutPrefixProvider *dpp, std::string& default_id, optional_yield y, bool old_format = false); - virtual int set_as_default(optional_yield y, bool exclusive = false); + virtual int set_as_default(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = false); int delete_default(); virtual int create(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = true); - int delete_obj(optional_yield y, bool old_format = false); - int rename(const std::string& new_name, optional_yield y); - int update(optional_yield y) { return store_info(false, y);} - int update_name(optional_yield y) { return store_name(false, y);} - int read(optional_yield y); - int write(bool exclusive, optional_yield y); + int delete_obj(const DoutPrefixProvider *dpp, optional_yield y, bool old_format = false); + int rename(const DoutPrefixProvider *dpp, const std::string& new_name, optional_yield y); + int update(const DoutPrefixProvider *dpp, optional_yield y) { return store_info(dpp, false, y);} + int update_name(const DoutPrefixProvider *dpp, optional_yield y) { return store_name(dpp, false, y);} + int read(const DoutPrefixProvider *dpp, optional_yield y); + int write(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y); virtual rgw_pool get_pool(CephContext *cct) const = 0; virtual const std::string get_default_oid(bool old_format = false) const = 0; @@ -400,14 +401,15 
@@ struct RGWZoneParams : RGWSystemMetaObj { const std::string& get_info_oid_prefix(bool old_format = false) const override; const std::string& get_predefined_name(CephContext *cct) const override; - int init(CephContext *_cct, RGWSI_SysObj *_sysobj_svc, optional_yield y, + int init(const DoutPrefixProvider *dpp, + CephContext *_cct, RGWSI_SysObj *_sysobj_svc, optional_yield y, bool setup_obj = true, bool old_format = false); using RGWSystemMetaObj::init; - int read_default_id(std::string& default_id, optional_yield y, bool old_format = false) override; - int set_as_default(optional_yield y, bool exclusive = false) override; + int read_default_id(const DoutPrefixProvider *dpp, std::string& default_id, optional_yield y, bool old_format = false) override; + int set_as_default(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = false) override; int create_default(const DoutPrefixProvider *dpp, optional_yield y, bool old_format = false); int create(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = true) override; - int fix_pool_names(optional_yield y); + int fix_pool_names(const DoutPrefixProvider *dpp, optional_yield y); const string& get_compression_type(const rgw_placement_rule& placement_rule) const; @@ -760,11 +762,11 @@ struct RGWZoneGroup : public RGWSystemMetaObj { realm_id(_realm_id) {} bool is_master_zonegroup() const { return is_master;} - void update_master(bool _is_master, optional_yield y) { + void update_master(const DoutPrefixProvider *dpp, bool _is_master, optional_yield y) { is_master = _is_master; - post_process_params(y); + post_process_params(dpp, y); } - void post_process_params(optional_yield y); + void post_process_params(const DoutPrefixProvider *dpp, optional_yield y); void encode(bufferlist& bl) const override { ENCODE_START(5, 1, bl); @@ -812,18 +814,19 @@ struct RGWZoneGroup : public RGWSystemMetaObj { DECODE_FINISH(bl); } - int read_default_id(std::string& default_id, optional_yield y, bool old_format = false) 
override; - int set_as_default(optional_yield y, bool exclusive = false) override; + int read_default_id(const DoutPrefixProvider *dpp, std::string& default_id, optional_yield y, bool old_format = false) override; + int set_as_default(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = false) override; int create_default(const DoutPrefixProvider *dpp, optional_yield y, bool old_format = false); int equals(const std::string& other_zonegroup) const; - int add_zone(const RGWZoneParams& zone_params, bool *is_master, bool *read_only, + int add_zone(const DoutPrefixProvider *dpp, + const RGWZoneParams& zone_params, bool *is_master, bool *read_only, const list& endpoints, const std::string *ptier_type, bool *psync_from_all, list& sync_from, list& sync_from_rm, std::string *predirect_zone, std::optional bucket_index_max_shards, RGWSyncModulesManager *sync_mgr, optional_yield y); - int remove_zone(const std::string& zone_id, optional_yield y); - int rename_zone(const RGWZoneParams& zone_params, optional_yield y); + int remove_zone(const DoutPrefixProvider *dpp, const std::string& zone_id, optional_yield y); + int rename_zone(const DoutPrefixProvider *dpp, const RGWZoneParams& zone_params, optional_yield y); rgw_pool get_pool(CephContext *cct) const override; const std::string get_default_oid(bool old_region_format = false) const override; const std::string& get_info_oid_prefix(bool old_region_format = false) const override; @@ -888,8 +891,8 @@ struct RGWPeriodConfig // the period config must be stored in a local object outside of the period, // so that it can be used in a default configuration where no realm/period // exists - int read(RGWSI_SysObj *sysobj_svc, const std::string& realm_id, optional_yield y); - int write(RGWSI_SysObj *sysobj_svc, const std::string& realm_id, optional_yield y); + int read(const DoutPrefixProvider *dpp, RGWSI_SysObj *sysobj_svc, const std::string& realm_id, optional_yield y); + int write(const DoutPrefixProvider *dpp, 
RGWSI_SysObj *sysobj_svc, const std::string& realm_id, optional_yield y); static std::string get_oid(const std::string& realm_id); static rgw_pool get_pool(CephContext *cct); @@ -925,7 +928,7 @@ struct RGWZoneGroupMap { RGWQuotaInfo user_quota; /* construct the map */ - int read(CephContext *cct, RGWSI_SysObj *sysobj_svc, optional_yield y); + int read(const DoutPrefixProvider *dpp, CephContext *cct, RGWSI_SysObj *sysobj_svc, optional_yield y); void encode(bufferlist& bl) const; void decode(bufferlist::const_iterator& bl); @@ -943,8 +946,8 @@ class RGWRealm : public RGWSystemMetaObj std::string current_period; epoch_t epoch{0}; //< realm epoch, incremented for each new period - int create_control(bool exclusive, optional_yield y); - int delete_control(optional_yield y); + int create_control(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y); + int delete_control(const DoutPrefixProvider *dpp, optional_yield y); public: RGWRealm() {} RGWRealm(const std::string& _id, const std::string& _name = "") : RGWSystemMetaObj(_id, _name) {} @@ -968,7 +971,7 @@ public: } int create(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = true) override; - int delete_obj(optional_yield y); + int delete_obj(const DoutPrefixProvider *dpp, optional_yield y); rgw_pool get_pool(CephContext *cct) const override; const std::string get_default_oid(bool old_format = false) const override; const std::string& get_names_oid_prefix() const override; @@ -984,7 +987,7 @@ public: const std::string& get_current_period() const { return current_period; } - int set_current_period(RGWPeriod& period, optional_yield y); + int set_current_period(const DoutPrefixProvider *dpp, RGWPeriod& period, optional_yield y); void clear_current_period_and_epoch() { current_period.clear(); epoch = 0; @@ -993,9 +996,9 @@ public: std::string get_control_oid() const; /// send a notify on the realm control object - int notify_zone(bufferlist& bl, optional_yield y); + int notify_zone(const 
DoutPrefixProvider *dpp, bufferlist& bl, optional_yield y); /// notify the zone of a new period - int notify_new_period(const RGWPeriod& period, optional_yield y); + int notify_new_period(const DoutPrefixProvider *dpp, const RGWPeriod& period, optional_yield y); }; WRITE_CLASS_ENCODER(RGWRealm) @@ -1059,18 +1062,20 @@ class RGWPeriod CephContext *cct{nullptr}; RGWSI_SysObj *sysobj_svc{nullptr}; - int read_info(optional_yield y); - int read_latest_epoch(RGWPeriodLatestEpochInfo& epoch_info, + int read_info(const DoutPrefixProvider *dpp, optional_yield y); + int read_latest_epoch(const DoutPrefixProvider *dpp, + RGWPeriodLatestEpochInfo& epoch_info, optional_yield y, RGWObjVersionTracker *objv = nullptr); - int use_latest_epoch(optional_yield y); + int use_latest_epoch(const DoutPrefixProvider *dpp, optional_yield y); int use_current_period(); const std::string get_period_oid() const; const std::string get_period_oid_prefix() const; // gather the metadata sync status for each shard; only for use on master zone - int update_sync_status(rgw::sal::Store* store, + int update_sync_status(const DoutPrefixProvider *dpp, + rgw::sal::Store* store, const RGWPeriod ¤t_period, std::ostream& error_stream, bool force_if_stale); @@ -1119,7 +1124,7 @@ public: realm_id = _realm_id; } - int reflect(optional_yield y); + int reflect(const DoutPrefixProvider *dpp, optional_yield y); int get_zonegroup(RGWZoneGroup& zonegroup, const std::string& zonegroup_id) const; @@ -1145,24 +1150,24 @@ public: return false; } - int get_latest_epoch(epoch_t& epoch, optional_yield y); - int set_latest_epoch(optional_yield y, + int get_latest_epoch(const DoutPrefixProvider *dpp, epoch_t& epoch, optional_yield y); + int set_latest_epoch(const DoutPrefixProvider *dpp, optional_yield y, epoch_t epoch, bool exclusive = false, RGWObjVersionTracker *objv = nullptr); // update latest_epoch if the given epoch is higher, else return -EEXIST - int update_latest_epoch(epoch_t epoch, optional_yield y); + int 
update_latest_epoch(const DoutPrefixProvider *dpp, epoch_t epoch, optional_yield y); - int init(CephContext *_cct, RGWSI_SysObj *_sysobj_svc, const std::string &period_realm_id, optional_yield y, + int init(const DoutPrefixProvider *dpp, CephContext *_cct, RGWSI_SysObj *_sysobj_svc, const std::string &period_realm_id, optional_yield y, const std::string &period_realm_name = "", bool setup_obj = true); - int init(CephContext *_cct, RGWSI_SysObj *_sysobj_svc, optional_yield y, bool setup_obj = true); + int init(const DoutPrefixProvider *dpp, CephContext *_cct, RGWSI_SysObj *_sysobj_svc, optional_yield y, bool setup_obj = true); int create(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = true); - int delete_obj(optional_yield y); - int store_info(bool exclusive, optional_yield y); - int add_zonegroup(const RGWZoneGroup& zonegroup, optional_yield y); + int delete_obj(const DoutPrefixProvider *dpp, optional_yield y); + int store_info(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y); + int add_zonegroup(const DoutPrefixProvider *dpp, const RGWZoneGroup& zonegroup, optional_yield y); void fork(); - int update(optional_yield y); + int update(const DoutPrefixProvider *dpp, optional_yield y); // commit a staging period; only for use on master zone int commit(const DoutPrefixProvider *dpp, diff --git a/src/rgw/services/svc_bi.h b/src/rgw/services/svc_bi.h index cf6e605c87c..abb68e39418 100644 --- a/src/rgw/services/svc_bi.h +++ b/src/rgw/services/svc_bi.h @@ -29,10 +29,11 @@ public: RGWSI_BucketIndex(CephContext *cct) : RGWServiceInstance(cct) {} virtual ~RGWSI_BucketIndex() {} - virtual int init_index(RGWBucketInfo& bucket_info) = 0; - virtual int clean_index(RGWBucketInfo& bucket_info) = 0; + virtual int init_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info) = 0; + virtual int clean_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info) = 0; - virtual int read_stats(const RGWBucketInfo& bucket_info, + virtual int 
read_stats(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, RGWBucketEnt *stats, optional_yield y) = 0; diff --git a/src/rgw/services/svc_bi_rados.cc b/src/rgw/services/svc_bi_rados.cc index 74785355fbd..dd9bcc6add2 100644 --- a/src/rgw/services/svc_bi_rados.cc +++ b/src/rgw/services/svc_bi_rados.cc @@ -30,22 +30,24 @@ void RGWSI_BucketIndex_RADOS::init(RGWSI_Zone *zone_svc, svc.datalog_rados = datalog_rados_svc; } -int RGWSI_BucketIndex_RADOS::open_pool(const rgw_pool& pool, +int RGWSI_BucketIndex_RADOS::open_pool(const DoutPrefixProvider *dpp, + const rgw_pool& pool, RGWSI_RADOS::Pool *index_pool, bool mostly_omap) { *index_pool = svc.rados->pool(pool); - return index_pool->open(RGWSI_RADOS::OpenParams() + return index_pool->open(dpp, RGWSI_RADOS::OpenParams() .set_mostly_omap(mostly_omap)); } -int RGWSI_BucketIndex_RADOS::open_bucket_index_pool(const RGWBucketInfo& bucket_info, +int RGWSI_BucketIndex_RADOS::open_bucket_index_pool(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, RGWSI_RADOS::Pool *index_pool) { const rgw_pool& explicit_pool = bucket_info.bucket.explicit_placement.index_pool; if (!explicit_pool.empty()) { - return open_pool(explicit_pool, index_pool, false); + return open_pool(dpp, explicit_pool, index_pool, false); } auto& zonegroup = svc.zone->get_zonegroup(); @@ -57,28 +59,29 @@ int RGWSI_BucketIndex_RADOS::open_bucket_index_pool(const RGWBucketInfo& bucket_ } auto iter = zone_params.placement_pools.find(rule->name); if (iter == zone_params.placement_pools.end()) { - ldout(cct, 0) << "could not find placement rule " << *rule << " within zonegroup " << dendl; + ldpp_dout(dpp, 0) << "could not find placement rule " << *rule << " within zonegroup " << dendl; return -EINVAL; } - int r = open_pool(iter->second.index_pool, index_pool, true); + int r = open_pool(dpp, iter->second.index_pool, index_pool, true); if (r < 0) return r; return 0; } -int RGWSI_BucketIndex_RADOS::open_bucket_index_base(const RGWBucketInfo& 
bucket_info, +int RGWSI_BucketIndex_RADOS::open_bucket_index_base(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, RGWSI_RADOS::Pool *index_pool, string *bucket_oid_base) { const rgw_bucket& bucket = bucket_info.bucket; - int r = open_bucket_index_pool(bucket_info, index_pool); + int r = open_bucket_index_pool(dpp, bucket_info, index_pool); if (r < 0) return r; if (bucket.bucket_id.empty()) { - ldout(cct, 0) << "ERROR: empty bucket_id for bucket operation" << dendl; + ldpp_dout(dpp, 0) << "ERROR: empty bucket_id for bucket operation" << dendl; return -EIO; } @@ -89,20 +92,21 @@ int RGWSI_BucketIndex_RADOS::open_bucket_index_base(const RGWBucketInfo& bucket_ } -int RGWSI_BucketIndex_RADOS::open_bucket_index(const RGWBucketInfo& bucket_info, +int RGWSI_BucketIndex_RADOS::open_bucket_index(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, RGWSI_RADOS::Pool *index_pool, string *bucket_oid) { const rgw_bucket& bucket = bucket_info.bucket; - int r = open_bucket_index_pool(bucket_info, index_pool); + int r = open_bucket_index_pool(dpp, bucket_info, index_pool); if (r < 0) { - ldout(cct, 20) << __func__ << ": open_bucket_index_pool() returned " + ldpp_dout(dpp, 20) << __func__ << ": open_bucket_index_pool() returned " << r << dendl; return r; } if (bucket.bucket_id.empty()) { - ldout(cct, 0) << "ERROR: empty bucket id for bucket operation" << dendl; + ldpp_dout(dpp, 0) << "ERROR: empty bucket id for bucket operation" << dendl; return -EIO; } @@ -163,7 +167,8 @@ static void get_bucket_instance_ids(const RGWBucketInfo& bucket_info, } } -int RGWSI_BucketIndex_RADOS::open_bucket_index(const RGWBucketInfo& bucket_info, +int RGWSI_BucketIndex_RADOS::open_bucket_index(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, std::optional _shard_id, RGWSI_RADOS::Pool *index_pool, map *bucket_objs, @@ -171,9 +176,9 @@ int RGWSI_BucketIndex_RADOS::open_bucket_index(const RGWBucketInfo& bucket_info, { int shard_id = _shard_id.value_or(-1); 
string bucket_oid_base; - int ret = open_bucket_index_base(bucket_info, index_pool, &bucket_oid_base); + int ret = open_bucket_index_base(dpp, bucket_info, index_pool, &bucket_oid_base); if (ret < 0) { - ldout(cct, 20) << __func__ << ": open_bucket_index_pool() returned " + ldpp_dout(dpp, 20) << __func__ << ": open_bucket_index_pool() returned " << ret << dendl; return ret; } @@ -236,7 +241,8 @@ int RGWSI_BucketIndex_RADOS::get_bucket_index_object(const string& bucket_oid_ba return r; } -int RGWSI_BucketIndex_RADOS::open_bucket_index_shard(const RGWBucketInfo& bucket_info, +int RGWSI_BucketIndex_RADOS::open_bucket_index_shard(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, const string& obj_key, RGWSI_RADOS::Obj *bucket_obj, int *shard_id) @@ -245,9 +251,9 @@ int RGWSI_BucketIndex_RADOS::open_bucket_index_shard(const RGWBucketInfo& bucket RGWSI_RADOS::Pool pool; - int ret = open_bucket_index_base(bucket_info, &pool, &bucket_oid_base); + int ret = open_bucket_index_base(dpp, bucket_info, &pool, &bucket_oid_base); if (ret < 0) { - ldout(cct, 20) << __func__ << ": open_bucket_index_pool() returned " + ldpp_dout(dpp, 20) << __func__ << ": open_bucket_index_pool() returned " << ret << dendl; return ret; } @@ -257,7 +263,7 @@ int RGWSI_BucketIndex_RADOS::open_bucket_index_shard(const RGWBucketInfo& bucket ret = get_bucket_index_object(bucket_oid_base, obj_key, bucket_info.layout.current_index.layout.normal.num_shards, bucket_info.layout.current_index.layout.normal.hash_type, &oid, shard_id); if (ret < 0) { - ldout(cct, 10) << "get_bucket_index_object() returned ret=" << ret << dendl; + ldpp_dout(dpp, 10) << "get_bucket_index_object() returned ret=" << ret << dendl; return ret; } @@ -266,16 +272,17 @@ int RGWSI_BucketIndex_RADOS::open_bucket_index_shard(const RGWBucketInfo& bucket return 0; } -int RGWSI_BucketIndex_RADOS::open_bucket_index_shard(const RGWBucketInfo& bucket_info, +int RGWSI_BucketIndex_RADOS::open_bucket_index_shard(const 
DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, int shard_id, const rgw::bucket_index_layout_generation& idx_layout, RGWSI_RADOS::Obj *bucket_obj) { RGWSI_RADOS::Pool index_pool; string bucket_oid_base; - int ret = open_bucket_index_base(bucket_info, &index_pool, &bucket_oid_base); + int ret = open_bucket_index_base(dpp, bucket_info, &index_pool, &bucket_oid_base); if (ret < 0) { - ldout(cct, 20) << __func__ << ": open_bucket_index_pool() returned " + ldpp_dout(dpp, 20) << __func__ << ": open_bucket_index_pool() returned " << ret << dendl; return ret; } @@ -290,7 +297,8 @@ int RGWSI_BucketIndex_RADOS::open_bucket_index_shard(const RGWBucketInfo& bucket return 0; } -int RGWSI_BucketIndex_RADOS::cls_bucket_head(const RGWBucketInfo& bucket_info, +int RGWSI_BucketIndex_RADOS::cls_bucket_head(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, int shard_id, vector *headers, map *bucket_instance_ids, @@ -298,7 +306,7 @@ int RGWSI_BucketIndex_RADOS::cls_bucket_head(const RGWBucketInfo& bucket_info, { RGWSI_RADOS::Pool index_pool; map oids; - int r = open_bucket_index(bucket_info, shard_id, &index_pool, &oids, bucket_instance_ids); + int r = open_bucket_index(dpp, bucket_info, shard_id, &index_pool, &oids, bucket_instance_ids); if (r < 0) return r; @@ -319,12 +327,12 @@ int RGWSI_BucketIndex_RADOS::cls_bucket_head(const RGWBucketInfo& bucket_info, } -int RGWSI_BucketIndex_RADOS::init_index(RGWBucketInfo& bucket_info) +int RGWSI_BucketIndex_RADOS::init_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info) { RGWSI_RADOS::Pool index_pool; string dir_oid = dir_oid_prefix; - int r = open_bucket_index_pool(bucket_info, &index_pool); + int r = open_bucket_index_pool(dpp, bucket_info, &index_pool); if (r < 0) { return r; } @@ -339,12 +347,12 @@ int RGWSI_BucketIndex_RADOS::init_index(RGWBucketInfo& bucket_info) cct->_conf->rgw_bucket_index_max_aio)(); } -int RGWSI_BucketIndex_RADOS::clean_index(RGWBucketInfo& bucket_info) +int 
RGWSI_BucketIndex_RADOS::clean_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info) { RGWSI_RADOS::Pool index_pool; std::string dir_oid = dir_oid_prefix; - int r = open_bucket_index_pool(bucket_info, &index_pool); + int r = open_bucket_index_pool(dpp, bucket_info, &index_pool); if (r < 0) { return r; } @@ -359,14 +367,15 @@ int RGWSI_BucketIndex_RADOS::clean_index(RGWBucketInfo& bucket_info) cct->_conf->rgw_bucket_index_max_aio)(); } -int RGWSI_BucketIndex_RADOS::read_stats(const RGWBucketInfo& bucket_info, +int RGWSI_BucketIndex_RADOS::read_stats(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, RGWBucketEnt *result, optional_yield y) { vector headers; result->bucket = bucket_info.bucket; - int r = cls_bucket_head(bucket_info, RGW_NO_SHARD, &headers, nullptr, y); + int r = cls_bucket_head(dpp, bucket_info, RGW_NO_SHARD, &headers, nullptr, y); if (r < 0) { return r; } @@ -388,13 +397,13 @@ int RGWSI_BucketIndex_RADOS::read_stats(const RGWBucketInfo& bucket_info, return 0; } -int RGWSI_BucketIndex_RADOS::get_reshard_status(const RGWBucketInfo& bucket_info, list *status) +int RGWSI_BucketIndex_RADOS::get_reshard_status(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, list *status) { map bucket_objs; RGWSI_RADOS::Pool index_pool; - int r = open_bucket_index(bucket_info, + int r = open_bucket_index(dpp, bucket_info, std::nullopt, &index_pool, &bucket_objs, @@ -408,7 +417,7 @@ int RGWSI_BucketIndex_RADOS::get_reshard_status(const RGWBucketInfo& bucket_info int ret = cls_rgw_get_bucket_resharding(index_pool.ioctx(), i.second, &entry); if (ret < 0 && ret != -ENOENT) { - lderr(cct) << "ERROR: " << __func__ << ": cls_rgw_get_bucket_resharding() returned ret=" << ret << dendl; + ldpp_dout(dpp, -1) << "ERROR: " << __func__ << ": cls_rgw_get_bucket_resharding() returned ret=" << ret << dendl; return ret; } @@ -431,12 +440,12 @@ int RGWSI_BucketIndex_RADOS::handle_overwrite(const DoutPrefixProvider *dpp, int ret; if 
(!new_sync_enabled) { - ret = svc.bilog->log_stop(info, -1); + ret = svc.bilog->log_stop(dpp, info, -1); } else { - ret = svc.bilog->log_start(info, -1); + ret = svc.bilog->log_start(dpp, info, -1); } if (ret < 0) { - lderr(cct) << "ERROR: failed writing bilog (bucket=" << info.bucket << "); ret=" << ret << dendl; + ldpp_dout(dpp, -1) << "ERROR: failed writing bilog (bucket=" << info.bucket << "); ret=" << ret << dendl; return ret; } diff --git a/src/rgw/services/svc_bi_rados.h b/src/rgw/services/svc_bi_rados.h index a6956381fa3..9037f43c8af 100644 --- a/src/rgw/services/svc_bi_rados.h +++ b/src/rgw/services/svc_bi_rados.h @@ -36,13 +36,16 @@ class RGWSI_BucketIndex_RADOS : public RGWSI_BucketIndex { friend class RGWSI_BILog_RADOS; - int open_pool(const rgw_pool& pool, + int open_pool(const DoutPrefixProvider *dpp, + const rgw_pool& pool, RGWSI_RADOS::Pool *index_pool, bool mostly_omap); - int open_bucket_index_pool(const RGWBucketInfo& bucket_info, + int open_bucket_index_pool(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, RGWSI_RADOS::Pool *index_pool); - int open_bucket_index_base(const RGWBucketInfo& bucket_info, + int open_bucket_index_base(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, RGWSI_RADOS::Pool *index_pool, string *bucket_oid_base); @@ -55,7 +58,8 @@ class RGWSI_BucketIndex_RADOS : public RGWSI_BucketIndex uint32_t num_shards, rgw::BucketHashType hash_type, string *bucket_obj, int *shard_id); - int cls_bucket_head(const RGWBucketInfo& bucket_info, + int cls_bucket_head(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, int shard_id, vector *headers, map *bucket_instance_ids, @@ -92,37 +96,42 @@ public: return rgw_shards_mod(sid2, num_shards); } - int init_index(RGWBucketInfo& bucket_info); - int clean_index(RGWBucketInfo& bucket_info); + int init_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info); + int clean_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info); /* 
RADOS specific */ - int read_stats(const RGWBucketInfo& bucket_info, + int read_stats(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, RGWBucketEnt *stats, optional_yield y) override; - int get_reshard_status(const RGWBucketInfo& bucket_info, + int get_reshard_status(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, std::list *status); int handle_overwrite(const DoutPrefixProvider *dpp, const RGWBucketInfo& info, const RGWBucketInfo& orig_info) override; - int open_bucket_index_shard(const RGWBucketInfo& bucket_info, + int open_bucket_index_shard(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, const string& obj_key, RGWSI_RADOS::Obj *bucket_obj, int *shard_id); - int open_bucket_index_shard(const RGWBucketInfo& bucket_info, + int open_bucket_index_shard(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, int shard_id, const rgw::bucket_index_layout_generation& idx_layout, RGWSI_RADOS::Obj *bucket_obj); - int open_bucket_index(const RGWBucketInfo& bucket_info, + int open_bucket_index(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, RGWSI_RADOS::Pool *index_pool, string *bucket_oid); - int open_bucket_index(const RGWBucketInfo& bucket_info, + int open_bucket_index(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, std::optional shard_id, RGWSI_RADOS::Pool *index_pool, map *bucket_objs, diff --git a/src/rgw/services/svc_bilog_rados.cc b/src/rgw/services/svc_bilog_rados.cc index 13368e24d6c..06cf5ce7a67 100644 --- a/src/rgw/services/svc_bilog_rados.cc +++ b/src/rgw/services/svc_bilog_rados.cc @@ -18,7 +18,7 @@ void RGWSI_BILog_RADOS::init(RGWSI_BucketIndex_RADOS *bi_rados_svc) svc.bi = bi_rados_svc; } -int RGWSI_BILog_RADOS::log_trim(const RGWBucketInfo& bucket_info, int shard_id, string& start_marker, string& end_marker) +int RGWSI_BILog_RADOS::log_trim(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, int shard_id, string& start_marker, string& 
end_marker) { RGWSI_RADOS::Pool index_pool; map bucket_objs; @@ -26,7 +26,7 @@ int RGWSI_BILog_RADOS::log_trim(const RGWBucketInfo& bucket_info, int shard_id, BucketIndexShardsManager start_marker_mgr; BucketIndexShardsManager end_marker_mgr; - int r = svc.bi->open_bucket_index(bucket_info, shard_id, &index_pool, &bucket_objs, nullptr); + int r = svc.bi->open_bucket_index(dpp, bucket_info, shard_id, &index_pool, &bucket_objs, nullptr); if (r < 0) { return r; } @@ -45,22 +45,22 @@ int RGWSI_BILog_RADOS::log_trim(const RGWBucketInfo& bucket_info, int shard_id, cct->_conf->rgw_bucket_index_max_aio)(); } -int RGWSI_BILog_RADOS::log_start(const RGWBucketInfo& bucket_info, int shard_id) +int RGWSI_BILog_RADOS::log_start(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, int shard_id) { RGWSI_RADOS::Pool index_pool; map bucket_objs; - int r = svc.bi->open_bucket_index(bucket_info, shard_id, &index_pool, &bucket_objs, nullptr); + int r = svc.bi->open_bucket_index(dpp, bucket_info, shard_id, &index_pool, &bucket_objs, nullptr); if (r < 0) return r; return CLSRGWIssueResyncBucketBILog(index_pool.ioctx(), bucket_objs, cct->_conf->rgw_bucket_index_max_aio)(); } -int RGWSI_BILog_RADOS::log_stop(const RGWBucketInfo& bucket_info, int shard_id) +int RGWSI_BILog_RADOS::log_stop(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, int shard_id) { RGWSI_RADOS::Pool index_pool; map bucket_objs; - int r = svc.bi->open_bucket_index(bucket_info, shard_id, &index_pool, &bucket_objs, nullptr); + int r = svc.bi->open_bucket_index(dpp, bucket_info, shard_id, &index_pool, &bucket_objs, nullptr); if (r < 0) return r; @@ -77,16 +77,16 @@ static void build_bucket_index_marker(const string& shard_id_str, } } -int RGWSI_BILog_RADOS::log_list(const RGWBucketInfo& bucket_info, int shard_id, string& marker, uint32_t max, +int RGWSI_BILog_RADOS::log_list(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, int shard_id, string& marker, uint32_t max, std::list& 
result, bool *truncated) { - ldout(cct, 20) << __func__ << ": " << bucket_info.bucket << " marker " << marker << " shard_id=" << shard_id << " max " << max << dendl; + ldpp_dout(dpp, 20) << __func__ << ": " << bucket_info.bucket << " marker " << marker << " shard_id=" << shard_id << " max " << max << dendl; result.clear(); RGWSI_RADOS::Pool index_pool; map oids; map bi_log_lists; - int r = svc.bi->open_bucket_index(bucket_info, shard_id, &index_pool, &oids, nullptr); + int r = svc.bi->open_bucket_index(dpp, bucket_info, shard_id, &index_pool, &oids, nullptr); if (r < 0) return r; @@ -175,14 +175,15 @@ int RGWSI_BILog_RADOS::log_list(const RGWBucketInfo& bucket_info, int shard_id, return 0; } -int RGWSI_BILog_RADOS::get_log_status(const RGWBucketInfo& bucket_info, +int RGWSI_BILog_RADOS::get_log_status(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, int shard_id, map *markers, optional_yield y) { vector headers; map bucket_instance_ids; - int r = svc.bi->cls_bucket_head(bucket_info, shard_id, &headers, &bucket_instance_ids, y); + int r = svc.bi->cls_bucket_head(dpp, bucket_info, shard_id, &headers, &bucket_instance_ids, y); if (r < 0) return r; diff --git a/src/rgw/services/svc_bilog_rados.h b/src/rgw/services/svc_bilog_rados.h index 2691d209253..84f5679af10 100644 --- a/src/rgw/services/svc_bilog_rados.h +++ b/src/rgw/services/svc_bilog_rados.h @@ -35,21 +35,24 @@ public: void init(RGWSI_BucketIndex_RADOS *bi_rados_svc); - int log_start(const RGWBucketInfo& bucket_info, int shard_id); - int log_stop(const RGWBucketInfo& bucket_info, int shard_id); + int log_start(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, int shard_id); + int log_stop(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, int shard_id); - int log_trim(const RGWBucketInfo& bucket_info, + int log_trim(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, int shard_id, std::string& start_marker, std::string& end_marker); - int 
log_list(const RGWBucketInfo& bucket_info, + int log_list(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, int shard_id, std::string& marker, uint32_t max, std::list& result, bool *truncated); - int get_log_status(const RGWBucketInfo& bucket_info, + int get_log_status(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, int shard_id, map *markers, optional_yield y); diff --git a/src/rgw/services/svc_bucket_sobj.cc b/src/rgw/services/svc_bucket_sobj.cc index e513bbff330..13aeedf03d6 100644 --- a/src/rgw/services/svc_bucket_sobj.cc +++ b/src/rgw/services/svc_bucket_sobj.cc @@ -324,7 +324,7 @@ int RGWSI_Bucket_SObj::read_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx, } /* chain to only bucket instance and *not* bucket entrypoint */ - if (!binfo_cache->put(svc.cache, cache_key, &e, {&ci})) { + if (!binfo_cache->put(dpp, svc.cache, cache_key, &e, {&ci})) { ldpp_dout(dpp, 20) << "couldn't put binfo cache entry, might have raced with data changes" << dendl; } @@ -469,7 +469,7 @@ int RGWSI_Bucket_SObj::read_bucket_info(RGWSI_Bucket_X_Ctx& ctx, *pattrs = e.attrs; /* chain to both bucket entry point and bucket instance */ - if (!binfo_cache->put(svc.cache, cache_key, &e, {&entry_cache_info, &cache_info})) { + if (!binfo_cache->put(dpp, svc.cache, cache_key, &e, {&entry_cache_info, &cache_info})) { ldpp_dout(dpp, 20) << "couldn't put binfo cache entry, might have raced with data changes" << dendl; } @@ -537,7 +537,7 @@ int RGWSI_Bucket_SObj::store_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx, int ret = svc.meta_be->put(ctx.get(), key, params, &info.objv_tracker, y, dpp); if (ret >= 0) { - int r = svc.bucket_sync->handle_bi_update(info, + int r = svc.bucket_sync->handle_bi_update(dpp, info, orig_info.value_or(nullptr), y); if (r < 0) { @@ -570,14 +570,14 @@ int RGWSI_Bucket_SObj::remove_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx, const DoutPrefixProvider *dpp) { RGWSI_MBSObj_RemoveParams params; - int ret = 
svc.meta_be->remove_entry(ctx.get(), key, params, objv_tracker, y); + int ret = svc.meta_be->remove_entry(dpp, ctx.get(), key, params, objv_tracker, y); if (ret < 0 && ret != -ENOENT) { return ret; } - int r = svc.bucket_sync->handle_bi_removal(info, y); + int r = svc.bucket_sync->handle_bi_removal(dpp, info, y); if (r < 0) { ldpp_dout(dpp, 0) << "ERROR: failed to update bucket instance sync index: r=" << r << dendl; /* returning success as index is just keeping hints, so will keep extra hints, @@ -599,7 +599,7 @@ int RGWSI_Bucket_SObj::read_bucket_stats(const RGWBucketInfo& bucket_info, vector headers; - int r = svc.bi->read_stats(bucket_info, ent, y); + int r = svc.bi->read_stats(dpp, bucket_info, ent, y); if (r < 0) { ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): read_stats returned r=" << r << dendl; return r; diff --git a/src/rgw/services/svc_bucket_sync.h b/src/rgw/services/svc_bucket_sync.h index c09cc97dff0..d90856b7afe 100644 --- a/src/rgw/services/svc_bucket_sync.h +++ b/src/rgw/services/svc_bucket_sync.h @@ -37,13 +37,16 @@ public: optional_yield y, const DoutPrefixProvider *dpp) = 0; - virtual int handle_bi_update(RGWBucketInfo& bucket_info, + virtual int handle_bi_update(const DoutPrefixProvider *dpp, + RGWBucketInfo& bucket_info, RGWBucketInfo *orig_bucket_info, optional_yield y) = 0; - virtual int handle_bi_removal(const RGWBucketInfo& bucket_info, + virtual int handle_bi_removal(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, optional_yield y) = 0; - virtual int get_bucket_sync_hints(const rgw_bucket& bucket, + virtual int get_bucket_sync_hints(const DoutPrefixProvider *dpp, + const rgw_bucket& bucket, std::set *sources, std::set *dests, optional_yield y) = 0; diff --git a/src/rgw/services/svc_bucket_sync_sobj.cc b/src/rgw/services/svc_bucket_sync_sobj.cc index 0f0285360e8..88503344215 100644 --- a/src/rgw/services/svc_bucket_sync_sobj.cc +++ b/src/rgw/services/svc_bucket_sync_sobj.cc @@ -33,7 +33,8 @@ public: rgw_raw_obj 
get_dests_obj(const rgw_bucket& bucket) const; template - int update_hints(const RGWBucketInfo& bucket_info, + int update_hints(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, C1& added_dests, C2& removed_dests, C1& added_sources, @@ -215,7 +216,7 @@ int RGWSI_Bucket_Sync_SObj::do_get_policy_handler(RGWSI_Bucket_X_Ctx& ctx, e.handler.reset(zone_policy_handler->alloc_child(bucket_info, std::move(attrs))); - r = e.handler->init(y); + r = e.handler->init(dpp, y); if (r < 0) { ldpp_dout(dpp, 20) << "ERROR: failed to init bucket sync policy handler: r=" << r << dendl; return r; @@ -234,7 +235,7 @@ int RGWSI_Bucket_Sync_SObj::do_get_policy_handler(RGWSI_Bucket_X_Ctx& ctx, return r; } - if (!sync_policy_cache->put(svc.cache, cache_key, &e, {&cache_info})) { + if (!sync_policy_cache->put(dpp, svc.cache, cache_key, &e, {&cache_info})) { ldpp_dout(dpp, 20) << "couldn't put bucket_sync_policy cache entry, might have raced with data changes" << dendl; } @@ -474,7 +475,8 @@ public: } template - int update(const rgw_bucket& entity, + int update(const DoutPrefixProvider *dpp, + const rgw_bucket& entity, const RGWBucketInfo& info_source, C1 *add, C2 *remove, @@ -488,8 +490,8 @@ private: C2 *remove, single_instance_info *instance); - int read(optional_yield y); - int flush(optional_yield y); + int read(const DoutPrefixProvider *dpp, optional_yield y); + int flush(const DoutPrefixProvider *dpp, optional_yield y); void invalidate() { has_data = false; @@ -506,7 +508,8 @@ WRITE_CLASS_ENCODER(RGWSI_BS_SObj_HintIndexObj::single_instance_info) WRITE_CLASS_ENCODER(RGWSI_BS_SObj_HintIndexObj::info_map) template -int RGWSI_BS_SObj_HintIndexObj::update(const rgw_bucket& entity, +int RGWSI_BS_SObj_HintIndexObj::update(const DoutPrefixProvider *dpp, + const rgw_bucket& entity, const RGWBucketInfo& info_source, C1 *add, C2 *remove, @@ -520,9 +523,9 @@ int RGWSI_BS_SObj_HintIndexObj::update(const rgw_bucket& entity, for (int i = 0; i < MAX_RETRIES; ++i) { if (!has_data) { - r 
= read(y); + r = read(dpp, y); if (r < 0) { - ldout(cct, 0) << "ERROR: cannot update hint index: failed to read: r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: cannot update hint index: failed to read: r=" << r << dendl; return r; } } @@ -538,19 +541,19 @@ int RGWSI_BS_SObj_HintIndexObj::update(const rgw_bucket& entity, info.instances.erase(entity); } - r = flush(y); + r = flush(dpp, y); if (r >= 0) { return 0; } if (r != -ECANCELED) { - ldout(cct, 0) << "ERROR: failed to flush hint index: obj=" << obj << " r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to flush hint index: obj=" << obj << " r=" << r << dendl; return r; } invalidate(); } - ldout(cct, 0) << "ERROR: failed to flush hint index: too many retries (obj=" << obj << "), likely a bug" << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to flush hint index: too many retries (obj=" << obj << "), likely a bug" << dendl; return -EIO; } @@ -575,14 +578,14 @@ void RGWSI_BS_SObj_HintIndexObj::update_entries(const rgw_bucket& info_source, } } -int RGWSI_BS_SObj_HintIndexObj::read(optional_yield y) { +int RGWSI_BS_SObj_HintIndexObj::read(const DoutPrefixProvider *dpp, optional_yield y) { RGWObjVersionTracker _ot; bufferlist bl; int r = sysobj.rop() .set_objv_tracker(&_ot) /* forcing read of current version */ - .read(&bl, y); + .read(dpp, &bl, y); if (r < 0 && r != -ENOENT) { - ldout(cct, 0) << "ERROR: failed reading data (obj=" << obj << "), r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed reading data (obj=" << obj << "), r=" << r << dendl; return r; } @@ -594,7 +597,7 @@ int RGWSI_BS_SObj_HintIndexObj::read(optional_yield y) { decode(info, iter); has_data = true; } catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: " << __func__ << "(): failed to decode entries, ignoring" << dendl; + ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): failed to decode entries, ignoring" << dendl; info.clear(); } } else { @@ -604,7 +607,7 @@ int RGWSI_BS_SObj_HintIndexObj::read(optional_yield y) { return 0; } 
-int RGWSI_BS_SObj_HintIndexObj::flush(optional_yield y) { +int RGWSI_BS_SObj_HintIndexObj::flush(const DoutPrefixProvider *dpp, optional_yield y) { int r; if (!info.empty()) { @@ -613,12 +616,12 @@ int RGWSI_BS_SObj_HintIndexObj::flush(optional_yield y) { r = sysobj.wop() .set_objv_tracker(&ot) /* forcing read of current version */ - .write(bl, y); + .write(dpp, bl, y); } else { /* remove */ r = sysobj.wop() .set_objv_tracker(&ot) - .remove(y); + .remove(dpp, y); } if (r < 0) { @@ -645,7 +648,8 @@ rgw_raw_obj RGWSI_Bucket_Sync_SObj_HintIndexManager::get_dests_obj(const rgw_buc } template -int RGWSI_Bucket_Sync_SObj_HintIndexManager::update_hints(const RGWBucketInfo& bucket_info, +int RGWSI_Bucket_Sync_SObj_HintIndexManager::update_hints(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, C1& added_dests, C2& removed_dests, C1& added_sources, @@ -659,13 +663,13 @@ int RGWSI_Bucket_Sync_SObj_HintIndexManager::update_hints(const RGWBucketInfo& b /* update our dests */ RGWSI_BS_SObj_HintIndexObj index(svc.sysobj, get_dests_obj(bucket_info.bucket)); - int r = index.update(bucket_info.bucket, + int r = index.update(dpp, bucket_info.bucket, bucket_info, &added_dests, &removed_dests, y); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to update targets index for bucket=" << bucket_info.bucket << " r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to update targets index for bucket=" << bucket_info.bucket << " r=" << r << dendl; return r; } @@ -673,13 +677,13 @@ int RGWSI_Bucket_Sync_SObj_HintIndexManager::update_hints(const RGWBucketInfo& b for (auto& dest_bucket : added_dests) { RGWSI_BS_SObj_HintIndexObj dep_index(svc.sysobj, get_sources_obj(dest_bucket)); - int r = dep_index.update(dest_bucket, + int r = dep_index.update(dpp, dest_bucket, bucket_info, &self_entity, static_cast(nullptr), y); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to update targets index for bucket=" << dest_bucket << " r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed 
to update targets index for bucket=" << dest_bucket << " r=" << r << dendl; return r; } } @@ -687,13 +691,13 @@ int RGWSI_Bucket_Sync_SObj_HintIndexManager::update_hints(const RGWBucketInfo& b for (auto& dest_bucket : removed_dests) { RGWSI_BS_SObj_HintIndexObj dep_index(svc.sysobj, get_sources_obj(dest_bucket)); - int r = dep_index.update(dest_bucket, + int r = dep_index.update(dpp, dest_bucket, bucket_info, static_cast(nullptr), &self_entity, y); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to update targets index for bucket=" << dest_bucket << " r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to update targets index for bucket=" << dest_bucket << " r=" << r << dendl; return r; } } @@ -704,13 +708,13 @@ int RGWSI_Bucket_Sync_SObj_HintIndexManager::update_hints(const RGWBucketInfo& b RGWSI_BS_SObj_HintIndexObj index(svc.sysobj, get_sources_obj(bucket_info.bucket)); /* update our sources */ - int r = index.update(bucket_info.bucket, + int r = index.update(dpp, bucket_info.bucket, bucket_info, &added_sources, &removed_sources, y); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to update targets index for bucket=" << bucket_info.bucket << " r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to update targets index for bucket=" << bucket_info.bucket << " r=" << r << dendl; return r; } @@ -718,13 +722,13 @@ int RGWSI_Bucket_Sync_SObj_HintIndexManager::update_hints(const RGWBucketInfo& b for (auto& source_bucket : added_sources) { RGWSI_BS_SObj_HintIndexObj dep_index(svc.sysobj, get_dests_obj(source_bucket)); - int r = dep_index.update(source_bucket, + int r = dep_index.update(dpp, source_bucket, bucket_info, &self_entity, static_cast(nullptr), y); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to update targets index for bucket=" << source_bucket << " r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to update targets index for bucket=" << source_bucket << " r=" << r << dendl; return r; } } @@ -732,13 +736,13 @@ int 
RGWSI_Bucket_Sync_SObj_HintIndexManager::update_hints(const RGWBucketInfo& b for (auto& source_bucket : removed_sources) { RGWSI_BS_SObj_HintIndexObj dep_index(svc.sysobj, get_dests_obj(source_bucket)); - int r = dep_index.update(source_bucket, + int r = dep_index.update(dpp, source_bucket, bucket_info, static_cast(nullptr), &self_entity, y); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to update targets index for bucket=" << source_bucket << " r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to update targets index for bucket=" << source_bucket << " r=" << r << dendl; return r; } } @@ -747,7 +751,8 @@ int RGWSI_Bucket_Sync_SObj_HintIndexManager::update_hints(const RGWBucketInfo& b return 0; } -int RGWSI_Bucket_Sync_SObj::handle_bi_removal(const RGWBucketInfo& bucket_info, +int RGWSI_Bucket_Sync_SObj::handle_bi_removal(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, optional_yield y) { std::set sources_set; @@ -774,7 +779,7 @@ int RGWSI_Bucket_Sync_SObj::handle_bi_removal(const RGWBucketInfo& bucket_info, std::vector added_sources; std::vector added_dests; - return hint_index_mgr->update_hints(bucket_info, + return hint_index_mgr->update_hints(dpp, bucket_info, added_dests, removed_dests, added_sources, @@ -782,7 +787,8 @@ int RGWSI_Bucket_Sync_SObj::handle_bi_removal(const RGWBucketInfo& bucket_info, y); } -int RGWSI_Bucket_Sync_SObj::handle_bi_update(RGWBucketInfo& bucket_info, +int RGWSI_Bucket_Sync_SObj::handle_bi_update(const DoutPrefixProvider *dpp, + RGWBucketInfo& bucket_info, RGWBucketInfo *orig_bucket_info, optional_yield y) { @@ -807,21 +813,21 @@ int RGWSI_Bucket_Sync_SObj::handle_bi_update(RGWBucketInfo& bucket_info, std::vector removed_sources; std::vector added_sources; bool found = diff_sets(orig_sources, sources, &added_sources, &removed_sources); - ldout(cct, 20) << __func__ << "(): bucket=" << bucket_info.bucket << ": orig_sources=" << orig_sources << " new_sources=" << sources << dendl; - ldout(cct, 20) << __func__ 
<< "(): bucket=" << bucket_info.bucket << ": potential sources added=" << added_sources << " removed=" << removed_sources << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): bucket=" << bucket_info.bucket << ": orig_sources=" << orig_sources << " new_sources=" << sources << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): bucket=" << bucket_info.bucket << ": potential sources added=" << added_sources << " removed=" << removed_sources << dendl; std::vector removed_dests; std::vector added_dests; found = found || diff_sets(orig_dests, dests, &added_dests, &removed_dests); - ldout(cct, 20) << __func__ << "(): bucket=" << bucket_info.bucket << ": orig_dests=" << orig_dests << " new_dests=" << dests << dendl; - ldout(cct, 20) << __func__ << "(): bucket=" << bucket_info.bucket << ": potential dests added=" << added_dests << " removed=" << removed_dests << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): bucket=" << bucket_info.bucket << ": orig_dests=" << orig_dests << " new_dests=" << dests << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): bucket=" << bucket_info.bucket << ": potential dests added=" << added_dests << " removed=" << removed_dests << dendl; if (!found) { return 0; } - return hint_index_mgr->update_hints(bucket_info, + return hint_index_mgr->update_hints(dpp, bucket_info, dests, /* set all dests, not just the ones that were added */ removed_dests, sources, /* set all sources, not just that the ones that were added */ @@ -829,7 +835,8 @@ int RGWSI_Bucket_Sync_SObj::handle_bi_update(RGWBucketInfo& bucket_info, y); } -int RGWSI_Bucket_Sync_SObj::get_bucket_sync_hints(const rgw_bucket& bucket, +int RGWSI_Bucket_Sync_SObj::get_bucket_sync_hints(const DoutPrefixProvider *dpp, + const rgw_bucket& bucket, std::set *sources, std::set *dests, optional_yield y) @@ -841,9 +848,9 @@ int RGWSI_Bucket_Sync_SObj::get_bucket_sync_hints(const rgw_bucket& bucket, if (sources) { RGWSI_BS_SObj_HintIndexObj index(svc.sysobj, hint_index_mgr->get_sources_obj(bucket)); - int r = 
index.read(y); + int r = index.read(dpp, y); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to update sources index for bucket=" << bucket << " r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to update sources index for bucket=" << bucket << " r=" << r << dendl; return r; } @@ -859,9 +866,9 @@ int RGWSI_Bucket_Sync_SObj::get_bucket_sync_hints(const rgw_bucket& bucket, if (dests) { RGWSI_BS_SObj_HintIndexObj index(svc.sysobj, hint_index_mgr->get_dests_obj(bucket)); - int r = index.read(y); + int r = index.read(dpp, y); if (r < 0) { - ldout(cct, 0) << "ERROR: failed to read targets index for bucket=" << bucket << " r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to read targets index for bucket=" << bucket << " r=" << r << dendl; return r; } diff --git a/src/rgw/services/svc_bucket_sync_sobj.h b/src/rgw/services/svc_bucket_sync_sobj.h index 5cd8ffd1f81..60786665d1d 100644 --- a/src/rgw/services/svc_bucket_sync_sobj.h +++ b/src/rgw/services/svc_bucket_sync_sobj.h @@ -106,13 +106,16 @@ public: optional_yield y, const DoutPrefixProvider *dpp); - int handle_bi_update(RGWBucketInfo& bucket_info, + int handle_bi_update(const DoutPrefixProvider *dpp, + RGWBucketInfo& bucket_info, RGWBucketInfo *orig_bucket_info, optional_yield y) override; - int handle_bi_removal(const RGWBucketInfo& bucket_info, + int handle_bi_removal(const DoutPrefixProvider *dpp, + const RGWBucketInfo& bucket_info, optional_yield y) override; - int get_bucket_sync_hints(const rgw_bucket& bucket, + int get_bucket_sync_hints(const DoutPrefixProvider *dpp, + const rgw_bucket& bucket, std::set *sources, std::set *dests, optional_yield y) override; diff --git a/src/rgw/services/svc_cls.cc b/src/rgw/services/svc_cls.cc index a952ede7c0a..d2aaa6d889d 100644 --- a/src/rgw/services/svc_cls.cc +++ b/src/rgw/services/svc_cls.cc @@ -28,25 +28,25 @@ int RGWSI_Cls::do_start(optional_yield y, const DoutPrefixProvider *dpp) return 0; } -int RGWSI_Cls::MFA::get_mfa_obj(const rgw_user& user, 
std::optional *obj) +int RGWSI_Cls::MFA::get_mfa_obj(const DoutPrefixProvider *dpp, const rgw_user& user, std::optional *obj) { string oid = get_mfa_oid(user); rgw_raw_obj o(zone_svc->get_zone_params().otp_pool, oid); obj->emplace(rados_svc->obj(o)); - int r = (*obj)->open(); + int r = (*obj)->open(dpp); if (r < 0) { - ldout(cct, 4) << "failed to open rados context for " << o << dendl; + ldpp_dout(dpp, 4) << "failed to open rados context for " << o << dendl; return r; } return 0; } -int RGWSI_Cls::MFA::get_mfa_ref(const rgw_user& user, rgw_rados_ref *ref) +int RGWSI_Cls::MFA::get_mfa_ref(const DoutPrefixProvider *dpp, const rgw_user& user, rgw_rados_ref *ref) { std::optional obj; - int r = get_mfa_obj(user, &obj); + int r = get_mfa_obj(dpp, user, &obj); if (r < 0) { return r; } @@ -54,10 +54,10 @@ int RGWSI_Cls::MFA::get_mfa_ref(const rgw_user& user, rgw_rados_ref *ref) return 0; } -int RGWSI_Cls::MFA::check_mfa(const rgw_user& user, const string& otp_id, const string& pin, optional_yield y) +int RGWSI_Cls::MFA::check_mfa(const DoutPrefixProvider *dpp, const rgw_user& user, const string& otp_id, const string& pin, optional_yield y) { rgw_rados_ref ref; - int r = get_mfa_ref(user, &ref); + int r = get_mfa_ref(dpp, user, &ref); if (r < 0) { return r; } @@ -68,7 +68,7 @@ int RGWSI_Cls::MFA::check_mfa(const rgw_user& user, const string& otp_id, const if (r < 0) return r; - ldout(cct, 20) << "OTP check, otp_id=" << otp_id << " result=" << (int)result.result << dendl; + ldpp_dout(dpp, 20) << "OTP check, otp_id=" << otp_id << " result=" << (int)result.result << dendl; return (result.result == rados::cls::otp::OTP_CHECK_SUCCESS ? 
0 : -EACCES); } @@ -97,11 +97,11 @@ void RGWSI_Cls::MFA::prepare_mfa_write(librados::ObjectWriteOperation *op, op->mtime2(&mtime_ts); } -int RGWSI_Cls::MFA::create_mfa(const rgw_user& user, const rados::cls::otp::otp_info_t& config, +int RGWSI_Cls::MFA::create_mfa(const DoutPrefixProvider *dpp, const rgw_user& user, const rados::cls::otp::otp_info_t& config, RGWObjVersionTracker *objv_tracker, const ceph::real_time& mtime, optional_yield y) { std::optional obj; - int r = get_mfa_obj(user, &obj); + int r = get_mfa_obj(dpp, user, &obj); if (r < 0) { return r; } @@ -109,22 +109,23 @@ int RGWSI_Cls::MFA::create_mfa(const rgw_user& user, const rados::cls::otp::otp_ librados::ObjectWriteOperation op; prepare_mfa_write(&op, objv_tracker, mtime); rados::cls::otp::OTP::create(&op, config); - r = obj->operate(&op, y); + r = obj->operate(dpp, &op, y); if (r < 0) { - ldout(cct, 20) << "OTP create, otp_id=" << config.id << " result=" << (int)r << dendl; + ldpp_dout(dpp, 20) << "OTP create, otp_id=" << config.id << " result=" << (int)r << dendl; return r; } return 0; } -int RGWSI_Cls::MFA::remove_mfa(const rgw_user& user, const string& id, +int RGWSI_Cls::MFA::remove_mfa(const DoutPrefixProvider *dpp, + const rgw_user& user, const string& id, RGWObjVersionTracker *objv_tracker, const ceph::real_time& mtime, optional_yield y) { std::optional obj; - int r = get_mfa_obj(user, &obj); + int r = get_mfa_obj(dpp, user, &obj); if (r < 0) { return r; } @@ -132,21 +133,21 @@ int RGWSI_Cls::MFA::remove_mfa(const rgw_user& user, const string& id, librados::ObjectWriteOperation op; prepare_mfa_write(&op, objv_tracker, mtime); rados::cls::otp::OTP::remove(&op, id); - r = obj->operate(&op, y); + r = obj->operate(dpp, &op, y); if (r < 0) { - ldout(cct, 20) << "OTP remove, otp_id=" << id << " result=" << (int)r << dendl; + ldpp_dout(dpp, 20) << "OTP remove, otp_id=" << id << " result=" << (int)r << dendl; return r; } return 0; } -int RGWSI_Cls::MFA::get_mfa(const rgw_user& user, const string& 
id, rados::cls::otp::otp_info_t *result, +int RGWSI_Cls::MFA::get_mfa(const DoutPrefixProvider *dpp, const rgw_user& user, const string& id, rados::cls::otp::otp_info_t *result, optional_yield y) { rgw_rados_ref ref; - int r = get_mfa_ref(user, &ref); + int r = get_mfa_ref(dpp, user, &ref); if (r < 0) { return r; } @@ -159,12 +160,12 @@ int RGWSI_Cls::MFA::get_mfa(const rgw_user& user, const string& id, rados::cls:: return 0; } -int RGWSI_Cls::MFA::list_mfa(const rgw_user& user, list *result, +int RGWSI_Cls::MFA::list_mfa(const DoutPrefixProvider *dpp, const rgw_user& user, list *result, optional_yield y) { rgw_rados_ref ref; - int r = get_mfa_ref(user, &ref); + int r = get_mfa_ref(dpp, user, &ref); if (r < 0) { return r; } @@ -177,12 +178,12 @@ int RGWSI_Cls::MFA::list_mfa(const rgw_user& user, list& entries, +int RGWSI_Cls::MFA::set_mfa(const DoutPrefixProvider *dpp, const string& oid, const list& entries, bool reset_obj, RGWObjVersionTracker *objv_tracker, const real_time& mtime, optional_yield y) { rgw_raw_obj o(zone_svc->get_zone_params().otp_pool, oid); auto obj = rados_svc->obj(o); - int r = obj.open(); + int r = obj.open(dpp); if (r < 0) { - ldout(cct, 4) << "failed to open rados context for " << o << dendl; + ldpp_dout(dpp, 4) << "failed to open rados context for " << o << dendl; return r; } librados::ObjectWriteOperation op; @@ -215,24 +216,24 @@ int RGWSI_Cls::MFA::set_mfa(const string& oid, const listget_zone_params().log_pool, oid); obj = rados_svc->obj(o); - return obj.open(); + return obj.open(dpp); } -int RGWSI_Cls::TimeLog::add(const string& oid, +int RGWSI_Cls::TimeLog::add(const DoutPrefixProvider *dpp, + const string& oid, const real_time& ut, const string& section, const string& key, @@ -278,7 +280,7 @@ int RGWSI_Cls::TimeLog::add(const string& oid, { RGWSI_RADOS::Obj obj; - int r = init_obj(oid, obj); + int r = init_obj(dpp, oid, obj); if (r < 0) { return r; } @@ -287,10 +289,11 @@ int RGWSI_Cls::TimeLog::add(const string& oid, utime_t t(ut); 
cls_log_add(op, t, section, key, bl); - return obj.operate(&op, y); + return obj.operate(dpp, &op, y); } -int RGWSI_Cls::TimeLog::add(const string& oid, +int RGWSI_Cls::TimeLog::add(const DoutPrefixProvider *dpp, + const string& oid, std::list& entries, librados::AioCompletion *completion, bool monotonic_inc, @@ -298,7 +301,7 @@ int RGWSI_Cls::TimeLog::add(const string& oid, { RGWSI_RADOS::Obj obj; - int r = init_obj(oid, obj); + int r = init_obj(dpp, oid, obj); if (r < 0) { return r; } @@ -307,14 +310,15 @@ int RGWSI_Cls::TimeLog::add(const string& oid, cls_log_add(op, entries, monotonic_inc); if (!completion) { - r = obj.operate(&op, y); + r = obj.operate(dpp, &op, y); } else { r = obj.aio_operate(completion, &op); } return r; } -int RGWSI_Cls::TimeLog::list(const string& oid, +int RGWSI_Cls::TimeLog::list(const DoutPrefixProvider *dpp, + const string& oid, const real_time& start_time, const real_time& end_time, int max_entries, std::list& entries, @@ -325,7 +329,7 @@ int RGWSI_Cls::TimeLog::list(const string& oid, { RGWSI_RADOS::Obj obj; - int r = init_obj(oid, obj); + int r = init_obj(dpp, oid, obj); if (r < 0) { return r; } @@ -340,20 +344,21 @@ int RGWSI_Cls::TimeLog::list(const string& oid, bufferlist obl; - int ret = obj.operate(&op, &obl, y); + int ret = obj.operate(dpp, &op, &obl, y); if (ret < 0) return ret; return 0; } -int RGWSI_Cls::TimeLog::info(const string& oid, +int RGWSI_Cls::TimeLog::info(const DoutPrefixProvider *dpp, + const string& oid, cls_log_header *header, optional_yield y) { RGWSI_RADOS::Obj obj; - int r = init_obj(oid, obj); + int r = init_obj(dpp, oid, obj); if (r < 0) { return r; } @@ -364,19 +369,20 @@ int RGWSI_Cls::TimeLog::info(const string& oid, bufferlist obl; - int ret = obj.operate(&op, &obl, y); + int ret = obj.operate(dpp, &op, &obl, y); if (ret < 0) return ret; return 0; } -int RGWSI_Cls::TimeLog::info_async(RGWSI_RADOS::Obj& obj, +int RGWSI_Cls::TimeLog::info_async(const DoutPrefixProvider *dpp, + RGWSI_RADOS::Obj& obj, 
const string& oid, cls_log_header *header, librados::AioCompletion *completion) { - int r = init_obj(oid, obj); + int r = init_obj(dpp, oid, obj); if (r < 0) { return r; } @@ -392,7 +398,8 @@ int RGWSI_Cls::TimeLog::info_async(RGWSI_RADOS::Obj& obj, return 0; } -int RGWSI_Cls::TimeLog::trim(const string& oid, +int RGWSI_Cls::TimeLog::trim(const DoutPrefixProvider *dpp, + const string& oid, const real_time& start_time, const real_time& end_time, const string& from_marker, @@ -402,7 +409,7 @@ int RGWSI_Cls::TimeLog::trim(const string& oid, { RGWSI_RADOS::Obj obj; - int r = init_obj(oid, obj); + int r = init_obj(dpp, oid, obj); if (r < 0) { return r; } @@ -414,14 +421,15 @@ int RGWSI_Cls::TimeLog::trim(const string& oid, cls_log_trim(op, st, et, from_marker, to_marker); if (!completion) { - r = obj.operate(&op, y); + r = obj.operate(dpp, &op, y); } else { r = obj.aio_operate(completion, &op); } return r; } -int RGWSI_Cls::Lock::lock_exclusive(const rgw_pool& pool, +int RGWSI_Cls::Lock::lock_exclusive(const DoutPrefixProvider *dpp, + const rgw_pool& pool, const string& oid, timespan& duration, string& zone_id, @@ -429,7 +437,7 @@ int RGWSI_Cls::Lock::lock_exclusive(const rgw_pool& pool, std::optional lock_name) { auto p = rados_svc->pool(pool); - int r = p.open(); + int r = p.open(dpp); if (r < 0) { return r; } @@ -446,14 +454,15 @@ int RGWSI_Cls::Lock::lock_exclusive(const rgw_pool& pool, return l.lock_exclusive(&p.ioctx(), oid); } -int RGWSI_Cls::Lock::unlock(const rgw_pool& pool, +int RGWSI_Cls::Lock::unlock(const DoutPrefixProvider *dpp, + const rgw_pool& pool, const string& oid, string& zone_id, string& owner_id, std::optional lock_name) { auto p = rados_svc->pool(pool); - int r = p.open(); + int r = p.open(dpp); if (r < 0) { return r; } diff --git a/src/rgw/services/svc_cls.h b/src/rgw/services/svc_cls.h index 4e0f1040798..61487b2f954 100644 --- a/src/rgw/services/svc_cls.h +++ b/src/rgw/services/svc_cls.h @@ -48,8 +48,8 @@ class RGWSI_Cls : public 
RGWServiceInstance public: class MFA : public ClsSubService { - int get_mfa_obj(const rgw_user& user, std::optional *obj); - int get_mfa_ref(const rgw_user& user, rgw_rados_ref *ref); + int get_mfa_obj(const DoutPrefixProvider *dpp, const rgw_user& user, std::optional *obj); + int get_mfa_ref(const DoutPrefixProvider *dpp, const rgw_user& user, rgw_rados_ref *ref); void prepare_mfa_write(librados::ObjectWriteOperation *op, RGWObjVersionTracker *objv_tracker, @@ -62,25 +62,26 @@ public: return string("user:") + user.to_str(); } - int check_mfa(const rgw_user& user, const string& otp_id, const string& pin, optional_yield y); - int create_mfa(const rgw_user& user, const rados::cls::otp::otp_info_t& config, + int check_mfa(const DoutPrefixProvider *dpp, const rgw_user& user, const string& otp_id, const string& pin, optional_yield y); + int create_mfa(const DoutPrefixProvider *dpp, const rgw_user& user, const rados::cls::otp::otp_info_t& config, RGWObjVersionTracker *objv_tracker, const ceph::real_time& mtime, optional_yield y); - int remove_mfa(const rgw_user& user, const string& id, + int remove_mfa(const DoutPrefixProvider *dpp, + const rgw_user& user, const string& id, RGWObjVersionTracker *objv_tracker, const ceph::real_time& mtime, optional_yield y); - int get_mfa(const rgw_user& user, const string& id, rados::cls::otp::otp_info_t *result, optional_yield y); - int list_mfa(const rgw_user& user, list *result, optional_yield y); - int otp_get_current_time(const rgw_user& user, ceph::real_time *result, optional_yield y); - int set_mfa(const string& oid, const list& entries, + int get_mfa(const DoutPrefixProvider *dpp, const rgw_user& user, const string& id, rados::cls::otp::otp_info_t *result, optional_yield y); + int list_mfa(const DoutPrefixProvider *dpp, const rgw_user& user, list *result, optional_yield y); + int otp_get_current_time(const DoutPrefixProvider *dpp, const rgw_user& user, ceph::real_time *result, optional_yield y); + int set_mfa(const 
DoutPrefixProvider *dpp, const string& oid, const list& entries, bool reset_obj, RGWObjVersionTracker *objv_tracker, const real_time& mtime, optional_yield y); - int list_mfa(const string& oid, list *result, + int list_mfa(const DoutPrefixProvider *dpp, const string& oid, list *result, RGWObjVersionTracker *objv_tracker, ceph::real_time *pmtime, optional_yield y); } mfa; class TimeLog : public ClsSubService { - int init_obj(const string& oid, RGWSI_RADOS::Obj& obj); + int init_obj(const DoutPrefixProvider *dpp, const string& oid, RGWSI_RADOS::Obj& obj); public: TimeLog(CephContext *cct): ClsSubService(cct) {} @@ -89,18 +90,21 @@ public: const string& section, const string& key, bufferlist& bl); - int add(const string& oid, + int add(const DoutPrefixProvider *dpp, + const string& oid, const real_time& ut, const string& section, const string& key, bufferlist& bl, optional_yield y); - int add(const string& oid, + int add(const DoutPrefixProvider *dpp, + const string& oid, std::list& entries, librados::AioCompletion *completion, bool monotonic_inc, optional_yield y); - int list(const string& oid, + int list(const DoutPrefixProvider *dpp, + const string& oid, const real_time& start_time, const real_time& end_time, int max_entries, list& entries, @@ -108,14 +112,17 @@ public: string *out_marker, bool *truncated, optional_yield y); - int info(const string& oid, + int info(const DoutPrefixProvider *dpp, + const string& oid, cls_log_header *header, optional_yield y); - int info_async(RGWSI_RADOS::Obj& obj, + int info_async(const DoutPrefixProvider *dpp, + RGWSI_RADOS::Obj& obj, const string& oid, cls_log_header *header, librados::AioCompletion *completion); - int trim(const string& oid, + int trim(const DoutPrefixProvider *dpp, + const string& oid, const real_time& start_time, const real_time& end_time, const string& from_marker, @@ -128,13 +135,15 @@ public: int init_obj(const string& oid, RGWSI_RADOS::Obj& obj); public: Lock(CephContext *cct): ClsSubService(cct) {} - int 
lock_exclusive(const rgw_pool& pool, + int lock_exclusive(const DoutPrefixProvider *dpp, + const rgw_pool& pool, const string& oid, timespan& duration, string& zone_id, string& owner_id, std::optional lock_name = std::nullopt); - int unlock(const rgw_pool& pool, + int unlock(const DoutPrefixProvider *dpp, + const rgw_pool& pool, const string& oid, string& zone_id, string& owner_id, diff --git a/src/rgw/services/svc_mdlog.cc b/src/rgw/services/svc_mdlog.cc index 8363da2bce1..f93c44d680e 100644 --- a/src/rgw/services/svc_mdlog.cc +++ b/src/rgw/services/svc_mdlog.cc @@ -73,7 +73,7 @@ int RGWSI_MDLog::read_history(RGWMetadataLogHistory *state, /* bad history object, remove it */ rgw_raw_obj obj(pool, oid); auto sysobj = obj_ctx.get_obj(obj); - ret = sysobj.wop().remove(y); + ret = sysobj.wop().remove(dpp, y); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: meta history is empty, but cannot remove it (" << cpp_strerror(-ret) << ")" << dendl; return ret; @@ -91,7 +91,8 @@ int RGWSI_MDLog::read_history(RGWMetadataLogHistory *state, return 0; } -int RGWSI_MDLog::write_history(const RGWMetadataLogHistory& state, +int RGWSI_MDLog::write_history(const DoutPrefixProvider *dpp, + const RGWMetadataLogHistory& state, RGWObjVersionTracker *objv_tracker, optional_yield y, bool exclusive) { @@ -101,7 +102,7 @@ int RGWSI_MDLog::write_history(const RGWMetadataLogHistory& state, auto& pool = svc.zone->get_zone_params().log_pool; const auto& oid = RGWMetadataLogHistory::oid; auto obj_ctx = svc.sysobj->init_obj_ctx(); - return rgw_put_system_obj(obj_ctx, pool, oid, bl, + return rgw_put_system_obj(dpp, obj_ctx, pool, oid, bl, exclusive, objv_tracker, real_time{}, y); } @@ -111,6 +112,7 @@ using Cursor = RGWPeriodHistory::Cursor; /// read the mdlog history and use it to initialize the given cursor class ReadHistoryCR : public RGWCoroutine { + const DoutPrefixProvider *dpp; Svc svc; Cursor *cursor; RGWObjVersionTracker *objv_tracker; @@ -118,16 +120,17 @@ class ReadHistoryCR : public 
RGWCoroutine { RGWAsyncRadosProcessor *async_processor; public: - ReadHistoryCR(const Svc& svc, + ReadHistoryCR(const DoutPrefixProvider *dpp, + const Svc& svc, Cursor *cursor, RGWObjVersionTracker *objv_tracker) - : RGWCoroutine(svc.zone->ctx()), svc(svc), + : RGWCoroutine(svc.zone->ctx()), dpp(dpp), svc(svc), cursor(cursor), objv_tracker(objv_tracker), async_processor(svc.rados->get_async_processor()) {} - int operate() { + int operate(const DoutPrefixProvider *dpp) { reenter(this) { yield { rgw_raw_obj obj{svc.zone->get_zone_params().log_pool, @@ -135,11 +138,11 @@ class ReadHistoryCR : public RGWCoroutine { constexpr bool empty_on_enoent = false; using ReadCR = RGWSimpleRadosReadCR; - call(new ReadCR(async_processor, svc.sysobj, obj, + call(new ReadCR(dpp, async_processor, svc.sysobj, obj, &state, empty_on_enoent, objv_tracker)); } if (retcode < 0) { - ldout(cct, 1) << "failed to read mdlog history: " + ldpp_dout(dpp, 1) << "failed to read mdlog history: " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); } @@ -148,7 +151,7 @@ class ReadHistoryCR : public RGWCoroutine { return set_cr_error(cursor->get_error()); } - ldout(cct, 10) << "read mdlog history with oldest period id=" + ldpp_dout(dpp, 10) << "read mdlog history with oldest period id=" << state.oldest_period_id << " realm_epoch=" << state.oldest_realm_epoch << dendl; return set_cr_done(); @@ -159,6 +162,7 @@ class ReadHistoryCR : public RGWCoroutine { /// write the given cursor to the mdlog history class WriteHistoryCR : public RGWCoroutine { + const DoutPrefixProvider *dpp; Svc svc; Cursor cursor; RGWObjVersionTracker *objv; @@ -166,15 +170,16 @@ class WriteHistoryCR : public RGWCoroutine { RGWAsyncRadosProcessor *async_processor; public: - WriteHistoryCR(Svc& svc, + WriteHistoryCR(const DoutPrefixProvider *dpp, + Svc& svc, const Cursor& cursor, RGWObjVersionTracker *objv) - : RGWCoroutine(svc.zone->ctx()), svc(svc), + : RGWCoroutine(svc.zone->ctx()), dpp(dpp), svc(svc), cursor(cursor), 
objv(objv), async_processor(svc.rados->get_async_processor()) {} - int operate() { + int operate(const DoutPrefixProvider *dpp) { reenter(this) { state.oldest_period_id = cursor.get_period().get_id(); state.oldest_realm_epoch = cursor.get_epoch(); @@ -184,15 +189,15 @@ class WriteHistoryCR : public RGWCoroutine { RGWMetadataLogHistory::oid}; using WriteCR = RGWSimpleRadosWriteCR; - call(new WriteCR(async_processor, svc.sysobj, obj, state, objv)); + call(new WriteCR(dpp, async_processor, svc.sysobj, obj, state, objv)); } if (retcode < 0) { - ldout(cct, 1) << "failed to write mdlog history: " + ldpp_dout(dpp, 1) << "failed to write mdlog history: " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); } - ldout(cct, 10) << "wrote mdlog history with oldest period id=" + ldpp_dout(dpp, 10) << "wrote mdlog history with oldest period id=" << state.oldest_period_id << " realm_epoch=" << state.oldest_realm_epoch << dendl; return set_cr_done(); @@ -203,6 +208,7 @@ class WriteHistoryCR : public RGWCoroutine { /// update the mdlog history to reflect trimmed logs class TrimHistoryCR : public RGWCoroutine { + const DoutPrefixProvider *dpp; Svc svc; const Cursor cursor; //< cursor to trimmed period RGWObjVersionTracker *objv; //< to prevent racing updates @@ -210,27 +216,27 @@ class TrimHistoryCR : public RGWCoroutine { Cursor existing; //< existing cursor read from disk public: - TrimHistoryCR(const Svc& svc, Cursor cursor, RGWObjVersionTracker *objv) - : RGWCoroutine(svc.zone->ctx()), svc(svc), + TrimHistoryCR(const DoutPrefixProvider *dpp, const Svc& svc, Cursor cursor, RGWObjVersionTracker *objv) + : RGWCoroutine(svc.zone->ctx()), dpp(dpp), svc(svc), cursor(cursor), objv(objv), next(cursor) { next.next(); // advance past cursor } - int operate() { + int operate(const DoutPrefixProvider *dpp) { reenter(this) { // read an existing history, and write the new history if it's newer - yield call(new ReadHistoryCR(svc, &existing, objv)); + yield call(new 
ReadHistoryCR(dpp, svc, &existing, objv)); if (retcode < 0) { return set_cr_error(retcode); } // reject older trims with ECANCELED if (cursor.get_epoch() < existing.get_epoch()) { - ldout(cct, 4) << "found oldest log epoch=" << existing.get_epoch() + ldpp_dout(dpp, 4) << "found oldest log epoch=" << existing.get_epoch() << ", rejecting trim at epoch=" << cursor.get_epoch() << dendl; return set_cr_error(-ECANCELED); } // overwrite with updated history - yield call(new WriteHistoryCR(svc, next, objv)); + yield call(new WriteHistoryCR(dpp, svc, next, objv)); if (retcode < 0) { return set_cr_error(retcode); } @@ -244,7 +250,7 @@ class TrimHistoryCR : public RGWCoroutine { // traverse all the way back to the beginning of the period history, and // return a cursor to the first period in a fully attached history -Cursor RGWSI_MDLog::find_oldest_period(optional_yield y) +Cursor RGWSI_MDLog::find_oldest_period(const DoutPrefixProvider *dpp, optional_yield y) { auto cursor = period_history->get_current(); @@ -254,13 +260,13 @@ Cursor RGWSI_MDLog::find_oldest_period(optional_yield y) auto& predecessor = cursor.get_period().get_predecessor(); if (predecessor.empty()) { // this is the first period, so our logs must start here - ldout(cct, 10) << "find_oldest_period returning first " + ldpp_dout(dpp, 10) << "find_oldest_period returning first " "period " << cursor.get_period().get_id() << dendl; return cursor; } // pull the predecessor and add it to our history RGWPeriod period; - int r = period_puller->pull(predecessor, period, y); + int r = period_puller->pull(dpp, predecessor, period, y); if (r < 0) { return cursor; } @@ -268,13 +274,13 @@ Cursor RGWSI_MDLog::find_oldest_period(optional_yield y) if (!prev) { return prev; } - ldout(cct, 20) << "find_oldest_period advancing to " + ldpp_dout(dpp, 20) << "find_oldest_period advancing to " "predecessor period " << predecessor << dendl; ceph_assert(cursor.has_prev()); } cursor.prev(); } - ldout(cct, 10) << "find_oldest_period 
returning empty cursor" << dendl; + ldpp_dout(dpp, 10) << "find_oldest_period returning empty cursor" << dendl; return cursor; } @@ -288,7 +294,7 @@ Cursor RGWSI_MDLog::init_oldest_log_period(optional_yield y, const DoutPrefixPro if (ret == -ENOENT) { // initialize the mdlog history and write it ldpp_dout(dpp, 10) << "initializing mdlog history" << dendl; - auto cursor = find_oldest_period(y); + auto cursor = find_oldest_period(dpp, y); if (!cursor) { return cursor; } @@ -297,7 +303,7 @@ Cursor RGWSI_MDLog::init_oldest_log_period(optional_yield y, const DoutPrefixPro state.oldest_period_id = cursor.get_period().get_id(); constexpr bool exclusive = true; // don't overwrite - int ret = write_history(state, &objv, y, exclusive); + int ret = write_history(dpp, state, &objv, y, exclusive); if (ret < 0 && ret != -EEXIST) { ldpp_dout(dpp, 1) << "failed to write mdlog history: " << cpp_strerror(ret) << dendl; @@ -315,13 +321,13 @@ Cursor RGWSI_MDLog::init_oldest_log_period(optional_yield y, const DoutPrefixPro if (cursor) { return cursor; } else { - cursor = find_oldest_period(y); + cursor = find_oldest_period(dpp, y); state.oldest_realm_epoch = cursor.get_epoch(); state.oldest_period_id = cursor.get_period().get_id(); - ldout(cct, 10) << "rewriting mdlog history" << dendl; - ret = write_history(state, &objv, y); + ldpp_dout(dpp, 10) << "rewriting mdlog history" << dendl; + ret = write_history(dpp, state, &objv, y); if (ret < 0 && ret != -ECANCELED) { - ldout(cct, 1) << "failed to write mdlog history: " + ldpp_dout(dpp, 1) << "failed to write mdlog history: " << cpp_strerror(ret) << dendl; return Cursor{ret}; } @@ -330,21 +336,21 @@ Cursor RGWSI_MDLog::init_oldest_log_period(optional_yield y, const DoutPrefixPro // pull the oldest period by id RGWPeriod period; - ret = period_puller->pull(state.oldest_period_id, period, y); + ret = period_puller->pull(dpp, state.oldest_period_id, period, y); if (ret < 0) { - ldout(cct, 1) << "failed to read period id=" << 
state.oldest_period_id + ldpp_dout(dpp, 1) << "failed to read period id=" << state.oldest_period_id << " for mdlog history: " << cpp_strerror(ret) << dendl; return Cursor{ret}; } // verify its realm_epoch if (period.get_realm_epoch() != state.oldest_realm_epoch) { - ldout(cct, 1) << "inconsistent mdlog history: read period id=" + ldpp_dout(dpp, 1) << "inconsistent mdlog history: read period id=" << period.get_id() << " with realm_epoch=" << period.get_realm_epoch() << ", expected realm_epoch=" << state.oldest_realm_epoch << dendl; return Cursor{-EINVAL}; } // attach the period to our history - return period_history->attach(std::move(period), y); + return period_history->attach(dpp, std::move(period), y); } Cursor RGWSI_MDLog::read_oldest_log_period(optional_yield y, const DoutPrefixProvider *dpp) const @@ -364,16 +370,16 @@ Cursor RGWSI_MDLog::read_oldest_log_period(optional_yield y, const DoutPrefixPro return period_history->lookup(state.oldest_realm_epoch); } -RGWCoroutine* RGWSI_MDLog::read_oldest_log_period_cr(Cursor *period, - RGWObjVersionTracker *objv) const +RGWCoroutine* RGWSI_MDLog::read_oldest_log_period_cr(const DoutPrefixProvider *dpp, + Cursor *period, RGWObjVersionTracker *objv) const { - return new mdlog::ReadHistoryCR(svc, period, objv); + return new mdlog::ReadHistoryCR(dpp, svc, period, objv); } -RGWCoroutine* RGWSI_MDLog::trim_log_period_cr(Cursor period, - RGWObjVersionTracker *objv) const +RGWCoroutine* RGWSI_MDLog::trim_log_period_cr(const DoutPrefixProvider *dpp, + Cursor period, RGWObjVersionTracker *objv) const { - return new mdlog::TrimHistoryCR(svc, period, objv); + return new mdlog::TrimHistoryCR(dpp, svc, period, objv); } RGWMetadataLog* RGWSI_MDLog::get_log(const std::string& period) @@ -385,10 +391,10 @@ RGWMetadataLog* RGWSI_MDLog::get_log(const std::string& period) return &insert.first->second; } -int RGWSI_MDLog::add_entry(const string& hash_key, const string& section, const string& key, bufferlist& bl) +int 
RGWSI_MDLog::add_entry(const DoutPrefixProvider *dpp, const string& hash_key, const string& section, const string& key, bufferlist& bl) { ceph_assert(current_log); // must have called init() - return current_log->add_entry(hash_key, section, key, bl); + return current_log->add_entry(dpp, hash_key, section, key, bl); } int RGWSI_MDLog::get_shard_id(const string& hash_key, int *shard_id) @@ -397,9 +403,9 @@ int RGWSI_MDLog::get_shard_id(const string& hash_key, int *shard_id) return current_log->get_shard_id(hash_key, shard_id); } -int RGWSI_MDLog::pull_period(const std::string& period_id, RGWPeriod& period, +int RGWSI_MDLog::pull_period(const DoutPrefixProvider *dpp, const std::string& period_id, RGWPeriod& period, optional_yield y) { - return period_puller->pull(period_id, period, y); + return period_puller->pull(dpp, period_id, period, y); } diff --git a/src/rgw/services/svc_mdlog.h b/src/rgw/services/svc_mdlog.h index 4a5ed8ed855..57103efb464 100644 --- a/src/rgw/services/svc_mdlog.h +++ b/src/rgw/services/svc_mdlog.h @@ -75,7 +75,7 @@ public: // traverse all the way back to the beginning of the period history, and // return a cursor to the first period in a fully attached history - RGWPeriodHistory::Cursor find_oldest_period(optional_yield y); + RGWPeriodHistory::Cursor find_oldest_period(const DoutPrefixProvider *dpp, optional_yield y); /// initialize the oldest log period if it doesn't exist, and attach it to /// our current history @@ -87,19 +87,22 @@ public: /// read the oldest log period asynchronously and write its result to the /// given cursor pointer - RGWCoroutine* read_oldest_log_period_cr(RGWPeriodHistory::Cursor *period, + RGWCoroutine* read_oldest_log_period_cr(const DoutPrefixProvider *dpp, + RGWPeriodHistory::Cursor *period, RGWObjVersionTracker *objv) const; /// try to advance the oldest log period when the given period is trimmed, /// using a rados lock to provide atomicity - RGWCoroutine* trim_log_period_cr(RGWPeriodHistory::Cursor period, + 
RGWCoroutine* trim_log_period_cr(const DoutPrefixProvider *dpp, + RGWPeriodHistory::Cursor period, RGWObjVersionTracker *objv) const; int read_history(RGWMetadataLogHistory *state, RGWObjVersionTracker *objv_tracker,optional_yield y, const DoutPrefixProvider *dpp) const; - int write_history(const RGWMetadataLogHistory& state, + int write_history(const DoutPrefixProvider *dpp, + const RGWMetadataLogHistory& state, RGWObjVersionTracker *objv_tracker, optional_yield y, bool exclusive = false); - int add_entry(const string& hash_key, const string& section, const string& key, bufferlist& bl); + int add_entry(const DoutPrefixProvider *dpp, const string& hash_key, const string& section, const string& key, bufferlist& bl); int get_shard_id(const string& hash_key, int *shard_id); @@ -107,7 +110,7 @@ public: return period_history.get(); } - int pull_period(const std::string& period_id, RGWPeriod& period, optional_yield y); + int pull_period(const DoutPrefixProvider *dpp, const std::string& period_id, RGWPeriod& period, optional_yield y); /// find or create the metadata log for the given period RGWMetadataLog* get_log(const std::string& period); diff --git a/src/rgw/services/svc_meta_be.cc b/src/rgw/services/svc_meta_be.cc index beaa22b1b8b..0d4daffe2a4 100644 --- a/src/rgw/services/svc_meta_be.cc +++ b/src/rgw/services/svc_meta_be.cc @@ -15,7 +15,8 @@ RGWSI_MetaBackend::PutParams::~PutParams() {} // ... RGWSI_MetaBackend::GetParams::~GetParams() {} // ... RGWSI_MetaBackend::RemoveParams::~RemoveParams() {} // ... 
-int RGWSI_MetaBackend::pre_modify(RGWSI_MetaBackend::Context *ctx, +int RGWSI_MetaBackend::pre_modify(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const string& key, RGWMetadataLogData& log_data, RGWObjVersionTracker *objv_tracker, @@ -34,7 +35,8 @@ int RGWSI_MetaBackend::pre_modify(RGWSI_MetaBackend::Context *ctx, return 0; } -int RGWSI_MetaBackend::post_modify(RGWSI_MetaBackend::Context *ctx, +int RGWSI_MetaBackend::post_modify(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const string& key, RGWMetadataLogData& log_data, RGWObjVersionTracker *objv_tracker, int ret, @@ -91,7 +93,7 @@ int RGWSI_MetaBackend::do_mutate(RGWSI_MetaBackend::Context *ctx, } RGWMetadataLogData log_data; - ret = pre_modify(ctx, key, log_data, objv_tracker, op_type, y); + ret = pre_modify(dpp, ctx, key, log_data, objv_tracker, op_type, y); if (ret < 0) { return ret; } @@ -100,7 +102,7 @@ int RGWSI_MetaBackend::do_mutate(RGWSI_MetaBackend::Context *ctx, /* cascading ret into post_modify() */ - ret = post_modify(ctx, key, log_data, objv_tracker, ret, y); + ret = post_modify(dpp, ctx, key, log_data, objv_tracker, ret, y); if (ret < 0) return ret; @@ -125,7 +127,7 @@ int RGWSI_MetaBackend::put(Context *ctx, const DoutPrefixProvider *dpp) { std::function f = [&]() { - return put_entry(ctx, key, params, objv_tracker, y); + return put_entry(dpp, ctx, key, params, objv_tracker, y); }; return do_mutate(ctx, key, params.mtime, objv_tracker, @@ -144,7 +146,7 @@ int RGWSI_MetaBackend::remove(Context *ctx, const DoutPrefixProvider *dpp) { std::function f = [&]() { - return remove_entry(ctx, key, params, objv_tracker, y); + return remove_entry(dpp, ctx, key, params, objv_tracker, y); }; return do_mutate(ctx, key, params.mtime, objv_tracker, diff --git a/src/rgw/services/svc_meta_be.h b/src/rgw/services/svc_meta_be.h index ff57a0b3d99..af749d497f3 100644 --- a/src/rgw/services/svc_meta_be.h +++ b/src/rgw/services/svc_meta_be.h @@ -58,13 +58,15 @@ protected: bool 
generic_prepare, const DoutPrefixProvider *dpp); - virtual int pre_modify(Context *ctx, + virtual int pre_modify(const DoutPrefixProvider *dpp, + Context *ctx, const std::string& key, RGWMetadataLogData& log_data, RGWObjVersionTracker *objv_tracker, RGWMDLogStatus op_type, optional_yield y); - virtual int post_modify(Context *ctx, + virtual int post_modify(const DoutPrefixProvider *dpp, + Context *ctx, const std::string& key, RGWMetadataLogData& log_data, RGWObjVersionTracker *objv_tracker, int ret, @@ -144,18 +146,20 @@ public: RGWObjVersionTracker *objv_tracker, optional_yield y, const DoutPrefixProvider *dpp) = 0; - virtual int put_entry(RGWSI_MetaBackend::Context *ctx, + virtual int put_entry(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const std::string& key, RGWSI_MetaBackend::PutParams& params, RGWObjVersionTracker *objv_tracker, optional_yield y) = 0; - virtual int remove_entry(Context *ctx, + virtual int remove_entry(const DoutPrefixProvider *dpp, + Context *ctx, const std::string& key, RGWSI_MetaBackend::RemoveParams& params, RGWObjVersionTracker *objv_tracker, optional_yield y) = 0; - virtual int list_init(RGWSI_MetaBackend::Context *ctx, const string& marker) = 0; + virtual int list_init(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *ctx, const string& marker) = 0; virtual int list_next(RGWSI_MetaBackend::Context *ctx, int max, list *keys, bool *truncated) = 0; @@ -252,8 +256,8 @@ public: return be->mutate(be_ctx, key, params, objv_tracker, y, f, dpp); } - int list_init(const string& marker) { - return be->list_init(be_ctx, marker); + int list_init(const DoutPrefixProvider *dpp, const string& marker) { + return be->list_init(dpp, be_ctx, marker); } int list_next(int max, list *keys, bool *truncated) { diff --git a/src/rgw/services/svc_meta_be_otp.cc b/src/rgw/services/svc_meta_be_otp.cc index e2349a17a23..1800f8a3cac 100644 --- a/src/rgw/services/svc_meta_be_otp.cc +++ b/src/rgw/services/svc_meta_be_otp.cc @@ -49,7 +49,7 
@@ int RGWSI_MetaBackend_OTP::get_entry(RGWSI_MetaBackend::Context *_ctx, { RGWSI_MBOTP_GetParams& params = static_cast(_params); - int r = cls_svc->mfa.list_mfa(key, params.pdevices, objv_tracker, params.pmtime, y); + int r = cls_svc->mfa.list_mfa(dpp, key, params.pdevices, objv_tracker, params.pmtime, y); if (r < 0) { return r; } @@ -57,7 +57,8 @@ int RGWSI_MetaBackend_OTP::get_entry(RGWSI_MetaBackend::Context *_ctx, return 0; } -int RGWSI_MetaBackend_OTP::put_entry(RGWSI_MetaBackend::Context *_ctx, +int RGWSI_MetaBackend_OTP::put_entry(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *_ctx, const string& key, RGWSI_MetaBackend::PutParams& _params, RGWObjVersionTracker *objv_tracker, @@ -65,6 +66,6 @@ int RGWSI_MetaBackend_OTP::put_entry(RGWSI_MetaBackend::Context *_ctx, { RGWSI_MBOTP_PutParams& params = static_cast(_params); - return cls_svc->mfa.set_mfa(key, params.devices, true, objv_tracker, params.mtime, y); + return cls_svc->mfa.set_mfa(dpp, key, params.devices, true, objv_tracker, params.mtime, y); } diff --git a/src/rgw/services/svc_meta_be_otp.h b/src/rgw/services/svc_meta_be_otp.h index 7efc3fba11d..9da97b024bb 100644 --- a/src/rgw/services/svc_meta_be_otp.h +++ b/src/rgw/services/svc_meta_be_otp.h @@ -79,7 +79,8 @@ public: RGWObjVersionTracker *objv_tracker, optional_yield y, const DoutPrefixProvider *dpp); - int put_entry(RGWSI_MetaBackend::Context *ctx, + int put_entry(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const string& key, RGWSI_MetaBackend::PutParams& _params, RGWObjVersionTracker *objv_tracker, diff --git a/src/rgw/services/svc_meta_be_sobj.cc b/src/rgw/services/svc_meta_be_sobj.cc index 9696e3d2f8b..253e509ca8e 100644 --- a/src/rgw/services/svc_meta_be_sobj.cc +++ b/src/rgw/services/svc_meta_be_sobj.cc @@ -28,7 +28,7 @@ RGWSI_MetaBackend::Context *RGWSI_MetaBackend_SObj::alloc_ctx() return new Context_SObj(sysobj_svc); } -int RGWSI_MetaBackend_SObj::pre_modify(RGWSI_MetaBackend::Context *_ctx, +int 
RGWSI_MetaBackend_SObj::pre_modify(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *_ctx, const string& key, RGWMetadataLogData& log_data, RGWObjVersionTracker *objv_tracker, @@ -36,7 +36,7 @@ int RGWSI_MetaBackend_SObj::pre_modify(RGWSI_MetaBackend::Context *_ctx, optional_yield y) { auto ctx = static_cast(_ctx); - int ret = RGWSI_MetaBackend::pre_modify(ctx, key, log_data, + int ret = RGWSI_MetaBackend::pre_modify(dpp, ctx, key, log_data, objv_tracker, op_type, y); if (ret < 0) { @@ -56,14 +56,15 @@ int RGWSI_MetaBackend_SObj::pre_modify(RGWSI_MetaBackend::Context *_ctx, bufferlist logbl; encode(log_data, logbl); - ret = mdlog_svc->add_entry(ctx->module->get_hash_key(key), ctx->module->get_section(), key, logbl); + ret = mdlog_svc->add_entry(dpp, ctx->module->get_hash_key(key), ctx->module->get_section(), key, logbl); if (ret < 0) return ret; return 0; } -int RGWSI_MetaBackend_SObj::post_modify(RGWSI_MetaBackend::Context *_ctx, +int RGWSI_MetaBackend_SObj::post_modify(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *_ctx, const string& key, RGWMetadataLogData& log_data, RGWObjVersionTracker *objv_tracker, int ret, @@ -78,14 +79,14 @@ int RGWSI_MetaBackend_SObj::post_modify(RGWSI_MetaBackend::Context *_ctx, bufferlist logbl; encode(log_data, logbl); - int r = mdlog_svc->add_entry(ctx->module->get_hash_key(key), ctx->module->get_section(), key, logbl); + int r = mdlog_svc->add_entry(dpp, ctx->module->get_hash_key(key), ctx->module->get_section(), key, logbl); if (ret < 0) return ret; if (r < 0) return r; - return RGWSI_MetaBackend::post_modify(ctx, key, log_data, objv_tracker, ret, y); + return RGWSI_MetaBackend::post_modify(dpp, ctx, key, log_data, objv_tracker, ret, y); } int RGWSI_MetaBackend_SObj::get_shard_id(RGWSI_MetaBackend::Context *_ctx, @@ -157,7 +158,8 @@ int RGWSI_MetaBackend_SObj::get_entry(RGWSI_MetaBackend::Context *_ctx, params.refresh_version); } -int RGWSI_MetaBackend_SObj::put_entry(RGWSI_MetaBackend::Context *_ctx, +int 
RGWSI_MetaBackend_SObj::put_entry(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *_ctx, const string& key, PutParams& _params, RGWObjVersionTracker *objv_tracker, @@ -170,11 +172,12 @@ int RGWSI_MetaBackend_SObj::put_entry(RGWSI_MetaBackend::Context *_ctx, string oid; ctx->module->get_pool_and_oid(key, &pool, &oid); - return rgw_put_system_obj(*ctx->obj_ctx, pool, oid, params.bl, params.exclusive, + return rgw_put_system_obj(dpp, *ctx->obj_ctx, pool, oid, params.bl, params.exclusive, objv_tracker, params.mtime, y, params.pattrs); } -int RGWSI_MetaBackend_SObj::remove_entry(RGWSI_MetaBackend::Context *_ctx, +int RGWSI_MetaBackend_SObj::remove_entry(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *_ctx, const string& key, RemoveParams& params, RGWObjVersionTracker *objv_tracker, @@ -190,10 +193,11 @@ int RGWSI_MetaBackend_SObj::remove_entry(RGWSI_MetaBackend::Context *_ctx, auto sysobj = ctx->obj_ctx->get_obj(k); return sysobj.wop() .set_objv_tracker(objv_tracker) - .remove(y); + .remove(dpp, y); } -int RGWSI_MetaBackend_SObj::list_init(RGWSI_MetaBackend::Context *_ctx, +int RGWSI_MetaBackend_SObj::list_init(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *_ctx, const string& marker) { RGWSI_MetaBackend_SObj::Context_SObj *ctx = static_cast(_ctx); @@ -207,7 +211,7 @@ int RGWSI_MetaBackend_SObj::list_init(RGWSI_MetaBackend::Context *_ctx, ctx->list.op.emplace(ctx->list.pool->op()); string prefix = ctx->module->get_oid_prefix(); - ctx->list.op->init(marker, prefix); + ctx->list.op->init(dpp, marker, prefix); return 0; } diff --git a/src/rgw/services/svc_meta_be_sobj.h b/src/rgw/services/svc_meta_be_sobj.h index a6033d3ddf9..8c5660a6d54 100644 --- a/src/rgw/services/svc_meta_be_sobj.h +++ b/src/rgw/services/svc_meta_be_sobj.h @@ -132,13 +132,15 @@ public: int call_with_get_params(ceph::real_time *pmtime, std::function cb) override; - int pre_modify(RGWSI_MetaBackend::Context *ctx, + int pre_modify(const DoutPrefixProvider *dpp, + 
RGWSI_MetaBackend::Context *ctx, const string& key, RGWMetadataLogData& log_data, RGWObjVersionTracker *objv_tracker, RGWMDLogStatus op_type, optional_yield y); - int post_modify(RGWSI_MetaBackend::Context *ctx, + int post_modify(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const string& key, RGWMetadataLogData& log_data, RGWObjVersionTracker *objv_tracker, int ret, @@ -150,18 +152,20 @@ public: RGWObjVersionTracker *objv_tracker, optional_yield y, const DoutPrefixProvider *dpp) override; - int put_entry(RGWSI_MetaBackend::Context *ctx, + int put_entry(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const string& key, RGWSI_MetaBackend::PutParams& params, RGWObjVersionTracker *objv_tracker, optional_yield y) override; - int remove_entry(RGWSI_MetaBackend::Context *ctx, + int remove_entry(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const string& key, RGWSI_MetaBackend::RemoveParams& params, RGWObjVersionTracker *objv_tracker, optional_yield y) override; - int list_init(RGWSI_MetaBackend::Context *_ctx, const string& marker) override; + int list_init(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *_ctx, const string& marker) override; int list_next(RGWSI_MetaBackend::Context *_ctx, int max, list *keys, bool *truncated) override; diff --git a/src/rgw/services/svc_notify.cc b/src/rgw/services/svc_notify.cc index 3835671c4ba..25ccfdbfba7 100644 --- a/src/rgw/services/svc_notify.cc +++ b/src/rgw/services/svc_notify.cc @@ -16,7 +16,7 @@ static string notify_oid_prefix = "notify"; -class RGWWatcher : public librados::WatchCtx2 { +class RGWWatcher : public DoutPrefixProvider , public librados::WatchCtx2 { CephContext *cct; RGWSI_Notify *svc; int index; @@ -33,13 +33,18 @@ class RGWWatcher : public librados::WatchCtx2 { watcher->reinit(); } }; + + CephContext *get_cct() const { return cct; } + unsigned get_subsys() const { return dout_subsys; } + std::ostream& gen_prefix(std::ostream& out) const { return 
out << "rgw watcher librados: "; } + public: RGWWatcher(CephContext *_cct, RGWSI_Notify *s, int i, RGWSI_RADOS::Obj& o) : cct(_cct), svc(s), index(i), obj(o), watch_handle(0) {} void handle_notify(uint64_t notify_id, uint64_t cookie, uint64_t notifier_id, bufferlist& bl) override { - ldout(cct, 10) << "RGWWatcher::handle_notify() " + ldpp_dout(this, 10) << "RGWWatcher::handle_notify() " << " notify_id " << notify_id << " cookie " << cookie << " notifier " << notifier_id @@ -49,14 +54,14 @@ public: (svc->inject_notify_timeout_probability > 0 && (svc->inject_notify_timeout_probability > ceph::util::generate_random_number(0.0, 1.0)))) { - ldout(cct, 0) + ldpp_dout(this, 0) << "RGWWatcher::handle_notify() dropping notification! " << "If this isn't what you want, set " << "rgw_inject_notify_timeout_probability to zero!" << dendl; return; } - svc->watch_cb(notify_id, cookie, notifier_id, bl); + svc->watch_cb(this, notify_id, cookie, notifier_id, bl); bufferlist reply_bl; // empty reply payload obj.notify_ack(notify_id, cookie, reply_bl); @@ -160,7 +165,7 @@ RGWSI_RADOS::Obj RGWSI_Notify::pick_control_obj(const string& key) return notify_objs[i]; } -int RGWSI_Notify::init_watch(optional_yield y) +int RGWSI_Notify::init_watch(const DoutPrefixProvider *dpp, optional_yield y) { num_watchers = cct->_conf->rgw_num_control_oids; @@ -187,17 +192,17 @@ int RGWSI_Notify::init_watch(optional_yield y) notify_objs[i] = rados_svc->handle().obj({control_pool, notify_oid}); auto& notify_obj = notify_objs[i]; - int r = notify_obj.open(); + int r = notify_obj.open(dpp); if (r < 0) { - ldout(cct, 0) << "ERROR: notify_obj.open() returned r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: notify_obj.open() returned r=" << r << dendl; return r; } librados::ObjectWriteOperation op; op.create(false); - r = notify_obj.operate(&op, y); + r = notify_obj.operate(dpp, &op, y); if (r < 0 && r != -EEXIST) { - ldout(cct, 0) << "ERROR: notify_obj.operate() returned r=" << r << dendl; + ldpp_dout(dpp, 0) 
<< "ERROR: notify_obj.operate() returned r=" << r << dendl; return r; } @@ -206,7 +211,7 @@ int RGWSI_Notify::init_watch(optional_yield y) r = watcher->register_watch_async(); if (r < 0) { - ldout(cct, 0) << "WARNING: register_watch_aio() returned " << r << dendl; + ldpp_dout(dpp, 0) << "WARNING: register_watch_aio() returned " << r << dendl; error = r; continue; } @@ -215,7 +220,7 @@ int RGWSI_Notify::init_watch(optional_yield y) for (int i = 0; i < num_watchers; ++i) { int r = watchers[i]->register_watch_finish(); if (r < 0) { - ldout(cct, 0) << "WARNING: async watch returned " << r << dendl; + ldpp_dout(dpp, 0) << "WARNING: async watch returned " << r << dendl; error = r; } } @@ -258,7 +263,7 @@ int RGWSI_Notify::do_start(optional_yield y, const DoutPrefixProvider *dpp) control_pool = zone_svc->get_zone_params().control_pool; - int ret = init_watch(y); + int ret = init_watch(dpp, y); if (ret < 0) { lderr(cct) << "ERROR: failed to initialize watch: " << cpp_strerror(-ret) << dendl; return ret; @@ -332,14 +337,15 @@ void RGWSI_Notify::remove_watcher(int i) } } -int RGWSI_Notify::watch_cb(uint64_t notify_id, +int RGWSI_Notify::watch_cb(const DoutPrefixProvider *dpp, + uint64_t notify_id, uint64_t cookie, uint64_t notifier_id, bufferlist& bl) { std::shared_lock l{watchers_lock}; if (cb) { - return cb->watch_cb(notify_id, cookie, notifier_id, bl); + return cb->watch_cb(dpp, notify_id, cookie, notifier_id, bl); } return 0; } @@ -358,7 +364,7 @@ void RGWSI_Notify::_set_enabled(bool status) } } -int RGWSI_Notify::distribute(const string& key, bufferlist& bl, +int RGWSI_Notify::distribute(const DoutPrefixProvider *dpp, const string& key, bufferlist& bl, optional_yield y) { /* The RGW uses the control pool to store the watch notify objects. 
@@ -370,14 +376,15 @@ int RGWSI_Notify::distribute(const string& key, bufferlist& bl, if (num_watchers > 0) { RGWSI_RADOS::Obj notify_obj = pick_control_obj(key); - ldout(cct, 10) << "distributing notification oid=" << notify_obj.get_ref().obj + ldpp_dout(dpp, 10) << "distributing notification oid=" << notify_obj.get_ref().obj << " bl.length()=" << bl.length() << dendl; - return robust_notify(notify_obj, bl, y); + return robust_notify(dpp, notify_obj, bl, y); } return 0; } -int RGWSI_Notify::robust_notify(RGWSI_RADOS::Obj& notify_obj, bufferlist& bl, +int RGWSI_Notify::robust_notify(const DoutPrefixProvider *dpp, + RGWSI_RADOS::Obj& notify_obj, bufferlist& bl, optional_yield y) { // The reply of every machine that acks goes in here. @@ -385,11 +392,11 @@ int RGWSI_Notify::robust_notify(RGWSI_RADOS::Obj& notify_obj, bufferlist& bl, bufferlist rbl; // First, try to send, without being fancy about it. - auto r = notify_obj.notify(bl, 0, &rbl, y); + auto r = notify_obj.notify(dpp, bl, 0, &rbl, y); // If that doesn't work, get serious. if (r < 0) { - ldout(cct, 1) << "robust_notify: If at first you don't succeed: " + ldpp_dout(dpp, 1) << "robust_notify: If at first you don't succeed: " << cpp_strerror(-r) << dendl; @@ -403,13 +410,13 @@ int RGWSI_Notify::robust_notify(RGWSI_RADOS::Obj& notify_obj, bufferlist& bl, std::pair id; decode(id, p); acks.insert(id); - ldout(cct, 20) << "robust_notify: acked by " << id << dendl; + ldpp_dout(dpp, 20) << "robust_notify: acked by " << id << dendl; uint32_t blen; decode(blen, p); p += blen; } } catch (const buffer::error& e) { - ldout(cct, 0) << "robust_notify: notify response parse failed: " + ldpp_dout(dpp, 0) << "robust_notify: notify response parse failed: " << e.what() << dendl; acks.clear(); // Throw away junk on failed parse. } @@ -425,9 +432,9 @@ int RGWSI_Notify::robust_notify(RGWSI_RADOS::Obj& notify_obj, bufferlist& bl, rbl.clear(); // Reset the timeouts, we're only concerned with new ones. 
timeouts.clear(); - r = notify_obj.notify(bl, 0, &rbl, y); + r = notify_obj.notify(dpp, bl, 0, &rbl, y); if (r < 0) { - ldout(cct, 1) << "robust_notify: retry " << tries << " failed: " + ldpp_dout(dpp, 1) << "robust_notify: retry " << tries << " failed: " << cpp_strerror(-r) << dendl; p = rbl.begin(); try { @@ -441,7 +448,7 @@ int RGWSI_Notify::robust_notify(RGWSI_RADOS::Obj& notify_obj, bufferlist& bl, decode(id, p); auto ir = acks.insert(id); if (ir.second) { - ldout(cct, 20) << "robust_notify: acked by " << id << dendl; + ldpp_dout(dpp, 20) << "robust_notify: acked by " << id << dendl; } uint32_t blen; decode(blen, p); @@ -455,13 +462,13 @@ int RGWSI_Notify::robust_notify(RGWSI_RADOS::Obj& notify_obj, bufferlist& bl, decode(id, p); // Only track timeouts from hosts that haven't acked previously. if (acks.find(id) != acks.cend()) { - ldout(cct, 20) << "robust_notify: " << id << " timed out." + ldpp_dout(dpp, 20) << "robust_notify: " << id << " timed out." << dendl; timeouts.insert(id); } } } catch (const buffer::error& e) { - ldout(cct, 0) << "robust_notify: notify response parse failed: " + ldpp_dout(dpp, 0) << "robust_notify: notify response parse failed: " << e.what() << dendl; continue; } diff --git a/src/rgw/services/svc_notify.h b/src/rgw/services/svc_notify.h index e4378bb9920..5b01d77b7bf 100644 --- a/src/rgw/services/svc_notify.h +++ b/src/rgw/services/svc_notify.h @@ -53,7 +53,7 @@ private: bool finalized{false}; - int init_watch(optional_yield y); + int init_watch(const DoutPrefixProvider *dpp, optional_yield y); void finalize_watch(); void init(RGWSI_Zone *_zone_svc, @@ -70,14 +70,16 @@ private: void add_watcher(int i); void remove_watcher(int i); - int watch_cb(uint64_t notify_id, + int watch_cb(const DoutPrefixProvider *dpp, + uint64_t notify_id, uint64_t cookie, uint64_t notifier_id, bufferlist& bl); void _set_enabled(bool status); void set_enabled(bool status); - int robust_notify(RGWSI_RADOS::Obj& notify_obj, bufferlist& bl, + int 
robust_notify(const DoutPrefixProvider *dpp, + RGWSI_RADOS::Obj& notify_obj, bufferlist& bl, optional_yield y); void schedule_context(Context *c); @@ -88,14 +90,15 @@ public: class CB { public: virtual ~CB() {} - virtual int watch_cb(uint64_t notify_id, + virtual int watch_cb(const DoutPrefixProvider *dpp, + uint64_t notify_id, uint64_t cookie, uint64_t notifier_id, bufferlist& bl) = 0; virtual void set_enabled(bool status) = 0; }; - int distribute(const string& key, bufferlist& bl, optional_yield y); + int distribute(const DoutPrefixProvider *dpp, const string& key, bufferlist& bl, optional_yield y); void register_watch_cb(CB *cb); }; diff --git a/src/rgw/services/svc_otp.cc b/src/rgw/services/svc_otp.cc index 6adad51efba..fc386ae7235 100644 --- a/src/rgw/services/svc_otp.cc +++ b/src/rgw/services/svc_otp.cc @@ -119,7 +119,8 @@ int RGWSI_OTP::read_all(RGWSI_OTP_BE_Ctx& ctx, dpp); } -int RGWSI_OTP::store_all(RGWSI_OTP_BE_Ctx& ctx, +int RGWSI_OTP::store_all(const DoutPrefixProvider *dpp, + RGWSI_OTP_BE_Ctx& ctx, const string& key, const otp_devices_list_t& devices, real_time mtime, @@ -130,7 +131,7 @@ int RGWSI_OTP::store_all(RGWSI_OTP_BE_Ctx& ctx, params.mtime = mtime; params.devices = devices; - int ret = svc.meta_be->put_entry(ctx.get(), key, params, objv_tracker, y); + int ret = svc.meta_be->put_entry(dpp, ctx.get(), key, params, objv_tracker, y); if (ret < 0) { return ret; } @@ -138,14 +139,15 @@ int RGWSI_OTP::store_all(RGWSI_OTP_BE_Ctx& ctx, return 0; } -int RGWSI_OTP::store_all(RGWSI_OTP_BE_Ctx& ctx, +int RGWSI_OTP::store_all(const DoutPrefixProvider *dpp, + RGWSI_OTP_BE_Ctx& ctx, const rgw_user& uid, const otp_devices_list_t& devices, real_time mtime, RGWObjVersionTracker *objv_tracker, optional_yield y) { - return store_all(ctx, + return store_all(dpp, ctx, uid.to_str(), devices, mtime, @@ -153,14 +155,15 @@ int RGWSI_OTP::store_all(RGWSI_OTP_BE_Ctx& ctx, y); } -int RGWSI_OTP::remove_all(RGWSI_OTP_BE_Ctx& ctx, +int RGWSI_OTP::remove_all(const 
DoutPrefixProvider *dpp, + RGWSI_OTP_BE_Ctx& ctx, const string& key, RGWObjVersionTracker *objv_tracker, optional_yield y) { RGWSI_MBOTP_RemoveParams params; - int ret = svc.meta_be->remove_entry(ctx.get(), key, params, objv_tracker, y); + int ret = svc.meta_be->remove_entry(dpp, ctx.get(), key, params, objv_tracker, y); if (ret < 0) { return ret; } @@ -168,12 +171,13 @@ int RGWSI_OTP::remove_all(RGWSI_OTP_BE_Ctx& ctx, return 0; } -int RGWSI_OTP::remove_all(RGWSI_OTP_BE_Ctx& ctx, +int RGWSI_OTP::remove_all(const DoutPrefixProvider *dpp, + RGWSI_OTP_BE_Ctx& ctx, const rgw_user& uid, RGWObjVersionTracker *objv_tracker, optional_yield y) { - return remove_all(ctx, + return remove_all(dpp,ctx, uid.to_str(), objv_tracker, y); diff --git a/src/rgw/services/svc_otp.h b/src/rgw/services/svc_otp.h index 673ba4d9f5c..f4b2e4ed2cc 100644 --- a/src/rgw/services/svc_otp.h +++ b/src/rgw/services/svc_otp.h @@ -66,23 +66,27 @@ public: RGWObjVersionTracker *objv_tracker, optional_yield y, const DoutPrefixProvider *dpp); - int store_all(RGWSI_OTP_BE_Ctx& ctx, + int store_all(const DoutPrefixProvider *dpp, + RGWSI_OTP_BE_Ctx& ctx, const string& key, const otp_devices_list_t& devices, real_time mtime, RGWObjVersionTracker *objv_tracker, optional_yield y); - int store_all(RGWSI_OTP_BE_Ctx& ctx, + int store_all(const DoutPrefixProvider *dpp, + RGWSI_OTP_BE_Ctx& ctx, const rgw_user& uid, const otp_devices_list_t& devices, real_time mtime, RGWObjVersionTracker *objv_tracker, optional_yield y); - int remove_all(RGWSI_OTP_BE_Ctx& ctx, + int remove_all(const DoutPrefixProvider *dpp, + RGWSI_OTP_BE_Ctx& ctx, const string& key, RGWObjVersionTracker *objv_tracker, optional_yield y); - int remove_all(RGWSI_OTP_BE_Ctx& ctx, + int remove_all(const DoutPrefixProvider *dpp, + RGWSI_OTP_BE_Ctx& ctx, const rgw_user& uid, RGWObjVersionTracker *objv_tracker, optional_yield y); diff --git a/src/rgw/services/svc_rados.cc b/src/rgw/services/svc_rados.cc index 6d207581ba2..32a6b3a3e39 100644 --- 
a/src/rgw/services/svc_rados.cc +++ b/src/rgw/services/svc_rados.cc @@ -55,10 +55,10 @@ uint64_t RGWSI_RADOS::instance_id() return get_rados_handle()->get_instance_id(); } -int RGWSI_RADOS::open_pool_ctx(const rgw_pool& pool, librados::IoCtx& io_ctx, +int RGWSI_RADOS::open_pool_ctx(const DoutPrefixProvider *dpp, const rgw_pool& pool, librados::IoCtx& io_ctx, const OpenParams& params) { - return rgw_init_ioctx(get_rados_handle(), pool, io_ctx, + return rgw_init_ioctx(dpp, get_rados_handle(), pool, io_ctx, params.create, params.mostly_omap); } @@ -106,9 +106,9 @@ void RGWSI_RADOS::Obj::init(const rgw_raw_obj& obj) ref.obj = obj; } -int RGWSI_RADOS::Obj::open() +int RGWSI_RADOS::Obj::open(const DoutPrefixProvider *dpp) { - int r = ref.pool.open(); + int r = ref.pool.open(dpp); if (r < 0) { return r; } @@ -118,16 +118,16 @@ int RGWSI_RADOS::Obj::open() return 0; } -int RGWSI_RADOS::Obj::operate(librados::ObjectWriteOperation *op, +int RGWSI_RADOS::Obj::operate(const DoutPrefixProvider *dpp, librados::ObjectWriteOperation *op, optional_yield y, int flags) { - return rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, op, y, flags); + return rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, op, y, flags); } -int RGWSI_RADOS::Obj::operate(librados::ObjectReadOperation *op, +int RGWSI_RADOS::Obj::operate(const DoutPrefixProvider *dpp, librados::ObjectReadOperation *op, bufferlist *pbl, optional_yield y, int flags) { - return rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, op, pbl, y, flags); + return rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, op, pbl, y, flags); } int RGWSI_RADOS::Obj::aio_operate(librados::AioCompletion *c, librados::ObjectWriteOperation *op) @@ -156,10 +156,10 @@ int RGWSI_RADOS::Obj::unwatch(uint64_t handle) return ref.pool.ioctx().unwatch2(handle); } -int RGWSI_RADOS::Obj::notify(bufferlist& bl, uint64_t timeout_ms, +int RGWSI_RADOS::Obj::notify(const DoutPrefixProvider *dpp, bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl, 
optional_yield y) { - return rgw_rados_notify(ref.pool.ioctx(), ref.obj.oid, bl, timeout_ms, pbl, y); + return rgw_rados_notify(dpp, ref.pool.ioctx(), ref.obj.oid, bl, timeout_ms, pbl, y); } void RGWSI_RADOS::Obj::notify_ack(uint64_t notify_id, @@ -286,12 +286,12 @@ int RGWSI_RADOS::Pool::lookup() return 0; } -int RGWSI_RADOS::Pool::open(const OpenParams& params) +int RGWSI_RADOS::Pool::open(const DoutPrefixProvider *dpp, const OpenParams& params) { - return rados_svc->open_pool_ctx(pool, state.ioctx, params); + return rados_svc->open_pool_ctx(dpp, pool, state.ioctx, params); } -int RGWSI_RADOS::Pool::List::init(const string& marker, RGWAccessListFilter *filter) +int RGWSI_RADOS::Pool::List::init(const DoutPrefixProvider *dpp, const string& marker, RGWAccessListFilter *filter) { if (ctx.initialized) { return -EINVAL; @@ -301,14 +301,14 @@ int RGWSI_RADOS::Pool::List::init(const string& marker, RGWAccessListFilter *fil return -EINVAL; } - int r = pool->rados_svc->open_pool_ctx(pool->pool, ctx.ioctx); + int r = pool->rados_svc->open_pool_ctx(dpp, pool->pool, ctx.ioctx); if (r < 0) { return r; } librados::ObjectCursor oc; if (!oc.from_str(marker)) { - ldout(pool->rados_svc->cct, 10) << "failed to parse cursor: " << marker << dendl; + ldpp_dout(dpp, 10) << "failed to parse cursor: " << marker << dendl; return -EINVAL; } diff --git a/src/rgw/services/svc_rados.h b/src/rgw/services/svc_rados.h index c167019e241..46a7f93f868 100644 --- a/src/rgw/services/svc_rados.h +++ b/src/rgw/services/svc_rados.h @@ -50,7 +50,7 @@ public: }; private: - int open_pool_ctx(const rgw_pool& pool, librados::IoCtx& io_ctx, + int open_pool_ctx(const DoutPrefixProvider *dpp, const rgw_pool& pool, librados::IoCtx& io_ctx, const OpenParams& params = {}); int pool_iterate(librados::IoCtx& ioctx, librados::NObjectIterator& iter, @@ -100,7 +100,7 @@ public: int create(); int create(const std::vector& pools, std::vector *retcodes); int lookup(); - int open(const OpenParams& params = {}); + int 
open(const DoutPrefixProvider *dpp, const OpenParams& params = {}); const rgw_pool& get_pool() { return pool; @@ -123,7 +123,7 @@ public: List() {} List(Pool *_pool) : pool(_pool) {} - int init(const string& marker, RGWAccessListFilter *filter = nullptr); + int init(const DoutPrefixProvider *dpp, const string& marker, RGWAccessListFilter *filter = nullptr); int get_next(int max, std::vector *oids, bool *is_truncated); @@ -163,11 +163,11 @@ public: public: Obj() {} - int open(); + int open(const DoutPrefixProvider *dpp); - int operate(librados::ObjectWriteOperation *op, optional_yield y, + int operate(const DoutPrefixProvider *dpp, librados::ObjectWriteOperation *op, optional_yield y, int flags = 0); - int operate(librados::ObjectReadOperation *op, bufferlist *pbl, + int operate(const DoutPrefixProvider *dpp, librados::ObjectReadOperation *op, bufferlist *pbl, optional_yield y, int flags = 0); int aio_operate(librados::AioCompletion *c, librados::ObjectWriteOperation *op); int aio_operate(librados::AioCompletion *c, librados::ObjectReadOperation *op, @@ -176,7 +176,7 @@ public: int watch(uint64_t *handle, librados::WatchCtx2 *ctx); int aio_watch(librados::AioCompletion *c, uint64_t *handle, librados::WatchCtx2 *ctx); int unwatch(uint64_t handle); - int notify(bufferlist& bl, uint64_t timeout_ms, + int notify(const DoutPrefixProvider *dpp, bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl, optional_yield y); void notify_ack(uint64_t notify_id, uint64_t cookie, diff --git a/src/rgw/services/svc_sys_obj.cc b/src/rgw/services/svc_sys_obj.cc index b03f339ff7b..be30e45c56c 100644 --- a/src/rgw/services/svc_sys_obj.cc +++ b/src/rgw/services/svc_sys_obj.cc @@ -40,13 +40,14 @@ int RGWSI_SysObj::Obj::ROp::stat(optional_yield y, const DoutPrefixProvider *dpp objv_tracker, y, dpp); } -int RGWSI_SysObj::Obj::ROp::read(int64_t ofs, int64_t end, bufferlist *bl, +int RGWSI_SysObj::Obj::ROp::read(const DoutPrefixProvider *dpp, + int64_t ofs, int64_t end, bufferlist *bl, 
optional_yield y) { RGWSI_SysObj_Core *svc = source.core_svc; rgw_raw_obj& obj = source.get_obj(); - return svc->read(source.get_ctx(), *state, + return svc->read(dpp, source.get_ctx(), *state, objv_tracker, obj, bl, ofs, end, attrs, @@ -55,51 +56,52 @@ int RGWSI_SysObj::Obj::ROp::read(int64_t ofs, int64_t end, bufferlist *bl, refresh_version, y); } -int RGWSI_SysObj::Obj::ROp::get_attr(const char *name, bufferlist *dest, +int RGWSI_SysObj::Obj::ROp::get_attr(const DoutPrefixProvider *dpp, + const char *name, bufferlist *dest, optional_yield y) { RGWSI_SysObj_Core *svc = source.core_svc; rgw_raw_obj& obj = source.get_obj(); - return svc->get_attr(obj, name, dest, y); + return svc->get_attr(dpp, obj, name, dest, y); } -int RGWSI_SysObj::Obj::WOp::remove(optional_yield y) +int RGWSI_SysObj::Obj::WOp::remove(const DoutPrefixProvider *dpp, optional_yield y) { RGWSI_SysObj_Core *svc = source.core_svc; rgw_raw_obj& obj = source.get_obj(); - return svc->remove(source.get_ctx(), + return svc->remove(dpp, source.get_ctx(), objv_tracker, obj, y); } -int RGWSI_SysObj::Obj::WOp::write(bufferlist& bl, optional_yield y) +int RGWSI_SysObj::Obj::WOp::write(const DoutPrefixProvider *dpp, bufferlist& bl, optional_yield y) { RGWSI_SysObj_Core *svc = source.core_svc; rgw_raw_obj& obj = source.get_obj(); - return svc->write(obj, pmtime, attrs, exclusive, + return svc->write(dpp, obj, pmtime, attrs, exclusive, bl, objv_tracker, mtime, y); } -int RGWSI_SysObj::Obj::WOp::write_data(bufferlist& bl, optional_yield y) +int RGWSI_SysObj::Obj::WOp::write_data(const DoutPrefixProvider *dpp, bufferlist& bl, optional_yield y) { RGWSI_SysObj_Core *svc = source.core_svc; rgw_raw_obj& obj = source.get_obj(); - return svc->write_data(obj, bl, exclusive, objv_tracker, y); + return svc->write_data(dpp, obj, bl, exclusive, objv_tracker, y); } -int RGWSI_SysObj::Obj::WOp::write_attrs(optional_yield y) +int RGWSI_SysObj::Obj::WOp::write_attrs(const DoutPrefixProvider *dpp, optional_yield y) { 
RGWSI_SysObj_Core *svc = source.core_svc; rgw_raw_obj& obj = source.get_obj(); - return svc->set_attrs(obj, attrs, nullptr, objv_tracker, y); + return svc->set_attrs(dpp, obj, attrs, nullptr, objv_tracker, y); } -int RGWSI_SysObj::Obj::WOp::write_attr(const char *name, bufferlist& bl, +int RGWSI_SysObj::Obj::WOp::write_attr(const DoutPrefixProvider *dpp, const char *name, bufferlist& bl, optional_yield y) { RGWSI_SysObj_Core *svc = source.core_svc; @@ -108,17 +110,17 @@ int RGWSI_SysObj::Obj::WOp::write_attr(const char *name, bufferlist& bl, map m; m[name] = bl; - return svc->set_attrs(obj, m, nullptr, objv_tracker, y); + return svc->set_attrs(dpp, obj, m, nullptr, objv_tracker, y); } -int RGWSI_SysObj::Pool::list_prefixed_objs(const string& prefix, std::function cb) +int RGWSI_SysObj::Pool::list_prefixed_objs(const DoutPrefixProvider *dpp, const string& prefix, std::function cb) { - return core_svc->pool_list_prefixed_objs(pool, prefix, cb); + return core_svc->pool_list_prefixed_objs(dpp, pool, prefix, cb); } -int RGWSI_SysObj::Pool::Op::init(const string& marker, const string& prefix) +int RGWSI_SysObj::Pool::Op::init(const DoutPrefixProvider *dpp, const string& marker, const string& prefix) { - return source.core_svc->pool_list_objects_init(source.pool, marker, prefix, &ctx); + return source.core_svc->pool_list_objects_init(dpp, source.pool, marker, prefix, &ctx); } int RGWSI_SysObj::Pool::Op::get_next(int max, vector *oids, bool *is_truncated) @@ -131,58 +133,59 @@ int RGWSI_SysObj::Pool::Op::get_marker(string *marker) return source.core_svc->pool_list_objects_get_marker(ctx, marker); } -int RGWSI_SysObj::Obj::OmapOp::get_all(std::map *m, +int RGWSI_SysObj::Obj::OmapOp::get_all(const DoutPrefixProvider *dpp, std::map *m, optional_yield y) { RGWSI_SysObj_Core *svc = source.core_svc; rgw_raw_obj& obj = source.obj; - return svc->omap_get_all(obj, m, y); + return svc->omap_get_all(dpp, obj, m, y); } -int RGWSI_SysObj::Obj::OmapOp::get_vals(const string& marker, 
uint64_t count, +int RGWSI_SysObj::Obj::OmapOp::get_vals(const DoutPrefixProvider *dpp, + const string& marker, uint64_t count, std::map *m, bool *pmore, optional_yield y) { RGWSI_SysObj_Core *svc = source.core_svc; rgw_raw_obj& obj = source.obj; - return svc->omap_get_vals(obj, marker, count, m, pmore, y); + return svc->omap_get_vals(dpp, obj, marker, count, m, pmore, y); } -int RGWSI_SysObj::Obj::OmapOp::set(const std::string& key, bufferlist& bl, +int RGWSI_SysObj::Obj::OmapOp::set(const DoutPrefixProvider *dpp, const std::string& key, bufferlist& bl, optional_yield y) { RGWSI_SysObj_Core *svc = source.core_svc; rgw_raw_obj& obj = source.obj; - return svc->omap_set(obj, key, bl, must_exist, y); + return svc->omap_set(dpp, obj, key, bl, must_exist, y); } -int RGWSI_SysObj::Obj::OmapOp::set(const map& m, +int RGWSI_SysObj::Obj::OmapOp::set(const DoutPrefixProvider *dpp, const map& m, optional_yield y) { RGWSI_SysObj_Core *svc = source.core_svc; rgw_raw_obj& obj = source.obj; - return svc->omap_set(obj, m, must_exist, y); + return svc->omap_set(dpp, obj, m, must_exist, y); } -int RGWSI_SysObj::Obj::OmapOp::del(const std::string& key, optional_yield y) +int RGWSI_SysObj::Obj::OmapOp::del(const DoutPrefixProvider *dpp, const std::string& key, optional_yield y) { RGWSI_SysObj_Core *svc = source.core_svc; rgw_raw_obj& obj = source.obj; - return svc->omap_del(obj, key, y); + return svc->omap_del(dpp, obj, key, y); } -int RGWSI_SysObj::Obj::WNOp::notify(bufferlist& bl, uint64_t timeout_ms, +int RGWSI_SysObj::Obj::WNOp::notify(const DoutPrefixProvider *dpp, bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl, optional_yield y) { RGWSI_SysObj_Core *svc = source.core_svc; rgw_raw_obj& obj = source.obj; - return svc->notify(obj, bl, timeout_ms, pbl, y); + return svc->notify(dpp, obj, bl, timeout_ms, pbl, y); } RGWSI_Zone *RGWSI_SysObj::get_zone_svc() diff --git a/src/rgw/services/svc_sys_obj.h b/src/rgw/services/svc_sys_obj.h index 05c4929084f..48ae302408a 100644 --- 
a/src/rgw/services/svc_sys_obj.h +++ b/src/rgw/services/svc_sys_obj.h @@ -98,11 +98,11 @@ public: ROp(Obj& _source); int stat(optional_yield y, const DoutPrefixProvider *dpp); - int read(int64_t ofs, int64_t end, bufferlist *pbl, optional_yield y); - int read(bufferlist *pbl, optional_yield y) { - return read(0, -1, pbl, y); + int read(const DoutPrefixProvider *dpp, int64_t ofs, int64_t end, bufferlist *pbl, optional_yield y); + int read(const DoutPrefixProvider *dpp, bufferlist *pbl, optional_yield y) { + return read(dpp, 0, -1, pbl, y); } - int get_attr(const char *name, bufferlist *dest, optional_yield y); + int get_attr(const DoutPrefixProvider *dpp, const char *name, bufferlist *dest, optional_yield y); }; struct WOp { @@ -146,12 +146,12 @@ public: WOp(Obj& _source) : source(_source) {} - int remove(optional_yield y); - int write(bufferlist& bl, optional_yield y); + int remove(const DoutPrefixProvider *dpp, optional_yield y); + int write(const DoutPrefixProvider *dpp, bufferlist& bl, optional_yield y); - int write_data(bufferlist& bl, optional_yield y); /* write data only */ - int write_attrs(optional_yield y); /* write attrs only */ - int write_attr(const char *name, bufferlist& bl, + int write_data(const DoutPrefixProvider *dpp, bufferlist& bl, optional_yield y); /* write data only */ + int write_attrs(const DoutPrefixProvider *dpp, optional_yield y); /* write attrs only */ + int write_attr(const DoutPrefixProvider *dpp, const char *name, bufferlist& bl, optional_yield y); /* write attrs only */ }; @@ -167,13 +167,13 @@ public: OmapOp(Obj& _source) : source(_source) {} - int get_all(std::map *m, optional_yield y); - int get_vals(const string& marker, uint64_t count, + int get_all(const DoutPrefixProvider *dpp, std::map *m, optional_yield y); + int get_vals(const DoutPrefixProvider *dpp, const string& marker, uint64_t count, std::map *m, bool *pmore, optional_yield y); - int set(const std::string& key, bufferlist& bl, optional_yield y); - int set(const map& 
m, optional_yield y); - int del(const std::string& key, optional_yield y); + int set(const DoutPrefixProvider *dpp, const std::string& key, bufferlist& bl, optional_yield y); + int set(const DoutPrefixProvider *dpp, const map& m, optional_yield y); + int del(const DoutPrefixProvider *dpp, const std::string& key, optional_yield y); }; struct WNOp { @@ -181,7 +181,7 @@ public: WNOp(Obj& _source) : source(_source) {} - int notify(bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl, + int notify(const DoutPrefixProvider *dpp, bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl, optional_yield y); }; ROp rop() { @@ -230,17 +230,17 @@ public: Op(Pool& _source) : source(_source) {} - int init(const std::string& marker, const std::string& prefix); + int init(const DoutPrefixProvider *dpp, const std::string& marker, const std::string& prefix); int get_next(int max, std::vector *oids, bool *is_truncated); int get_marker(string *marker); }; - int list_prefixed_objs(const std::string& prefix, std::function cb); + int list_prefixed_objs(const DoutPrefixProvider *dpp, const std::string& prefix, std::function cb); template - int list_prefixed_objs(const string& prefix, + int list_prefixed_objs(const DoutPrefixProvider *dpp, const string& prefix, Container *result) { - return list_prefixed_objs(prefix, [&](const string& val) { + return list_prefixed_objs(dpp, prefix, [&](const string& val) { result->push_back(val); }); } diff --git a/src/rgw/services/svc_sys_obj_cache.cc b/src/rgw/services/svc_sys_obj_cache.cc index ad67f29e031..68b90888121 100644 --- a/src/rgw/services/svc_sys_obj_cache.cc +++ b/src/rgw/services/svc_sys_obj_cache.cc @@ -18,11 +18,12 @@ class RGWSI_SysObj_Cache_CB : public RGWSI_Notify::CB RGWSI_SysObj_Cache *svc; public: RGWSI_SysObj_Cache_CB(RGWSI_SysObj_Cache *_svc) : svc(_svc) {} - int watch_cb(uint64_t notify_id, + int watch_cb(const DoutPrefixProvider *dpp, + uint64_t notify_id, uint64_t cookie, uint64_t notifier_id, bufferlist& bl) { - return 
svc->watch_cb(notify_id, cookie, notifier_id, bl); + return svc->watch_cb(dpp, notify_id, cookie, notifier_id, bl); } void set_enabled(bool status) { @@ -81,7 +82,8 @@ void RGWSI_SysObj_Cache::normalize_pool_and_obj(const rgw_pool& src_pool, const } -int RGWSI_SysObj_Cache::remove(RGWSysObjectCtxBase& obj_ctx, +int RGWSI_SysObj_Cache::remove(const DoutPrefixProvider *dpp, + RGWSysObjectCtxBase& obj_ctx, RGWObjVersionTracker *objv_tracker, const rgw_raw_obj& obj, optional_yield y) @@ -92,18 +94,19 @@ int RGWSI_SysObj_Cache::remove(RGWSysObjectCtxBase& obj_ctx, normalize_pool_and_obj(obj.pool, obj.oid, pool, oid); string name = normal_name(pool, oid); - cache.remove(name); + cache.remove(dpp, name); ObjectCacheInfo info; - int r = distribute_cache(name, obj, info, REMOVE_OBJ, y); + int r = distribute_cache(dpp, name, obj, info, REMOVE_OBJ, y); if (r < 0) { - ldout(cct, 0) << "ERROR: " << __func__ << "(): failed to distribute cache: r=" << r << dendl; + ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): failed to distribute cache: r=" << r << dendl; } - return RGWSI_SysObj_Core::remove(obj_ctx, objv_tracker, obj, y); + return RGWSI_SysObj_Core::remove(dpp, obj_ctx, objv_tracker, obj, y); } -int RGWSI_SysObj_Cache::read(RGWSysObjectCtxBase& obj_ctx, +int RGWSI_SysObj_Cache::read(const DoutPrefixProvider *dpp, + RGWSysObjectCtxBase& obj_ctx, RGWSI_SysObj_Obj_GetObjState& read_state, RGWObjVersionTracker *objv_tracker, const rgw_raw_obj& obj, @@ -117,7 +120,7 @@ int RGWSI_SysObj_Cache::read(RGWSysObjectCtxBase& obj_ctx, rgw_pool pool; string oid; if (ofs != 0) { - return RGWSI_SysObj_Core::read(obj_ctx, read_state, objv_tracker, + return RGWSI_SysObj_Core::read(dpp, obj_ctx, read_state, objv_tracker, obj, obl, ofs, end, attrs, raw_attrs, cache_info, refresh_version, y); } @@ -133,7 +136,7 @@ int RGWSI_SysObj_Cache::read(RGWSysObjectCtxBase& obj_ctx, if (attrs) flags |= CACHE_FLAG_XATTRS; - int r = cache.get(name, info, flags, cache_info); + int r = cache.get(dpp, name, 
info, flags, cache_info); if (r == 0 && (!refresh_version || !info.version.compare(&(*refresh_version)))) { if (info.status < 0) @@ -161,7 +164,7 @@ int RGWSI_SysObj_Cache::read(RGWSysObjectCtxBase& obj_ctx, return -ENOENT; map unfiltered_attrset; - r = RGWSI_SysObj_Core::read(obj_ctx, read_state, objv_tracker, + r = RGWSI_SysObj_Core::read(dpp, obj_ctx, read_state, objv_tracker, obj, obl, ofs, end, (attrs ? &unfiltered_attrset : nullptr), true, /* cache unfiltered attrs */ @@ -170,7 +173,7 @@ int RGWSI_SysObj_Cache::read(RGWSysObjectCtxBase& obj_ctx, if (r < 0) { if (r == -ENOENT) { // only update ENOENT, we'd rather retry other errors info.status = r; - cache.put(name, info, cache_info); + cache.put(dpp, name, info, cache_info); } return r; } @@ -199,11 +202,12 @@ int RGWSI_SysObj_Cache::read(RGWSysObjectCtxBase& obj_ctx, rgw_filter_attrset(info.xattrs, RGW_ATTR_PREFIX, attrs); } } - cache.put(name, info, cache_info); + cache.put(dpp, name, info, cache_info); return r; } -int RGWSI_SysObj_Cache::get_attr(const rgw_raw_obj& obj, +int RGWSI_SysObj_Cache::get_attr(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, const char *attr_name, bufferlist *dest, optional_yield y) @@ -218,7 +222,7 @@ int RGWSI_SysObj_Cache::get_attr(const rgw_raw_obj& obj, uint32_t flags = CACHE_FLAG_XATTRS; - int r = cache.get(name, info, flags, nullptr); + int r = cache.get(dpp, name, info, flags, nullptr); if (r == 0) { if (info.status < 0) return info.status; @@ -234,10 +238,11 @@ int RGWSI_SysObj_Cache::get_attr(const rgw_raw_obj& obj, return -ENOENT; } /* don't try to cache this one */ - return RGWSI_SysObj_Core::get_attr(obj, attr_name, dest, y); + return RGWSI_SysObj_Core::get_attr(dpp, obj, attr_name, dest, y); } -int RGWSI_SysObj_Cache::set_attrs(const rgw_raw_obj& obj, +int RGWSI_SysObj_Cache::set_attrs(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, map& attrs, map *rmattrs, RGWObjVersionTracker *objv_tracker, @@ -253,25 +258,26 @@ int 
RGWSI_SysObj_Cache::set_attrs(const rgw_raw_obj& obj, } info.status = 0; info.flags = CACHE_FLAG_MODIFY_XATTRS; - int ret = RGWSI_SysObj_Core::set_attrs(obj, attrs, rmattrs, objv_tracker, y); + int ret = RGWSI_SysObj_Core::set_attrs(dpp, obj, attrs, rmattrs, objv_tracker, y); string name = normal_name(pool, oid); if (ret >= 0) { if (objv_tracker && objv_tracker->read_version.ver) { info.version = objv_tracker->read_version; info.flags |= CACHE_FLAG_OBJV; } - cache.put(name, info, NULL); - int r = distribute_cache(name, obj, info, UPDATE_OBJ, y); + cache.put(dpp, name, info, NULL); + int r = distribute_cache(dpp, name, obj, info, UPDATE_OBJ, y); if (r < 0) - ldout(cct, 0) << "ERROR: failed to distribute cache for " << obj << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to distribute cache for " << obj << dendl; } else { - cache.remove(name); + cache.remove(dpp, name); } return ret; } -int RGWSI_SysObj_Cache::write(const rgw_raw_obj& obj, +int RGWSI_SysObj_Cache::write(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, real_time *pmtime, map& attrs, bool exclusive, @@ -289,7 +295,7 @@ int RGWSI_SysObj_Cache::write(const rgw_raw_obj& obj, info.data = data; info.flags = CACHE_FLAG_XATTRS | CACHE_FLAG_DATA | CACHE_FLAG_META; ceph::real_time result_mtime; - int ret = RGWSI_SysObj_Core::write(obj, &result_mtime, attrs, + int ret = RGWSI_SysObj_Core::write(dpp, obj, &result_mtime, attrs, exclusive, data, objv_tracker, set_mtime, y); if (pmtime) { @@ -303,18 +309,19 @@ int RGWSI_SysObj_Cache::write(const rgw_raw_obj& obj, info.meta.size = data.length(); string name = normal_name(pool, oid); if (ret >= 0) { - cache.put(name, info, NULL); - int r = distribute_cache(name, obj, info, UPDATE_OBJ, y); + cache.put(dpp, name, info, NULL); + int r = distribute_cache(dpp, name, obj, info, UPDATE_OBJ, y); if (r < 0) - ldout(cct, 0) << "ERROR: failed to distribute cache for " << obj << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to distribute cache for " << obj << dendl; } else 
{ - cache.remove(name); + cache.remove(dpp, name); } return ret; } -int RGWSI_SysObj_Cache::write_data(const rgw_raw_obj& obj, +int RGWSI_SysObj_Cache::write_data(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, const bufferlist& data, bool exclusive, RGWObjVersionTracker *objv_tracker, @@ -330,25 +337,25 @@ int RGWSI_SysObj_Cache::write_data(const rgw_raw_obj& obj, info.status = 0; info.flags = CACHE_FLAG_DATA; - int ret = RGWSI_SysObj_Core::write_data(obj, data, exclusive, objv_tracker, y); + int ret = RGWSI_SysObj_Core::write_data(dpp, obj, data, exclusive, objv_tracker, y); string name = normal_name(pool, oid); if (ret >= 0) { if (objv_tracker && objv_tracker->read_version.ver) { info.version = objv_tracker->read_version; info.flags |= CACHE_FLAG_OBJV; } - cache.put(name, info, NULL); - int r = distribute_cache(name, obj, info, UPDATE_OBJ, y); + cache.put(dpp, name, info, NULL); + int r = distribute_cache(dpp, name, obj, info, UPDATE_OBJ, y); if (r < 0) - ldout(cct, 0) << "ERROR: failed to distribute cache for " << obj << dendl; + ldpp_dout(dpp, 0) << "ERROR: failed to distribute cache for " << obj << dendl; } else { - cache.remove(name); + cache.remove(dpp, name); } return ret; } -int RGWSI_SysObj_Cache::raw_stat(const rgw_raw_obj& obj, uint64_t *psize, real_time *pmtime, uint64_t *pepoch, +int RGWSI_SysObj_Cache::raw_stat(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, uint64_t *psize, real_time *pmtime, uint64_t *pepoch, map *attrs, bufferlist *first_chunk, RGWObjVersionTracker *objv_tracker, optional_yield y) @@ -367,7 +374,7 @@ int RGWSI_SysObj_Cache::raw_stat(const rgw_raw_obj& obj, uint64_t *psize, real_t uint32_t flags = CACHE_FLAG_META | CACHE_FLAG_XATTRS; if (objv_tracker) flags |= CACHE_FLAG_OBJV; - int r = cache.get(name, info, flags, NULL); + int r = cache.get(dpp, name, info, flags, NULL); if (r == 0) { if (info.status < 0) return info.status; @@ -382,12 +389,12 @@ int RGWSI_SysObj_Cache::raw_stat(const rgw_raw_obj& obj, uint64_t 
*psize, real_t if (r == -ENODATA) { return -ENOENT; } - r = RGWSI_SysObj_Core::raw_stat(obj, &size, &mtime, &epoch, &info.xattrs, + r = RGWSI_SysObj_Core::raw_stat(dpp, obj, &size, &mtime, &epoch, &info.xattrs, first_chunk, objv_tracker, y); if (r < 0) { if (r == -ENOENT) { info.status = r; - cache.put(name, info, NULL); + cache.put(dpp, name, info, NULL); } return r; } @@ -400,7 +407,7 @@ int RGWSI_SysObj_Cache::raw_stat(const rgw_raw_obj& obj, uint64_t *psize, real_t info.flags |= CACHE_FLAG_OBJV; info.version = objv_tracker->read_version; } - cache.put(name, info, NULL); + cache.put(dpp, name, info, NULL); done: if (psize) *psize = size; @@ -413,7 +420,8 @@ done: return 0; } -int RGWSI_SysObj_Cache::distribute_cache(const string& normal_name, +int RGWSI_SysObj_Cache::distribute_cache(const DoutPrefixProvider *dpp, + const string& normal_name, const rgw_raw_obj& obj, ObjectCacheInfo& obj_info, int op, optional_yield y) @@ -424,10 +432,11 @@ int RGWSI_SysObj_Cache::distribute_cache(const string& normal_name, info.obj = obj; bufferlist bl; encode(info, bl); - return notify_svc->distribute(normal_name, bl, y); + return notify_svc->distribute(dpp, normal_name, bl, y); } -int RGWSI_SysObj_Cache::watch_cb(uint64_t notify_id, +int RGWSI_SysObj_Cache::watch_cb(const DoutPrefixProvider *dpp, + uint64_t notify_id, uint64_t cookie, uint64_t notifier_id, bufferlist& bl) @@ -452,10 +461,10 @@ int RGWSI_SysObj_Cache::watch_cb(uint64_t notify_id, switch (info.op) { case UPDATE_OBJ: - cache.put(name, info.obj_info, NULL); + cache.put(dpp, name, info.obj_info, NULL); break; case REMOVE_OBJ: - cache.remove(name); + cache.remove(dpp, name); break; default: ldout(cct, 0) << "WARNING: got unknown notification op: " << info.op << dendl; @@ -470,10 +479,11 @@ void RGWSI_SysObj_Cache::set_enabled(bool status) cache.set_enabled(status); } -bool RGWSI_SysObj_Cache::chain_cache_entry(std::initializer_list cache_info_entries, +bool RGWSI_SysObj_Cache::chain_cache_entry(const 
DoutPrefixProvider *dpp, + std::initializer_list cache_info_entries, RGWChainedCache::Entry *chained_entry) { - return cache.chain_cache_entry(cache_info_entries, chained_entry); + return cache.chain_cache_entry(dpp, cache_info_entries, chained_entry); } void RGWSI_SysObj_Cache::register_chained_cache(RGWChainedCache *cc) @@ -580,7 +590,7 @@ int RGWSI_SysObj_Cache_ASocketHook::call( return -ENOSYS; } -RGWSI_SysObj_Cache::ASocketHandler::ASocketHandler(RGWSI_SysObj_Cache *_svc) : svc(_svc) +RGWSI_SysObj_Cache::ASocketHandler::ASocketHandler(const DoutPrefixProvider *_dpp, RGWSI_SysObj_Cache *_svc) : dpp(_dpp), svc(_svc) { hook.reset(new RGWSI_SysObj_Cache_ASocketHook(_svc)); } @@ -612,7 +622,7 @@ void RGWSI_SysObj_Cache::ASocketHandler::call_list(const std::optionalcache.get(target)) { + if (const auto entry = svc->cache.get(dpp, target)) { f->open_object_section("cache_entry"); f->dump_string("name", target.c_str()); entry->dump(f); @@ -625,7 +635,7 @@ int RGWSI_SysObj_Cache::ASocketHandler::call_inspect(const std::string& target, int RGWSI_SysObj_Cache::ASocketHandler::call_erase(const std::string& target) { - return svc->cache.remove(target); + return svc->cache.remove(dpp, target); } int RGWSI_SysObj_Cache::ASocketHandler::call_zap() diff --git a/src/rgw/services/svc_sys_obj_cache.h b/src/rgw/services/svc_sys_obj_cache.h index c4ca6b21be6..9b99d6dbda3 100644 --- a/src/rgw/services/svc_sys_obj_cache.h +++ b/src/rgw/services/svc_sys_obj_cache.h @@ -37,12 +37,13 @@ protected: int do_start(optional_yield, const DoutPrefixProvider *dpp) override; void shutdown() override; - int raw_stat(const rgw_raw_obj& obj, uint64_t *psize, real_time *pmtime, uint64_t *epoch, + int raw_stat(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, uint64_t *psize, real_time *pmtime, uint64_t *epoch, map *attrs, bufferlist *first_chunk, RGWObjVersionTracker *objv_tracker, optional_yield y) override; - int read(RGWSysObjectCtxBase& obj_ctx, + int read(const DoutPrefixProvider *dpp, + 
RGWSysObjectCtxBase& obj_ctx, RGWSI_SysObj_Obj_GetObjState& read_state, RGWObjVersionTracker *objv_tracker, const rgw_raw_obj& obj, @@ -53,21 +54,24 @@ protected: boost::optional, optional_yield y) override; - int get_attr(const rgw_raw_obj& obj, const char *name, bufferlist *dest, + int get_attr(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, const char *name, bufferlist *dest, optional_yield y) override; - int set_attrs(const rgw_raw_obj& obj, + int set_attrs(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, map& attrs, map *rmattrs, RGWObjVersionTracker *objv_tracker, optional_yield y); - int remove(RGWSysObjectCtxBase& obj_ctx, + int remove(const DoutPrefixProvider *dpp, + RGWSysObjectCtxBase& obj_ctx, RGWObjVersionTracker *objv_tracker, const rgw_raw_obj& obj, optional_yield y) override; - int write(const rgw_raw_obj& obj, + int write(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, real_time *pmtime, map& attrs, bool exclusive, @@ -76,17 +80,19 @@ protected: real_time set_mtime, optional_yield y) override; - int write_data(const rgw_raw_obj& obj, + int write_data(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, const bufferlist& bl, bool exclusive, RGWObjVersionTracker *objv_tracker, optional_yield y); - int distribute_cache(const string& normal_name, const rgw_raw_obj& obj, + int distribute_cache(const DoutPrefixProvider *dpp, const string& normal_name, const rgw_raw_obj& obj, ObjectCacheInfo& obj_info, int op, optional_yield y); - int watch_cb(uint64_t notify_id, + int watch_cb(const DoutPrefixProvider *dpp, + uint64_t notify_id, uint64_t cookie, uint64_t notifier_id, bufferlist& bl); @@ -94,22 +100,24 @@ protected: void set_enabled(bool status); public: - RGWSI_SysObj_Cache(CephContext *cct) : RGWSI_SysObj_Core(cct), asocket(this) { + RGWSI_SysObj_Cache(const DoutPrefixProvider *dpp, CephContext *cct) : RGWSI_SysObj_Core(cct), asocket(dpp, this) { cache.set_ctx(cct); } - bool chain_cache_entry(std::initializer_list 
cache_info_entries, + bool chain_cache_entry(const DoutPrefixProvider *dpp, + std::initializer_list cache_info_entries, RGWChainedCache::Entry *chained_entry); void register_chained_cache(RGWChainedCache *cc); void unregister_chained_cache(RGWChainedCache *cc); class ASocketHandler { + const DoutPrefixProvider *dpp; RGWSI_SysObj_Cache *svc; std::unique_ptr hook; public: - ASocketHandler(RGWSI_SysObj_Cache *_svc); + ASocketHandler(const DoutPrefixProvider *dpp, RGWSI_SysObj_Cache *_svc); ~ASocketHandler(); int start(); @@ -181,7 +189,7 @@ public: return iter->second.first; } - bool put(RGWSI_SysObj_Cache *svc, const string& key, T *entry, + bool put(const DoutPrefixProvider *dpp, RGWSI_SysObj_Cache *svc, const string& key, T *entry, std::initializer_list cache_info_entries) { if (!svc) { return false; @@ -190,7 +198,7 @@ public: Entry chain_entry(this, key, entry); /* we need the svc cache to call us under its lock to maintain lock ordering */ - return svc->chain_cache_entry(cache_info_entries, &chain_entry); + return svc->chain_cache_entry(dpp, cache_info_entries, &chain_entry); } void chain_cb(const string& key, void *data) override { diff --git a/src/rgw/services/svc_sys_obj_core.cc b/src/rgw/services/svc_sys_obj_core.cc index dee172b5f42..2e194dfeeb2 100644 --- a/src/rgw/services/svc_sys_obj_core.cc +++ b/src/rgw/services/svc_sys_obj_core.cc @@ -9,19 +9,20 @@ #define dout_subsys ceph_subsys_rgw -int RGWSI_SysObj_Core_GetObjState::get_rados_obj(RGWSI_RADOS *rados_svc, +int RGWSI_SysObj_Core_GetObjState::get_rados_obj(const DoutPrefixProvider *dpp, + RGWSI_RADOS *rados_svc, RGWSI_Zone *zone_svc, const rgw_raw_obj& obj, RGWSI_RADOS::Obj **pobj) { if (!has_rados_obj) { if (obj.oid.empty()) { - ldout(rados_svc->ctx(), 0) << "ERROR: obj.oid is empty" << dendl; + ldpp_dout(dpp, 0) << "ERROR: obj.oid is empty" << dendl; return -EINVAL; } rados_obj = rados_svc->obj(obj); - int r = rados_obj.open(); + int r = rados_obj.open(dpp); if (r < 0) { return r; } @@ -31,17 +32,18 
@@ int RGWSI_SysObj_Core_GetObjState::get_rados_obj(RGWSI_RADOS *rados_svc, return 0; } -int RGWSI_SysObj_Core::get_rados_obj(RGWSI_Zone *zone_svc, +int RGWSI_SysObj_Core::get_rados_obj(const DoutPrefixProvider *dpp, + RGWSI_Zone *zone_svc, const rgw_raw_obj& obj, RGWSI_RADOS::Obj *pobj) { if (obj.oid.empty()) { - ldout(rados_svc->ctx(), 0) << "ERROR: obj.oid is empty" << dendl; + ldpp_dout(dpp, 0) << "ERROR: obj.oid is empty" << dendl; return -EINVAL; } *pobj = rados_svc->obj(obj); - int r = pobj->open(); + int r = pobj->open(dpp); if (r < 0) { return r; } @@ -69,7 +71,7 @@ int RGWSI_SysObj_Core::get_system_obj_state_impl(RGWSysObjectCtxBase *rctx, s->obj = obj; - int r = raw_stat(obj, &s->size, &s->mtime, &s->epoch, &s->attrset, + int r = raw_stat(dpp, obj, &s->size, &s->mtime, &s->epoch, &s->attrset, (s->prefetch_data ? &s->data : nullptr), objv_tracker, y); if (r == -ENOENT) { s->exists = false; @@ -109,13 +111,13 @@ int RGWSI_SysObj_Core::get_system_obj_state(RGWSysObjectCtxBase *rctx, return ret; } -int RGWSI_SysObj_Core::raw_stat(const rgw_raw_obj& obj, uint64_t *psize, real_time *pmtime, uint64_t *epoch, +int RGWSI_SysObj_Core::raw_stat(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, uint64_t *psize, real_time *pmtime, uint64_t *epoch, map *attrs, bufferlist *first_chunk, RGWObjVersionTracker *objv_tracker, optional_yield y) { RGWSI_RADOS::Obj rados_obj; - int r = get_rados_obj(zone_svc, obj, &rados_obj); + int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj); if (r < 0) { return r; } @@ -135,7 +137,7 @@ int RGWSI_SysObj_Core::raw_stat(const rgw_raw_obj& obj, uint64_t *psize, real_ti op.read(0, cct->_conf->rgw_max_chunk_size, first_chunk, nullptr); } bufferlist outbl; - r = rados_obj.operate(&op, &outbl, y); + r = rados_obj.operate(dpp, &op, &outbl, y); if (epoch) { *epoch = rados_obj.get_last_version(); @@ -195,7 +197,8 @@ int RGWSI_SysObj_Core::stat(RGWSysObjectCtxBase& obj_ctx, return 0; } -int RGWSI_SysObj_Core::read(RGWSysObjectCtxBase& 
obj_ctx, +int RGWSI_SysObj_Core::read(const DoutPrefixProvider *dpp, + RGWSysObjectCtxBase& obj_ctx, RGWSI_SysObj_Obj_GetObjState& _read_state, RGWObjVersionTracker *objv_tracker, const rgw_raw_obj& obj, @@ -220,7 +223,7 @@ int RGWSI_SysObj_Core::read(RGWSysObjectCtxBase& obj_ctx, objv_tracker->prepare_op_for_read(&op); } - ldout(cct, 20) << "rados->read ofs=" << ofs << " len=" << len << dendl; + ldpp_dout(dpp, 20) << "rados->read ofs=" << ofs << " len=" << len << dendl; op.read(ofs, len, bl, nullptr); map unfiltered_attrset; @@ -234,23 +237,23 @@ int RGWSI_SysObj_Core::read(RGWSysObjectCtxBase& obj_ctx, } RGWSI_RADOS::Obj rados_obj; - int r = get_rados_obj(zone_svc, obj, &rados_obj); + int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj); if (r < 0) { - ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; + ldpp_dout(dpp, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; return r; } - r = rados_obj.operate(&op, nullptr, y); + r = rados_obj.operate(dpp, &op, nullptr, y); if (r < 0) { - ldout(cct, 20) << "rados_obj.operate() r=" << r << " bl.length=" << bl->length() << dendl; + ldpp_dout(dpp, 20) << "rados_obj.operate() r=" << r << " bl.length=" << bl->length() << dendl; return r; } - ldout(cct, 20) << "rados_obj.operate() r=" << r << " bl.length=" << bl->length() << dendl; + ldpp_dout(dpp, 20) << "rados_obj.operate() r=" << r << " bl.length=" << bl->length() << dendl; uint64_t op_ver = rados_obj.get_last_version(); if (read_state.last_ver > 0 && read_state.last_ver != op_ver) { - ldout(cct, 5) << "raced with an object write, abort" << dendl; + ldpp_dout(dpp, 5) << "raced with an object write, abort" << dendl; return -ECANCELED; } @@ -270,15 +273,16 @@ int RGWSI_SysObj_Core::read(RGWSysObjectCtxBase& obj_ctx, * dest: bufferlist to store the result in * Returns: 0 on success, -ERR# otherwise. 
*/ -int RGWSI_SysObj_Core::get_attr(const rgw_raw_obj& obj, +int RGWSI_SysObj_Core::get_attr(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, const char *name, bufferlist *dest, optional_yield y) { RGWSI_RADOS::Obj rados_obj; - int r = get_rados_obj(zone_svc, obj, &rados_obj); + int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj); if (r < 0) { - ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; + ldpp_dout(dpp, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; return r; } @@ -287,23 +291,24 @@ int RGWSI_SysObj_Core::get_attr(const rgw_raw_obj& obj, int rval; op.getxattr(name, dest, &rval); - r = rados_obj.operate(&op, nullptr, y); + r = rados_obj.operate(dpp, &op, nullptr, y); if (r < 0) return r; return 0; } -int RGWSI_SysObj_Core::set_attrs(const rgw_raw_obj& obj, +int RGWSI_SysObj_Core::set_attrs(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, map& attrs, map *rmattrs, RGWObjVersionTracker *objv_tracker, optional_yield y) { RGWSI_RADOS::Obj rados_obj; - int r = get_rados_obj(zone_svc, obj, &rados_obj); + int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj); if (r < 0) { - ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; + ldpp_dout(dpp, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; return r; } @@ -336,7 +341,7 @@ int RGWSI_SysObj_Core::set_attrs(const rgw_raw_obj& obj, bufferlist bl; - r = rados_obj.operate(&op, y); + r = rados_obj.operate(dpp, &op, y); if (r < 0) return r; @@ -346,7 +351,8 @@ int RGWSI_SysObj_Core::set_attrs(const rgw_raw_obj& obj, return 0; } -int RGWSI_SysObj_Core::omap_get_vals(const rgw_raw_obj& obj, +int RGWSI_SysObj_Core::omap_get_vals(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, const string& marker, uint64_t count, std::map *m, @@ -354,9 +360,9 @@ int RGWSI_SysObj_Core::omap_get_vals(const rgw_raw_obj& obj, optional_yield y) { RGWSI_RADOS::Obj rados_obj; - int r = get_rados_obj(zone_svc, obj, 
&rados_obj); + int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj); if (r < 0) { - ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; + ldpp_dout(dpp, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; return r; } @@ -370,7 +376,7 @@ int RGWSI_SysObj_Core::omap_get_vals(const rgw_raw_obj& obj, int rval; op.omap_get_vals2(start_after, count, &t, &more, &rval); - r = rados_obj.operate(&op, nullptr, y); + r = rados_obj.operate(dpp, &op, nullptr, y); if (r < 0) { return r; } @@ -388,14 +394,15 @@ int RGWSI_SysObj_Core::omap_get_vals(const rgw_raw_obj& obj, return 0; } -int RGWSI_SysObj_Core::omap_get_all(const rgw_raw_obj& obj, +int RGWSI_SysObj_Core::omap_get_all(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, std::map *m, optional_yield y) { RGWSI_RADOS::Obj rados_obj; - int r = get_rados_obj(zone_svc, obj, &rados_obj); + int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj); if (r < 0) { - ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; + ldpp_dout(dpp, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; return r; } @@ -411,7 +418,7 @@ int RGWSI_SysObj_Core::omap_get_all(const rgw_raw_obj& obj, int rval; op.omap_get_vals2(start_after, count, &t, &more, &rval); - r = rados_obj.operate(&op, nullptr, y); + r = rados_obj.operate(dpp, &op, nullptr, y); if (r < 0) { return r; } @@ -424,18 +431,18 @@ int RGWSI_SysObj_Core::omap_get_all(const rgw_raw_obj& obj, return 0; } -int RGWSI_SysObj_Core::omap_set(const rgw_raw_obj& obj, const std::string& key, +int RGWSI_SysObj_Core::omap_set(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, const std::string& key, bufferlist& bl, bool must_exist, optional_yield y) { RGWSI_RADOS::Obj rados_obj; - int r = get_rados_obj(zone_svc, obj, &rados_obj); + int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj); if (r < 0) { - ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; + ldpp_dout(dpp, 
20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; return r; } - ldout(cct, 15) << "omap_set obj=" << obj << " key=" << key << dendl; + ldpp_dout(dpp, 15) << "omap_set obj=" << obj << " key=" << key << dendl; map m; m[key] = bl; @@ -443,18 +450,18 @@ int RGWSI_SysObj_Core::omap_set(const rgw_raw_obj& obj, const std::string& key, if (must_exist) op.assert_exists(); op.omap_set(m); - r = rados_obj.operate(&op, y); + r = rados_obj.operate(dpp, &op, y); return r; } -int RGWSI_SysObj_Core::omap_set(const rgw_raw_obj& obj, +int RGWSI_SysObj_Core::omap_set(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, const std::map& m, bool must_exist, optional_yield y) { RGWSI_RADOS::Obj rados_obj; - int r = get_rados_obj(zone_svc, obj, &rados_obj); + int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj); if (r < 0) { - ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; + ldpp_dout(dpp, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; return r; } @@ -462,17 +469,17 @@ int RGWSI_SysObj_Core::omap_set(const rgw_raw_obj& obj, if (must_exist) op.assert_exists(); op.omap_set(m); - r = rados_obj.operate(&op, y); + r = rados_obj.operate(dpp, &op, y); return r; } -int RGWSI_SysObj_Core::omap_del(const rgw_raw_obj& obj, const std::string& key, +int RGWSI_SysObj_Core::omap_del(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, const std::string& key, optional_yield y) { RGWSI_RADOS::Obj rados_obj; - int r = get_rados_obj(zone_svc, obj, &rados_obj); + int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj); if (r < 0) { - ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; + ldpp_dout(dpp, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; return r; } @@ -483,34 +490,35 @@ int RGWSI_SysObj_Core::omap_del(const rgw_raw_obj& obj, const std::string& key, op.omap_rm_keys(k); - r = rados_obj.operate(&op, y); + r = rados_obj.operate(dpp, &op, y); return r; } -int 
RGWSI_SysObj_Core::notify(const rgw_raw_obj& obj, bufferlist& bl, +int RGWSI_SysObj_Core::notify(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl, optional_yield y) { RGWSI_RADOS::Obj rados_obj; - int r = get_rados_obj(zone_svc, obj, &rados_obj); + int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj); if (r < 0) { - ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; + ldpp_dout(dpp, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; return r; } - r = rados_obj.notify(bl, timeout_ms, pbl, y); + r = rados_obj.notify(dpp, bl, timeout_ms, pbl, y); return r; } -int RGWSI_SysObj_Core::remove(RGWSysObjectCtxBase& obj_ctx, +int RGWSI_SysObj_Core::remove(const DoutPrefixProvider *dpp, + RGWSysObjectCtxBase& obj_ctx, RGWObjVersionTracker *objv_tracker, const rgw_raw_obj& obj, optional_yield y) { RGWSI_RADOS::Obj rados_obj; - int r = get_rados_obj(zone_svc, obj, &rados_obj); + int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj); if (r < 0) { - ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; + ldpp_dout(dpp, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; return r; } @@ -521,14 +529,15 @@ int RGWSI_SysObj_Core::remove(RGWSysObjectCtxBase& obj_ctx, } op.remove(); - r = rados_obj.operate(&op, y); + r = rados_obj.operate(dpp, &op, y); if (r < 0) return r; return 0; } -int RGWSI_SysObj_Core::write(const rgw_raw_obj& obj, +int RGWSI_SysObj_Core::write(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, real_time *pmtime, map& attrs, bool exclusive, @@ -538,9 +547,9 @@ int RGWSI_SysObj_Core::write(const rgw_raw_obj& obj, optional_yield y) { RGWSI_RADOS::Obj rados_obj; - int r = get_rados_obj(zone_svc, obj, &rados_obj); + int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj); if (r < 0) { - ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; + ldpp_dout(dpp, 20) << "get_rados_obj() 
on obj=" << obj << " returned " << r << dendl; return r; } @@ -578,7 +587,7 @@ int RGWSI_SysObj_Core::write(const rgw_raw_obj& obj, op.setxattr(name.c_str(), bl); } - r = rados_obj.operate(&op, y); + r = rados_obj.operate(dpp, &op, y); if (r < 0) { return r; } @@ -595,16 +604,17 @@ int RGWSI_SysObj_Core::write(const rgw_raw_obj& obj, } -int RGWSI_SysObj_Core::write_data(const rgw_raw_obj& obj, +int RGWSI_SysObj_Core::write_data(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, const bufferlist& bl, bool exclusive, RGWObjVersionTracker *objv_tracker, optional_yield y) { RGWSI_RADOS::Obj rados_obj; - int r = get_rados_obj(zone_svc, obj, &rados_obj); + int r = get_rados_obj(dpp, zone_svc, obj, &rados_obj); if (r < 0) { - ldout(cct, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; + ldpp_dout(dpp, 20) << "get_rados_obj() on obj=" << obj << " returned " << r << dendl; return r; } @@ -618,7 +628,7 @@ int RGWSI_SysObj_Core::write_data(const rgw_raw_obj& obj, objv_tracker->prepare_op_for_write(&op); } op.write_full(bl); - r = rados_obj.operate(&op, y); + r = rados_obj.operate(dpp, &op, y); if (r < 0) return r; @@ -628,7 +638,8 @@ int RGWSI_SysObj_Core::write_data(const rgw_raw_obj& obj, return 0; } -int RGWSI_SysObj_Core::pool_list_prefixed_objs(const rgw_pool& pool, const string& prefix, +int RGWSI_SysObj_Core::pool_list_prefixed_objs(const DoutPrefixProvider *dpp, + const rgw_pool& pool, const string& prefix, std::function cb) { bool is_truncated; @@ -639,7 +650,7 @@ int RGWSI_SysObj_Core::pool_list_prefixed_objs(const rgw_pool& pool, const strin RGWAccessListFilterPrefix filter(prefix); - int r = op.init(string(), &filter); + int r = op.init(dpp, string(), &filter); if (r < 0) { return r; } @@ -661,7 +672,8 @@ int RGWSI_SysObj_Core::pool_list_prefixed_objs(const rgw_pool& pool, const strin return 0; } -int RGWSI_SysObj_Core::pool_list_objects_init(const rgw_pool& pool, +int RGWSI_SysObj_Core::pool_list_objects_init(const DoutPrefixProvider 
*dpp, + const rgw_pool& pool, const string& marker, const string& prefix, RGWSI_SysObj::Pool::ListCtx *_ctx) @@ -673,9 +685,9 @@ int RGWSI_SysObj_Core::pool_list_objects_init(const rgw_pool& pool, ctx.pool = rados_svc->pool(pool); ctx.op = ctx.pool.op(); - int r = ctx.op.init(marker, &ctx.filter); + int r = ctx.op.init(dpp, marker, &ctx.filter); if (r < 0) { - ldout(cct, 10) << "failed to list objects pool_iterate_begin() returned r=" << r << dendl; + ldpp_dout(dpp, 10) << "failed to list objects pool_iterate_begin() returned r=" << r << dendl; return r; } return 0; diff --git a/src/rgw/services/svc_sys_obj_core.h b/src/rgw/services/svc_sys_obj_core.h index 39a4f34a551..52c94051c71 100644 --- a/src/rgw/services/svc_sys_obj_core.h +++ b/src/rgw/services/svc_sys_obj_core.h @@ -31,14 +31,16 @@ protected: rados_svc = _rados_svc; zone_svc = _zone_svc; } - int get_rados_obj(RGWSI_Zone *zone_svc, const rgw_raw_obj& obj, RGWSI_RADOS::Obj *pobj); + int get_rados_obj(const DoutPrefixProvider *dpp, RGWSI_Zone *zone_svc, const rgw_raw_obj& obj, RGWSI_RADOS::Obj *pobj); - virtual int raw_stat(const rgw_raw_obj& obj, uint64_t *psize, real_time *pmtime, uint64_t *epoch, + virtual int raw_stat(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, uint64_t *psize, + real_time *pmtime, uint64_t *epoch, map *attrs, bufferlist *first_chunk, RGWObjVersionTracker *objv_tracker, optional_yield y); - virtual int read(RGWSysObjectCtxBase& obj_ctx, + virtual int read(const DoutPrefixProvider *dpp, + RGWSysObjectCtxBase& obj_ctx, RGWSI_SysObj_Obj_GetObjState& read_state, RGWObjVersionTracker *objv_tracker, const rgw_raw_obj& obj, @@ -49,12 +51,14 @@ protected: boost::optional, optional_yield y); - virtual int remove(RGWSysObjectCtxBase& obj_ctx, + virtual int remove(const DoutPrefixProvider *dpp, + RGWSysObjectCtxBase& obj_ctx, RGWObjVersionTracker *objv_tracker, const rgw_raw_obj& obj, optional_yield y); - virtual int write(const rgw_raw_obj& obj, + virtual int write(const 
DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, real_time *pmtime, map& attrs, bool exclusive, @@ -63,47 +67,54 @@ protected: real_time set_mtime, optional_yield y); - virtual int write_data(const rgw_raw_obj& obj, + virtual int write_data(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, const bufferlist& bl, bool exclusive, RGWObjVersionTracker *objv_tracker, optional_yield y); - virtual int get_attr(const rgw_raw_obj& obj, const char *name, bufferlist *dest, + virtual int get_attr(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, + const char *name, bufferlist *dest, optional_yield y); - virtual int set_attrs(const rgw_raw_obj& obj, + virtual int set_attrs(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, map& attrs, map *rmattrs, RGWObjVersionTracker *objv_tracker, optional_yield y); - virtual int omap_get_all(const rgw_raw_obj& obj, std::map *m, + virtual int omap_get_all(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, std::map *m, optional_yield y); - virtual int omap_get_vals(const rgw_raw_obj& obj, + virtual int omap_get_vals(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, const string& marker, uint64_t count, std::map *m, bool *pmore, optional_yield y); - virtual int omap_set(const rgw_raw_obj& obj, const std::string& key, + virtual int omap_set(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, const std::string& key, bufferlist& bl, bool must_exist, optional_yield y); - virtual int omap_set(const rgw_raw_obj& obj, + virtual int omap_set(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, const map& m, bool must_exist, optional_yield y); - virtual int omap_del(const rgw_raw_obj& obj, const std::string& key, + virtual int omap_del(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, const std::string& key, optional_yield y); - virtual int notify(const rgw_raw_obj& obj, bufferlist& bl, + virtual int notify(const DoutPrefixProvider *dpp, + const rgw_raw_obj& obj, bufferlist& bl, uint64_t timeout_ms, bufferlist 
*pbl, optional_yield y); - virtual int pool_list_prefixed_objs(const rgw_pool& pool, + virtual int pool_list_prefixed_objs(const DoutPrefixProvider *dpp, + const rgw_pool& pool, const string& prefix, std::function cb); - virtual int pool_list_objects_init(const rgw_pool& pool, + virtual int pool_list_objects_init(const DoutPrefixProvider *dpp, + const rgw_pool& pool, const std::string& marker, const std::string& prefix, RGWSI_SysObj::Pool::ListCtx *ctx); diff --git a/src/rgw/services/svc_sys_obj_core_types.h b/src/rgw/services/svc_sys_obj_core_types.h index 002f2763a90..f45fe77f5b4 100644 --- a/src/rgw/services/svc_sys_obj_core_types.h +++ b/src/rgw/services/svc_sys_obj_core_types.h @@ -18,7 +18,8 @@ struct RGWSI_SysObj_Core_GetObjState : public RGWSI_SysObj_Obj_GetObjState { RGWSI_SysObj_Core_GetObjState() {} - int get_rados_obj(RGWSI_RADOS *rados_svc, + int get_rados_obj(const DoutPrefixProvider *dpp, + RGWSI_RADOS *rados_svc, RGWSI_Zone *zone_svc, const rgw_raw_obj& obj, RGWSI_RADOS::Obj **pobj); diff --git a/src/rgw/services/svc_user.h b/src/rgw/services/svc_user.h index 902be756d91..37e533d6dfc 100644 --- a/src/rgw/services/svc_user.h +++ b/src/rgw/services/svc_user.h @@ -89,15 +89,18 @@ public: optional_yield y, const DoutPrefixProvider *dpp) = 0; - virtual int add_bucket(RGWSI_MetaBackend::Context *ctx, + virtual int add_bucket(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, const rgw_bucket& bucket, ceph::real_time creation_time, optional_yield y) = 0; - virtual int remove_bucket(RGWSI_MetaBackend::Context *ctx, + virtual int remove_bucket(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, const rgw_bucket& _bucket, optional_yield) = 0; - virtual int list_buckets(RGWSI_MetaBackend::Context *ctx, + virtual int list_buckets(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, const string& marker, const string& end_marker, @@ -106,21 +109,24 @@ 
public: bool *is_truncated, optional_yield y) = 0; - virtual int flush_bucket_stats(RGWSI_MetaBackend::Context *ctx, + virtual int flush_bucket_stats(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, const RGWBucketEnt& ent, optional_yield y) = 0; - virtual int complete_flush_stats(RGWSI_MetaBackend::Context *ctx, + virtual int complete_flush_stats(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *ctx, const rgw_user& user, optional_yield y) = 0; - virtual int reset_bucket_stats(RGWSI_MetaBackend::Context *ctx, + virtual int reset_bucket_stats(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, optional_yield y) = 0; - virtual int read_stats(RGWSI_MetaBackend::Context *ctx, + virtual int read_stats(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, RGWStorageStats *stats, ceph::real_time *last_stats_sync, /* last time a full stats sync completed */ ceph::real_time *last_stats_update, optional_yield y) = 0; /* last time a stats update was done */ - virtual int read_stats_async(RGWSI_MetaBackend::Context *ctx, + virtual int read_stats_async(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *ctx, const rgw_user& user, RGWGetUserStats_CB *cb) = 0; }; diff --git a/src/rgw/services/svc_user_rados.cc b/src/rgw/services/svc_user_rados.cc index f547db32b28..eb30f3e663e 100644 --- a/src/rgw/services/svc_user_rados.cc +++ b/src/rgw/services/svc_user_rados.cc @@ -258,7 +258,7 @@ public: if (!info.user_email.empty()) { if (!old_info || old_info->user_email.compare(info.user_email) != 0) { /* only if new index changed */ - ret = rgw_put_system_obj(obj_ctx, svc.zone->get_zone_params().user_email_pool, info.user_email, + ret = rgw_put_system_obj(dpp, obj_ctx, svc.zone->get_zone_params().user_email_pool, info.user_email, link_bl, exclusive, NULL, real_time(), y); if (ret < 0) return ret; @@ -271,7 +271,7 @@ public: if (old_info && 
old_info->access_keys.count(iter->first) != 0 && !renamed) continue; - ret = rgw_put_system_obj(obj_ctx, svc.zone->get_zone_params().user_keys_pool, k.id, + ret = rgw_put_system_obj(dpp, obj_ctx, svc.zone->get_zone_params().user_keys_pool, k.id, link_bl, exclusive, NULL, real_time(), y); if (ret < 0) return ret; @@ -282,7 +282,7 @@ public: if (old_info && old_info->swift_keys.count(siter->first) != 0 && !renamed) continue; - ret = rgw_put_system_obj(obj_ctx, svc.zone->get_zone_params().user_swift_pool, k.id, + ret = rgw_put_system_obj(dpp, obj_ctx, svc.zone->get_zone_params().user_swift_pool, k.id, link_bl, exclusive, NULL, real_time(), y); if (ret < 0) return ret; @@ -316,7 +316,7 @@ public: if (!old_info.user_email.empty() && old_info.user_email != new_info.user_email) { - ret = svc.user->remove_email_index(ctx, old_info.user_email, y); + ret = svc.user->remove_email_index(dpp, ctx, old_info.user_email, y); if (ret < 0 && ret != -ENOENT) { set_err_msg("ERROR: could not remove index for email " + old_info.user_email); return ret; @@ -325,7 +325,7 @@ public: for ([[maybe_unused]] const auto& [name, access_key] : old_info.access_keys) { if (!new_info.access_keys.count(access_key.id)) { - ret = svc.user->remove_key_index(ctx, access_key, y); + ret = svc.user->remove_key_index(dpp, ctx, access_key, y); if (ret < 0 && ret != -ENOENT) { set_err_msg("ERROR: could not remove index for key " + access_key.id); return ret; @@ -337,7 +337,7 @@ public: const auto& swift_key = old_iter->second; auto new_iter = new_info.swift_keys.find(swift_key.id); if (new_iter == new_info.swift_keys.end()) { - ret = svc.user->remove_swift_name_index(ctx, swift_key.id, y); + ret = svc.user->remove_swift_name_index(dpp, ctx, swift_key.id, y); if (ret < 0 && ret != -ENOENT) { set_err_msg("ERROR: could not remove index for swift_name " + swift_key.id); return ret; @@ -388,17 +388,19 @@ int RGWSI_User_RADOS::store_user_info(RGWSI_MetaBackend::Context *ctx, return 0; } -int 
RGWSI_User_RADOS::remove_key_index(RGWSI_MetaBackend::Context *_ctx, +int RGWSI_User_RADOS::remove_key_index(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *_ctx, const RGWAccessKey& access_key, optional_yield y) { RGWSI_MetaBackend_SObj::Context_SObj *ctx = static_cast(_ctx); rgw_raw_obj obj(svc.zone->get_zone_params().user_keys_pool, access_key.id); auto sysobj = ctx->obj_ctx->get_obj(obj); - return sysobj.wop().remove(y); + return sysobj.wop().remove(dpp, y); } -int RGWSI_User_RADOS::remove_email_index(RGWSI_MetaBackend::Context *_ctx, +int RGWSI_User_RADOS::remove_email_index(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *_ctx, const string& email, optional_yield y) { @@ -408,16 +410,16 @@ int RGWSI_User_RADOS::remove_email_index(RGWSI_MetaBackend::Context *_ctx, RGWSI_MetaBackend_SObj::Context_SObj *ctx = static_cast(_ctx); rgw_raw_obj obj(svc.zone->get_zone_params().user_email_pool, email); auto sysobj = ctx->obj_ctx->get_obj(obj); - return sysobj.wop().remove(y); + return sysobj.wop().remove(dpp, y); } -int RGWSI_User_RADOS::remove_swift_name_index(RGWSI_MetaBackend::Context *_ctx, const string& swift_name, +int RGWSI_User_RADOS::remove_swift_name_index(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *_ctx, const string& swift_name, optional_yield y) { RGWSI_MetaBackend_SObj::Context_SObj *ctx = static_cast(_ctx); rgw_raw_obj obj(svc.zone->get_zone_params().user_swift_pool, swift_name); auto sysobj = ctx->obj_ctx->get_obj(obj); - return sysobj.wop().remove(y); + return sysobj.wop().remove(dpp, y); } /** @@ -434,14 +436,12 @@ int RGWSI_User_RADOS::remove_user_info(RGWSI_MetaBackend::Context *_ctx, { int ret; - auto cct = svc.meta_be->ctx(); - auto kiter = info.access_keys.begin(); for (; kiter != info.access_keys.end(); ++kiter) { ldpp_dout(dpp, 10) << "removing key index: " << kiter->first << dendl; - ret = remove_key_index(_ctx, kiter->second, y); + ret = remove_key_index(dpp, _ctx, kiter->second, y); if (ret < 0 && ret 
!= -ENOENT) { - ldout(cct, 0) << "ERROR: could not remove " << kiter->first << " (access key object), should be fixed (err=" << ret << ")" << dendl; + ldpp_dout(dpp, 0) << "ERROR: could not remove " << kiter->first << " (access key object), should be fixed (err=" << ret << ")" << dendl; return ret; } } @@ -451,17 +451,17 @@ int RGWSI_User_RADOS::remove_user_info(RGWSI_MetaBackend::Context *_ctx, auto& k = siter->second; ldpp_dout(dpp, 10) << "removing swift subuser index: " << k.id << dendl; /* check if swift mapping exists */ - ret = remove_swift_name_index(_ctx, k.id, y); + ret = remove_swift_name_index(dpp, _ctx, k.id, y); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << "ERROR: could not remove " << k.id << " (swift name object), should be fixed (err=" << ret << ")" << dendl; + ldpp_dout(dpp, 0) << "ERROR: could not remove " << k.id << " (swift name object), should be fixed (err=" << ret << ")" << dendl; return ret; } } ldpp_dout(dpp, 10) << "removing email index: " << info.user_email << dendl; - ret = remove_email_index(_ctx, info.user_email, y); + ret = remove_email_index(dpp, _ctx, info.user_email, y); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << "ERROR: could not remove email index object for " + ldpp_dout(dpp, 0) << "ERROR: could not remove email index object for " << info.user_email << ", should be fixed (err=" << ret << ")" << dendl; return ret; } @@ -470,9 +470,9 @@ int RGWSI_User_RADOS::remove_user_info(RGWSI_MetaBackend::Context *_ctx, ldpp_dout(dpp, 10) << "removing user buckets index" << dendl; RGWSI_MetaBackend_SObj::Context_SObj *ctx = static_cast(_ctx); auto sysobj = ctx->obj_ctx->get_obj(uid_bucks); - ret = sysobj.wop().remove(y); + ret = sysobj.wop().remove(dpp, y); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << "ERROR: could not remove " << info.user_id << ":" << uid_bucks << ", should be fixed (err=" << ret << ")" << dendl; + ldpp_dout(dpp, 0) << "ERROR: could not remove " << info.user_id << ":" << uid_bucks << ", should be 
fixed (err=" << ret << ")" << dendl; return ret; } @@ -547,7 +547,7 @@ int RGWSI_User_RADOS::get_user_info_from_index(RGWSI_MetaBackend::Context *_ctx, return -EIO; } - uinfo_cache->put(svc.cache, cache_key, &e, { &cache_info }); + uinfo_cache->put(dpp, svc.cache, cache_key, &e, { &cache_info }); *info = e.info; if (objv_tracker) @@ -606,17 +606,17 @@ int RGWSI_User_RADOS::get_user_info_by_access_key(RGWSI_MetaBackend::Context *ct info, objv_tracker, pmtime, y, dpp); } -int RGWSI_User_RADOS::cls_user_update_buckets(rgw_raw_obj& obj, list& entries, bool add, optional_yield y) +int RGWSI_User_RADOS::cls_user_update_buckets(const DoutPrefixProvider *dpp, rgw_raw_obj& obj, list& entries, bool add, optional_yield y) { auto rados_obj = svc.rados->obj(obj); - int r = rados_obj.open(); + int r = rados_obj.open(dpp); if (r < 0) { return r; } librados::ObjectWriteOperation op; cls_user_set_buckets(op, entries, add); - r = rados_obj.operate(&op, y); + r = rados_obj.operate(dpp, &op, y); if (r < 0) { return r; } @@ -624,32 +624,33 @@ int RGWSI_User_RADOS::cls_user_update_buckets(rgw_raw_obj& obj, list l; l.push_back(entry); - return cls_user_update_buckets(obj, l, true, y); + return cls_user_update_buckets(dpp, obj, l, true, y); } -int RGWSI_User_RADOS::cls_user_remove_bucket(rgw_raw_obj& obj, const cls_user_bucket& bucket, optional_yield y) +int RGWSI_User_RADOS::cls_user_remove_bucket(const DoutPrefixProvider *dpp, rgw_raw_obj& obj, const cls_user_bucket& bucket, optional_yield y) { auto rados_obj = svc.rados->obj(obj); - int r = rados_obj.open(); + int r = rados_obj.open(dpp); if (r < 0) { return r; } librados::ObjectWriteOperation op; ::cls_user_remove_bucket(op, bucket); - r = rados_obj.operate(&op, y); + r = rados_obj.operate(dpp, &op, y); if (r < 0) return r; return 0; } -int RGWSI_User_RADOS::add_bucket(RGWSI_MetaBackend::Context *ctx, +int RGWSI_User_RADOS::add_bucket(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, const 
rgw_bucket& bucket, ceph::real_time creation_time, @@ -667,9 +668,9 @@ int RGWSI_User_RADOS::add_bucket(RGWSI_MetaBackend::Context *ctx, new_bucket.creation_time = creation_time; rgw_raw_obj obj = get_buckets_obj(user); - ret = cls_user_add_bucket(obj, new_bucket, y); + ret = cls_user_add_bucket(dpp, obj, new_bucket, y); if (ret < 0) { - ldout(cct, 0) << "ERROR: error adding bucket to user: ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: error adding bucket to user: ret=" << ret << dendl; return ret; } @@ -677,7 +678,8 @@ int RGWSI_User_RADOS::add_bucket(RGWSI_MetaBackend::Context *ctx, } -int RGWSI_User_RADOS::remove_bucket(RGWSI_MetaBackend::Context *ctx, +int RGWSI_User_RADOS::remove_bucket(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, const rgw_bucket& _bucket, optional_yield y) @@ -685,15 +687,16 @@ int RGWSI_User_RADOS::remove_bucket(RGWSI_MetaBackend::Context *ctx, cls_user_bucket bucket; bucket.name = _bucket.name; rgw_raw_obj obj = get_buckets_obj(user); - int ret = cls_user_remove_bucket(obj, bucket, y); + int ret = cls_user_remove_bucket(dpp, obj, bucket, y); if (ret < 0) { - ldout(cct, 0) << "ERROR: error removing bucket from user: ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "ERROR: error removing bucket from user: ret=" << ret << dendl; } return 0; } -int RGWSI_User_RADOS::cls_user_flush_bucket_stats(rgw_raw_obj& user_obj, +int RGWSI_User_RADOS::cls_user_flush_bucket_stats(const DoutPrefixProvider *dpp, + rgw_raw_obj& user_obj, const RGWBucketEnt& ent, optional_yield y) { cls_user_bucket_entry entry; @@ -702,16 +705,17 @@ int RGWSI_User_RADOS::cls_user_flush_bucket_stats(rgw_raw_obj& user_obj, list entries; entries.push_back(entry); - int r = cls_user_update_buckets(user_obj, entries, false, y); + int r = cls_user_update_buckets(dpp, user_obj, entries, false, y); if (r < 0) { - ldout(cct, 20) << "cls_user_update_buckets() returned " << r << dendl; + ldpp_dout(dpp, 20) << "cls_user_update_buckets() 
returned " << r << dendl; return r; } return 0; } -int RGWSI_User_RADOS::cls_user_list_buckets(rgw_raw_obj& obj, +int RGWSI_User_RADOS::cls_user_list_buckets(const DoutPrefixProvider *dpp, + rgw_raw_obj& obj, const string& in_marker, const string& end_marker, const int max_entries, @@ -721,7 +725,7 @@ int RGWSI_User_RADOS::cls_user_list_buckets(rgw_raw_obj& obj, optional_yield y) { auto rados_obj = svc.rados->obj(obj); - int r = rados_obj.open(); + int r = rados_obj.open(dpp); if (r < 0) { return r; } @@ -731,7 +735,7 @@ int RGWSI_User_RADOS::cls_user_list_buckets(rgw_raw_obj& obj, cls_user_bucket_list(op, in_marker, end_marker, max_entries, entries, out_marker, truncated, &rc); bufferlist ibl; - r = rados_obj.operate(&op, &ibl, y); + r = rados_obj.operate(dpp, &op, &ibl, y); if (r < 0) return r; if (rc < 0) @@ -740,7 +744,8 @@ int RGWSI_User_RADOS::cls_user_list_buckets(rgw_raw_obj& obj, return 0; } -int RGWSI_User_RADOS::list_buckets(RGWSI_MetaBackend::Context *ctx, +int RGWSI_User_RADOS::list_buckets(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, const string& marker, const string& end_marker, @@ -752,7 +757,7 @@ int RGWSI_User_RADOS::list_buckets(RGWSI_MetaBackend::Context *ctx, buckets->clear(); if (user.id == RGW_USER_ANON_ID) { - ldout(cct, 20) << "RGWSI_User_RADOS::list_buckets(): anonymous user" << dendl; + ldpp_dout(dpp, 20) << "RGWSI_User_RADOS::list_buckets(): anonymous user" << dendl; *is_truncated = false; return 0; } @@ -765,7 +770,7 @@ int RGWSI_User_RADOS::list_buckets(RGWSI_MetaBackend::Context *ctx, do { std::list entries; - ret = cls_user_list_buckets(obj, m, end_marker, max - total, entries, &m, &truncated, y); + ret = cls_user_list_buckets(dpp, obj, m, end_marker, max - total, entries, &m, &truncated, y); if (ret == -ENOENT) { ret = 0; } @@ -788,28 +793,30 @@ int RGWSI_User_RADOS::list_buckets(RGWSI_MetaBackend::Context *ctx, return 0; } -int 
RGWSI_User_RADOS::flush_bucket_stats(RGWSI_MetaBackend::Context *ctx, +int RGWSI_User_RADOS::flush_bucket_stats(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, const RGWBucketEnt& ent, optional_yield y) { rgw_raw_obj obj = get_buckets_obj(user); - return cls_user_flush_bucket_stats(obj, ent, y); + return cls_user_flush_bucket_stats(dpp, obj, ent, y); } -int RGWSI_User_RADOS::reset_bucket_stats(RGWSI_MetaBackend::Context *ctx, +int RGWSI_User_RADOS::reset_bucket_stats(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, optional_yield y) { - return cls_user_reset_stats(user, y); + return cls_user_reset_stats(dpp, user, y); } -int RGWSI_User_RADOS::cls_user_reset_stats(const rgw_user& user, optional_yield y) +int RGWSI_User_RADOS::cls_user_reset_stats(const DoutPrefixProvider *dpp, const rgw_user& user, optional_yield y) { rgw_raw_obj obj = get_buckets_obj(user); auto rados_obj = svc.rados->obj(obj); - int rval, r = rados_obj.open(); + int rval, r = rados_obj.open(dpp); if (r < 0) { return r; } @@ -826,7 +833,7 @@ int RGWSI_User_RADOS::cls_user_reset_stats(const rgw_user& user, optional_yield encode(call, in); op.exec("user", "reset_user_stats2", in, &out, &rval); - r = rados_obj.operate(&op, y, librados::OPERATION_RETURNVEC); + r = rados_obj.operate(dpp, &op, y, librados::OPERATION_RETURNVEC); if (r < 0) { return r; } @@ -841,26 +848,28 @@ int RGWSI_User_RADOS::cls_user_reset_stats(const rgw_user& user, optional_yield return rval; } -int RGWSI_User_RADOS::complete_flush_stats(RGWSI_MetaBackend::Context *ctx, +int RGWSI_User_RADOS::complete_flush_stats(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, optional_yield y) { rgw_raw_obj obj = get_buckets_obj(user); auto rados_obj = svc.rados->obj(obj); - int r = rados_obj.open(); + int r = rados_obj.open(dpp); if (r < 0) { return r; } librados::ObjectWriteOperation op; ::cls_user_complete_stats_sync(op); - 
return rados_obj.operate(&op, y); + return rados_obj.operate(dpp, &op, y); } -int RGWSI_User_RADOS::cls_user_get_header(const rgw_user& user, cls_user_header *header, +int RGWSI_User_RADOS::cls_user_get_header(const DoutPrefixProvider *dpp, + const rgw_user& user, cls_user_header *header, optional_yield y) { rgw_raw_obj obj = get_buckets_obj(user); auto rados_obj = svc.rados->obj(obj); - int r = rados_obj.open(); + int r = rados_obj.open(dpp); if (r < 0) { return r; } @@ -868,14 +877,14 @@ int RGWSI_User_RADOS::cls_user_get_header(const rgw_user& user, cls_user_header bufferlist ibl; librados::ObjectReadOperation op; ::cls_user_get_header(op, header, &rc); - return rados_obj.operate(&op, &ibl, y); + return rados_obj.operate(dpp, &op, &ibl, y); } -int RGWSI_User_RADOS::cls_user_get_header_async(const string& user_str, RGWGetUserHeader_CB *cb) +int RGWSI_User_RADOS::cls_user_get_header_async(const DoutPrefixProvider *dpp, const string& user_str, RGWGetUserHeader_CB *cb) { rgw_raw_obj obj = get_buckets_obj(rgw_user(user_str)); auto rados_obj = svc.rados->obj(obj); - int r = rados_obj.open(); + int r = rados_obj.open(dpp); if (r < 0) { return r; } @@ -890,7 +899,8 @@ int RGWSI_User_RADOS::cls_user_get_header_async(const string& user_str, RGWGetUs return 0; } -int RGWSI_User_RADOS::read_stats(RGWSI_MetaBackend::Context *ctx, +int RGWSI_User_RADOS::read_stats(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, RGWStorageStats *stats, ceph::real_time *last_stats_sync, ceph::real_time *last_stats_update, @@ -899,7 +909,7 @@ int RGWSI_User_RADOS::read_stats(RGWSI_MetaBackend::Context *ctx, string user_str = user.to_str(); cls_user_header header; - int r = cls_user_get_header(rgw_user(user_str), &header, y); + int r = cls_user_get_header(dpp, rgw_user(user_str), &header, y); if (r < 0) return r; @@ -945,13 +955,13 @@ public: } }; -int RGWSI_User_RADOS::read_stats_async(RGWSI_MetaBackend::Context *ctx, +int 
RGWSI_User_RADOS::read_stats_async(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *ctx, const rgw_user& user, RGWGetUserStats_CB *_cb) { string user_str = user.to_str(); RGWGetUserStatsContext *cb = new RGWGetUserStatsContext(_cb); - int r = cls_user_get_header_async(user_str, cb); + int r = cls_user_get_header_async(dpp, user_str, cb); if (r < 0) { _cb->put(); delete cb; diff --git a/src/rgw/services/svc_user_rados.h b/src/rgw/services/svc_user_rados.h index 05244be7bee..f0b025e9db0 100644 --- a/src/rgw/services/svc_user_rados.h +++ b/src/rgw/services/svc_user_rados.h @@ -69,19 +69,20 @@ class RGWSI_User_RADOS : public RGWSI_User int remove_uid_index(RGWSI_MetaBackend::Context *ctx, const RGWUserInfo& user_info, RGWObjVersionTracker *objv_tracker, optional_yield y, const DoutPrefixProvider *dpp); - int remove_key_index(RGWSI_MetaBackend::Context *ctx, const RGWAccessKey& access_key, optional_yield y); - int remove_email_index(RGWSI_MetaBackend::Context *ctx, const string& email, optional_yield y); - int remove_swift_name_index(RGWSI_MetaBackend::Context *ctx, const string& swift_name, optional_yield y); + int remove_key_index(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *ctx, const RGWAccessKey& access_key, optional_yield y); + int remove_email_index(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *ctx, const string& email, optional_yield y); + int remove_swift_name_index(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *ctx, const string& swift_name, optional_yield y); /* admin management */ - int cls_user_update_buckets(rgw_raw_obj& obj, list& entries, bool add, optional_yield y); - int cls_user_add_bucket(rgw_raw_obj& obj, const cls_user_bucket_entry& entry, optional_yield y); - int cls_user_remove_bucket(rgw_raw_obj& obj, const cls_user_bucket& bucket, optional_yield y); + int cls_user_update_buckets(const DoutPrefixProvider *dpp, rgw_raw_obj& obj, list& entries, bool add, optional_yield y); + int 
cls_user_add_bucket(const DoutPrefixProvider *dpp, rgw_raw_obj& obj, const cls_user_bucket_entry& entry, optional_yield y); + int cls_user_remove_bucket(const DoutPrefixProvider *dpp, rgw_raw_obj& obj, const cls_user_bucket& bucket, optional_yield y); /* quota stats */ - int cls_user_flush_bucket_stats(rgw_raw_obj& user_obj, + int cls_user_flush_bucket_stats(const DoutPrefixProvider *dpp, rgw_raw_obj& user_obj, const RGWBucketEnt& ent, optional_yield y); - int cls_user_list_buckets(rgw_raw_obj& obj, + int cls_user_list_buckets(const DoutPrefixProvider *dpp, + rgw_raw_obj& obj, const string& in_marker, const string& end_marker, const int max_entries, @@ -90,9 +91,9 @@ class RGWSI_User_RADOS : public RGWSI_User bool * const truncated, optional_yield y); - int cls_user_reset_stats(const rgw_user& user, optional_yield y); - int cls_user_get_header(const rgw_user& user, cls_user_header *header, optional_yield y); - int cls_user_get_header_async(const string& user, RGWGetUserHeader_CB *cb); + int cls_user_reset_stats(const DoutPrefixProvider *dpp, const rgw_user& user, optional_yield y); + int cls_user_get_header(const DoutPrefixProvider *dpp, const rgw_user& user, cls_user_header *header, optional_yield y); + int cls_user_get_header_async(const DoutPrefixProvider *dpp, const string& user, RGWGetUserHeader_CB *cb); int do_start(optional_yield, const DoutPrefixProvider *dpp) override; public: @@ -169,16 +170,19 @@ public: /* user buckets directory */ - int add_bucket(RGWSI_MetaBackend::Context *ctx, + int add_bucket(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, const rgw_bucket& bucket, ceph::real_time creation_time, optional_yield y) override; - int remove_bucket(RGWSI_MetaBackend::Context *ctx, + int remove_bucket(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, const rgw_bucket& _bucket, optional_yield y) override; - int list_buckets(RGWSI_MetaBackend::Context *ctx, + int 
list_buckets(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, const string& marker, const string& end_marker, @@ -188,23 +192,27 @@ public: optional_yield y) override; /* quota related */ - int flush_bucket_stats(RGWSI_MetaBackend::Context *ctx, + int flush_bucket_stats(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, const RGWBucketEnt& ent, optional_yield y) override; - int complete_flush_stats(RGWSI_MetaBackend::Context *ctx, + int complete_flush_stats(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, optional_yield y) override; - int reset_bucket_stats(RGWSI_MetaBackend::Context *ctx, + int reset_bucket_stats(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, optional_yield y) override; - int read_stats(RGWSI_MetaBackend::Context *ctx, + int read_stats(const DoutPrefixProvider *dpp, + RGWSI_MetaBackend::Context *ctx, const rgw_user& user, RGWStorageStats *stats, ceph::real_time *last_stats_sync, /* last time a full stats sync completed */ ceph::real_time *last_stats_update, optional_yield y) override; /* last time a stats update was done */ - int read_stats_async(RGWSI_MetaBackend::Context *ctx, + int read_stats_async(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *ctx, const rgw_user& user, RGWGetUserStats_CB *cb) override; }; diff --git a/src/rgw/services/svc_zone.cc b/src/rgw/services/svc_zone.cc index f6f3022631f..a9448b90cf2 100644 --- a/src/rgw/services/svc_zone.cc +++ b/src/rgw/services/svc_zone.cc @@ -78,13 +78,13 @@ int RGWSI_Zone::do_start(optional_yield y, const DoutPrefixProvider *dpp) return ret; } - ret = realm->init(cct, sysobj_svc, y); + ret = realm->init(dpp, cct, sysobj_svc, y); if (ret < 0 && ret != -ENOENT) { ldpp_dout(dpp, 0) << "failed reading realm info: ret "<< ret << " " << cpp_strerror(-ret) << dendl; return ret; } else if (ret != -ENOENT) { ldpp_dout(dpp, 20) << "realm " << 
realm->get_name() << " " << realm->get_id() << dendl; - ret = current_period->init(cct, sysobj_svc, realm->get_id(), y, + ret = current_period->init(dpp, cct, sysobj_svc, realm->get_id(), y, realm->get_name()); if (ret < 0 && ret != -ENOENT) { ldpp_dout(dpp, 0) << "failed reading current period info: " << " " << cpp_strerror(-ret) << dendl; @@ -108,7 +108,7 @@ int RGWSI_Zone::do_start(optional_yield y, const DoutPrefixProvider *dpp) bool zg_initialized = false; if (!current_period->get_id().empty()) { - ret = init_zg_from_period(&zg_initialized, y); + ret = init_zg_from_period(dpp, &zg_initialized, y); if (ret < 0) { return ret; } @@ -117,30 +117,30 @@ int RGWSI_Zone::do_start(optional_yield y, const DoutPrefixProvider *dpp) bool creating_defaults = false; bool using_local = (!zg_initialized); if (using_local) { - ldout(cct, 10) << " cannot find current period zonegroup using local zonegroup" << dendl; + ldpp_dout(dpp, 10) << " cannot find current period zonegroup using local zonegroup" << dendl; ret = init_zg_from_local(dpp, &creating_defaults, y); if (ret < 0) { return ret; } // read period_config into current_period auto& period_config = current_period->get_config(); - ret = period_config.read(sysobj_svc, zonegroup->realm_id, y); + ret = period_config.read(dpp, sysobj_svc, zonegroup->realm_id, y); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << "ERROR: failed to read period config: " + ldpp_dout(dpp, 0) << "ERROR: failed to read period config: " << cpp_strerror(ret) << dendl; return ret; } } - ldout(cct, 10) << "Cannot find current period zone using local zone" << dendl; + ldpp_dout(dpp, 10) << "Cannot find current period zone using local zone" << dendl; if (creating_defaults && cct->_conf->rgw_zone.empty()) { - ldout(cct, 10) << " Using default name "<< default_zone_name << dendl; + ldpp_dout(dpp, 10) << " Using default name "<< default_zone_name << dendl; zone_params->set_name(default_zone_name); } - ret = zone_params->init(cct, sysobj_svc, y); + ret = 
zone_params->init(dpp, cct, sysobj_svc, y); if (ret < 0 && ret != -ENOENT) { - lderr(cct) << "failed reading zone info: ret "<< ret << " " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, -1) << "failed reading zone info: ret "<< ret << " " << cpp_strerror(-ret) << dendl; return ret; } @@ -149,10 +149,10 @@ int RGWSI_Zone::do_start(optional_yield y, const DoutPrefixProvider *dpp) auto zone_iter = zonegroup->zones.find(zone_params->get_id()); if (zone_iter == zonegroup->zones.end()) { if (using_local) { - lderr(cct) << "Cannot find zone id=" << zone_params->get_id() << " (name=" << zone_params->get_name() << ")" << dendl; + ldpp_dout(dpp, -1) << "Cannot find zone id=" << zone_params->get_id() << " (name=" << zone_params->get_name() << ")" << dendl; return -EINVAL; } - ldout(cct, 1) << "Cannot find zone id=" << zone_params->get_id() << " (name=" << zone_params->get_name() << "), switching to local zonegroup configuration" << dendl; + ldpp_dout(dpp, 1) << "Cannot find zone id=" << zone_params->get_id() << " (name=" << zone_params->get_name() << "), switching to local zonegroup configuration" << dendl; ret = init_zg_from_local(dpp, &creating_defaults, y); if (ret < 0) { return ret; @@ -161,9 +161,9 @@ int RGWSI_Zone::do_start(optional_yield y, const DoutPrefixProvider *dpp) } if (zone_iter != zonegroup->zones.end()) { *zone_public_config = zone_iter->second; - ldout(cct, 20) << "zone " << zone_params->get_name() << " found" << dendl; + ldpp_dout(dpp, 20) << "zone " << zone_params->get_name() << " found" << dendl; } else { - lderr(cct) << "Cannot find zone id=" << zone_params->get_id() << " (name=" << zone_params->get_name() << ")" << dendl; + ldpp_dout(dpp, -1) << "Cannot find zone id=" << zone_params->get_id() << " (name=" << zone_params->get_name() << ")" << dendl; return -EINVAL; } @@ -171,9 +171,9 @@ int RGWSI_Zone::do_start(optional_yield y, const DoutPrefixProvider *dpp) for (auto ziter : zonegroup->zones) { auto zone_handler = std::make_shared(this, 
sync_modules_svc, bucket_sync_svc, ziter.second.id); - ret = zone_handler->init(y); + ret = zone_handler->init(dpp, y); if (ret < 0) { - lderr(cct) << "ERROR: could not initialize zone policy handler for zone=" << ziter.second.name << dendl; + ldpp_dout(dpp, -1) << "ERROR: could not initialize zone policy handler for zone=" << ziter.second.name << dendl; return ret; } sync_policy_handlers[ziter.second.id] = zone_handler; @@ -198,7 +198,7 @@ int RGWSI_Zone::do_start(optional_yield y, const DoutPrefixProvider *dpp) auto sync_modules = sync_modules_svc->get_manager(); RGWSyncModuleRef sm; if (!sync_modules->get_module(zone_public_config->tier_type, &sm)) { - lderr(cct) << "ERROR: tier type not found: " << zone_public_config->tier_type << dendl; + ldpp_dout(dpp, -1) << "ERROR: tier type not found: " << zone_public_config->tier_type << dendl; return -EINVAL; } @@ -214,7 +214,7 @@ int RGWSI_Zone::do_start(optional_yield y, const DoutPrefixProvider *dpp) } if (zone_by_id.find(zone_id()) == zone_by_id.end()) { - ldout(cct, 0) << "WARNING: could not find zone config in zonegroup for local zone (" << zone_id() << "), will use defaults" << dendl; + ldpp_dout(dpp, 0) << "WARNING: could not find zone config in zonegroup for local zone (" << zone_id() << "), will use defaults" << dendl; } for (const auto& ziter : zonegroup->zones) { @@ -224,10 +224,10 @@ int RGWSI_Zone::do_start(optional_yield y, const DoutPrefixProvider *dpp) continue; } if (z.endpoints.empty()) { - ldout(cct, 0) << "WARNING: can't generate connection for zone " << z.id << " id " << z.name << ": no endpoints defined" << dendl; + ldpp_dout(dpp, 0) << "WARNING: can't generate connection for zone " << z.id << " id " << z.name << ": no endpoints defined" << dendl; continue; } - ldout(cct, 20) << "generating connection object for zone " << z.name << " id " << z.id << dendl; + ldpp_dout(dpp, 20) << "generating connection object for zone " << z.name << " id " << z.id << dendl; RGWRESTConn *conn = new RGWRESTConn(cct, 
this, z.id, z.endpoints, zonegroup->api_name); zone_conn_map[id] = conn; @@ -242,11 +242,11 @@ int RGWSI_Zone::do_start(optional_yield y, const DoutPrefixProvider *dpp) zone_data_notify_to_map[id] = conn; } } else { - ldout(cct, 20) << "NOTICE: not syncing to/from zone " << z.name << " id " << z.id << dendl; + ldpp_dout(dpp, 20) << "NOTICE: not syncing to/from zone " << z.name << " id " << z.id << dendl; } } - ldout(cct, 20) << "started zone id=" << zone_params->get_id() << " (name=" << zone_params->get_name() << + ldpp_dout(dpp, 20) << "started zone id=" << zone_params->get_id() << " (name=" << zone_params->get_name() << ") with tier type = " << zone_public_config->tier_type << dendl; return 0; @@ -267,44 +267,44 @@ void RGWSI_Zone::shutdown() } } -int RGWSI_Zone::list_regions(list& regions) +int RGWSI_Zone::list_regions(const DoutPrefixProvider *dpp, list& regions) { RGWZoneGroup zonegroup; RGWSI_SysObj::Pool syspool = sysobj_svc->get_pool(zonegroup.get_pool(cct)); - return syspool.list_prefixed_objs(region_info_oid_prefix, ®ions); + return syspool.list_prefixed_objs(dpp, region_info_oid_prefix, ®ions); } -int RGWSI_Zone::list_zonegroups(list& zonegroups) +int RGWSI_Zone::list_zonegroups(const DoutPrefixProvider *dpp, list& zonegroups) { RGWZoneGroup zonegroup; RGWSI_SysObj::Pool syspool = sysobj_svc->get_pool(zonegroup.get_pool(cct)); - return syspool.list_prefixed_objs(zonegroup_names_oid_prefix, &zonegroups); + return syspool.list_prefixed_objs(dpp, zonegroup_names_oid_prefix, &zonegroups); } -int RGWSI_Zone::list_zones(list& zones) +int RGWSI_Zone::list_zones(const DoutPrefixProvider *dpp, list& zones) { RGWZoneParams zoneparams; RGWSI_SysObj::Pool syspool = sysobj_svc->get_pool(zoneparams.get_pool(cct)); - return syspool.list_prefixed_objs(zone_names_oid_prefix, &zones); + return syspool.list_prefixed_objs(dpp, zone_names_oid_prefix, &zones); } -int RGWSI_Zone::list_realms(list& realms) +int RGWSI_Zone::list_realms(const DoutPrefixProvider *dpp, list& 
realms) { RGWRealm realm(cct, sysobj_svc); RGWSI_SysObj::Pool syspool = sysobj_svc->get_pool(realm.get_pool(cct)); - return syspool.list_prefixed_objs(realm_names_oid_prefix, &realms); + return syspool.list_prefixed_objs(dpp, realm_names_oid_prefix, &realms); } -int RGWSI_Zone::list_periods(list& periods) +int RGWSI_Zone::list_periods(const DoutPrefixProvider *dpp, list& periods) { RGWPeriod period; list raw_periods; RGWSI_SysObj::Pool syspool = sysobj_svc->get_pool(period.get_pool(cct)); - int ret = syspool.list_prefixed_objs(period.get_info_oid_prefix(), &raw_periods); + int ret = syspool.list_prefixed_objs(dpp, period.get_info_oid_prefix(), &raw_periods); if (ret < 0) { return ret; } @@ -322,13 +322,13 @@ int RGWSI_Zone::list_periods(list& periods) } -int RGWSI_Zone::list_periods(const string& current_period, list& periods, optional_yield y) +int RGWSI_Zone::list_periods(const DoutPrefixProvider *dpp, const string& current_period, list& periods, optional_yield y) { int ret = 0; string period_id = current_period; while(!period_id.empty()) { RGWPeriod period(period_id); - ret = period.init(cct, sysobj_svc, y); + ret = period.init(dpp, cct, sysobj_svc, y); if (ret < 0) { return ret; } @@ -361,51 +361,51 @@ int RGWSI_Zone::replace_region_with_zonegroup(const DoutPrefixProvider *dpp, opt RGWSysObjectCtx obj_ctx = sysobj_svc->init_obj_ctx(); RGWSysObj sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj(pool, oid)); - int ret = sysobj.rop().read(&bl, y); + int ret = sysobj.rop().read(dpp, &bl, y); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << __func__ << " failed to read converted: ret "<< ret << " " << cpp_strerror(-ret) + ldpp_dout(dpp, 0) << __func__ << " failed to read converted: ret "<< ret << " " << cpp_strerror(-ret) << dendl; return ret; } else if (ret != -ENOENT) { - ldout(cct, 20) << "System already converted " << dendl; + ldpp_dout(dpp, 20) << "System already converted " << dendl; return 0; } string default_region; - ret = default_zonegroup.init(cct, 
sysobj_svc, y, false, true); + ret = default_zonegroup.init(dpp, cct, sysobj_svc, y, false, true); if (ret < 0) { - ldout(cct, 0) << __func__ << " failed init default region: ret "<< ret << " " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " failed init default region: ret "<< ret << " " << cpp_strerror(-ret) << dendl; return ret; } - ret = default_zonegroup.read_default_id(default_region, y, true); + ret = default_zonegroup.read_default_id(dpp, default_region, y, true); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << __func__ << " failed reading old default region: ret "<< ret << " " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " failed reading old default region: ret "<< ret << " " << cpp_strerror(-ret) << dendl; return ret; } /* convert regions to zonegroups */ list regions; - ret = list_regions(regions); + ret = list_regions(dpp, regions); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << __func__ << " failed to list regions: ret "<< ret << " " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " failed to list regions: ret "<< ret << " " << cpp_strerror(-ret) << dendl; return ret; } else if (ret == -ENOENT || regions.empty()) { RGWZoneParams zoneparams(default_zone_name); - int ret = zoneparams.init(cct, sysobj_svc, y); + int ret = zoneparams.init(dpp, cct, sysobj_svc, y); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << __func__ << ": error initializing default zone params: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << ": error initializing default zone params: " << cpp_strerror(-ret) << dendl; return ret; } /* update master zone */ RGWZoneGroup default_zg(default_zonegroup_name); - ret = default_zg.init(cct, sysobj_svc, y); + ret = default_zg.init(dpp, cct, sysobj_svc, y); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << __func__ << ": error in initializing default zonegroup: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << ": error in 
initializing default zonegroup: " << cpp_strerror(-ret) << dendl; return ret; } if (ret != -ENOENT && default_zg.master_zone.empty()) { default_zg.master_zone = zoneparams.get_id(); - return default_zg.update(y); + return default_zg.update(dpp, y); } return 0; } @@ -415,9 +415,9 @@ int RGWSI_Zone::replace_region_with_zonegroup(const DoutPrefixProvider *dpp, opt for (list::iterator iter = regions.begin(); iter != regions.end(); ++iter) { if (*iter != default_zonegroup_name){ RGWZoneGroup region(*iter); - int ret = region.init(cct, sysobj_svc, y, true, true); + int ret = region.init(dpp, cct, sysobj_svc, y, true, true); if (ret < 0) { - ldout(cct, 0) << __func__ << " failed init region "<< *iter << ": " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " failed init region "<< *iter << ": " << cpp_strerror(-ret) << dendl; return ret; } if (region.is_master_zonegroup()) { @@ -440,9 +440,9 @@ int RGWSI_Zone::replace_region_with_zonegroup(const DoutPrefixProvider *dpp, opt buf_to_hex(md5, CEPH_CRYPTO_MD5_DIGESTSIZE, md5_str); string new_realm_id(md5_str); RGWRealm new_realm(new_realm_id,new_realm_name); - ret = new_realm.init(cct, sysobj_svc, y, false); + ret = new_realm.init(dpp, cct, sysobj_svc, y, false); if (ret < 0) { - ldout(cct, 0) << __func__ << " Error initing new realm: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " Error initing new realm: " << cpp_strerror(-ret) << dendl; return ret; } ret = new_realm.create(dpp, y); @@ -450,20 +450,20 @@ int RGWSI_Zone::replace_region_with_zonegroup(const DoutPrefixProvider *dpp, opt ldpp_dout(dpp, 0) << __func__ << " Error creating new realm: " << cpp_strerror(-ret) << dendl; return ret; } - ret = new_realm.set_as_default(y); + ret = new_realm.set_as_default(dpp, y); if (ret < 0) { - ldout(cct, 0) << __func__ << " Error setting realm as default: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " Error setting realm as default: " << cpp_strerror(-ret) << dendl; 
return ret; } - ret = realm->init(cct, sysobj_svc, y); + ret = realm->init(dpp, cct, sysobj_svc, y); if (ret < 0) { - ldout(cct, 0) << __func__ << " Error initing realm: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " Error initing realm: " << cpp_strerror(-ret) << dendl; return ret; } - ret = current_period->init(cct, sysobj_svc, realm->get_id(), y, + ret = current_period->init(dpp, cct, sysobj_svc, realm->get_id(), y, realm->get_name()); if (ret < 0) { - ldout(cct, 0) << __func__ << " Error initing current period: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " Error initing current period: " << cpp_strerror(-ret) << dendl; return ret; } } @@ -472,112 +472,112 @@ int RGWSI_Zone::replace_region_with_zonegroup(const DoutPrefixProvider *dpp, opt /* create zonegroups */ for (iter = regions.begin(); iter != regions.end(); ++iter) { - ldout(cct, 0) << __func__ << " Converting " << *iter << dendl; + ldpp_dout(dpp, 0) << __func__ << " Converting " << *iter << dendl; /* check to see if we don't have already a zonegroup with this name */ RGWZoneGroup new_zonegroup(*iter); - ret = new_zonegroup.init(cct , sysobj_svc, y); + ret = new_zonegroup.init(dpp, cct , sysobj_svc, y); if (ret == 0 && new_zonegroup.get_id() != *iter) { - ldout(cct, 0) << __func__ << " zonegroup "<< *iter << " already exists id " << new_zonegroup.get_id () << + ldpp_dout(dpp, 0) << __func__ << " zonegroup "<< *iter << " already exists id " << new_zonegroup.get_id () << " skipping conversion " << dendl; continue; } RGWZoneGroup zonegroup(*iter); zonegroup.set_id(*iter); - int ret = zonegroup.init(cct, sysobj_svc, y, true, true); + int ret = zonegroup.init(dpp, cct, sysobj_svc, y, true, true); if (ret < 0) { - ldout(cct, 0) << __func__ << " failed init zonegroup: ret "<< ret << " " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " failed init zonegroup: ret "<< ret << " " << cpp_strerror(-ret) << dendl; return ret; } zonegroup.realm_id = 
realm->get_id(); /* fix default region master zone */ if (*iter == default_zonegroup_name && zonegroup.master_zone.empty()) { - ldout(cct, 0) << __func__ << " Setting default zone as master for default region" << dendl; + ldpp_dout(dpp, 0) << __func__ << " Setting default zone as master for default region" << dendl; zonegroup.master_zone = default_zone_name; } - ret = zonegroup.update(y); + ret = zonegroup.update(dpp, y); if (ret < 0 && ret != -EEXIST) { - ldout(cct, 0) << __func__ << " failed to update zonegroup " << *iter << ": ret "<< ret << " " << cpp_strerror(-ret) + ldpp_dout(dpp, 0) << __func__ << " failed to update zonegroup " << *iter << ": ret "<< ret << " " << cpp_strerror(-ret) << dendl; return ret; } - ret = zonegroup.update_name(y); + ret = zonegroup.update_name(dpp, y); if (ret < 0 && ret != -EEXIST) { - ldout(cct, 0) << __func__ << " failed to update_name for zonegroup " << *iter << ": ret "<< ret << " " << cpp_strerror(-ret) + ldpp_dout(dpp, 0) << __func__ << " failed to update_name for zonegroup " << *iter << ": ret "<< ret << " " << cpp_strerror(-ret) << dendl; return ret; } if (zonegroup.get_name() == default_region) { - ret = zonegroup.set_as_default(y); + ret = zonegroup.set_as_default(dpp, y); if (ret < 0) { - ldout(cct, 0) << __func__ << " failed to set_as_default " << *iter << ": ret "<< ret << " " << cpp_strerror(-ret) + ldpp_dout(dpp, 0) << __func__ << " failed to set_as_default " << *iter << ": ret "<< ret << " " << cpp_strerror(-ret) << dendl; return ret; } } for (auto iter = zonegroup.zones.begin(); iter != zonegroup.zones.end(); ++iter) { - ldout(cct, 0) << __func__ << " Converting zone" << iter->first << dendl; + ldpp_dout(dpp, 0) << __func__ << " Converting zone" << iter->first << dendl; RGWZoneParams zoneparams(iter->first, iter->second.name); zoneparams.set_id(iter->first.id); zoneparams.realm_id = realm->get_id(); - ret = zoneparams.init(cct, sysobj_svc, y); + ret = zoneparams.init(dpp, cct, sysobj_svc, y); if (ret < 0 && ret != 
-ENOENT) { - ldout(cct, 0) << __func__ << " failed to init zoneparams " << iter->first << ": " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " failed to init zoneparams " << iter->first << ": " << cpp_strerror(-ret) << dendl; return ret; } else if (ret == -ENOENT) { - ldout(cct, 0) << __func__ << " zone is part of another cluster " << iter->first << " skipping " << dendl; + ldpp_dout(dpp, 0) << __func__ << " zone is part of another cluster " << iter->first << " skipping " << dendl; continue; } zonegroup.realm_id = realm->get_id(); - ret = zoneparams.update(y); + ret = zoneparams.update(dpp, y); if (ret < 0 && ret != -EEXIST) { - ldout(cct, 0) << __func__ << " failed to update zoneparams " << iter->first << ": " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " failed to update zoneparams " << iter->first << ": " << cpp_strerror(-ret) << dendl; return ret; } - ret = zoneparams.update_name(y); + ret = zoneparams.update_name(dpp, y); if (ret < 0 && ret != -EEXIST) { - ldout(cct, 0) << __func__ << " failed to init zoneparams " << iter->first << ": " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " failed to init zoneparams " << iter->first << ": " << cpp_strerror(-ret) << dendl; return ret; } } if (!current_period->get_id().empty()) { - ret = current_period->add_zonegroup(zonegroup, y); + ret = current_period->add_zonegroup(dpp, zonegroup, y); if (ret < 0) { - ldout(cct, 0) << __func__ << " failed to add zonegroup to current_period: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " failed to add zonegroup to current_period: " << cpp_strerror(-ret) << dendl; return ret; } } } if (!current_period->get_id().empty()) { - ret = current_period->update(y); + ret = current_period->update(dpp, y); if (ret < 0) { - ldout(cct, 0) << __func__ << " failed to update new period: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " failed to update new period: " << cpp_strerror(-ret) << 
dendl; return ret; } - ret = current_period->store_info(false, y); + ret = current_period->store_info(dpp, false, y); if (ret < 0) { - ldout(cct, 0) << __func__ << " failed to store new period: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " failed to store new period: " << cpp_strerror(-ret) << dendl; return ret; } - ret = current_period->reflect(y); + ret = current_period->reflect(dpp, y); if (ret < 0) { - ldout(cct, 0) << __func__ << " failed to update local objects: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " failed to update local objects: " << cpp_strerror(-ret) << dendl; return ret; } } for (auto const& iter : regions) { RGWZoneGroup zonegroup(iter); - int ret = zonegroup.init(cct, sysobj_svc, y, true, true); + int ret = zonegroup.init(dpp, cct, sysobj_svc, y, true, true); if (ret < 0) { - ldout(cct, 0) << __func__ << " failed init zonegroup" << iter << ": ret "<< ret << " " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << __func__ << " failed init zonegroup" << iter << ": ret "<< ret << " " << cpp_strerror(-ret) << dendl; return ret; } - ret = zonegroup.delete_obj(y, true); + ret = zonegroup.delete_obj(dpp, y, true); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << __func__ << " failed to delete region " << iter << ": ret "<< ret << " " << cpp_strerror(-ret) + ldpp_dout(dpp, 0) << __func__ << " failed to delete region " << iter << ": ret "<< ret << " " << cpp_strerror(-ret) << dendl; return ret; } @@ -586,9 +586,9 @@ int RGWSI_Zone::replace_region_with_zonegroup(const DoutPrefixProvider *dpp, opt /* mark as converted */ ret = sysobj.wop() .set_exclusive(true) - .write(bl, y); + .write(dpp, bl, y); if (ret < 0 ) { - ldout(cct, 0) << __func__ << " failed to mark cluster as converted: ret "<< ret << " " << cpp_strerror(-ret) + ldpp_dout(dpp, 0) << __func__ << " failed to mark cluster as converted: ret "<< ret << " " << cpp_strerror(-ret) << dendl; return ret; } @@ -615,7 +615,7 @@ static void 
add_new_connection_to_map(map &zonegroup_conn zonegroup_conn_map[zonegroup.get_id()] = new_connection; } -int RGWSI_Zone::init_zg_from_period(bool *initialized, optional_yield y) +int RGWSI_Zone::init_zg_from_period(const DoutPrefixProvider *dpp, bool *initialized, optional_yield y) { *initialized = false; @@ -623,38 +623,38 @@ int RGWSI_Zone::init_zg_from_period(bool *initialized, optional_yield y) return 0; } - int ret = zonegroup->init(cct, sysobj_svc, y); - ldout(cct, 20) << "period zonegroup init ret " << ret << dendl; + int ret = zonegroup->init(dpp, cct, sysobj_svc, y); + ldpp_dout(dpp, 20) << "period zonegroup init ret " << ret << dendl; if (ret == -ENOENT) { return 0; } if (ret < 0) { - ldout(cct, 0) << "failed reading zonegroup info: " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "failed reading zonegroup info: " << cpp_strerror(-ret) << dendl; return ret; } - ldout(cct, 20) << "period zonegroup name " << zonegroup->get_name() << dendl; + ldpp_dout(dpp, 20) << "period zonegroup name " << zonegroup->get_name() << dendl; map::const_iterator iter = current_period->get_map().zonegroups.find(zonegroup->get_id()); if (iter != current_period->get_map().zonegroups.end()) { - ldout(cct, 20) << "using current period zonegroup " << zonegroup->get_name() << dendl; + ldpp_dout(dpp, 20) << "using current period zonegroup " << zonegroup->get_name() << dendl; *zonegroup = iter->second; - ret = zonegroup->init(cct, sysobj_svc, y, false); + ret = zonegroup->init(dpp, cct, sysobj_svc, y, false); if (ret < 0) { - ldout(cct, 0) << "failed init zonegroup: " << " " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "failed init zonegroup: " << " " << cpp_strerror(-ret) << dendl; return ret; } - ret = zone_params->init(cct, sysobj_svc, y); + ret = zone_params->init(dpp, cct, sysobj_svc, y); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << "failed reading zone params info: " << " " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "failed reading zone params 
info: " << " " << cpp_strerror(-ret) << dendl; return ret; } if (ret ==-ENOENT && zonegroup->get_name() == default_zonegroup_name) { - ldout(cct, 10) << " Using default name "<< default_zone_name << dendl; + ldpp_dout(dpp, 10) << " Using default name "<< default_zone_name << dendl; zone_params->set_name(default_zone_name); - ret = zone_params->init(cct, sysobj_svc, y); + ret = zone_params->init(dpp, cct, sysobj_svc, y); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << "failed reading zone params info: " << " " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "failed reading zone params info: " << " " << cpp_strerror(-ret) << dendl; return ret; } } @@ -671,31 +671,31 @@ int RGWSI_Zone::init_zg_from_period(bool *initialized, optional_yield y) // fix missing master zone for a single zone zonegroup if (zg.master_zone.empty() && zg.zones.size() == 1) { master = zg.zones.begin(); - ldout(cct, 0) << "zonegroup " << zg.get_name() << " missing master_zone, setting zone " << + ldpp_dout(dpp, 0) << "zonegroup " << zg.get_name() << " missing master_zone, setting zone " << master->second.name << " id:" << master->second.id << " as master" << dendl; if (zonegroup->get_id() == zg.get_id()) { zonegroup->master_zone = master->second.id; - ret = zonegroup->update(y); + ret = zonegroup->update(dpp, y); if (ret < 0) { - ldout(cct, 0) << "error updating zonegroup : " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "error updating zonegroup : " << cpp_strerror(-ret) << dendl; return ret; } } else { RGWZoneGroup fixed_zg(zg.get_id(),zg.get_name()); - ret = fixed_zg.init(cct, sysobj_svc, y); + ret = fixed_zg.init(dpp, cct, sysobj_svc, y); if (ret < 0) { - ldout(cct, 0) << "error initializing zonegroup : " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "error initializing zonegroup : " << cpp_strerror(-ret) << dendl; return ret; } fixed_zg.master_zone = master->second.id; - ret = fixed_zg.update(y); + ret = fixed_zg.update(dpp, y); if (ret < 0) { - ldout(cct, 0) << 
"error initializing zonegroup : " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "error initializing zonegroup : " << cpp_strerror(-ret) << dendl; return ret; } } } else { - ldout(cct, 0) << "zonegroup " << zg.get_name() << " missing zone for master_zone=" << + ldpp_dout(dpp, 0) << "zonegroup " << zg.get_name() << " missing zone for master_zone=" << zg.master_zone << dendl; return -EINVAL; } @@ -715,22 +715,22 @@ int RGWSI_Zone::init_zg_from_period(bool *initialized, optional_yield y) int RGWSI_Zone::init_zg_from_local(const DoutPrefixProvider *dpp, bool *creating_defaults, optional_yield y) { - int ret = zonegroup->init(cct, sysobj_svc, y); + int ret = zonegroup->init(dpp, cct, sysobj_svc, y); if ( (ret < 0 && ret != -ENOENT) || (ret == -ENOENT && !cct->_conf->rgw_zonegroup.empty())) { - ldout(cct, 0) << "failed reading zonegroup info: ret "<< ret << " " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "failed reading zonegroup info: ret "<< ret << " " << cpp_strerror(-ret) << dendl; return ret; } else if (ret == -ENOENT) { *creating_defaults = true; - ldout(cct, 10) << "Creating default zonegroup " << dendl; + ldpp_dout(dpp, 10) << "Creating default zonegroup " << dendl; ret = zonegroup->create_default(dpp, y); if (ret < 0) { ldpp_dout(dpp, 0) << "failure in zonegroup create_default: ret "<< ret << " " << cpp_strerror(-ret) << dendl; return ret; } - ret = zonegroup->init(cct, sysobj_svc, y); + ret = zonegroup->init(dpp, cct, sysobj_svc, y); if (ret < 0) { - ldout(cct, 0) << "failure in zonegroup create_default: ret "<< ret << " " << cpp_strerror(-ret) + ldpp_dout(dpp, 0) << "failure in zonegroup create_default: ret "<< ret << " " << cpp_strerror(-ret) << dendl; return ret; } @@ -743,16 +743,16 @@ int RGWSI_Zone::init_zg_from_local(const DoutPrefixProvider *dpp, bool *creating // fix missing master zone for a single zone zonegroup if (zonegroup->master_zone.empty() && zonegroup->zones.size() == 1) { master = zonegroup->zones.begin(); - ldout(cct, 0) 
<< "zonegroup " << zonegroup->get_name() << " missing master_zone, setting zone " << + ldpp_dout(dpp, 0) << "zonegroup " << zonegroup->get_name() << " missing master_zone, setting zone " << master->second.name << " id:" << master->second.id << " as master" << dendl; zonegroup->master_zone = master->second.id; - ret = zonegroup->update(y); + ret = zonegroup->update(dpp, y); if (ret < 0) { - ldout(cct, 0) << "error initializing zonegroup : " << cpp_strerror(-ret) << dendl; + ldpp_dout(dpp, 0) << "error initializing zonegroup : " << cpp_strerror(-ret) << dendl; return ret; } } else { - ldout(cct, 0) << "zonegroup " << zonegroup->get_name() << " missing zone for " + ldpp_dout(dpp, 0) << "zonegroup " << zonegroup->get_name() << " missing zone for " "master_zone=" << zonegroup->master_zone << dendl; return -EINVAL; } @@ -780,7 +780,7 @@ int RGWSI_Zone::convert_regionmap(const DoutPrefixProvider *dpp, optional_yield RGWSysObjectCtx obj_ctx = sysobj_svc->init_obj_ctx(); RGWSysObj sysobj = sysobj_svc->get_obj(obj_ctx, rgw_raw_obj(pool, oid)); - int ret = sysobj.rop().read(&bl, y); + int ret = sysobj.rop().read(dpp, &bl, y); if (ret < 0 && ret != -ENOENT) { return ret; } else if (ret == -ENOENT) { @@ -791,17 +791,17 @@ int RGWSI_Zone::convert_regionmap(const DoutPrefixProvider *dpp, optional_yield auto iter = bl.cbegin(); decode(zonegroupmap, iter); } catch (buffer::error& err) { - ldout(cct, 0) << "error decoding regionmap from " << pool << ":" << oid << dendl; + ldpp_dout(dpp, 0) << "error decoding regionmap from " << pool << ":" << oid << dendl; return -EIO; } for (map::iterator iter = zonegroupmap.zonegroups.begin(); iter != zonegroupmap.zonegroups.end(); ++iter) { RGWZoneGroup& zonegroup = iter->second; - ret = zonegroup.init(cct, sysobj_svc, y, false); - ret = zonegroup.update(y); + ret = zonegroup.init(dpp, cct, sysobj_svc, y, false); + ret = zonegroup.update(dpp, y); if (ret < 0 && ret != -ENOENT) { - ldout(cct, 0) << "Error could not update zonegroup " << 
zonegroup.get_name() << ": " << + ldpp_dout(dpp, 0) << "Error could not update zonegroup " << zonegroup.get_name() << ": " << cpp_strerror(-ret) << dendl; return ret; } else if (ret == -ENOENT) { @@ -818,9 +818,9 @@ int RGWSI_Zone::convert_regionmap(const DoutPrefixProvider *dpp, optional_yield current_period->set_bucket_quota(zonegroupmap.bucket_quota); // remove the region_map so we don't try to convert again - ret = sysobj.wop().remove(y); + ret = sysobj.wop().remove(dpp, y); if (ret < 0) { - ldout(cct, 0) << "Error could not remove " << sysobj.get_obj() + ldpp_dout(dpp, 0) << "Error could not remove " << sysobj.get_obj() << " after upgrading to zonegroup map: " << cpp_strerror(ret) << dendl; return ret; } @@ -998,7 +998,7 @@ bool RGWSI_Zone::is_syncing_bucket_meta(const rgw_bucket& bucket) } -int RGWSI_Zone::select_new_bucket_location(const RGWUserInfo& user_info, const string& zonegroup_id, +int RGWSI_Zone::select_new_bucket_location(const DoutPrefixProvider *dpp, const RGWUserInfo& user_info, const string& zonegroup_id, const rgw_placement_rule& request_rule, rgw_placement_rule *pselected_rule_name, RGWZonePlacementInfo *rule_info, optional_yield y) @@ -1007,7 +1007,7 @@ int RGWSI_Zone::select_new_bucket_location(const RGWUserInfo& user_info, const s RGWZoneGroup zonegroup; int ret = get_zonegroup(zonegroup_id, zonegroup); if (ret < 0) { - ldout(cct, 0) << "could not find zonegroup " << zonegroup_id << " in current period" << dendl; + ldpp_dout(dpp, 0) << "could not find zonegroup " << zonegroup_id << " in current period" << dendl; return ret; } @@ -1020,7 +1020,7 @@ int RGWSI_Zone::select_new_bucket_location(const RGWUserInfo& user_info, const s used_rule = &request_rule; titer = zonegroup.placement_targets.find(request_rule.name); if (titer == zonegroup.placement_targets.end()) { - ldout(cct, 0) << "could not find requested placement id " << request_rule + ldpp_dout(dpp, 0) << "could not find requested placement id " << request_rule << " within zonegroup " 
<< dendl; return -ERR_INVALID_LOCATION_CONSTRAINT; } @@ -1028,19 +1028,19 @@ int RGWSI_Zone::select_new_bucket_location(const RGWUserInfo& user_info, const s used_rule = &user_info.default_placement; titer = zonegroup.placement_targets.find(user_info.default_placement.name); if (titer == zonegroup.placement_targets.end()) { - ldout(cct, 0) << "could not find user default placement id " << user_info.default_placement + ldpp_dout(dpp, 0) << "could not find user default placement id " << user_info.default_placement << " within zonegroup " << dendl; return -ERR_INVALID_LOCATION_CONSTRAINT; } } else { if (zonegroup.default_placement.name.empty()) { // zonegroup default rule as fallback, it should not be empty. - ldout(cct, 0) << "misconfiguration, zonegroup default placement id should not be empty." << dendl; + ldpp_dout(dpp, 0) << "misconfiguration, zonegroup default placement id should not be empty." << dendl; return -ERR_ZONEGROUP_DEFAULT_PLACEMENT_MISCONFIGURATION; } else { used_rule = &zonegroup.default_placement; titer = zonegroup.placement_targets.find(zonegroup.default_placement.name); if (titer == zonegroup.placement_targets.end()) { - ldout(cct, 0) << "could not find zonegroup default placement id " << zonegroup.default_placement + ldpp_dout(dpp, 0) << "could not find zonegroup default placement id " << zonegroup.default_placement << " within zonegroup " << dendl; return -ERR_INVALID_LOCATION_CONSTRAINT; } @@ -1050,7 +1050,7 @@ int RGWSI_Zone::select_new_bucket_location(const RGWUserInfo& user_info, const s /* now check tag for the rule, whether user is permitted to use rule */ const auto& target_rule = titer->second; if (!target_rule.user_permitted(user_info.placement_tags)) { - ldout(cct, 0) << "user not permitted to use placement rule " << titer->first << dendl; + ldpp_dout(dpp, 0) << "user not permitted to use placement rule " << titer->first << dendl; return -EPERM; } @@ -1066,17 +1066,17 @@ int RGWSI_Zone::select_new_bucket_location(const RGWUserInfo& 
user_info, const s *pselected_rule_name = rule; } - return select_bucket_location_by_rule(rule, rule_info, y); + return select_bucket_location_by_rule(dpp, rule, rule_info, y); } -int RGWSI_Zone::select_bucket_location_by_rule(const rgw_placement_rule& location_rule, RGWZonePlacementInfo *rule_info, optional_yield y) +int RGWSI_Zone::select_bucket_location_by_rule(const DoutPrefixProvider *dpp, const rgw_placement_rule& location_rule, RGWZonePlacementInfo *rule_info, optional_yield y) { if (location_rule.name.empty()) { /* we can only reach here if we're trying to set a bucket location from a bucket * created on a different zone, using a legacy / default pool configuration */ if (rule_info) { - return select_legacy_bucket_placement(rule_info, y); + return select_legacy_bucket_placement(dpp, rule_info, y); } return 0; @@ -1090,14 +1090,14 @@ int RGWSI_Zone::select_bucket_location_by_rule(const rgw_placement_rule& locatio auto piter = zone_params->placement_pools.find(location_rule.name); if (piter == zone_params->placement_pools.end()) { /* couldn't find, means we cannot really place data for this bucket in this zone */ - ldout(cct, 0) << "ERROR: This zone does not contain placement rule " + ldpp_dout(dpp, 0) << "ERROR: This zone does not contain placement rule " << location_rule << " present in the zonegroup!" 
<< dendl; return -EINVAL; } auto storage_class = location_rule.get_storage_class(); if (!piter->second.storage_class_exists(storage_class)) { - ldout(cct, 5) << "requested storage class does not exist: " << storage_class << dendl; + ldpp_dout(dpp, 5) << "requested storage class does not exist: " << storage_class << dendl; return -EINVAL; } @@ -1111,13 +1111,13 @@ int RGWSI_Zone::select_bucket_location_by_rule(const rgw_placement_rule& locatio return 0; } -int RGWSI_Zone::select_bucket_placement(const RGWUserInfo& user_info, const string& zonegroup_id, +int RGWSI_Zone::select_bucket_placement(const DoutPrefixProvider *dpp, const RGWUserInfo& user_info, const string& zonegroup_id, const rgw_placement_rule& placement_rule, rgw_placement_rule *pselected_rule, RGWZonePlacementInfo *rule_info, optional_yield y) { if (!zone_params->placement_pools.empty()) { - return select_new_bucket_location(user_info, zonegroup_id, placement_rule, + return select_new_bucket_location(dpp, user_info, zonegroup_id, placement_rule, pselected_rule, rule_info, y); } @@ -1126,13 +1126,13 @@ int RGWSI_Zone::select_bucket_placement(const RGWUserInfo& user_info, const stri } if (rule_info) { - return select_legacy_bucket_placement(rule_info, y); + return select_legacy_bucket_placement(dpp, rule_info, y); } return 0; } -int RGWSI_Zone::select_legacy_bucket_placement(RGWZonePlacementInfo *rule_info, +int RGWSI_Zone::select_legacy_bucket_placement(const DoutPrefixProvider *dpp, RGWZonePlacementInfo *rule_info, optional_yield y) { bufferlist map_bl; @@ -1145,7 +1145,7 @@ int RGWSI_Zone::select_legacy_bucket_placement(RGWZonePlacementInfo *rule_info, auto obj_ctx = sysobj_svc->init_obj_ctx(); auto sysobj = obj_ctx.get_obj(obj); - int ret = sysobj.rop().read(&map_bl, y); + int ret = sysobj.rop().read(dpp, &map_bl, y); if (ret < 0) { goto read_omap; } @@ -1154,12 +1154,12 @@ int RGWSI_Zone::select_legacy_bucket_placement(RGWZonePlacementInfo *rule_info, auto iter = map_bl.cbegin(); decode(m, iter); } 
catch (buffer::error& err) { - ldout(cct, 0) << "ERROR: couldn't decode avail_pools" << dendl; + ldpp_dout(dpp, 0) << "ERROR: couldn't decode avail_pools" << dendl; } read_omap: if (m.empty()) { - ret = sysobj.omap().get_all(&m, y); + ret = sysobj.omap().get_all(dpp, &m, y); write_map = true; } @@ -1173,7 +1173,7 @@ read_omap: ret = rados_svc->pool().create(pools, &retcodes); if (ret < 0) return ret; - ret = sysobj.omap().set(s, bl, y); + ret = sysobj.omap().set(dpp, s, bl, y); if (ret < 0) return ret; m[s] = bl; @@ -1182,9 +1182,9 @@ read_omap: if (write_map) { bufferlist new_bl; encode(m, new_bl); - ret = sysobj.wop().write(new_bl, y); + ret = sysobj.wop().write(dpp, new_bl, y); if (ret < 0) { - ldout(cct, 0) << "WARNING: could not save avail pools map info ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "WARNING: could not save avail pools map info ret=" << ret << dendl; } } @@ -1206,7 +1206,7 @@ read_omap: return 0; } -int RGWSI_Zone::update_placement_map(optional_yield y) +int RGWSI_Zone::update_placement_map(const DoutPrefixProvider *dpp, optional_yield y) { bufferlist header; map m; @@ -1215,21 +1215,21 @@ int RGWSI_Zone::update_placement_map(optional_yield y) auto obj_ctx = sysobj_svc->init_obj_ctx(); auto sysobj = obj_ctx.get_obj(obj); - int ret = sysobj.omap().get_all(&m, y); + int ret = sysobj.omap().get_all(dpp, &m, y); if (ret < 0) return ret; bufferlist new_bl; encode(m, new_bl); - ret = sysobj.wop().write(new_bl, y); + ret = sysobj.wop().write(dpp, new_bl, y); if (ret < 0) { - ldout(cct, 0) << "WARNING: could not save avail pools map info ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "WARNING: could not save avail pools map info ret=" << ret << dendl; } return ret; } -int RGWSI_Zone::add_bucket_placement(const rgw_pool& new_pool, optional_yield y) +int RGWSI_Zone::add_bucket_placement(const DoutPrefixProvider *dpp, const rgw_pool& new_pool, optional_yield y) { int ret = rados_svc->pool(new_pool).lookup(); if (ret < 0) { // DNE, or something @@ -1241,29 
+1241,29 @@ int RGWSI_Zone::add_bucket_placement(const rgw_pool& new_pool, optional_yield y) auto sysobj = obj_ctx.get_obj(obj); bufferlist empty_bl; - ret = sysobj.omap().set(new_pool.to_str(), empty_bl, y); + ret = sysobj.omap().set(dpp, new_pool.to_str(), empty_bl, y); // don't care about return value - update_placement_map(y); + update_placement_map(dpp, y); return ret; } -int RGWSI_Zone::remove_bucket_placement(const rgw_pool& old_pool, optional_yield y) +int RGWSI_Zone::remove_bucket_placement(const DoutPrefixProvider *dpp, const rgw_pool& old_pool, optional_yield y) { rgw_raw_obj obj(zone_params->domain_root, avail_pools); auto obj_ctx = sysobj_svc->init_obj_ctx(); auto sysobj = obj_ctx.get_obj(obj); - int ret = sysobj.omap().del(old_pool.to_str(), y); + int ret = sysobj.omap().del(dpp, old_pool.to_str(), y); // don't care about return value - update_placement_map(y); + update_placement_map(dpp, y); return ret; } -int RGWSI_Zone::list_placement_set(set& names, optional_yield y) +int RGWSI_Zone::list_placement_set(const DoutPrefixProvider *dpp, set& names, optional_yield y) { bufferlist header; map m; @@ -1271,7 +1271,7 @@ int RGWSI_Zone::list_placement_set(set& names, optional_yield y) rgw_raw_obj obj(zone_params->domain_root, avail_pools); auto obj_ctx = sysobj_svc->init_obj_ctx(); auto sysobj = obj_ctx.get_obj(obj); - int ret = sysobj.omap().get_all(&m, y); + int ret = sysobj.omap().get_all(dpp, &m, y); if (ret < 0) return ret; diff --git a/src/rgw/services/svc_zone.h b/src/rgw/services/svc_zone.h index d42adcdb04b..9f503df5e41 100644 --- a/src/rgw/services/svc_zone.h +++ b/src/rgw/services/svc_zone.h @@ -65,11 +65,11 @@ class RGWSI_Zone : public RGWServiceInstance void shutdown() override; int replace_region_with_zonegroup(const DoutPrefixProvider *dpp, optional_yield y); - int init_zg_from_period(bool *initialized, optional_yield y); + int init_zg_from_period(const DoutPrefixProvider *dpp, bool *initialized, optional_yield y); int 
init_zg_from_local(const DoutPrefixProvider *dpp, bool *creating_defaults, optional_yield y); int convert_regionmap(const DoutPrefixProvider *dpp, optional_yield y); - int update_placement_map(optional_yield y); + int update_placement_map(const DoutPrefixProvider *dpp, optional_yield y); public: RGWSI_Zone(CephContext *cct); ~RGWSI_Zone(); @@ -124,19 +124,19 @@ public: RGWRESTConn *get_zone_conn_by_name(const string& name); bool find_zone_id_by_name(const string& name, rgw_zone_id *id); - int select_bucket_placement(const RGWUserInfo& user_info, const string& zonegroup_id, + int select_bucket_placement(const DoutPrefixProvider *dpp, const RGWUserInfo& user_info, const string& zonegroup_id, const rgw_placement_rule& rule, rgw_placement_rule *pselected_rule, RGWZonePlacementInfo *rule_info, optional_yield y); - int select_legacy_bucket_placement(RGWZonePlacementInfo *rule_info, optional_yield y); - int select_new_bucket_location(const RGWUserInfo& user_info, const string& zonegroup_id, + int select_legacy_bucket_placement(const DoutPrefixProvider *dpp, RGWZonePlacementInfo *rule_info, optional_yield y); + int select_new_bucket_location(const DoutPrefixProvider *dpp, const RGWUserInfo& user_info, const string& zonegroup_id, const rgw_placement_rule& rule, rgw_placement_rule *pselected_rule_name, RGWZonePlacementInfo *rule_info, optional_yield y); - int select_bucket_location_by_rule(const rgw_placement_rule& location_rule, RGWZonePlacementInfo *rule_info, optional_yield y); + int select_bucket_location_by_rule(const DoutPrefixProvider *dpp, const rgw_placement_rule& location_rule, RGWZonePlacementInfo *rule_info, optional_yield y); - int add_bucket_placement(const rgw_pool& new_pool, optional_yield y); - int remove_bucket_placement(const rgw_pool& old_pool, optional_yield y); - int list_placement_set(set& names, optional_yield y); + int add_bucket_placement(const DoutPrefixProvider *dpp, const rgw_pool& new_pool, optional_yield y); + int remove_bucket_placement(const 
DoutPrefixProvider *dpp, const rgw_pool& old_pool, optional_yield y); + int list_placement_set(const DoutPrefixProvider *dpp, set& names, optional_yield y); bool is_meta_master() const; @@ -146,10 +146,10 @@ public: bool can_reshard() const; bool is_syncing_bucket_meta(const rgw_bucket& bucket); - int list_zonegroups(list& zonegroups); - int list_regions(list& regions); - int list_zones(list& zones); - int list_realms(list& realms); - int list_periods(list& periods); - int list_periods(const string& current_period, list& periods, optional_yield y); + int list_zonegroups(const DoutPrefixProvider *dpp, list& zonegroups); + int list_regions(const DoutPrefixProvider *dpp, list& regions); + int list_zones(const DoutPrefixProvider *dpp, list& zones); + int list_realms(const DoutPrefixProvider *dpp, list& realms); + int list_periods(const DoutPrefixProvider *dpp, list& periods); + int list_periods(const DoutPrefixProvider *dpp, const string& current_period, list& periods, optional_yield y); }; diff --git a/src/test/rgw/test_cls_fifo_legacy.cc b/src/test/rgw/test_cls_fifo_legacy.cc index 26d9e9a9253..02678c52212 100644 --- a/src/test/rgw/test_cls_fifo_legacy.cc +++ b/src/test/rgw/test_cls_fifo_legacy.cc @@ -19,6 +19,7 @@ #include "include/scope_guard.h" #include "include/types.h" #include "include/rados/librados.hpp" +#include "common/ceph_context.h" #include "cls/fifo/cls_fifo_ops.h" #include "test/librados/test_cxx.h" @@ -34,8 +35,11 @@ namespace cb = ceph::buffer; namespace fifo = rados::cls::fifo; namespace RCf = rgw::cls::fifo; +auto cct = new CephContext(CEPH_ENTITY_TYPE_CLIENT); +const DoutPrefix dp(cct, 1, "test legacy cls fifo: "); + namespace { -int fifo_create(R::IoCtx& ioctx, +int fifo_create(const DoutPrefixProvider *dpp, R::IoCtx& ioctx, const std::string& oid, std::string_view id, optional_yield y, @@ -48,7 +52,7 @@ int fifo_create(R::IoCtx& ioctx, R::ObjectWriteOperation op; RCf::create_meta(&op, id, objv, oid_prefix, exclusive, max_part_size, 
max_entry_size); - return rgw_rados_operate(ioctx, oid, &op, y); + return rgw_rados_operate(dpp, ioctx, oid, &op, y); } } @@ -74,54 +78,54 @@ using AioLegacyFIFO = LegacyFIFO; TEST_F(LegacyClsFIFO, TestCreate) { - auto r = fifo_create(ioctx, fifo_id, ""s, null_yield); + auto r = fifo_create(&dp, ioctx, fifo_id, ""s, null_yield); EXPECT_EQ(-EINVAL, r); - r = fifo_create(ioctx, fifo_id, fifo_id, null_yield, std::nullopt, + r = fifo_create(&dp, ioctx, fifo_id, fifo_id, null_yield, std::nullopt, std::nullopt, false, 0); EXPECT_EQ(-EINVAL, r); - r = fifo_create(ioctx, fifo_id, {}, null_yield, + r = fifo_create(&dp, ioctx, fifo_id, {}, null_yield, std::nullopt, std::nullopt, false, RCf::default_max_part_size, 0); EXPECT_EQ(-EINVAL, r); - r = fifo_create(ioctx, fifo_id, fifo_id, null_yield); + r = fifo_create(&dp, ioctx, fifo_id, fifo_id, null_yield); EXPECT_EQ(0, r); std::uint64_t size; ioctx.stat(fifo_id, &size, nullptr); EXPECT_GT(size, 0); /* test idempotency */ - r = fifo_create(ioctx, fifo_id, fifo_id, null_yield); + r = fifo_create(&dp, ioctx, fifo_id, fifo_id, null_yield); EXPECT_EQ(0, r); - r = fifo_create(ioctx, fifo_id, {}, null_yield, std::nullopt, + r = fifo_create(&dp, ioctx, fifo_id, {}, null_yield, std::nullopt, std::nullopt, false); EXPECT_EQ(-EINVAL, r); - r = fifo_create(ioctx, fifo_id, {}, null_yield, std::nullopt, + r = fifo_create(&dp, ioctx, fifo_id, {}, null_yield, std::nullopt, "myprefix"sv, false); EXPECT_EQ(-EINVAL, r); - r = fifo_create(ioctx, fifo_id, "foo"sv, null_yield, + r = fifo_create(&dp, ioctx, fifo_id, "foo"sv, null_yield, std::nullopt, std::nullopt, false); EXPECT_EQ(-EEXIST, r); } TEST_F(LegacyClsFIFO, TestGetInfo) { - auto r = fifo_create(ioctx, fifo_id, fifo_id, null_yield); + auto r = fifo_create(&dp, ioctx, fifo_id, fifo_id, null_yield); fifo::info info; std::uint32_t part_header_size; std::uint32_t part_entry_overhead; - r = RCf::get_meta(ioctx, fifo_id, std::nullopt, &info, &part_header_size, + r = RCf::get_meta(&dp, ioctx, 
fifo_id, std::nullopt, &info, &part_header_size, &part_entry_overhead, 0, null_yield); EXPECT_EQ(0, r); EXPECT_GT(part_header_size, 0); EXPECT_GT(part_entry_overhead, 0); EXPECT_FALSE(info.version.instance.empty()); - r = RCf::get_meta(ioctx, fifo_id, info.version, &info, &part_header_size, + r = RCf::get_meta(&dp, ioctx, fifo_id, info.version, &info, &part_header_size, &part_entry_overhead, 0, null_yield); EXPECT_EQ(0, r); fifo::objv objv; objv.instance = "foo"; objv.ver = 12; - r = RCf::get_meta(ioctx, fifo_id, objv, &info, &part_header_size, + r = RCf::get_meta(&dp, ioctx, fifo_id, objv, &info, &part_header_size, &part_entry_overhead, 0, null_yield); EXPECT_EQ(-ECANCELED, r); } @@ -129,10 +133,10 @@ TEST_F(LegacyClsFIFO, TestGetInfo) TEST_F(LegacyFIFO, TestOpenDefault) { std::unique_ptr fifo; - auto r = RCf::FIFO::create(ioctx, fifo_id, &fifo, null_yield); + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &fifo, null_yield); ASSERT_EQ(0, r); // force reading from backend - r = fifo->read_meta(null_yield); + r = fifo->read_meta(&dp, null_yield); EXPECT_EQ(0, r); auto info = fifo->meta(); EXPECT_EQ(info.id, fifo_id); @@ -149,12 +153,12 @@ TEST_F(LegacyFIFO, TestOpenParams) /* first successful create */ std::unique_ptr f; - auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield, objv, oid_prefix, + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, objv, oid_prefix, false, max_part_size, max_entry_size); ASSERT_EQ(0, r); /* force reading from backend */ - r = f->read_meta(null_yield); + r = f->read_meta(&dp, null_yield); auto info = f->meta(); EXPECT_EQ(info.id, fifo_id); EXPECT_EQ(info.params.max_part_size, max_part_size); @@ -177,13 +181,13 @@ std::pair decode_entry(const RCf::list_entry& entry) TEST_F(LegacyFIFO, TestPushListTrim) { std::unique_ptr f; - auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield); + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield); ASSERT_EQ(0, r); static constexpr auto max_entries = 10u; for 
(uint32_t i = 0; i < max_entries; ++i) { cb::list bl; encode(i, bl); - r = f->push(bl, null_yield); + r = f->push(&dp, bl, null_yield); ASSERT_EQ(0, r); } @@ -193,7 +197,7 @@ TEST_F(LegacyFIFO, TestPushListTrim) bool more = false; for (auto i = 0u; i < max_entries; ++i) { - r = f->list(1, marker, &result, &more, null_yield); + r = f->list(&dp, 1, marker, &result, &more, null_yield); ASSERT_EQ(0, r); bool expected_more = (i != (max_entries - 1)); @@ -210,7 +214,7 @@ TEST_F(LegacyFIFO, TestPushListTrim) /* get all entries at once */ std::string markers[max_entries]; std::uint32_t min_entry = 0; - r = f->list(max_entries * 10, std::nullopt, &result, &more, null_yield); + r = f->list(&dp, max_entries * 10, std::nullopt, &result, &more, null_yield); ASSERT_EQ(0, r); ASSERT_FALSE(more); @@ -222,11 +226,11 @@ TEST_F(LegacyFIFO, TestPushListTrim) } /* trim one entry */ - r = f->trim(markers[min_entry], false, null_yield); + r = f->trim(&dp, markers[min_entry], false, null_yield); ASSERT_EQ(0, r); ++min_entry; - r = f->list(max_entries * 10, std::nullopt, &result, &more, null_yield); + r = f->list(&dp, max_entries * 10, std::nullopt, &result, &more, null_yield); ASSERT_EQ(0, r); ASSERT_FALSE(more); ASSERT_EQ(max_entries - min_entry, result.size()); @@ -246,7 +250,7 @@ TEST_F(LegacyFIFO, TestPushTooBig) static constexpr auto max_entry_size = 128ull; std::unique_ptr f; - auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield, std::nullopt, + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt, std::nullopt, false, max_part_size, max_entry_size); ASSERT_EQ(0, r); @@ -256,7 +260,7 @@ TEST_F(LegacyFIFO, TestPushTooBig) cb::list bl; bl.append(buf, sizeof(buf)); - r = f->push(bl, null_yield); + r = f->push(&dp, bl, null_yield); EXPECT_EQ(-E2BIG, r); } @@ -266,7 +270,7 @@ TEST_F(LegacyFIFO, TestMultipleParts) static constexpr auto max_part_size = 2048ull; static constexpr auto max_entry_size = 128ull; std::unique_ptr f; - auto r = 
RCf::FIFO::create(ioctx, fifo_id, &f, null_yield, std::nullopt, + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt, std::nullopt, false, max_part_size, max_entry_size); ASSERT_EQ(0, r); @@ -283,7 +287,7 @@ TEST_F(LegacyFIFO, TestMultipleParts) cb::list bl; *(int *)buf = i; bl.append(buf, sizeof(buf)); - r = f->push(bl, null_yield); + r = f->push(&dp, bl, null_yield); ASSERT_EQ(0, r); } @@ -295,7 +299,7 @@ TEST_F(LegacyFIFO, TestMultipleParts) /* list all at once */ std::vector result; bool more = false; - r = f->list(max_entries, std::nullopt, &result, &more, null_yield); + r = f->list(&dp, max_entries, std::nullopt, &result, &more, null_yield); ASSERT_EQ(0, r); EXPECT_EQ(false, more); ASSERT_EQ(max_entries, result.size()); @@ -309,7 +313,7 @@ TEST_F(LegacyFIFO, TestMultipleParts) /* get entries one by one */ for (auto i = 0u; i < max_entries; ++i) { - r = f->list(1, marker, &result, &more, null_yield); + r = f->list(&dp, 1, marker, &result, &more, null_yield); ASSERT_EQ(0, r); ASSERT_EQ(result.size(), 1); const bool expected_more = (i != (max_entries - 1)); @@ -328,14 +332,14 @@ TEST_F(LegacyFIFO, TestMultipleParts) marker.reset(); for (auto i = 0u; i < max_entries; ++i) { /* read single entry */ - r = f->list(1, marker, &result, &more, null_yield); + r = f->list(&dp, 1, marker, &result, &more, null_yield); ASSERT_EQ(0, r); ASSERT_EQ(result.size(), 1); const bool expected_more = (i != (max_entries - 1)); ASSERT_EQ(expected_more, more); marker = result.front().marker; - r = f->trim(*marker, false, null_yield); + r = f->trim(&dp, *marker, false, null_yield); ASSERT_EQ(0, r); /* check tail */ @@ -343,7 +347,7 @@ TEST_F(LegacyFIFO, TestMultipleParts) ASSERT_EQ(info.tail_part_num, i / entries_per_part); /* try to read all again, see how many entries left */ - r = f->list(max_entries, marker, &result, &more, null_yield); + r = f->list(&dp, max_entries, marker, &result, &more, null_yield); ASSERT_EQ(max_entries - i - 1, result.size()); 
ASSERT_EQ(false, more); } @@ -355,11 +359,11 @@ TEST_F(LegacyFIFO, TestMultipleParts) RCf::part_info partinfo; /* check old tails are removed */ for (auto i = 0; i < info.tail_part_num; ++i) { - r = f->get_part_info(i, &partinfo, null_yield); + r = f->get_part_info(&dp, i, &partinfo, null_yield); ASSERT_EQ(-ENOENT, r); } /* check current tail exists */ - r = f->get_part_info(info.tail_part_num, &partinfo, null_yield); + r = f->get_part_info(&dp, info.tail_part_num, &partinfo, null_yield); ASSERT_EQ(0, r); } @@ -369,7 +373,7 @@ TEST_F(LegacyFIFO, TestTwoPushers) static constexpr auto max_entry_size = 128ull; std::unique_ptr f; - auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield, std::nullopt, + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt, std::nullopt, false, max_part_size, max_entry_size); ASSERT_EQ(0, r); @@ -381,7 +385,7 @@ TEST_F(LegacyFIFO, TestTwoPushers) (max_entry_size + part_entry_overhead)); const auto max_entries = entries_per_part * 4 + 1; std::unique_ptr f2; - r = RCf::FIFO::open(ioctx, fifo_id, &f2, null_yield); + r = RCf::FIFO::open(&dp, ioctx, fifo_id, &f2, null_yield); std::vector fifos{&f, &f2}; for (auto i = 0u; i < max_entries; ++i) { @@ -389,19 +393,19 @@ TEST_F(LegacyFIFO, TestTwoPushers) *(int *)buf = i; bl.append(buf, sizeof(buf)); auto& f = *fifos[i % fifos.size()]; - r = f->push(bl, null_yield); + r = f->push(&dp, bl, null_yield); ASSERT_EQ(0, r); } /* list all by both */ std::vector result; bool more = false; - r = f2->list(max_entries, std::nullopt, &result, &more, null_yield); + r = f2->list(&dp, max_entries, std::nullopt, &result, &more, null_yield); ASSERT_EQ(0, r); ASSERT_EQ(false, more); ASSERT_EQ(max_entries, result.size()); - r = f2->list(max_entries, std::nullopt, &result, &more, null_yield); + r = f2->list(&dp, max_entries, std::nullopt, &result, &more, null_yield); ASSERT_EQ(0, r); ASSERT_EQ(false, more); ASSERT_EQ(max_entries, result.size()); @@ -417,7 +421,7 @@ TEST_F(LegacyFIFO, 
TestTwoPushersTrim) static constexpr auto max_part_size = 2048ull; static constexpr auto max_entry_size = 128ull; std::unique_ptr f1; - auto r = RCf::FIFO::create(ioctx, fifo_id, &f1, null_yield, std::nullopt, + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f1, null_yield, std::nullopt, std::nullopt, false, max_part_size, max_entry_size); ASSERT_EQ(0, r); @@ -431,7 +435,7 @@ TEST_F(LegacyFIFO, TestTwoPushersTrim) const auto max_entries = entries_per_part * 4 + 1; std::unique_ptr f2; - r = RCf::FIFO::open(ioctx, fifo_id, &f2, null_yield); + r = RCf::FIFO::open(&dp, ioctx, fifo_id, &f2, null_yield); ASSERT_EQ(0, r); /* push one entry to f2 and the rest to f1 */ @@ -440,7 +444,7 @@ TEST_F(LegacyFIFO, TestTwoPushersTrim) *(int *)buf = i; bl.append(buf, sizeof(buf)); auto& f = (i < 1 ? f2 : f1); - r = f->push(bl, null_yield); + r = f->push(&dp, bl, null_yield); ASSERT_EQ(0, r); } @@ -449,7 +453,7 @@ TEST_F(LegacyFIFO, TestTwoPushersTrim) std::string marker; std::vector result; bool more = false; - r = f1->list(num, std::nullopt, &result, &more, null_yield); + r = f1->list(&dp, num, std::nullopt, &result, &more, null_yield); ASSERT_EQ(0, r); ASSERT_EQ(true, more); ASSERT_EQ(num, result.size()); @@ -461,11 +465,11 @@ TEST_F(LegacyFIFO, TestTwoPushersTrim) auto& entry = result[num - 1]; marker = entry.marker; - r = f1->trim(marker, false, null_yield); + r = f1->trim(&dp, marker, false, null_yield); /* list what's left by fifo2 */ const auto left = max_entries - num; - f2->list(left, marker, &result, &more, null_yield); + f2->list(&dp, left, marker, &result, &more, null_yield); ASSERT_EQ(left, result.size()); ASSERT_EQ(false, more); @@ -481,7 +485,7 @@ TEST_F(LegacyFIFO, TestPushBatch) static constexpr auto max_entry_size = 128ull; std::unique_ptr f; - auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield, std::nullopt, + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt, std::nullopt, false, max_part_size, max_entry_size); ASSERT_EQ(0, r); 
@@ -501,14 +505,14 @@ TEST_F(LegacyFIFO, TestPushBatch) } ASSERT_EQ(max_entries, bufs.size()); - r = f->push(bufs, null_yield); + r = f->push(&dp, bufs, null_yield); ASSERT_EQ(0, r); /* list all */ std::vector result; bool more = false; - r = f->list(max_entries, std::nullopt, &result, &more, null_yield); + r = f->list(&dp, max_entries, std::nullopt, &result, &more, null_yield); ASSERT_EQ(0, r); ASSERT_EQ(false, more); ASSERT_EQ(max_entries, result.size()); @@ -525,7 +529,7 @@ TEST_F(LegacyFIFO, TestAioTrim) static constexpr auto max_part_size = 2048ull; static constexpr auto max_entry_size = 128ull; std::unique_ptr f; - auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield, std::nullopt, + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt, std::nullopt, false, max_part_size, max_entry_size); ASSERT_EQ(0, r); @@ -547,7 +551,7 @@ TEST_F(LegacyFIFO, TestAioTrim) } ASSERT_EQ(max_entries, bufs.size()); - r = f->push(bufs, null_yield); + r = f->push(&dp, bufs, null_yield); ASSERT_EQ(0, r); auto info = f->meta(); @@ -558,7 +562,7 @@ TEST_F(LegacyFIFO, TestAioTrim) /* list all at once */ std::vector result; bool more = false; - r = f->list(max_entries, std::nullopt, &result, &more, null_yield); + r = f->list(&dp, max_entries, std::nullopt, &result, &more, null_yield); ASSERT_EQ(0, r); ASSERT_EQ(false, more); ASSERT_EQ(max_entries, result.size()); @@ -570,7 +574,7 @@ TEST_F(LegacyFIFO, TestAioTrim) marker.reset(); for (auto i = 0u; i < max_entries; ++i) { /* read single entry */ - r = f->list(1, marker, &result, &more, null_yield); + r = f->list(&dp, 1, marker, &result, &more, null_yield); ASSERT_EQ(0, r); ASSERT_EQ(result.size(), 1); const bool expected_more = (i != (max_entries - 1)); @@ -579,7 +583,7 @@ TEST_F(LegacyFIFO, TestAioTrim) marker = result.front().marker; std::unique_ptr c(rados.aio_create_completion(nullptr, nullptr)); - f->trim(*marker, false, c.get()); + f->trim(&dp, *marker, false, c.get()); c->wait_for_complete(); r = 
c->get_return_value(); ASSERT_EQ(0, r); @@ -589,7 +593,7 @@ TEST_F(LegacyFIFO, TestAioTrim) ASSERT_EQ(info.tail_part_num, i / entries_per_part); /* try to read all again, see how many entries left */ - r = f->list(max_entries, marker, &result, &more, null_yield); + r = f->list(&dp, max_entries, marker, &result, &more, null_yield); ASSERT_EQ(max_entries - i - 1, result.size()); ASSERT_EQ(false, more); } @@ -601,17 +605,17 @@ TEST_F(LegacyFIFO, TestAioTrim) RCf::part_info partinfo; /* check old tails are removed */ for (auto i = 0; i < info.tail_part_num; ++i) { - r = f->get_part_info(i, &partinfo, null_yield); + r = f->get_part_info(&dp, i, &partinfo, null_yield); ASSERT_EQ(-ENOENT, r); } /* check current tail exists */ - r = f->get_part_info(info.tail_part_num, &partinfo, null_yield); + r = f->get_part_info(&dp, info.tail_part_num, &partinfo, null_yield); ASSERT_EQ(0, r); } TEST_F(LegacyFIFO, TestTrimExclusive) { std::unique_ptr f; - auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield); + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield); ASSERT_EQ(0, r); std::vector result; bool more = false; @@ -620,28 +624,28 @@ TEST_F(LegacyFIFO, TestTrimExclusive) { for (uint32_t i = 0; i < max_entries; ++i) { cb::list bl; encode(i, bl); - f->push(bl, null_yield); + f->push(&dp, bl, null_yield); } - f->list(1, std::nullopt, &result, &more, null_yield); + f->list(&dp, 1, std::nullopt, &result, &more, null_yield); auto [val, marker] = decode_entry(result.front()); ASSERT_EQ(0, val); - f->trim(marker, true, null_yield); + f->trim(&dp, marker, true, null_yield); result.clear(); - f->list(max_entries, std::nullopt, &result, &more, null_yield); + f->list(&dp, max_entries, std::nullopt, &result, &more, null_yield); std::tie(val, marker) = decode_entry(result.front()); ASSERT_EQ(0, val); - f->trim(result[4].marker, true, null_yield); + f->trim(&dp, result[4].marker, true, null_yield); result.clear(); - f->list(max_entries, std::nullopt, &result, &more, null_yield); 
+ f->list(&dp, max_entries, std::nullopt, &result, &more, null_yield); std::tie(val, marker) = decode_entry(result.front()); ASSERT_EQ(4, val); - f->trim(result.back().marker, true, null_yield); + f->trim(&dp, result.back().marker, true, null_yield); result.clear(); - f->list(max_entries, std::nullopt, &result, &more, null_yield); + f->list(&dp, max_entries, std::nullopt, &result, &more, null_yield); std::tie(val, marker) = decode_entry(result.front()); ASSERT_EQ(result.size(), 1); ASSERT_EQ(max_entries - 1, val); @@ -650,14 +654,14 @@ TEST_F(LegacyFIFO, TestTrimExclusive) { TEST_F(AioLegacyFIFO, TestPushListTrim) { std::unique_ptr f; - auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield); + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield); ASSERT_EQ(0, r); static constexpr auto max_entries = 10u; for (uint32_t i = 0; i < max_entries; ++i) { cb::list bl; encode(i, bl); auto c = R::Rados::aio_create_completion(); - f->push(bl, c); + f->push(&dp, bl, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -670,7 +674,7 @@ TEST_F(AioLegacyFIFO, TestPushListTrim) bool more = false; for (auto i = 0u; i < max_entries; ++i) { auto c = R::Rados::aio_create_completion(); - f->list(1, marker, &result, &more, c); + f->list(&dp, 1, marker, &result, &more, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -691,7 +695,7 @@ TEST_F(AioLegacyFIFO, TestPushListTrim) std::string markers[max_entries]; std::uint32_t min_entry = 0; auto c = R::Rados::aio_create_completion(); - f->list(max_entries * 10, std::nullopt, &result, &more, c); + f->list(&dp, max_entries * 10, std::nullopt, &result, &more, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -707,7 +711,7 @@ TEST_F(AioLegacyFIFO, TestPushListTrim) /* trim one entry */ c = R::Rados::aio_create_completion(); - f->trim(markers[min_entry], false, c); + f->trim(&dp, markers[min_entry], false, c); c->wait_for_complete(); r = c->get_return_value(); 
c->release(); @@ -715,7 +719,7 @@ TEST_F(AioLegacyFIFO, TestPushListTrim) ++min_entry; c = R::Rados::aio_create_completion(); - f->list(max_entries * 10, std::nullopt, &result, &more, c); + f->list(&dp, max_entries * 10, std::nullopt, &result, &more, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -738,7 +742,7 @@ TEST_F(AioLegacyFIFO, TestPushTooBig) static constexpr auto max_entry_size = 128ull; std::unique_ptr f; - auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield, std::nullopt, + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt, std::nullopt, false, max_part_size, max_entry_size); ASSERT_EQ(0, r); @@ -749,14 +753,14 @@ TEST_F(AioLegacyFIFO, TestPushTooBig) bl.append(buf, sizeof(buf)); auto c = R::Rados::aio_create_completion(); - f->push(bl, c); + f->push(&dp, bl, c); c->wait_for_complete(); r = c->get_return_value(); ASSERT_EQ(-E2BIG, r); c->release(); c = R::Rados::aio_create_completion(); - f->push(std::vector{}, c); + f->push(&dp, std::vector{}, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -769,14 +773,14 @@ TEST_F(AioLegacyFIFO, TestMultipleParts) static constexpr auto max_part_size = 2048ull; static constexpr auto max_entry_size = 128ull; std::unique_ptr f; - auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield, std::nullopt, + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt, std::nullopt, false, max_part_size, max_entry_size); ASSERT_EQ(0, r); { auto c = R::Rados::aio_create_completion(); - f->get_head_info([&](int r, RCf::part_info&& p) { + f->get_head_info(&dp, [&](int r, RCf::part_info&& p) { ASSERT_TRUE(p.tag.empty()); ASSERT_EQ(0, p.magic); ASSERT_EQ(0, p.min_ofs); @@ -804,7 +808,7 @@ TEST_F(AioLegacyFIFO, TestMultipleParts) *(int *)buf = i; bl.append(buf, sizeof(buf)); auto c = R::Rados::aio_create_completion(); - f->push(bl, c); + f->push(&dp, bl, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -820,7 
+824,7 @@ TEST_F(AioLegacyFIFO, TestMultipleParts) std::vector result; bool more = false; auto c = R::Rados::aio_create_completion(); - f->list(max_entries, std::nullopt, &result, &more, c); + f->list(&dp, max_entries, std::nullopt, &result, &more, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -838,7 +842,7 @@ TEST_F(AioLegacyFIFO, TestMultipleParts) for (auto i = 0u; i < max_entries; ++i) { c = R::Rados::aio_create_completion(); - f->list(1, marker, &result, &more, c); + f->list(&dp, 1, marker, &result, &more, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -861,7 +865,7 @@ TEST_F(AioLegacyFIFO, TestMultipleParts) for (auto i = 0u; i < max_entries; ++i) { /* read single entry */ c = R::Rados::aio_create_completion(); - f->list(1, marker, &result, &more, c); + f->list(&dp, 1, marker, &result, &more, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -872,7 +876,7 @@ TEST_F(AioLegacyFIFO, TestMultipleParts) marker = result.front().marker; c = R::Rados::aio_create_completion(); - f->trim(*marker, false, c); + f->trim(&dp, *marker, false, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -885,7 +889,7 @@ TEST_F(AioLegacyFIFO, TestMultipleParts) /* try to read all again, see how many entries left */ c = R::Rados::aio_create_completion(); - f->list(max_entries, marker, &result, &more, c); + f->list(&dp, max_entries, marker, &result, &more, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -922,7 +926,7 @@ TEST_F(AioLegacyFIFO, TestMultipleParts) ASSERT_EQ(0, r); c = R::Rados::aio_create_completion(); - f->get_head_info([&](int r, RCf::part_info&& p) { + f->get_head_info(&dp, [&](int r, RCf::part_info&& p) { ASSERT_EQ(next_ofs, p.next_ofs); }, c); c->wait_for_complete(); @@ -937,7 +941,7 @@ TEST_F(AioLegacyFIFO, TestTwoPushers) static constexpr auto max_entry_size = 128ull; std::unique_ptr f; - auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield, 
std::nullopt, + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt, std::nullopt, false, max_part_size, max_entry_size); ASSERT_EQ(0, r); @@ -949,7 +953,7 @@ TEST_F(AioLegacyFIFO, TestTwoPushers) (max_entry_size + part_entry_overhead)); const auto max_entries = entries_per_part * 4 + 1; std::unique_ptr f2; - r = RCf::FIFO::open(ioctx, fifo_id, &f2, null_yield); + r = RCf::FIFO::open(&dp, ioctx, fifo_id, &f2, null_yield); std::vector fifos{&f, &f2}; for (auto i = 0u; i < max_entries; ++i) { @@ -958,7 +962,7 @@ TEST_F(AioLegacyFIFO, TestTwoPushers) bl.append(buf, sizeof(buf)); auto& f = *fifos[i % fifos.size()]; auto c = R::Rados::aio_create_completion(); - f->push(bl, c); + f->push(&dp, bl, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -969,7 +973,7 @@ TEST_F(AioLegacyFIFO, TestTwoPushers) std::vector result; bool more = false; auto c = R::Rados::aio_create_completion(); - f2->list(max_entries, std::nullopt, &result, &more, c); + f2->list(&dp, max_entries, std::nullopt, &result, &more, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -978,7 +982,7 @@ TEST_F(AioLegacyFIFO, TestTwoPushers) ASSERT_EQ(max_entries, result.size()); c = R::Rados::aio_create_completion(); - f2->list(max_entries, std::nullopt, &result, &more, c); + f2->list(&dp, max_entries, std::nullopt, &result, &more, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -997,7 +1001,7 @@ TEST_F(AioLegacyFIFO, TestTwoPushersTrim) static constexpr auto max_part_size = 2048ull; static constexpr auto max_entry_size = 128ull; std::unique_ptr f1; - auto r = RCf::FIFO::create(ioctx, fifo_id, &f1, null_yield, std::nullopt, + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f1, null_yield, std::nullopt, std::nullopt, false, max_part_size, max_entry_size); ASSERT_EQ(0, r); @@ -1011,7 +1015,7 @@ TEST_F(AioLegacyFIFO, TestTwoPushersTrim) const auto max_entries = entries_per_part * 4 + 1; std::unique_ptr f2; - r = 
RCf::FIFO::open(ioctx, fifo_id, &f2, null_yield); + r = RCf::FIFO::open(&dp, ioctx, fifo_id, &f2, null_yield); ASSERT_EQ(0, r); /* push one entry to f2 and the rest to f1 */ @@ -1021,7 +1025,7 @@ TEST_F(AioLegacyFIFO, TestTwoPushersTrim) bl.append(buf, sizeof(buf)); auto& f = (i < 1 ? f2 : f1); auto c = R::Rados::aio_create_completion(); - f->push(bl, c); + f->push(&dp, bl, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -1034,7 +1038,7 @@ TEST_F(AioLegacyFIFO, TestTwoPushersTrim) std::vector result; bool more = false; auto c = R::Rados::aio_create_completion(); - f1->list(num, std::nullopt, &result, &more, c); + f1->list(&dp, num, std::nullopt, &result, &more, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -1050,7 +1054,7 @@ TEST_F(AioLegacyFIFO, TestTwoPushersTrim) auto& entry = result[num - 1]; marker = entry.marker; c = R::Rados::aio_create_completion(); - f1->trim(marker, false, c); + f1->trim(&dp, marker, false, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -1059,7 +1063,7 @@ TEST_F(AioLegacyFIFO, TestTwoPushersTrim) const auto left = max_entries - num; c = R::Rados::aio_create_completion(); - f2->list(left, marker, &result, &more, c); + f2->list(&dp, left, marker, &result, &more, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -1079,7 +1083,7 @@ TEST_F(AioLegacyFIFO, TestPushBatch) static constexpr auto max_entry_size = 128ull; std::unique_ptr f; - auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield, std::nullopt, + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt, std::nullopt, false, max_part_size, max_entry_size); ASSERT_EQ(0, r); @@ -1100,7 +1104,7 @@ TEST_F(AioLegacyFIFO, TestPushBatch) ASSERT_EQ(max_entries, bufs.size()); auto c = R::Rados::aio_create_completion(); - f->push(bufs, c); + f->push(&dp, bufs, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -1111,7 +1115,7 @@ TEST_F(AioLegacyFIFO, 
TestPushBatch) std::vector result; bool more = false; c = R::Rados::aio_create_completion(); - f->list(max_entries, std::nullopt, &result, &more, c); + f->list(&dp, max_entries, std::nullopt, &result, &more, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -1129,23 +1133,23 @@ TEST_F(AioLegacyFIFO, TestPushBatch) TEST_F(LegacyFIFO, TrimAll) { std::unique_ptr f; - auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield); + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield); ASSERT_EQ(0, r); static constexpr auto max_entries = 10u; for (uint32_t i = 0; i < max_entries; ++i) { cb::list bl; encode(i, bl); - r = f->push(bl, null_yield); + r = f->push(&dp, bl, null_yield); ASSERT_EQ(0, r); } /* trim one entry */ - r = f->trim(RCf::marker::max().to_string(), false, null_yield); + r = f->trim(&dp, RCf::marker::max().to_string(), false, null_yield); ASSERT_EQ(-ENODATA, r); std::vector result; bool more; - r = f->list(1, std::nullopt, &result, &more, null_yield); + r = f->list(&dp, 1, std::nullopt, &result, &more, null_yield); ASSERT_EQ(0, r); ASSERT_TRUE(result.empty()); } @@ -1153,18 +1157,18 @@ TEST_F(LegacyFIFO, TrimAll) TEST_F(LegacyFIFO, AioTrimAll) { std::unique_ptr f; - auto r = RCf::FIFO::create(ioctx, fifo_id, &f, null_yield); + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield); ASSERT_EQ(0, r); static constexpr auto max_entries = 10u; for (uint32_t i = 0; i < max_entries; ++i) { cb::list bl; encode(i, bl); - r = f->push(bl, null_yield); + r = f->push(&dp, bl, null_yield); ASSERT_EQ(0, r); } auto c = R::Rados::aio_create_completion(); - f->trim(RCf::marker::max().to_string(), false, c); + f->trim(&dp, RCf::marker::max().to_string(), false, c); c->wait_for_complete(); r = c->get_return_value(); c->release(); @@ -1172,7 +1176,7 @@ TEST_F(LegacyFIFO, AioTrimAll) std::vector result; bool more; - r = f->list(1, std::nullopt, &result, &more, null_yield); + r = f->list(&dp, 1, std::nullopt, &result, &more, null_yield); 
ASSERT_EQ(0, r); ASSERT_TRUE(result.empty()); } diff --git a/src/test/rgw/test_log_backing.cc b/src/test/rgw/test_log_backing.cc index 95f1e613936..f1bc30c762a 100644 --- a/src/test/rgw/test_log_backing.cc +++ b/src/test/rgw/test_log_backing.cc @@ -40,6 +40,9 @@ namespace cb = ceph::buffer; namespace fifo = rados::cls::fifo; namespace RCf = rgw::cls::fifo; +auto cct = new CephContext(CEPH_ENTITY_TYPE_CLIENT); +const DoutPrefix dp(cct, 1, "test log backing: "); + class LogBacking : public testing::Test { protected: static constexpr int SHARDS = 3; @@ -72,7 +75,7 @@ protected: cb::list bl; encode(i, bl); cls_log_add(op, ceph_clock_now(), {}, "meow", bl); - auto r = rgw_rados_operate(ioctx, get_oid(0, i), &op, null_yield); + auto r = rgw_rados_operate(&dp, ioctx, get_oid(0, i), &op, null_yield); ASSERT_GE(r, 0); } } @@ -83,7 +86,7 @@ protected: cb::list bl; encode(i, bl); cls_log_add(op, ceph_clock_now(), {}, "meow", bl); - auto r = rgw_rados_operate(ioctx, get_oid(0, i), &op, null_yield); + auto r = rgw_rados_operate(&dp, ioctx, get_oid(0, i), &op, null_yield); ASSERT_GE(r, 0); } @@ -96,14 +99,14 @@ protected: std::list entries; bool truncated = false; cls_log_list(op, {}, {}, {}, 1, entries, &to_marker, &truncated); - auto r = rgw_rados_operate(ioctx, oid, &op, nullptr, null_yield); + auto r = rgw_rados_operate(&dp, ioctx, oid, &op, nullptr, null_yield); ASSERT_GE(r, 0); ASSERT_FALSE(entries.empty()); } { lr::ObjectWriteOperation op; cls_log_trim(op, {}, {}, {}, to_marker); - auto r = rgw_rados_operate(ioctx, oid, &op, null_yield); + auto r = rgw_rados_operate(&dp, ioctx, oid, &op, null_yield); ASSERT_GE(r, 0); } { @@ -111,7 +114,7 @@ protected: std::list entries; bool truncated = false; cls_log_list(op, {}, {}, {}, 1, entries, &to_marker, &truncated); - auto r = rgw_rados_operate(ioctx, oid, &op, nullptr, null_yield); + auto r = rgw_rados_operate(&dp, ioctx, oid, &op, nullptr, null_yield); ASSERT_GE(r, 0); ASSERT_TRUE(entries.empty()); } @@ -122,7 +125,7 @@ 
protected: { for (int i = 0; i < SHARDS; ++i) { std::unique_ptr fifo; - auto r = RCf::FIFO::create(ioctx, get_oid(0, i), &fifo, null_yield); + auto r = RCf::FIFO::create(&dp, ioctx, get_oid(0, i), &fifo, null_yield); ASSERT_EQ(0, r); ASSERT_TRUE(fifo); } @@ -132,12 +135,12 @@ protected: { using ceph::encode; std::unique_ptr fifo; - auto r = RCf::FIFO::open(ioctx, get_oid(0, i), &fifo, null_yield); + auto r = RCf::FIFO::open(&dp, ioctx, get_oid(0, i), &fifo, null_yield); ASSERT_GE(0, r); ASSERT_TRUE(fifo); cb::list bl; encode(i, bl); - r = fifo->push(bl, null_yield); + r = fifo->push(&dp, bl, null_yield); ASSERT_GE(0, r); } @@ -154,7 +157,7 @@ protected: TEST_F(LogBacking, TestOmap) { make_omap(); - auto stat = log_backing_type(ioctx, log_type::fifo, SHARDS, + auto stat = log_backing_type(&dp, ioctx, log_type::fifo, SHARDS, [this](int shard){ return get_oid(0, shard); }, null_yield); ASSERT_EQ(log_type::omap, *stat); @@ -162,7 +165,7 @@ TEST_F(LogBacking, TestOmap) TEST_F(LogBacking, TestOmapEmpty) { - auto stat = log_backing_type(ioctx, log_type::omap, SHARDS, + auto stat = log_backing_type(&dp, ioctx, log_type::omap, SHARDS, [this](int shard){ return get_oid(0, shard); }, null_yield); ASSERT_EQ(log_type::omap, *stat); @@ -171,7 +174,7 @@ TEST_F(LogBacking, TestOmapEmpty) TEST_F(LogBacking, TestFIFO) { make_fifo(); - auto stat = log_backing_type(ioctx, log_type::fifo, SHARDS, + auto stat = log_backing_type(&dp, ioctx, log_type::fifo, SHARDS, [this](int shard){ return get_oid(0, shard); }, null_yield); ASSERT_EQ(log_type::fifo, *stat); @@ -179,7 +182,7 @@ TEST_F(LogBacking, TestFIFO) TEST_F(LogBacking, TestFIFOEmpty) { - auto stat = log_backing_type(ioctx, log_type::fifo, SHARDS, + auto stat = log_backing_type(&dp, ioctx, log_type::fifo, SHARDS, [this](int shard){ return get_oid(0, shard); }, null_yield); ASSERT_EQ(log_type::fifo, *stat); @@ -230,7 +233,7 @@ public: TEST_F(LogBacking, GenerationSingle) { auto lgr = logback_generations::init( - ioctx, "foobar", 
[this](uint64_t gen_id, int shard) { + &dp, ioctx, "foobar", [this](uint64_t gen_id, int shard) { return get_oid(gen_id, shard); }, SHARDS, log_type::fifo, null_yield); ASSERT_TRUE(lgr); @@ -243,14 +246,14 @@ TEST_F(LogBacking, GenerationSingle) ASSERT_EQ(log_type::fifo, lg->got_entries[0].type); ASSERT_FALSE(lg->got_entries[0].pruned); - auto ec = lg->empty_to(0, null_yield); + auto ec = lg->empty_to(&dp, 0, null_yield); ASSERT_TRUE(ec); lg.reset(); lg = *logback_generations::init( - ioctx, "foobar", [this](uint64_t gen_id, int shard) { + &dp, ioctx, "foobar", [this](uint64_t gen_id, int shard) { return get_oid(gen_id, shard); }, SHARDS, log_type::fifo, null_yield); @@ -262,7 +265,7 @@ TEST_F(LogBacking, GenerationSingle) lg->got_entries.clear(); - ec = lg->new_backing(log_type::omap, null_yield); + ec = lg->new_backing(&dp, log_type::omap, null_yield); ASSERT_FALSE(ec); ASSERT_EQ(1, lg->got_entries.size()); @@ -273,7 +276,7 @@ TEST_F(LogBacking, GenerationSingle) lg.reset(); lg = *logback_generations::init( - ioctx, "foobar", [this](uint64_t gen_id, int shard) { + &dp, ioctx, "foobar", [this](uint64_t gen_id, int shard) { return get_oid(gen_id, shard); }, SHARDS, log_type::fifo, null_yield); @@ -286,7 +289,7 @@ TEST_F(LogBacking, GenerationSingle) ASSERT_EQ(log_type::omap, lg->got_entries[1].type); ASSERT_FALSE(lg->got_entries[1].pruned); - ec = lg->empty_to(0, null_yield); + ec = lg->empty_to(&dp, 0, null_yield); ASSERT_FALSE(ec); ASSERT_EQ(0, *lg->tail); @@ -294,7 +297,7 @@ TEST_F(LogBacking, GenerationSingle) lg.reset(); lg = *logback_generations::init( - ioctx, "foobar", [this](uint64_t gen_id, int shard) { + &dp, ioctx, "foobar", [this](uint64_t gen_id, int shard) { return get_oid(gen_id, shard); }, SHARDS, log_type::fifo, null_yield); @@ -303,7 +306,7 @@ TEST_F(LogBacking, GenerationSingle) ASSERT_EQ(log_type::omap, lg->got_entries[1].type); ASSERT_FALSE(lg->got_entries[1].pruned); - ec = lg->remove_empty(null_yield); + ec = lg->remove_empty(&dp, 
null_yield); ASSERT_FALSE(ec); auto entries = lg->entries(); @@ -319,11 +322,11 @@ TEST_F(LogBacking, GenerationSingle) TEST_F(LogBacking, GenerationWN) { auto lg1 = *logback_generations::init( - ioctx, "foobar", [this](uint64_t gen_id, int shard) { + &dp, ioctx, "foobar", [this](uint64_t gen_id, int shard) { return get_oid(gen_id, shard); }, SHARDS, log_type::fifo, null_yield); - auto ec = lg1->new_backing(log_type::omap, null_yield); + auto ec = lg1->new_backing(&dp, log_type::omap, null_yield); ASSERT_FALSE(ec); ASSERT_EQ(1, lg1->got_entries.size()); @@ -334,7 +337,7 @@ TEST_F(LogBacking, GenerationWN) lg1->got_entries.clear(); auto lg2 = *logback_generations::init( - ioctx2, "foobar", [this](uint64_t gen_id, int shard) { + &dp, ioctx2, "foobar", [this](uint64_t gen_id, int shard) { return get_oid(gen_id, shard); }, SHARDS, log_type::fifo, null_yield); @@ -350,7 +353,7 @@ TEST_F(LogBacking, GenerationWN) lg2->got_entries.clear(); - ec = lg1->new_backing(log_type::fifo, null_yield); + ec = lg1->new_backing(&dp, log_type::fifo, null_yield); ASSERT_FALSE(ec); ASSERT_EQ(1, lg1->got_entries.size()); @@ -366,7 +369,7 @@ TEST_F(LogBacking, GenerationWN) lg1->got_entries.clear(); lg2->got_entries.clear(); - ec = lg2->empty_to(1, null_yield); + ec = lg2->empty_to(&dp, 1, null_yield); ASSERT_FALSE(ec); ASSERT_EQ(1, *lg1->tail); diff --git a/src/test/rgw/test_rgw_lua.cc b/src/test/rgw/test_rgw_lua.cc index 6cacef01587..63cfacd803b 100644 --- a/src/test/rgw/test_rgw_lua.cc +++ b/src/test/rgw/test_rgw_lua.cc @@ -38,23 +38,23 @@ public: return 0; } - virtual int read_stats(optional_yield y, RGWStorageStats* stats, ceph::real_time *last_stats_sync, ceph::real_time *last_stats_update) override { + virtual int read_stats(const DoutPrefixProvider *dpp, optional_yield y, RGWStorageStats* stats, ceph::real_time *last_stats_sync, ceph::real_time *last_stats_update) override { return 0; } - virtual int read_stats_async(RGWGetUserStats_CB *cb) override { + virtual int 
read_stats_async(const DoutPrefixProvider *dpp, RGWGetUserStats_CB *cb) override { return 0; } - virtual int complete_flush_stats(optional_yield y) override { + virtual int complete_flush_stats(const DoutPrefixProvider *dpp, optional_yield y) override { return 0; } - virtual int read_usage(uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, bool *is_truncated, RGWUsageIter& usage_iter, map& usage) override { + virtual int read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, bool *is_truncated, RGWUsageIter& usage_iter, map& usage) override { return 0; } - virtual int trim_usage(uint64_t start_epoch, uint64_t end_epoch) override { + virtual int trim_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch) override { return 0; } diff --git a/src/test/rgw/test_rgw_manifest.cc b/src/test/rgw/test_rgw_manifest.cc index 0c22b908aff..8ccf17c9471 100644 --- a/src/test/rgw/test_rgw_manifest.cc +++ b/src/test/rgw/test_rgw_manifest.cc @@ -21,6 +21,9 @@ using namespace std; +auto cct = new CephContext(CEPH_ENTITY_TYPE_CLIENT); +const DoutPrefix dp(cct, 1, "test rgw manifest: "); + struct OldObjManifestPart { old_rgw_obj loc; /* the object where the data is located */ uint64_t loc_ofs; /* the offset at that object where the data is located */ @@ -222,18 +225,18 @@ TEST(TestRGWManifest, head_only_obj) { list::iterator liter; RGWObjManifest::obj_iterator iter; - for (iter = manifest.obj_begin(), liter = objs.begin(); - iter != manifest.obj_end() && liter != objs.end(); + for (iter = manifest.obj_begin(&dp), liter = objs.begin(); + iter != manifest.obj_end(&dp) && liter != objs.end(); ++iter, ++liter) { ASSERT_TRUE(env.get_raw(*liter) == env.get_raw(iter.get_location())); } - ASSERT_TRUE(iter == manifest.obj_end()); + ASSERT_TRUE(iter == manifest.obj_end(&dp)); ASSERT_TRUE(liter == objs.end()); rgw_raw_obj raw_head; - iter = manifest.obj_find(100 * 1024); + iter = manifest.obj_find(&dp, 100 * 
1024); ASSERT_TRUE(env.get_raw(iter.get_location()) == env.get_raw(head)); ASSERT_EQ((int)iter.get_stripe_size(), obj_size); } @@ -258,8 +261,8 @@ TEST(TestRGWManifest, obj_with_head_and_tail) { rgw_obj_select last_obj; RGWObjManifest::obj_iterator iter; - for (iter = manifest.obj_begin(), liter = objs.begin(); - iter != manifest.obj_end() && liter != objs.end(); + for (iter = manifest.obj_begin(&dp), liter = objs.begin(); + iter != manifest.obj_end(&dp) && liter != objs.end(); ++iter, ++liter) { cout << "*liter=" << *liter << " iter.get_location()=" << env.get_raw(iter.get_location()) << std::endl; ASSERT_TRUE(env.get_raw(*liter) == env.get_raw(iter.get_location())); @@ -267,15 +270,15 @@ TEST(TestRGWManifest, obj_with_head_and_tail) { last_obj = iter.get_location(); } - ASSERT_TRUE(iter == manifest.obj_end()); + ASSERT_TRUE(iter == manifest.obj_end(&dp)); ASSERT_TRUE(liter == objs.end()); - iter = manifest.obj_find(100 * 1024); + iter = manifest.obj_find(&dp, 100 * 1024); ASSERT_TRUE(env.get_raw(iter.get_location()) == env.get_raw(head)); ASSERT_EQ((int)iter.get_stripe_size(), head_size); uint64_t ofs = 20 * 1024 * 1024 + head_size; - iter = manifest.obj_find(ofs + 100); + iter = manifest.obj_find(&dp, ofs + 100); ASSERT_TRUE(env.get_raw(iter.get_location()) == env.get_raw(last_obj)); ASSERT_EQ(iter.get_stripe_ofs(), ofs); @@ -319,11 +322,11 @@ TEST(TestRGWManifest, multipart) { RGWObjManifest m; for (int i = 0; i < num_parts; i++) { - m.append(pm[i], env.zonegroup, env.zone_params); + m.append(&dp, pm[i], env.zonegroup, env.zone_params); } RGWObjManifest::obj_iterator iter; - for (iter = m.obj_begin(); iter != m.obj_end(); ++iter) { - RGWObjManifest::obj_iterator fiter = m.obj_find(iter.get_ofs()); + for (iter = m.obj_begin(&dp); iter != m.obj_end(&dp); ++iter) { + RGWObjManifest::obj_iterator fiter = m.obj_find(&dp, iter.get_ofs()); ASSERT_TRUE(env.get_raw(fiter.get_location()) == env.get_raw(iter.get_location())); } @@ -363,8 +366,8 @@ TEST(TestRGWManifest, 
old_obj_manifest) { RGWObjManifest::obj_iterator iter; auto liter = old_objs.begin(); - for (iter = manifest.obj_begin(); - iter != manifest.obj_end() && liter != old_objs.end(); + for (iter = manifest.obj_begin(&dp); + iter != manifest.obj_end(&dp) && liter != old_objs.end(); ++iter, ++liter) { rgw_pool old_pool(liter->bucket.data_pool); string old_oid; @@ -377,7 +380,7 @@ TEST(TestRGWManifest, old_obj_manifest) { } ASSERT_TRUE(liter == old_objs.end()); - ASSERT_TRUE(iter == manifest.obj_end()); + ASSERT_TRUE(iter == manifest.obj_end(&dp)); } diff --git a/src/test/rgw/test_rgw_period_history.cc b/src/test/rgw/test_rgw_period_history.cc index 2bac99b0bb9..a4854ea778f 100644 --- a/src/test/rgw/test_rgw_period_history.cc +++ b/src/test/rgw/test_rgw_period_history.cc @@ -35,7 +35,7 @@ const auto current_period = make_period("5", 5, "4"); // mock puller that throws an exception if it's called struct ErrorPuller : public RGWPeriodHistory::Puller { - int pull(const std::string& id, RGWPeriod& period, optional_yield) override { + int pull(const DoutPrefixProvider *dpp, const std::string& id, RGWPeriod& period, optional_yield) override { throw std::runtime_error("unexpected call to pull"); } }; @@ -48,7 +48,7 @@ class RecordingPuller : public RGWPeriodHistory::Puller { public: explicit RecordingPuller(int error) : error(error) {} Ids ids; - int pull(const std::string& id, RGWPeriod& period, optional_yield) override { + int pull(const DoutPrefixProvider *dpp, const std::string& id, RGWPeriod& period, optional_yield) override { ids.push_back(id); return error; } @@ -56,7 +56,7 @@ class RecordingPuller : public RGWPeriodHistory::Puller { // mock puller that returns a fake period by parsing the period id struct NumericPuller : public RGWPeriodHistory::Puller { - int pull(const std::string& id, RGWPeriod& period, optional_yield) override { + int pull(const DoutPrefixProvider *dpp, const std::string& id, RGWPeriod& period, optional_yield) override { // relies on numeric period 
ids to divine the realm_epoch auto realm_epoch = boost::lexical_cast(id); auto predecessor = boost::lexical_cast(realm_epoch-1); @@ -130,10 +130,11 @@ TEST(PeriodHistory, PullPredecessorsBeforeCurrent) { RecordingPuller puller{-EFAULT}; RGWPeriodHistory history(g_ceph_context, &puller, current_period); + const DoutPrefix dp(g_ceph_context, 1, "test rgw period history: "); // create a disjoint history at 1 and verify that periods are requested // backwards from current_period - auto c1 = history.attach(make_period("1", 1, ""), null_yield); + auto c1 = history.attach(&dp, make_period("1", 1, ""), null_yield); ASSERT_FALSE(c1); ASSERT_EQ(-EFAULT, c1.get_error()); ASSERT_EQ(Ids{"4"}, puller.ids); @@ -141,7 +142,7 @@ TEST(PeriodHistory, PullPredecessorsBeforeCurrent) auto c4 = history.insert(make_period("4", 4, "3")); ASSERT_TRUE(c4); - c1 = history.attach(make_period("1", 1, ""), null_yield); + c1 = history.attach(&dp, make_period("1", 1, ""), null_yield); ASSERT_FALSE(c1); ASSERT_EQ(-EFAULT, c1.get_error()); ASSERT_EQ(Ids({"4", "3"}), puller.ids); @@ -149,7 +150,7 @@ TEST(PeriodHistory, PullPredecessorsBeforeCurrent) auto c3 = history.insert(make_period("3", 3, "2")); ASSERT_TRUE(c3); - c1 = history.attach(make_period("1", 1, ""), null_yield); + c1 = history.attach(&dp, make_period("1", 1, ""), null_yield); ASSERT_FALSE(c1); ASSERT_EQ(-EFAULT, c1.get_error()); ASSERT_EQ(Ids({"4", "3", "2"}), puller.ids); @@ -157,7 +158,7 @@ TEST(PeriodHistory, PullPredecessorsBeforeCurrent) auto c2 = history.insert(make_period("2", 2, "1")); ASSERT_TRUE(c2); - c1 = history.attach(make_period("1", 1, ""), null_yield); + c1 = history.attach(&dp, make_period("1", 1, ""), null_yield); ASSERT_TRUE(c1); ASSERT_EQ(Ids({"4", "3", "2"}), puller.ids); } @@ -166,25 +167,26 @@ TEST(PeriodHistory, PullPredecessorsAfterCurrent) { RecordingPuller puller{-EFAULT}; RGWPeriodHistory history(g_ceph_context, &puller, current_period); + const DoutPrefix dp(g_ceph_context, 1, "test rgw period history: "); 
// create a disjoint history at 9 and verify that periods are requested // backwards down to current_period - auto c9 = history.attach(make_period("9", 9, "8"), null_yield); + auto c9 = history.attach(&dp, make_period("9", 9, "8"), null_yield); ASSERT_FALSE(c9); ASSERT_EQ(-EFAULT, c9.get_error()); ASSERT_EQ(Ids{"8"}, puller.ids); - auto c8 = history.attach(make_period("8", 8, "7"), null_yield); + auto c8 = history.attach(&dp, make_period("8", 8, "7"), null_yield); ASSERT_FALSE(c8); ASSERT_EQ(-EFAULT, c8.get_error()); ASSERT_EQ(Ids({"8", "7"}), puller.ids); - auto c7 = history.attach(make_period("7", 7, "6"), null_yield); + auto c7 = history.attach(&dp, make_period("7", 7, "6"), null_yield); ASSERT_FALSE(c7); ASSERT_EQ(-EFAULT, c7.get_error()); ASSERT_EQ(Ids({"8", "7", "6"}), puller.ids); - auto c6 = history.attach(make_period("6", 6, "5"), null_yield); + auto c6 = history.attach(&dp, make_period("6", 6, "5"), null_yield); ASSERT_TRUE(c6); ASSERT_EQ(Ids({"8", "7", "6"}), puller.ids); } @@ -270,8 +272,9 @@ TEST(PeriodHistory, AttachBefore) { NumericPuller puller; RGWPeriodHistory history(g_ceph_context, &puller, current_period); + const DoutPrefix dp(g_ceph_context, 1, "test rgw period history: "); - auto c1 = history.attach(make_period("1", 1, ""), null_yield); + auto c1 = history.attach(&dp, make_period("1", 1, ""), null_yield); ASSERT_TRUE(c1); // verify that we pulled and merged all periods from 1-5 @@ -296,8 +299,9 @@ TEST(PeriodHistory, AttachAfter) { NumericPuller puller; RGWPeriodHistory history(g_ceph_context, &puller, current_period); + const DoutPrefix dp(g_ceph_context, 1, "test rgw period history: "); - auto c9 = history.attach(make_period("9", 9, "8"), null_yield); + auto c9 = history.attach(&dp, make_period("9", 9, "8"), null_yield); ASSERT_TRUE(c9); // verify that we pulled and merged all periods from 5-9 diff --git a/src/test/rgw/test_rgw_throttle.cc b/src/test/rgw/test_rgw_throttle.cc index c2e2540ca46..109dd100619 100644 --- 
a/src/test/rgw/test_rgw_throttle.cc +++ b/src/test/rgw/test_rgw_throttle.cc @@ -50,7 +50,8 @@ class RadosFixture : public ::testing::Test { protected: RGWSI_RADOS::Obj make_obj(const std::string& oid) { auto obj = RadosEnv::rados->obj({{RadosEnv::poolname}, oid}); - ceph_assert_always(0 == obj.open()); + const NoDoutPrefix no_dpp(g_ceph_context, 1); + ceph_assert_always(0 == obj.open(&no_dpp)); return obj; } }; diff --git a/src/test/test_cors.cc b/src/test/test_cors.cc index ebb55c5b270..9cb735b5c1a 100644 --- a/src/test/test_cors.cc +++ b/src/test/test_cors.cc @@ -307,7 +307,8 @@ static int delete_bucket(void){ RGWCORSRule *xml_to_cors_rule(string s){ RGWCORSConfiguration_S3 *cors_config; - RGWCORSXMLParser_S3 parser(g_ceph_context); + const DoutPrefix dp(g_ceph_context, 1, "test cors: "); + RGWCORSXMLParser_S3 parser(&dp, g_ceph_context); const string *data = g_test->get_response_data(); if (!parser.init()) { return NULL; -- 2.47.3