From 9619e05288eaaa590bb18b1be4be347b15c1dbb4 Mon Sep 17 00:00:00 2001
From: Ali Maredia
Date: Fri, 21 Sep 2018 09:57:01 -0400
Subject: [PATCH] rgw: pass DoutPrefixProvider into the entrypoints for trim

Also add a DoutPrefixProvider to all of the cascading classes that need
one as a result

Signed-off-by: Ali Maredia
---
 src/rgw/rgw_admin.cc     |  4 ++--
 src/rgw/rgw_data_sync.cc | 14 ++++++------
 src/rgw/rgw_data_sync.h  | 18 +++++++++-------
 src/rgw/rgw_rados.cc     | 17 +++++++++++++--
 src/rgw/rgw_rest_log.cc  |  2 +-
 src/rgw/rgw_sync.cc      | 46 +++++++++++++++++++++-------------------
 src/rgw/rgw_sync.h       |  8 +++----
 7 files changed, 63 insertions(+), 46 deletions(-)

diff --git a/src/rgw/rgw_admin.cc b/src/rgw/rgw_admin.cc
index 2d2990939ef87..4b6c04afd608f 100644
--- a/src/rgw/rgw_admin.cc
+++ b/src/rgw/rgw_admin.cc
@@ -2305,7 +2305,7 @@ static int bucket_source_sync_status(RGWRados *store, const RGWZone& zone,
     return 0;
   }
   std::vector status;
-  int r = rgw_bucket_sync_status(store, source.id, bucket_info, &status);
+  int r = rgw_bucket_sync_status(dpp(), store, source.id, bucket_info, &status);
   if (r < 0) {
     lderr(store->ctx()) << "failed to read bucket sync status: " << cpp_strerror(r) << dendl;
     return r;
@@ -6701,7 +6701,7 @@ next:
     }

     auto num_shards = g_conf()->rgw_md_log_max_shards;
-    ret = crs.run(create_admin_meta_log_trim_cr(store, &http, num_shards));
+    ret = crs.run(create_admin_meta_log_trim_cr(dpp(), store, &http, num_shards));
     if (ret < 0) {
       cerr << "automated mdlog trim failed with " << cpp_strerror(ret) << std::endl;
       return -ret;
diff --git a/src/rgw/rgw_data_sync.cc b/src/rgw/rgw_data_sync.cc
index 8fda3ffda83ad..fbf6f95a2f20b 100644
--- a/src/rgw/rgw_data_sync.cc
+++ b/src/rgw/rgw_data_sync.cc
@@ -589,10 +589,10 @@ int RGWRemoteDataLog::read_source_log_shards_next(map shard_markers
   return run(new RGWListRemoteDataLogCR(&sync_env, shard_markers, 1, result));
 }

-int RGWRemoteDataLog::init(const string& _source_zone, RGWRESTConn *_conn, RGWSyncErrorLogger *_error_logger,
+int RGWRemoteDataLog::init(const DoutPrefixProvider *_dpp, const string& _source_zone, RGWRESTConn *_conn, RGWSyncErrorLogger *_error_logger,
                            RGWSyncTraceManager *_sync_tracer, RGWSyncModuleInstanceRef& _sync_module)
 {
-  sync_env.init(store->ctx(), store, _conn, async_rados, &http_manager, _error_logger,
+  sync_env.init(_dpp, store->ctx(), store, _conn, async_rados, &http_manager, _error_logger,
                 _sync_tracer, _source_zone, _sync_module);

   if (initialized) {
@@ -1775,7 +1775,7 @@ int RGWDataSyncStatusManager::init()

   error_logger = new RGWSyncErrorLogger(store, RGW_SYNC_ERROR_LOG_SHARD_PREFIX, ERROR_LOGGER_SHARDS);

-  int r = source_log.init(source_zone, conn, error_logger, store->get_sync_tracer(), sync_module);
+  int r = source_log.init(this, source_zone, conn, error_logger, store->get_sync_tracer(), sync_module);
   if (r < 0) {
     ldpp_dout(this, 0) << "ERROR: failed to init remote log, r=" << r << dendl;
     finalize();
@@ -1843,7 +1843,7 @@ int RGWRemoteBucketLog::init(const string& _source_zone, RGWRESTConn *_conn,
   bs.bucket = bucket;
   bs.shard_id = shard_id;

-  sync_env.init(store->ctx(), store, conn, async_rados, http_manager,
+  sync_env.init(dpp, store->ctx(), store, conn, async_rados, http_manager,
                 _error_logger, _sync_tracer, source_zone, _sync_module);

   return 0;
@@ -3088,7 +3088,7 @@ int RGWRunBucketSyncCoroutine::operate()
         tn->log(10, SSTR("no local info for bucket:" << ": fetching metadata"));
         string raw_key = string("bucket.instance:") + bs.bucket.get_key();

-        meta_sync_env.init(cct, sync_env->store, sync_env->store->rest_master_conn, sync_env->async_rados,
+        meta_sync_env.init(sync_env->dpp, cct, sync_env->store, sync_env->store->rest_master_conn, sync_env->async_rados,
                            sync_env->http_manager, sync_env->error_logger, sync_env->sync_tracer);

         call(new RGWMetaSyncSingleEntryCR(&meta_sync_env, raw_key,
@@ -3337,7 +3337,7 @@ class RGWCollectBucketSyncStatusCR : public RGWShardCollectCR {
   }
 };

-int rgw_bucket_sync_status(RGWRados *store, const std::string& source_zone,
+int rgw_bucket_sync_status(const DoutPrefixProvider *dpp, RGWRados *store, const std::string& source_zone,
                            const RGWBucketInfo& bucket_info,
                            std::vector *status)
 {
@@ -3347,7 +3347,7 @@ int rgw_bucket_sync_status(RGWRados *store, const std::string& source_zone,

   RGWDataSyncEnv env;
   RGWSyncModuleInstanceRef module; // null sync module
-  env.init(store->ctx(), store, nullptr, store->get_async_rados(),
+  env.init(dpp, store->ctx(), store, nullptr, store->get_async_rados(),
            nullptr, nullptr, nullptr, source_zone, module);

   RGWCoroutinesManager crs(store->ctx(), store->get_cr_registry());
diff --git a/src/rgw/rgw_data_sync.h b/src/rgw/rgw_data_sync.h
index 70cfee2888a17..24a55c4b3baba 100644
--- a/src/rgw/rgw_data_sync.h
+++ b/src/rgw/rgw_data_sync.h
@@ -230,6 +230,7 @@ struct rgw_bucket_entry_owner {
 class RGWSyncErrorLogger;

 struct RGWDataSyncEnv {
+  const DoutPrefixProvider *dpp{nullptr};
   CephContext *cct{nullptr};
   RGWRados *store{nullptr};
   RGWRESTConn *conn{nullptr};
@@ -242,10 +243,11 @@ struct RGWDataSyncEnv {

   RGWDataSyncEnv() {}

-  void init(CephContext *_cct, RGWRados *_store, RGWRESTConn *_conn,
+  void init(const DoutPrefixProvider *_dpp, CephContext *_cct, RGWRados *_store, RGWRESTConn *_conn,
             RGWAsyncRadosProcessor *_async_rados, RGWHTTPManager *_http_manager,
             RGWSyncErrorLogger *_error_logger, RGWSyncTraceManager *_sync_tracer,
             const string& _source_zone, RGWSyncModuleInstanceRef& _sync_module) {
+    dpp = _dpp;
     cct = _cct;
     store = _store;
     conn = _conn;
@@ -284,7 +286,7 @@ public:
       http_manager(store->ctx(), completion_mgr),
       lock("RGWRemoteDataLog::lock"), data_sync_cr(NULL),
       initialized(false) {}
-  int init(const string& _source_zone, RGWRESTConn *_conn, RGWSyncErrorLogger *_error_logger,
+  int init(const DoutPrefixProvider *_dpp, const string& _source_zone, RGWRESTConn *_conn, RGWSyncErrorLogger *_error_logger,
            RGWSyncTraceManager *_sync_tracer, RGWSyncModuleInstanceRef& module);
   void finish();

@@ -503,12 +505,12 @@ class RGWRemoteBucketLog : public RGWCoroutinesManager {
   RGWBucketSyncCR *sync_cr{nullptr};

 public:
-  RGWRemoteBucketLog(const DoutPrefixProvider *dpp, RGWRados *_store,
+  RGWRemoteBucketLog(const DoutPrefixProvider *_dpp, RGWRados *_store,
                      RGWBucketSyncStatusManager *_sm,
                      RGWAsyncRadosProcessor *_async_rados,
                      RGWHTTPManager *_http_manager)
     : RGWCoroutinesManager(_store->ctx(), _store->get_cr_registry()),
-      dpp(dpp), store(_store), status_manager(_sm),
+      dpp(_dpp), store(_store), status_manager(_sm),
       async_rados(_async_rados), http_manager(_http_manager)
   {}

@@ -569,17 +571,17 @@ public:
   static string status_oid(const string& source_zone, const rgw_bucket_shard& bs);
   static string obj_status_oid(const string& source_zone, const rgw_obj& obj); /* can be used by sync modules */

-  int read_sync_status();
-  int run();
-
   // implements DoutPrefixProvider
   CephContext *get_cct() const override { return store->ctx(); }
   unsigned get_subsys() const override;
   std::ostream& gen_prefix(std::ostream& out) const override;
+
+  int read_sync_status();
+  int run();
 };

 /// read the sync status of all bucket shards from the given source zone
-int rgw_bucket_sync_status(RGWRados *store, const std::string& source_zone,
+int rgw_bucket_sync_status(const DoutPrefixProvider *dpp, RGWRados *store, const std::string& source_zone,
                            const RGWBucketInfo& bucket_info,
                            std::vector *status);

diff --git a/src/rgw/rgw_rados.cc b/src/rgw/rgw_rados.cc
index 0e7346a3529f4..b611a87a4c373 100644
--- a/src/rgw/rgw_rados.cc
+++ b/src/rgw/rgw_rados.cc
@@ -3313,7 +3313,7 @@ public:
   }
 };

-class RGWSyncLogTrimThread : public RGWSyncProcessorThread
+class RGWSyncLogTrimThread : public RGWSyncProcessorThread, DoutPrefixProvider
 {
   RGWCoroutinesManager crs;
   RGWRados *store;
@@ -3339,7 +3339,7 @@ public:
   int process() override {
     list stacks;
     auto meta = new RGWCoroutinesStack(store->ctx(), &crs);
-    meta->call(create_meta_log_trim_cr(store, &http,
+    meta->call(create_meta_log_trim_cr(this, store, &http,
                                        cct->_conf->rgw_md_log_max_shards,
                                        trim_interval));
     stacks.push_back(meta);
@@ -3357,6 +3357,19 @@ public:
     crs.run(stacks);
     return 0;
   }
+
+  // implements DoutPrefixProvider
+  CephContext *get_cct() const override { return store->ctx(); }
+  unsigned get_subsys() const
+  {
+    return dout_subsys;
+  }
+
+  std::ostream& gen_prefix(std::ostream& out) const
+  {
+    return out << "sync log trim: ";
+  }
+
 };

 void RGWRados::wakeup_meta_sync_shards(set& shard_ids)
diff --git a/src/rgw/rgw_rest_log.cc b/src/rgw/rgw_rest_log.cc
index 92a85dc3cf5e4..fbbd6965f53b2 100644
--- a/src/rgw/rgw_rest_log.cc
+++ b/src/rgw/rgw_rest_log.cc
@@ -914,7 +914,7 @@ void RGWOp_BILog_Status::execute()
     ldout(s->cct, 4) << "failed to read bucket info: " << cpp_strerror(http_ret) << dendl;
     return;
   }
-  http_ret = rgw_bucket_sync_status(store, source_zone, info, &status);
+  http_ret = rgw_bucket_sync_status(this, store, source_zone, info, &status);
 }

 void RGWOp_BILog_Status::send_response()
diff --git a/src/rgw/rgw_sync.cc b/src/rgw/rgw_sync.cc
index 168db0a2c493d..31f7a539dee96 100644
--- a/src/rgw/rgw_sync.cc
+++ b/src/rgw/rgw_sync.cc
@@ -359,9 +359,10 @@ std::ostream& RGWMetaSyncStatusManager::gen_prefix(std::ostream& out) const
   return out << "meta sync: ";
 }

-void RGWMetaSyncEnv::init(CephContext *_cct, RGWRados *_store, RGWRESTConn *_conn,
+void RGWMetaSyncEnv::init(const DoutPrefixProvider *_dpp, CephContext *_cct, RGWRados *_store, RGWRESTConn *_conn,
                           RGWAsyncRadosProcessor *_async_rados, RGWHTTPManager *_http_manager,
                           RGWSyncErrorLogger *_error_logger, RGWSyncTraceManager *_sync_tracer) {
+  dpp = _dpp;
   cct = _cct;
   store = _store;
   conn = _conn;
@@ -2620,6 +2621,7 @@ int take_min_status(CephContext *cct, Iter first, Iter last,
 }

 struct TrimEnv {
+  const DoutPrefixProvider *dpp;
   RGWRados *const store;
   RGWHTTPManager *const http;
   int num_shards;
@@ -2627,8 +2629,8 @@ struct TrimEnv {
   Cursor current; //< cursor to current period
   epoch_t last_trim_epoch{0}; //< epoch of last mdlog that was purged

-  TrimEnv(RGWRados *store, RGWHTTPManager *http, int num_shards)
-    : store(store), http(http), num_shards(num_shards),
+  TrimEnv(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http, int num_shards)
+    : dpp(dpp), store(store), http(http), num_shards(num_shards),
       zone(store->get_zone_params().get_id()),
       current(store->period_history->get_current())
   {}
@@ -2640,8 +2642,8 @@ struct MasterTrimEnv : public TrimEnv {
   /// last trim marker for each shard, only applies to current period's mdlog
   std::vector last_trim_markers;

-  MasterTrimEnv(RGWRados *store, RGWHTTPManager *http, int num_shards)
-    : TrimEnv(store, http, num_shards),
+  MasterTrimEnv(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http, int num_shards)
+    : TrimEnv(dpp, store, http, num_shards),
       last_trim_markers(num_shards)
   {
     auto& period = current.get_period();
@@ -2655,8 +2657,8 @@ struct PeerTrimEnv : public TrimEnv {
   /// last trim timestamp for each shard, only applies to current period's mdlog
   std::vector last_trim_timestamps;

-  PeerTrimEnv(RGWRados *store, RGWHTTPManager *http, int num_shards)
-    : TrimEnv(store, http, num_shards),
+  PeerTrimEnv(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http, int num_shards)
+    : TrimEnv(dpp, store, http, num_shards),
       last_trim_timestamps(num_shards)
   {}

@@ -2944,7 +2946,7 @@ class MetaPeerTrimShardCollectCR : public RGWShardCollectCR {
     : RGWShardCollectCR(env.store->ctx(), MAX_CONCURRENT_SHARDS),
       env(env), mdlog(mdlog), period_id(env.current.get_period().get_id())
   {
-    meta_env.init(cct, env.store, env.store->rest_master_conn,
+    meta_env.init(env.dpp, cct, env.store, env.store->rest_master_conn,
                   env.store->get_async_rados(), env.http, nullptr,
                   env.store->get_sync_tracer());
   }
@@ -3076,10 +3078,10 @@ class MetaMasterTrimPollCR : public MetaTrimPollCR  {
     return new MetaMasterTrimCR(env);
   }
  public:
-  MetaMasterTrimPollCR(RGWRados *store, RGWHTTPManager *http,
+  MetaMasterTrimPollCR(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http,
                        int num_shards, utime_t interval)
     : MetaTrimPollCR(store, interval),
-      env(store, http, num_shards)
+      env(dpp, store, http, num_shards)
   {}
 };

@@ -3089,43 +3091,43 @@ class MetaPeerTrimPollCR : public MetaTrimPollCR {
     return new MetaPeerTrimCR(env);
   }
  public:
-  MetaPeerTrimPollCR(RGWRados *store, RGWHTTPManager *http,
+  MetaPeerTrimPollCR(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http,
                      int num_shards, utime_t interval)
     : MetaTrimPollCR(store, interval),
-      env(store, http, num_shards)
+      env(dpp, store, http, num_shards)
   {}
 };

-RGWCoroutine* create_meta_log_trim_cr(RGWRados *store, RGWHTTPManager *http,
+RGWCoroutine* create_meta_log_trim_cr(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http,
                                       int num_shards, utime_t interval)
 {
   if (store->is_meta_master()) {
-    return new MetaMasterTrimPollCR(store, http, num_shards, interval);
+    return new MetaMasterTrimPollCR(dpp, store, http, num_shards, interval);
   }
-  return new MetaPeerTrimPollCR(store, http, num_shards, interval);
+  return new MetaPeerTrimPollCR(dpp, store, http, num_shards, interval);
 }


 struct MetaMasterAdminTrimCR : private MasterTrimEnv, public MetaMasterTrimCR {
-  MetaMasterAdminTrimCR(RGWRados *store, RGWHTTPManager *http, int num_shards)
-    : MasterTrimEnv(store, http, num_shards),
+  MetaMasterAdminTrimCR(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http, int num_shards)
+    : MasterTrimEnv(dpp, store, http, num_shards),
       MetaMasterTrimCR(*static_cast(this))
   {}
 };

 struct MetaPeerAdminTrimCR : private PeerTrimEnv, public MetaPeerTrimCR {
-  MetaPeerAdminTrimCR(RGWRados *store, RGWHTTPManager *http, int num_shards)
-    : PeerTrimEnv(store, http, num_shards),
+  MetaPeerAdminTrimCR(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http, int num_shards)
+    : PeerTrimEnv(dpp, store, http, num_shards),
       MetaPeerTrimCR(*static_cast(this))
   {}
 };

-RGWCoroutine* create_admin_meta_log_trim_cr(RGWRados *store,
+RGWCoroutine* create_admin_meta_log_trim_cr(const DoutPrefixProvider *dpp, RGWRados *store,
                                             RGWHTTPManager *http, int num_shards)
 {
   if (store->is_meta_master()) {
-    return new MetaMasterAdminTrimCR(store, http, num_shards);
+    return new MetaMasterAdminTrimCR(dpp, store, http, num_shards);
   }
-  return new MetaPeerAdminTrimCR(store, http, num_shards);
+  return new MetaPeerAdminTrimCR(dpp, store, http, num_shards);
 }

diff --git a/src/rgw/rgw_sync.h b/src/rgw/rgw_sync.h
index f5fede9ae88db..78a6e8346f678 100644
--- a/src/rgw/rgw_sync.h
+++ b/src/rgw/rgw_sync.h
@@ -178,7 +178,7 @@ struct RGWMetaSyncEnv {

   RGWMetaSyncEnv() {}

-  void init(CephContext *_cct, RGWRados *_store, RGWRESTConn *_conn,
+  void init(const DoutPrefixProvider *_dpp, CephContext *_cct, RGWRados *_store, RGWRESTConn *_conn,
             RGWAsyncRadosProcessor *_async_rados, RGWHTTPManager *_http_manager,
             RGWSyncErrorLogger *_error_logger, RGWSyncTraceManager *_sync_tracer);

@@ -211,7 +211,7 @@ class RGWRemoteMetaLog : public RGWCoroutinesManager {
   RGWSyncTraceNodeRef tn;

 public:
-  RGWRemoteMetaLog(DoutPrefixProvider *dpp, RGWRados *_store,
+  RGWRemoteMetaLog(const DoutPrefixProvider *dpp, RGWRados *_store,
                    RGWAsyncRadosProcessor *async_rados,
                    RGWMetaSyncStatusManager *_sm)
     : RGWCoroutinesManager(_store->ctx(), _store->get_cr_registry()),
@@ -520,11 +520,11 @@ public:
 };

 // MetaLogTrimCR factory function
-RGWCoroutine* create_meta_log_trim_cr(RGWRados *store, RGWHTTPManager *http,
+RGWCoroutine* create_meta_log_trim_cr(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http,
                                       int num_shards, utime_t interval);

 // factory function for mdlog trim via radosgw-admin
-RGWCoroutine* create_admin_meta_log_trim_cr(RGWRados *store,
+RGWCoroutine* create_admin_meta_log_trim_cr(const DoutPrefixProvider *dpp, RGWRados *store,
                                             RGWHTTPManager *http, int num_shards);
-- 
2.39.5
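
For context, a minimal, self-contained C++ sketch of the logging-prefix pattern this patch threads through the trim entrypoints. PrefixProvider, log_line(), and SyncLogTrimThread below are hypothetical stand-ins, not Ceph's real DoutPrefixProvider, ldpp_dout(), or RGWSyncLogTrimThread; only the gen_prefix() override mirrors the one the patch adds to RGWSyncLogTrimThread in rgw_rados.cc.

    // Hypothetical stand-ins; illustrative only, not the Ceph interfaces.
    #include <iostream>
    #include <ostream>
    #include <string>

    struct PrefixProvider {                 // stand-in for DoutPrefixProvider
      virtual ~PrefixProvider() = default;
      virtual std::ostream& gen_prefix(std::ostream& out) const = 0;
    };

    // Stand-in for ldpp_dout(dpp, level): the caller hands the provider down
    // the call chain, just as this patch passes dpp into the trim coroutines.
    void log_line(const PrefixProvider *dpp, const std::string& msg) {
      dpp->gen_prefix(std::cout) << msg << std::endl;
    }

    // Mirrors how RGWSyncLogTrimThread now implements the provider: inherit
    // the interface and override gen_prefix() with a fixed context prefix.
    struct SyncLogTrimThread : PrefixProvider {
      std::ostream& gen_prefix(std::ostream& out) const override {
        return out << "sync log trim: ";
      }
      void process() {
        // "this" is passed to the entrypoint so nested code logs with the
        // thread's prefix instead of an anonymous one.
        log_line(this, "trimming mdlog shards");
      }
    };

    int main() {
      SyncLogTrimThread thread;
      thread.process();   // prints: sync log trim: trimming mdlog shards
      return 0;
    }

Passing the provider as an entrypoint argument, rather than relying on a global logger, is what lets each trim coroutine stack report under the prefix of the thread or admin command that started it.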