return 0;
}
std::vector<rgw_bucket_shard_sync_info> status;
- int r = rgw_bucket_sync_status(store, source.id, bucket_info, &status);
+ int r = rgw_bucket_sync_status(dpp(), store, source.id, bucket_info, &status);
if (r < 0) {
lderr(store->ctx()) << "failed to read bucket sync status: " << cpp_strerror(r) << dendl;
return r;
}
auto num_shards = g_conf()->rgw_md_log_max_shards;
- ret = crs.run(create_admin_meta_log_trim_cr(store, &http, num_shards));
+ ret = crs.run(create_admin_meta_log_trim_cr(dpp(), store, &http, num_shards));
if (ret < 0) {
cerr << "automated mdlog trim failed with " << cpp_strerror(ret) << std::endl;
return -ret;
return run(new RGWListRemoteDataLogCR(&sync_env, shard_markers, 1, result));
}
-int RGWRemoteDataLog::init(const string& _source_zone, RGWRESTConn *_conn, RGWSyncErrorLogger *_error_logger,
+int RGWRemoteDataLog::init(const DoutPrefixProvider *_dpp, const string& _source_zone, RGWRESTConn *_conn, RGWSyncErrorLogger *_error_logger,
RGWSyncTraceManager *_sync_tracer, RGWSyncModuleInstanceRef& _sync_module)
{
- sync_env.init(store->ctx(), store, _conn, async_rados, &http_manager, _error_logger,
+ sync_env.init(_dpp, store->ctx(), store, _conn, async_rados, &http_manager, _error_logger,
_sync_tracer, _source_zone, _sync_module);
if (initialized) {
error_logger = new RGWSyncErrorLogger(store, RGW_SYNC_ERROR_LOG_SHARD_PREFIX, ERROR_LOGGER_SHARDS);
- int r = source_log.init(source_zone, conn, error_logger, store->get_sync_tracer(), sync_module);
+ int r = source_log.init(this, source_zone, conn, error_logger, store->get_sync_tracer(), sync_module);
if (r < 0) {
ldpp_dout(this, 0) << "ERROR: failed to init remote log, r=" << r << dendl;
finalize();
bs.bucket = bucket;
bs.shard_id = shard_id;
- sync_env.init(store->ctx(), store, conn, async_rados, http_manager,
+ sync_env.init(dpp, store->ctx(), store, conn, async_rados, http_manager,
_error_logger, _sync_tracer, source_zone, _sync_module);
return 0;
tn->log(10, SSTR("no local info for bucket:" << ": fetching metadata"));
string raw_key = string("bucket.instance:") + bs.bucket.get_key();
- meta_sync_env.init(cct, sync_env->store, sync_env->store->rest_master_conn, sync_env->async_rados,
+ meta_sync_env.init(sync_env->dpp, cct, sync_env->store, sync_env->store->rest_master_conn, sync_env->async_rados,
sync_env->http_manager, sync_env->error_logger, sync_env->sync_tracer);
call(new RGWMetaSyncSingleEntryCR(&meta_sync_env, raw_key,
}
};
-int rgw_bucket_sync_status(RGWRados *store, const std::string& source_zone,
+int rgw_bucket_sync_status(const DoutPrefixProvider *dpp, RGWRados *store, const std::string& source_zone,
const RGWBucketInfo& bucket_info,
std::vector<rgw_bucket_shard_sync_info> *status)
{
RGWDataSyncEnv env;
RGWSyncModuleInstanceRef module; // null sync module
- env.init(store->ctx(), store, nullptr, store->get_async_rados(),
+ env.init(dpp, store->ctx(), store, nullptr, store->get_async_rados(),
nullptr, nullptr, nullptr, source_zone, module);
RGWCoroutinesManager crs(store->ctx(), store->get_cr_registry());
class RGWSyncErrorLogger;
struct RGWDataSyncEnv {
+ const DoutPrefixProvider *dpp{nullptr};
CephContext *cct{nullptr};
RGWRados *store{nullptr};
RGWRESTConn *conn{nullptr};
RGWDataSyncEnv() {}
- void init(CephContext *_cct, RGWRados *_store, RGWRESTConn *_conn,
+ void init(const DoutPrefixProvider *_dpp, CephContext *_cct, RGWRados *_store, RGWRESTConn *_conn,
RGWAsyncRadosProcessor *_async_rados, RGWHTTPManager *_http_manager,
RGWSyncErrorLogger *_error_logger, RGWSyncTraceManager *_sync_tracer,
const string& _source_zone, RGWSyncModuleInstanceRef& _sync_module) {
+ dpp = _dpp;
cct = _cct;
store = _store;
conn = _conn;
http_manager(store->ctx(), completion_mgr),
lock("RGWRemoteDataLog::lock"), data_sync_cr(NULL),
initialized(false) {}
- int init(const string& _source_zone, RGWRESTConn *_conn, RGWSyncErrorLogger *_error_logger,
+ int init(const DoutPrefixProvider *_dpp, const string& _source_zone, RGWRESTConn *_conn, RGWSyncErrorLogger *_error_logger,
RGWSyncTraceManager *_sync_tracer, RGWSyncModuleInstanceRef& module);
void finish();
RGWBucketSyncCR *sync_cr{nullptr};
public:
- RGWRemoteBucketLog(const DoutPrefixProvider *dpp, RGWRados *_store,
+ RGWRemoteBucketLog(const DoutPrefixProvider *_dpp, RGWRados *_store,
RGWBucketSyncStatusManager *_sm,
RGWAsyncRadosProcessor *_async_rados,
RGWHTTPManager *_http_manager)
: RGWCoroutinesManager(_store->ctx(), _store->get_cr_registry()),
- dpp(dpp), store(_store), status_manager(_sm),
+ dpp(_dpp), store(_store), status_manager(_sm),
async_rados(_async_rados), http_manager(_http_manager)
{}
static string status_oid(const string& source_zone, const rgw_bucket_shard& bs);
static string obj_status_oid(const string& source_zone, const rgw_obj& obj); /* can be used by sync modules */
- int read_sync_status();
- int run();
-
// implements DoutPrefixProvider
CephContext *get_cct() const override { return store->ctx(); }
unsigned get_subsys() const override;
std::ostream& gen_prefix(std::ostream& out) const override;
+
+ int read_sync_status();
+ int run();
};
/// read the sync status of all bucket shards from the given source zone
-int rgw_bucket_sync_status(RGWRados *store, const std::string& source_zone,
+int rgw_bucket_sync_status(const DoutPrefixProvider *dpp, RGWRados *store, const std::string& source_zone,
const RGWBucketInfo& bucket_info,
std::vector<rgw_bucket_shard_sync_info> *status);
}
};
// Background thread that periodically trims the metadata log.
// Now also inherits DoutPrefixProvider so `this` can be handed to
// create_meta_log_trim_cr() below, giving the trim coroutines a
// recognizable log prefix.
-class RGWSyncLogTrimThread : public RGWSyncProcessorThread
+class RGWSyncLogTrimThread : public RGWSyncProcessorThread, DoutPrefixProvider
{
  RGWCoroutinesManager crs;
  RGWRados *store;
  // Run one round of mdlog trimming on a coroutine stack and block
  // until it completes.
  // NOTE(review): `http` and `trim_interval` are members declared in a
  // part of this class outside this chunk — confirm against the full
  // definition.
  int process() override {
    list<RGWCoroutinesStack*> stacks;
    auto meta = new RGWCoroutinesStack(store->ctx(), &crs);
-    meta->call(create_meta_log_trim_cr(store, &http,
+    meta->call(create_meta_log_trim_cr(this, store, &http,
                                       cct->_conf->rgw_md_log_max_shards,
                                       trim_interval));
    stacks.push_back(meta);
    crs.run(stacks);
    return 0;
  }
+
+  // implements DoutPrefixProvider
+  CephContext *get_cct() const override { return store->ctx(); }
+  // `override` added for consistency with get_cct() above and with the
+  // other DoutPrefixProvider implementations in this change set.
+  unsigned get_subsys() const override
+  {
+    return dout_subsys;
+  }
+
+  std::ostream& gen_prefix(std::ostream& out) const override
+  {
+    return out << "sync log trim: ";
+  }
+
};
void RGWRados::wakeup_meta_sync_shards(set<int>& shard_ids)
ldout(s->cct, 4) << "failed to read bucket info: " << cpp_strerror(http_ret) << dendl;
return;
}
- http_ret = rgw_bucket_sync_status(store, source_zone, info, &status);
+ http_ret = rgw_bucket_sync_status(this, store, source_zone, info, &status);
}
void RGWOp_BILog_Status::send_response()
return out << "meta sync: ";
}
-void RGWMetaSyncEnv::init(CephContext *_cct, RGWRados *_store, RGWRESTConn *_conn,
+void RGWMetaSyncEnv::init(const DoutPrefixProvider *_dpp, CephContext *_cct, RGWRados *_store, RGWRESTConn *_conn,
RGWAsyncRadosProcessor *_async_rados, RGWHTTPManager *_http_manager,
RGWSyncErrorLogger *_error_logger, RGWSyncTraceManager *_sync_tracer) {
+ dpp = _dpp;
cct = _cct;
store = _store;
conn = _conn;
}
struct TrimEnv {
+ const DoutPrefixProvider *dpp;
RGWRados *const store;
RGWHTTPManager *const http;
int num_shards;
Cursor current; //< cursor to current period
epoch_t last_trim_epoch{0}; //< epoch of last mdlog that was purged
- TrimEnv(RGWRados *store, RGWHTTPManager *http, int num_shards)
- : store(store), http(http), num_shards(num_shards),
+ TrimEnv(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http, int num_shards)
+ : dpp(dpp), store(store), http(http), num_shards(num_shards),
zone(store->get_zone_params().get_id()),
current(store->period_history->get_current())
{}
/// last trim marker for each shard, only applies to current period's mdlog
std::vector<std::string> last_trim_markers;
- MasterTrimEnv(RGWRados *store, RGWHTTPManager *http, int num_shards)
- : TrimEnv(store, http, num_shards),
+ MasterTrimEnv(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http, int num_shards)
+ : TrimEnv(dpp, store, http, num_shards),
last_trim_markers(num_shards)
{
auto& period = current.get_period();
/// last trim timestamp for each shard, only applies to current period's mdlog
std::vector<ceph::real_time> last_trim_timestamps;
- PeerTrimEnv(RGWRados *store, RGWHTTPManager *http, int num_shards)
- : TrimEnv(store, http, num_shards),
+ PeerTrimEnv(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http, int num_shards)
+ : TrimEnv(dpp, store, http, num_shards),
last_trim_timestamps(num_shards)
{}
: RGWShardCollectCR(env.store->ctx(), MAX_CONCURRENT_SHARDS),
env(env), mdlog(mdlog), period_id(env.current.get_period().get_id())
{
- meta_env.init(cct, env.store, env.store->rest_master_conn,
+ meta_env.init(env.dpp, cct, env.store, env.store->rest_master_conn,
env.store->get_async_rados(), env.http, nullptr,
env.store->get_sync_tracer());
}
return new MetaMasterTrimCR(env);
}
public:
- MetaMasterTrimPollCR(RGWRados *store, RGWHTTPManager *http,
+ MetaMasterTrimPollCR(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http,
int num_shards, utime_t interval)
: MetaTrimPollCR(store, interval),
- env(store, http, num_shards)
+ env(dpp, store, http, num_shards)
{}
};
return new MetaPeerTrimCR(env);
}
public:
- MetaPeerTrimPollCR(RGWRados *store, RGWHTTPManager *http,
+ MetaPeerTrimPollCR(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http,
int num_shards, utime_t interval)
: MetaTrimPollCR(store, interval),
- env(store, http, num_shards)
+ env(dpp, store, http, num_shards)
{}
};
// Factory for the polling metadata-log trim coroutine used by
// RGWSyncLogTrimThread. Dispatches on store->is_meta_master(): the
// metadata master zone gets MetaMasterTrimPollCR, any other zone gets
// MetaPeerTrimPollCR. The new dpp argument is forwarded into the trim
// environment so log output carries the caller's prefix.
-RGWCoroutine* create_meta_log_trim_cr(RGWRados *store, RGWHTTPManager *http,
+RGWCoroutine* create_meta_log_trim_cr(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http,
                                      int num_shards, utime_t interval)
{
  if (store->is_meta_master()) {
-    return new MetaMasterTrimPollCR(store, http, num_shards, interval);
+    return new MetaMasterTrimPollCR(dpp, store, http, num_shards, interval);
  }
-  return new MetaPeerTrimPollCR(store, http, num_shards, interval);
+  return new MetaPeerTrimPollCR(dpp, store, http, num_shards, interval);
}
// One-shot master-side mdlog trim for the radosgw-admin path: owns its
// MasterTrimEnv via private inheritance and runs MetaMasterTrimCR over
// it directly, with no polling interval. The dpp is passed through to
// the env for prefixed logging.
struct MetaMasterAdminTrimCR : private MasterTrimEnv, public MetaMasterTrimCR {
-  MetaMasterAdminTrimCR(RGWRados *store, RGWHTTPManager *http, int num_shards)
-    : MasterTrimEnv(store, http, num_shards),
+  MetaMasterAdminTrimCR(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http, int num_shards)
+    : MasterTrimEnv(dpp, store, http, num_shards),
      // hand the base-class env (initialized just above) to the trim CR
      MetaMasterTrimCR(*static_cast<MasterTrimEnv*>(this))
  {}
};
// One-shot peer-side mdlog trim for the radosgw-admin path: mirrors
// MetaMasterAdminTrimCR above, but over PeerTrimEnv/MetaPeerTrimCR.
// The dpp is passed through to the env for prefixed logging.
struct MetaPeerAdminTrimCR : private PeerTrimEnv, public MetaPeerTrimCR {
-  MetaPeerAdminTrimCR(RGWRados *store, RGWHTTPManager *http, int num_shards)
-    : PeerTrimEnv(store, http, num_shards),
+  MetaPeerAdminTrimCR(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http, int num_shards)
+    : PeerTrimEnv(dpp, store, http, num_shards),
      // hand the base-class env (initialized just above) to the trim CR
      MetaPeerTrimCR(*static_cast<PeerTrimEnv*>(this))
  {}
};
// Factory for the one-shot mdlog trim coroutine driven by
// radosgw-admin (see the crs.run(create_admin_meta_log_trim_cr(...))
// call site earlier in this change). Same master/peer dispatch as
// create_meta_log_trim_cr, but without a polling interval.
-RGWCoroutine* create_admin_meta_log_trim_cr(RGWRados *store,
+RGWCoroutine* create_admin_meta_log_trim_cr(const DoutPrefixProvider *dpp, RGWRados *store,
                                            RGWHTTPManager *http,
                                            int num_shards)
{
  if (store->is_meta_master()) {
-    return new MetaMasterAdminTrimCR(store, http, num_shards);
+    return new MetaMasterAdminTrimCR(dpp, store, http, num_shards);
  }
-  return new MetaPeerAdminTrimCR(store, http, num_shards);
+  return new MetaPeerAdminTrimCR(dpp, store, http, num_shards);
}
RGWMetaSyncEnv() {}
- void init(CephContext *_cct, RGWRados *_store, RGWRESTConn *_conn,
+ void init(const DoutPrefixProvider *_dpp, CephContext *_cct, RGWRados *_store, RGWRESTConn *_conn,
RGWAsyncRadosProcessor *_async_rados, RGWHTTPManager *_http_manager,
RGWSyncErrorLogger *_error_logger, RGWSyncTraceManager *_sync_tracer);
RGWSyncTraceNodeRef tn;
public:
- RGWRemoteMetaLog(DoutPrefixProvider *dpp, RGWRados *_store,
+ RGWRemoteMetaLog(const DoutPrefixProvider *dpp, RGWRados *_store,
RGWAsyncRadosProcessor *async_rados,
RGWMetaSyncStatusManager *_sm)
: RGWCoroutinesManager(_store->ctx(), _store->get_cr_registry()),
};
// MetaLogTrimCR factory function
-RGWCoroutine* create_meta_log_trim_cr(RGWRados *store, RGWHTTPManager *http,
+RGWCoroutine* create_meta_log_trim_cr(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http,
int num_shards, utime_t interval);
// factory function for mdlog trim via radosgw-admin
-RGWCoroutine* create_admin_meta_log_trim_cr(RGWRados *store,
+RGWCoroutine* create_admin_meta_log_trim_cr(const DoutPrefixProvider *dpp, RGWRados *store,
RGWHTTPManager *http,
int num_shards);