From: Yehuda Sadeh Date: Thu, 6 Sep 2018 11:00:31 +0000 (-0700) Subject: rgw: svc_finisher: create service X-Git-Tag: v14.1.0~965^2~35 X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=91601cf98a2e6b2b2920ffa5add85f77dd42f9b0;p=ceph.git rgw: svc_finisher: create service and other fixes Signed-off-by: Yehuda Sadeh --- diff --git a/src/rgw/CMakeLists.txt b/src/rgw/CMakeLists.txt index 24046a12b428..55e7aada6a65 100644 --- a/src/rgw/CMakeLists.txt +++ b/src/rgw/CMakeLists.txt @@ -39,6 +39,7 @@ function(gperf_generate input output) endfunction() set(librgw_common_srcs + services/svc_finisher.cc services/svc_notify.cc services/svc_quota.cc services/svc_rados.cc diff --git a/src/rgw/rgw_op.cc b/src/rgw/rgw_op.cc index 1d10ea15f07c..bd5c4d4d98f6 100644 --- a/src/rgw/rgw_op.cc +++ b/src/rgw/rgw_op.cc @@ -5609,7 +5609,7 @@ void RGWCompleteMultipart::execute() op_ret = -ERR_INVALID_PART; return; } else { - manifest.append(obj_part.manifest, store->svc.zone.get()); + manifest.append(obj_part.manifest, store->svc.zone); } bool part_compressed = (obj_part.cs_info.compression_type != "none"); diff --git a/src/rgw/rgw_period_puller.cc b/src/rgw/rgw_period_puller.cc index 0379739bce11..934eb00046d4 100644 --- a/src/rgw/rgw_period_puller.cc +++ b/src/rgw/rgw_period_puller.cc @@ -61,7 +61,7 @@ int RGWPeriodPuller::pull(const std::string& period_id, RGWPeriod& period) // try to read the period from rados period.set_id(period_id); period.set_epoch(0); - int r = period.init(store->ctx(), store->svc.sysobj.get()); + int r = period.init(store->ctx(), store->svc.sysobj); if (r < 0) { if (store->svc.zone->is_meta_master()) { // can't pull if we're the master diff --git a/src/rgw/rgw_rados.cc b/src/rgw/rgw_rados.cc index e140e8b98695..db43e307d14a 100644 --- a/src/rgw/rgw_rados.cc +++ b/src/rgw/rgw_rados.cc @@ -18,7 +18,6 @@ #include "common/errno.h" #include "common/Formatter.h" #include "common/Throttle.h" -#include "common/Finisher.h" #include 
"rgw_rados.h" #include "rgw_zone.h" @@ -76,6 +75,7 @@ using namespace librados; #include "services/svc_zone_utils.h" #include "services/svc_quota.h" #include "services/svc_sys_obj.h" +#include "services/svc_sys_obj_cache.h" #include "compressor/Compressor.h" @@ -93,8 +93,6 @@ using namespace librados; #define dout_subsys ceph_subsys_rgw -static string notify_oid_prefix = "notify"; -static string *notify_oids = NULL; static string shadow_ns = "shadow"; static string dir_oid_prefix = ".dir."; static string default_bucket_index_pool_suffix = "rgw.buckets.index"; @@ -1358,16 +1356,6 @@ void RGWRados::finalize() sync_log_trimmer = nullptr; bucket_trim = boost::none; } - if (finisher) { - finisher->stop(); - } - if (finisher) { - /* delete finisher only after cleaning up watches, as watch error path might call - * into finisher. We stop finisher before finalizing watch to make sure we don't - * actually handle any racing work - */ - delete finisher; - } if (meta_notifier) { meta_notifier->stop(); delete meta_notifier; @@ -1518,9 +1506,6 @@ int RGWRados::init_complete() } } - finisher = new Finisher(cct); - finisher->start(); - period_puller.reset(new RGWPeriodPuller(this)); period_history.reset(new RGWPeriodHistory(cct, period_puller.get(), svc.zone->get_current_period())); @@ -1562,7 +1547,6 @@ int RGWRados::init_complete() auto& zone_params = svc.zone->get_zone_params(); auto& zone = svc.zone->get_zone(); -#warning sync service needed /* no point of running sync thread if we don't have a master zone configured or there is no rest_master_conn */ if (zonegroup.master_zone.empty() || !svc.zone->get_master_conn() @@ -1696,6 +1680,27 @@ int RGWRados::init_complete() return ret; } +/* + * FIXME: in the future formattable will derive from formatter, so formattable + * could be constructed directly + */ +static bool to_formattable(CephContext *cct, JSONFormatter& f, JSONFormattable *result) +{ + stringstream ss; + f.flush(ss); + string s = ss.str(); + + JSONParser jp; + if 
(!jp.parse(s.c_str(), s.size())) { + ldout(cct, 0) << "failed to parse formatter string: data=" << s << dendl; + return false; + } + + result->decode_json(&jp); + + return true; +} + /** * Initialize the RADOS instance and prepare to do other ops * Returns 0 on success, -ERR# on failure. @@ -1711,28 +1716,47 @@ int RGWRados::initialize() svc_registry = std::make_unique(cct); JSONFormattable zone_svc_conf; - ret = svc_registry->get_instance("zone", zone_svc_conf, &svc.zone); + ret = svc_registry->get_instance("zone", zone_svc_conf, &_svc.zone); if (ret < 0) { return ret; } + svc.zone = _svc.zone.get(); JSONFormattable zone_utils_svc_conf; - ret = svc_registry->get_instance("zone_utils", zone_utils_svc_conf, &svc.zone_utils); + ret = svc_registry->get_instance("zone_utils", zone_utils_svc_conf, &_svc.zone_utils); if (ret < 0) { return ret; } + svc.zone_utils = _svc.zone_utils.get(); JSONFormattable quota_svc_conf; - ret = svc_registry->get_instance("quota", quota_svc_conf, &svc.quota); + ret = svc_registry->get_instance("quota", quota_svc_conf, &_svc.quota); if (ret < 0) { return ret; } + svc.quota = _svc.quota.get(); + + if (use_cache) { + JSONFormattable cache_svc_conf; + ret = svc_registry->get_instance("sys_obj_cache", cache_svc_conf, &_svc.cache); + if (ret < 0) { + return ret; + } + svc.cache = _svc.cache.get(); + } JSONFormattable sysobj_svc_conf; - ret = svc_registry->get_instance("sys_obj", quota_svc_conf, &svc.sysobj); + + JSONFormatter f; + encode_json("cache", use_cache, &f); + if (!to_formattable(cct, f, &sysobj_svc_conf)) { + assert(0); + } + ret = svc_registry->get_instance("sys_obj", sysobj_svc_conf, &_svc.sysobj); if (ret < 0) { return ret; } + svc.sysobj = _svc.sysobj.get(); host_id = svc.zone_utils->gen_host_id(); @@ -1743,10 +1767,6 @@ int RGWRados::initialize() return init_complete(); } -void RGWRados::schedule_context(Context *c) { - finisher->queue(c); -} - int RGWRados::list_raw_prefixed_objs(const rgw_pool& pool, const string& prefix, list& 
result) { bool is_truncated; @@ -10431,12 +10451,9 @@ uint64_t RGWRados::next_bucket_id() RGWRados *RGWStoreManager::init_storage_provider(CephContext *cct, bool use_gc_thread, bool use_lc_thread, bool quota_threads, bool run_sync_thread, bool run_reshard_thread, bool use_cache) { - RGWRados *store = NULL; - if (!use_cache) { - store = new RGWRados; - } else { - store = new RGWCache; - } + RGWRados *store = new RGWRados; + + store->set_use_cache(use_cache); if (store->initialize(cct, use_gc_thread, use_lc_thread, quota_threads, run_sync_thread, run_reshard_thread) < 0) { delete store; @@ -10655,7 +10672,7 @@ bool RGWRados::call_inspect(const std::string& s, Formatter *f) if (!svc.cache) { return false; } - return svc.cache->call_inspec(s, f); + return svc.cache->call_inspect(s, f); } bool RGWRados::call_erase(const std::string& s) { diff --git a/src/rgw/rgw_rados.h b/src/rgw/rgw_rados.h index bb72f92fad8c..184addee7804 100644 --- a/src/rgw/rgw_rados.h +++ b/src/rgw/rgw_rados.h @@ -50,6 +50,7 @@ class RGWSI_Zone; class RGWSI_ZoneUtils; class RGWSI_Quota; class RGWSI_SysObj; +class RGWSI_SysObj_Cache; /* flags for put_obj_meta() */ #define PUT_OBJ_CREATE 0x01 @@ -1132,7 +1133,6 @@ struct RGWObjectCtx { explicit RGWObjectCtx(RGWRados *_store, req_state *_s) : store(_store), s(_s), obj(store), raw(store) { } }; -class Finisher; class RGWAsyncRadosProcessor; template @@ -1295,8 +1295,6 @@ protected: RGWQuotaHandler *quota_handler; - Finisher *finisher; - RGWCoroutinesManagerRegistry *cr_registry; RGWSyncModulesManager *sync_modules_manager{nullptr}; @@ -1305,8 +1303,10 @@ protected: RGWServiceRegistryRef svc_registry; RGWIndexCompletionManager *index_completion_manager{nullptr}; + + bool use_cache{false}; public: - RGWRados() : lock("rados_timer_lock"), timer(NULL), + RGWRados(): lock("rados_timer_lock"), timer(NULL), gc(NULL), lc(NULL), obj_expirer(NULL), use_gc_thread(false), use_lc_thread(false), quota_threads(false), run_sync_thread(false), 
run_reshard_thread(false), async_rados(nullptr), meta_notifier(NULL), data_notifier(NULL), meta_sync_processor_thread(NULL), @@ -1319,10 +1319,13 @@ public: binfo_cache(NULL), obj_tombstone_cache(nullptr), pools_initialized(false), quota_handler(NULL), - finisher(NULL), cr_registry(NULL), meta_mgr(NULL), data_log(NULL), reshard(NULL) {} + void set_use_cache(bool status) { + use_cache = status; + } + uint64_t get_new_req_id() { return ++max_req_id; } @@ -1340,6 +1343,16 @@ public: std::shared_ptr zone_utils; std::shared_ptr quota; std::shared_ptr sysobj; + std::shared_ptr cache; + } _svc; + + struct { + RGWSI_RADOS *rados{nullptr}; + RGWSI_Zone *zone{nullptr}; + RGWSI_ZoneUtils *zone_utils{nullptr}; + RGWSI_Quota *quota{nullptr}; + RGWSI_SysObj *sysobj{nullptr}; + RGWSI_SysObj_Cache *cache{nullptr}; } svc; /** @@ -1412,7 +1425,7 @@ public: return initialize(); } /** Initialize the RADOS instance and prepare to do other ops */ - virtual int init_rados(); + int init_rados(); int init_complete(); int initialize(); void finalize(); diff --git a/src/rgw/rgw_rest_realm.cc b/src/rgw/rgw_rest_realm.cc index 39197472d4a3..88eb0b2c5635 100644 --- a/src/rgw/rgw_rest_realm.cc +++ b/src/rgw/rgw_rest_realm.cc @@ -65,7 +65,7 @@ void RGWOp_Period_Get::execute() period.set_id(period_id); period.set_epoch(epoch); - http_ret = period.init(store->ctx(), store->svc.sysobj.get(), realm_id, realm_name); + http_ret = period.init(store->ctx(), store->svc.sysobj, realm_id, realm_name); if (http_ret < 0) ldout(store->ctx(), 5) << "failed to read period" << dendl; } @@ -82,7 +82,7 @@ void RGWOp_Period_Post::execute() auto cct = store->ctx(); // initialize the period without reading from rados - period.init(cct, store->svc.sysobj.get(), false); + period.init(cct, store->svc.sysobj, false); // decode the period from input const auto max_size = cct->_conf->rgw_max_put_param_size; @@ -105,7 +105,7 @@ void RGWOp_Period_Post::execute() // period that we haven't restarted with yet. 
we also don't want to modify // the objects in use by RGWRados RGWRealm realm(period.get_realm()); - http_ret = realm.init(cct, store->svc.sysobj.get()); + http_ret = realm.init(cct, store->svc.sysobj); if (http_ret < 0) { lderr(cct) << "failed to read current realm: " << cpp_strerror(-http_ret) << dendl; @@ -113,7 +113,7 @@ void RGWOp_Period_Post::execute() } RGWPeriod current_period; - http_ret = current_period.init(cct, store->svc.sysobj.get(), realm.get_id()); + http_ret = current_period.init(cct, store->svc.sysobj, realm.get_id()); if (http_ret < 0) { lderr(cct) << "failed to read current period: " << cpp_strerror(-http_ret) << dendl; @@ -258,7 +258,7 @@ void RGWOp_Realm_Get::execute() // read realm realm.reset(new RGWRealm(id, name)); - http_ret = realm->init(g_ceph_context, store->svc.sysobj.get()); + http_ret = realm->init(g_ceph_context, store->svc.sysobj); if (http_ret < 0) lderr(store->ctx()) << "failed to read realm id=" << id << " name=" << name << dendl; diff --git a/src/rgw/rgw_service.cc b/src/rgw/rgw_service.cc index 6b37c68e8991..09a4079a5125 100644 --- a/src/rgw/rgw_service.cc +++ b/src/rgw/rgw_service.cc @@ -1,10 +1,14 @@ #include "rgw_service.h" +#include "services/svc_finisher.h" +#include "services/svc_notify.h" #include "services/svc_rados.h" #include "services/svc_zone.h" #include "services/svc_zone_utils.h" #include "services/svc_quota.h" #include "services/svc_sys_obj.h" +#include "services/svc_sys_obj_cache.h" +#include "services/svc_sys_obj_core.h" #define dout_subsys ceph_subsys_rgw @@ -18,11 +22,15 @@ RGWServiceInstance::~RGWServiceInstance() void RGWServiceRegistry::register_all(CephContext *cct) { + services["finisher"] = make_shared(cct); + services["notify"] = make_shared(cct); services["rados"] = make_shared(cct); services["zone"] = make_shared(cct); services["zone_utils"] = make_shared(cct); services["quota"] = make_shared(cct); services["sys_obj"] = make_shared(cct); + services["sys_obj_cache"] = make_shared(cct); + 
services["sys_obj_core"] = make_shared(cct); } bool RGWServiceRegistry::find(const string& name, RGWServiceRef *svc) diff --git a/src/rgw/rgw_sync.cc b/src/rgw/rgw_sync.cc index f5323b824876..d28a3c2fa73d 100644 --- a/src/rgw/rgw_sync.cc +++ b/src/rgw/rgw_sync.cc @@ -2566,7 +2566,7 @@ connection_map make_peer_connections(RGWRados *store, for (auto& g : zonegroups) { for (auto& z : g.second.zones) { std::unique_ptr conn{ - new RGWRESTConn(store->ctx(), store->svc.zone.get(), z.first, z.second.endpoints)}; + new RGWRESTConn(store->ctx(), store->svc.zone, z.first, z.second.endpoints)}; connections.emplace(z.first, std::move(conn)); } } diff --git a/src/rgw/rgw_sync_module_aws.cc b/src/rgw/rgw_sync_module_aws.cc index 27f8976131c7..1684d2a26005 100644 --- a/src/rgw/rgw_sync_module_aws.cc +++ b/src/rgw/rgw_sync_module_aws.cc @@ -639,7 +639,7 @@ struct AWSSyncConfig { auto& root_conf = root_profile->conn_conf; root_profile->conn.reset(new S3RESTConn(sync_env->cct, - sync_env->store->svc.zone.get(), + sync_env->store->svc.zone, id, { root_conf->endpoint }, root_conf->key, @@ -649,7 +649,7 @@ struct AWSSyncConfig { auto& c = i.second; c->conn.reset(new S3RESTConn(sync_env->cct, - sync_env->store->svc.zone.get(), + sync_env->store->svc.zone, id, { c->conn_conf->endpoint }, c->conn_conf->key, diff --git a/src/rgw/services/svc_finisher.cc b/src/rgw/services/svc_finisher.cc new file mode 100644 index 000000000000..c2330fe54fc4 --- /dev/null +++ b/src/rgw/services/svc_finisher.cc @@ -0,0 +1,57 @@ +#include "common/Finisher.h" + +#include "svc_finisher.h" +#include "svc_zone.h" + +#include "rgw/rgw_zone.h" + +int RGWS_Finisher::create_instance(const string& conf, RGWServiceInstanceRef *instance) +{ + instance->reset(new RGWSI_Finisher(this, cct)); + return 0; +} + +std::map RGWSI_Finisher::get_deps() +{ + std::map dep; + return dep; +} + +int RGWSI_Finisher::init() +{ + finisher = new Finisher(cct); + finisher->start(); + + return 0; +} + +void RGWSI_Finisher::shutdown() +{ + if 
(finisher) { + finisher->stop(); + + map cbs; + cbs.swap(shutdown_cbs); /* move cbs out, in case caller unregisters */ + for (auto& iter : cbs) { + iter.second->call(); + } + delete finisher; + } +} + +void RGWSI_Finisher::register_caller(ShutdownCB *cb, int *phandle) +{ + *phandle = ++handles_counter; + shutdown_cbs[*phandle] = cb; +} + +void RGWSI_Finisher::unregister_caller(int handle) +{ + shutdown_cbs.erase(handle); +} + +void RGWSI_Finisher::schedule_context(Context *c) +{ + finisher->queue(c); +} + diff --git a/src/rgw/services/svc_finisher.h b/src/rgw/services/svc_finisher.h new file mode 100644 index 000000000000..aab7350fdcf0 --- /dev/null +++ b/src/rgw/services/svc_finisher.h @@ -0,0 +1,50 @@ +#ifndef CEPH_RGW_SERVICES_FINISHER_H +#define CEPH_RGW_SERVICES_FINISHER_H + + +#include "rgw/rgw_service.h" + +class Context; +class Finisher; + +class RGWS_Finisher : public RGWService +{ +public: + RGWS_Finisher(CephContext *cct) : RGWService(cct, "finisher") {} + + int create_instance(const std::string& conf, RGWServiceInstanceRef *instance) override; +}; + +class RGWSI_Finisher : public RGWServiceInstance +{ +public: + class ShutdownCB; + +private: + Finisher *finisher{nullptr}; + + std::map get_deps() override; + int load(const std::string& conf, std::map& dep_refs) override; + int init() override; + void shutdown() override; + + std::map shutdown_cbs; + std::atomic handles_counter; + +public: + RGWSI_Finisher(RGWService *svc, CephContext *cct): RGWServiceInstance(svc, cct) {} + ~RGWSI_Finisher(); + + class ShutdownCB { + public: + virtual ~ShutdownCB() {} + virtual void call() = 0; + }; + + void register_caller(ShutdownCB *cb, int *phandle); + void unregister_caller(int handle); + + void schedule_context(Context *c); +}; + +#endif diff --git a/src/rgw/services/svc_notify.cc b/src/rgw/services/svc_notify.cc index 8c468b91051c..e8ac830feabe 100644 --- a/src/rgw/services/svc_notify.cc +++ b/src/rgw/services/svc_notify.cc @@ -2,6 +2,7 @@ #include 
"common/errno.h" #include "svc_notify.h" +#include "svc_finisher.h" #include "svc_zone.h" #include "svc_rados.h" @@ -11,6 +12,12 @@ static string notify_oid_prefix = "notify"; +int RGWS_Notify::create_instance(const string& conf, RGWServiceInstanceRef *instance) +{ + instance->reset(new RGWSI_Notify(this, cct)); + return 0; +} + class RGWWatcher : public librados::WatchCtx2 { CephContext *cct; RGWSI_Notify *svc; @@ -127,11 +134,16 @@ public: } }; -int RGWS_Notify::create_instance(const string& conf, RGWServiceInstanceRef *instance) + +class RGWSI_Notify_ShutdownCB : public RGWSI_Finisher::ShutdownCB { - instance->reset(new RGWSI_Notify(this, cct)); - return 0; -} + RGWSI_Notify *svc; +public: + RGWSI_Notify_ShutdownCB(RGWSI_Notify *_svc) : svc(_svc) {} + void call() override { + svc->shutdown(); + } +}; std::map RGWSI_Notify::get_deps() { @@ -140,6 +152,8 @@ std::map RGWSI_Notify::get_deps() .conf = "{}" }; deps["rados_dep"] = { .name = "rados", .conf = "{}" }; + deps["finisher_dep"] = { .name = "finisher", + .conf = "{}" }; return deps; } @@ -149,6 +163,8 @@ int RGWSI_Notify::load(const string& conf, std::map(dep_refs["rados_dep"]); assert(rados_svc); + finisher_svc = static_pointer_cast(dep_refs["finisher_dep"]); + assert(finisher_svc); return 0; } @@ -230,6 +246,10 @@ int RGWSI_Notify::init_watch() void RGWSI_Notify::finalize_watch() { + if (finalized) { + return; + } + for (int i = 0; i < num_watchers; i++) { RGWWatcher *watcher = watchers[i]; watcher->unregister_watch(); @@ -249,11 +269,15 @@ int RGWSI_Notify::init() return ret; } + shutdown_cb = new RGWSI_Notify_ShutdownCB(this); + finisher_svc->register_caller(shutdown_cb, &finisher_handle); + return 0; } void RGWSI_Notify::shutdown() { + finisher_svc->unregister_caller(finisher_handle); finalize_watch(); } @@ -428,3 +452,8 @@ void RGWSI_Notify::register_watch_cb(CB *_cb) RWLock::WLocker l(watchers_lock); cb = _cb; } + +void RGWSI_Notify::schedule_context(Context *c) +{ + finisher_svc->schedule_context(c); 
+} diff --git a/src/rgw/services/svc_notify.h b/src/rgw/services/svc_notify.h index 36adefa71682..bcd96a8f4d95 100644 --- a/src/rgw/services/svc_notify.h +++ b/src/rgw/services/svc_notify.h @@ -8,24 +8,32 @@ class RGWSI_Zone; +class RGWSI_Finisher; class RGWWatcher; class RGWS_Notify : public RGWService { public: - RGWS_Notify(CephContext *cct) : RGWService(cct, "quota") {} + RGWS_Notify(CephContext *cct) : RGWService(cct, "notify") {} int create_instance(const std::string& conf, RGWServiceInstanceRef *instance) override; }; +class RGWSI_Notify_ShutdownCB; + class RGWSI_Notify : public RGWServiceInstance { + friend class RGWWatcher; + friend class RGWSI_Notify_ShutdownCB; + public: class CB; + private: std::shared_ptr zone_svc; std::shared_ptr rados_svc; + std::shared_ptr finisher_svc; std::map get_deps() override; int load(const std::string& conf, std::map& dep_refs) override; @@ -41,13 +49,16 @@ private: double inject_notify_timeout_probability{0}; unsigned max_notify_retries{0}; - friend class RGWWatcher; - string get_control_oid(int i); RGWSI_RADOS::Obj pick_control_obj(const string& key); CB *cb{nullptr}; + int finisher_handle{0}; + RGWSI_Notify_ShutdownCB *shutdown_cb{nullptr}; + + bool finalized{false}; + int init_watch(); void finalize_watch(); @@ -65,8 +76,11 @@ private: void set_enabled(bool status); int robust_notify(RGWSI_RADOS::Obj& notify_obj, bufferlist& bl); + + void schedule_context(Context *c); public: RGWSI_Notify(RGWService *svc, CephContext *cct): RGWServiceInstance(svc, cct) {} + ~RGWSI_Notify(); class CB { public: diff --git a/src/rgw/services/svc_sys_obj_cache.cc b/src/rgw/services/svc_sys_obj_cache.cc index 06399de3212d..1d9f326f156d 100644 --- a/src/rgw/services/svc_sys_obj_cache.cc +++ b/src/rgw/services/svc_sys_obj_cache.cc @@ -4,6 +4,12 @@ #define dout_subsys ceph_subsys_rgw +int RGWS_SysObj_Cache::create_instance(const string& conf, RGWServiceInstanceRef *instance) +{ + instance->reset(new RGWSI_SysObj_Cache(this, cct)); + return 0; +} 
+ class RGWSI_SysObj_Cache_CB : public RGWSI_Notify::CB { RGWSI_SysObj_Cache *svc; diff --git a/src/rgw/services/svc_sys_obj_cache.h b/src/rgw/services/svc_sys_obj_cache.h index e108ac0b5c05..b573a37fd25e 100644 --- a/src/rgw/services/svc_sys_obj_cache.h +++ b/src/rgw/services/svc_sys_obj_cache.h @@ -12,6 +12,14 @@ class RGWSI_Notify; class RGWSI_SysObj_Cache_CB; +class RGWS_SysObj_Cache : public RGWService +{ +public: + RGWS_SysObj_Cache(CephContext *cct) : RGWService(cct, "sysobj_cache") {} + + int create_instance(const std::string& conf, RGWServiceInstanceRef *instance) override; +}; + class RGWSI_SysObj_Cache : public RGWSI_SysObj_Core { friend class RGWSI_SysObj_Cache_CB; diff --git a/src/rgw/services/svc_sys_obj_core.cc b/src/rgw/services/svc_sys_obj_core.cc index 742bfcc73523..c75b72ccca0b 100644 --- a/src/rgw/services/svc_sys_obj_core.cc +++ b/src/rgw/services/svc_sys_obj_core.cc @@ -6,6 +6,12 @@ #define dout_subsys ceph_subsys_rgw +int RGWS_SysObj_Core::create_instance(const string& conf, RGWServiceInstanceRef *instance) +{ + instance->reset(new RGWSI_SysObj_Core(this, cct)); + return 0; +} + int RGWSI_SysObj_Core::GetObjState::get_rados_obj(RGWSI_RADOS *rados_svc, RGWSI_Zone *zone_svc, rgw_raw_obj& obj, diff --git a/src/rgw/services/svc_sys_obj_core.h b/src/rgw/services/svc_sys_obj_core.h index 5ca9aed31022..d296909a524e 100644 --- a/src/rgw/services/svc_sys_obj_core.h +++ b/src/rgw/services/svc_sys_obj_core.h @@ -98,6 +98,14 @@ public: } }; +class RGWS_SysObj_Core : public RGWService +{ +public: + RGWS_SysObj_Core(CephContext *cct) : RGWService(cct, "sysobj_core") {} + + int create_instance(const std::string& conf, RGWServiceInstanceRef *instance) override; +}; + class RGWSI_SysObj_Core : public RGWServiceInstance { friend class RGWSI_SysObj;