]> git-server-git.apps.pok.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
rgw: Remove `RGWSI_RADOS` from `RGWSI_MDLog`
authorAdam C. Emerson <aemerson@redhat.com>
Fri, 2 Dec 2022 06:55:47 +0000 (01:55 -0500)
committerCasey Bodley <cbodley@redhat.com>
Wed, 29 Nov 2023 18:15:27 +0000 (13:15 -0500)
Simply use the RADOS handle and `rgw_rados_ref` directly.

Also move `async_processor` out from `RGWSI_RADOS` and into
`RGWServices_Def`. This is as good a place for it as any, for now, as
it's reachable by everyone who needs it and exists through the
lifetime of the process.

Eventually it's going to go away due to coroutinization, anyway.

Signed-off-by: Adam C. Emerson <aemerson@redhat.com>
13 files changed:
src/rgw/driver/rados/rgw_data_sync.cc
src/rgw/driver/rados/rgw_period.cc
src/rgw/driver/rados/rgw_rados.cc
src/rgw/driver/rados/rgw_service.cc
src/rgw/driver/rados/rgw_service.h
src/rgw/driver/rados/rgw_trim_bilog.cc
src/rgw/driver/rados/rgw_trim_datalog.cc
src/rgw/driver/rados/rgw_trim_mdlog.cc
src/rgw/rgw_admin.cc
src/rgw/services/svc_mdlog.cc
src/rgw/services/svc_mdlog.h
src/rgw/services/svc_rados.cc
src/rgw/services/svc_rados.h

index 02965f9073d654f3aca7dccd9cb549f4501d0eb8..7b8c0b7343d9968f2f3cd627de10028986861f7c 100644 (file)
@@ -6068,7 +6068,7 @@ int RGWBucketPipeSyncStatusManager::do_init(const DoutPrefixProvider *dpp,
   }
 
   sync_module.reset(new RGWDefaultSyncModuleInstance());
-  auto async_rados = driver->svc()->rados->get_async_processor();
+  auto async_rados = driver->svc()->async_processor;
 
   sync_env.init(this, driver->ctx(), driver,
                 driver->svc(), async_rados, &http_manager,
@@ -6680,7 +6680,7 @@ int rgw_read_bucket_inc_sync_status(const DoutPrefixProvider *dpp,
 
   RGWDataSyncEnv env;
   RGWSyncModuleInstanceRef module; // null sync module
-  env.init(dpp, driver->ctx(), driver, driver->svc(), driver->svc()->rados->get_async_processor(),
+  env.init(dpp, driver->ctx(), driver, driver->svc(), driver->svc()->async_processor,
            nullptr, nullptr, nullptr, module, nullptr);
 
   RGWDataSyncCtx sc;
index 61602b354e28e0d6174ab1b17d26e21904b8b969..4a16faccefb3a83bbee513229f857c98564a444d 100644 (file)
@@ -154,7 +154,7 @@ static int read_sync_status(const DoutPrefixProvider *dpp, rgw::sal::Driver* dri
 {
   rgw::sal::RadosStore* rados_store = static_cast<rgw::sal::RadosStore*>(driver);
   // initialize a sync status manager to read the status
-  RGWMetaSyncStatusManager mgr(rados_store, rados_store->svc()->rados->get_async_processor());
+  RGWMetaSyncStatusManager mgr(rados_store, rados_store->svc()->async_processor);
   int r = mgr.init(dpp);
   if (r < 0) {
     return r;
index 9cf0831ecaa9e5b77ab64f7d9b2a6f8db050b1f8..5fd174c3aff039c45b3368a87a38b817f629fcc5 100644 (file)
@@ -1021,7 +1021,7 @@ void RGWRados::finalize()
   /* Before joining any sync threads, drain outstanding requests &
    * mark the async_processor as going_down() */
   if (svc.rados) {
-    svc.rados->stop_processor();
+    svc.async_processor->stop();
   }
 
   if (run_sync_thread) {
@@ -1258,7 +1258,7 @@ int RGWRados::init_complete(const DoutPrefixProvider *dpp, optional_yield y)
                       << pt.second.name << " present in zonegroup" << dendl;
       }
     }
-    auto async_processor = svc.rados->get_async_processor();
+    auto async_processor = svc.async_processor;
     std::lock_guard l{meta_sync_thread_lock};
     meta_sync_processor_thread = new RGWMetaSyncProcessorThread(this->driver, async_processor);
     ret = meta_sync_processor_thread->init(dpp);
@@ -1283,7 +1283,7 @@ int RGWRados::init_complete(const DoutPrefixProvider *dpp, optional_yield y)
     std::lock_guard dl{data_sync_thread_lock};
     for (auto source_zone : svc.zone->get_data_sync_source_zones()) {
       ldpp_dout(dpp, 5) << "starting data sync thread for zone " << source_zone->name << dendl;
-      auto *thread = new RGWDataSyncProcessorThread(this->driver, svc.rados->get_async_processor(), source_zone);
+      auto *thread = new RGWDataSyncProcessorThread(this->driver, svc.async_processor, source_zone);
       ret = thread->init(dpp);
       if (ret < 0) {
         ldpp_dout(dpp, 0) << "ERROR: failed to initialize data sync thread" << dendl;
index 9dfa5465dfa9693266ed704a281b87a26b36af46..af4043efa7af1a1376795afc748817ed7fa029eb 100644 (file)
@@ -31,6 +31,7 @@
 #include "common/errno.h"
 
 #include "rgw_bucket.h"
+#include "rgw_cr_rados.h"
 #include "rgw_datalog.h"
 #include "rgw_metadata.h"
 #include "rgw_otp.h"
@@ -78,6 +79,8 @@ int RGWServices_Def::init(CephContext *cct,
   sysobj_core = std::make_unique<RGWSI_SysObj_Core>(cct);
   user_rados = std::make_unique<RGWSI_User_RADOS>(cct);
   role_rados = std::make_unique<RGWSI_Role_RADOS>(cct);
+  async_processor = std::make_unique<RGWAsyncRadosProcessor>(
+    cct, cct->_conf->rgw_num_async_rados_threads);
 
   if (have_cache) {
     sysobj_cache = std::make_unique<RGWSI_SysObj_Cache>(dpp, cct);
@@ -85,6 +88,7 @@ int RGWServices_Def::init(CephContext *cct,
 
   vector<RGWSI_MetaBackend *> meta_bes{meta_be_sobj.get(), meta_be_otp.get()};
 
+  async_processor->start();
   finisher->init();
   bi_rados->init(zone.get(), radoshandle, bilog_rados.get(), datalog_rados.get());
   bilog_rados->init(bi_rados.get());
@@ -97,7 +101,8 @@ int RGWServices_Def::init(CephContext *cct,
                          bucket_sobj.get());
   cls->init(zone.get(), radoshandle);
   config_key_rados->init(radoshandle);
-  mdlog->init(rados.get(), zone.get(), sysobj.get(), cls.get());
+  mdlog->init(radoshandle, zone.get(), sysobj.get(), cls.get(),
+             async_processor.get());
   meta->init(sysobj.get(), mdlog.get(), meta_bes);
   meta_be_sobj->init(sysobj.get(), mdlog.get());
   meta_be_otp->init(sysobj.get(), mdlog.get(), cls.get());
@@ -300,10 +305,10 @@ void RGWServices_Def::shutdown()
   quota->shutdown();
   zone_utils->shutdown();
   zone->shutdown();
+  async_processor->stop();
   rados->shutdown();
 
   has_shutdown = true;
-
 }
 
 int RGWServices::do_init(CephContext *_cct, bool have_cache, bool raw,
@@ -345,6 +350,7 @@ int RGWServices::do_init(CephContext *_cct, bool have_cache, bool raw,
   core = _svc.sysobj_core.get();
   user = _svc.user_rados.get();
   role = _svc.role_rados.get();
+  async_processor = _svc.async_processor.get();
 
   return 0;
 }
index 46ee16417feb29b9168935f7aa21555dd99a2432..ad80a6c88266bee76ff6d8ff1b45e0c7c387e6bd 100644 (file)
@@ -74,6 +74,7 @@ class RGWSI_User;
 class RGWSI_User_RADOS;
 class RGWDataChangesLog;
 class RGWSI_Role_RADOS;
+class RGWAsyncRadosProcessor;
 
 struct RGWServices_Def
 {
@@ -104,6 +105,7 @@ struct RGWServices_Def
   std::unique_ptr<RGWSI_User_RADOS> user_rados;
   std::unique_ptr<RGWDataChangesLog> datalog_rados;
   std::unique_ptr<RGWSI_Role_RADOS> role_rados;
+  std::unique_ptr<RGWAsyncRadosProcessor> async_processor;
 
   RGWServices_Def();
   ~RGWServices_Def();
@@ -149,6 +151,7 @@ struct RGWServices
   RGWSI_SysObj_Core *core{nullptr};
   RGWSI_User *user{nullptr};
   RGWSI_Role_RADOS *role{nullptr};
+  RGWAsyncRadosProcessor* async_processor;
 
   int do_init(CephContext *cct, bool have_cache, bool raw_storage,
              bool run_sync, librados::Rados* radoshandle, optional_yield y,
index be6d990d962b7503e5174e93bcd0ea6840ad368c..db9980470055ee01e376e3e35d6cd7e9251da00a 100644 (file)
@@ -617,7 +617,7 @@ int BucketTrimInstanceCR::operate(const DoutPrefixProvider *dpp)
 
     get_policy_params.zone = zone_id;
     get_policy_params.bucket = bucket;
-    yield call(new RGWBucketGetSyncPolicyHandlerCR(store->svc()->rados->get_async_processor(),
+    yield call(new RGWBucketGetSyncPolicyHandlerCR(store->svc()->async_processor,
                                                    store,
                                                    get_policy_params,
                                                    source_policy,
@@ -728,14 +728,14 @@ int BucketTrimInstanceCR::operate(const DoutPrefixProvider *dpp)
       }
       while (clean_info && retries < MAX_RETRIES) {
        yield call(new RGWPutBucketInstanceInfoCR(
-                    store->svc()->rados->get_async_processor(),
+                    store->svc()->async_processor,
                     store, clean_info->first, false, {},
                     no_change_attrs(), dpp));
 
        // Raced, try again.
        if (retcode == -ECANCELED) {
          yield call(new RGWGetBucketInstanceInfoCR(
-                      store->svc()->rados->get_async_processor(),
+                      store->svc()->async_processor,
                       store, clean_info->first.bucket,
                       &(clean_info->first), nullptr, dpp));
          if (retcode < 0) {
@@ -1132,7 +1132,7 @@ int BucketTrimCR::operate(const DoutPrefixProvider *dpp)
           return buckets.size() < config.buckets_per_interval;
         };
 
-        call(new MetadataListCR(cct, store->svc()->rados->get_async_processor(),
+        call(new MetadataListCR(cct, store->svc()->async_processor,
                                 store->ctl()->meta.mgr,
                                 section, status.marker, cb));
       }
@@ -1219,7 +1219,7 @@ int BucketTrimPollCR::operate(const DoutPrefixProvider *dpp)
 
       // prevent others from trimming for our entire wait interval
       set_status("acquiring trim lock");
-      yield call(new RGWSimpleRadosLockCR(store->svc()->rados->get_async_processor(), store,
+      yield call(new RGWSimpleRadosLockCR(store->svc()->async_processor, store,
                                           obj, name, cookie,
                                           config.trim_interval_sec));
       if (retcode < 0) {
@@ -1232,7 +1232,7 @@ int BucketTrimPollCR::operate(const DoutPrefixProvider *dpp)
       if (retcode < 0) {
         // on errors, unlock so other gateways can try
         set_status("unlocking");
-        yield call(new RGWSimpleRadosUnlockCR(store->svc()->rados->get_async_processor(), store,
+        yield call(new RGWSimpleRadosUnlockCR(store->svc()->async_processor, store,
                                               obj, name, cookie));
       }
     }
index bac0cda8dd6477650853d82cbbe80ecbba342a1b..5dcddb659e1e51c69fb90022eaa285c1041744b5 100644 (file)
@@ -224,7 +224,7 @@ int DataLogTrimPollCR::operate(const DoutPrefixProvider *dpp)
       // prevent other gateways from attempting to trim for the duration
       set_status("acquiring trim lock");
 
-      yield call(new RGWSimpleRadosLockCR(store->svc()->rados->get_async_processor(), store,
+      yield call(new RGWSimpleRadosLockCR(store->svc()->async_processor, store,
                                           rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, lock_oid),
                                           "data_trim", lock_cookie,
                                           // interval is a small number and unlikely to overflow
index 13773ae2877f91fcc75e6a0c7bb782b12da46426..be513b6e74df19a0cd822bbe41010e72ef48108e 100644 (file)
@@ -565,7 +565,7 @@ class MetaPeerTrimShardCollectCR : public RGWShardCollectCR {
       env(env), mdlog(mdlog), period_id(env.current.get_period().get_id())
   {
     meta_env.init(env.dpp, cct, env.store, env.store->svc()->zone->get_master_conn(),
-                  env.store->svc()->rados->get_async_processor(), env.http, nullptr,
+                  env.store->svc()->async_processor, env.http, nullptr,
                   env.store->getRados()->get_sync_tracer());
   }
 
@@ -669,7 +669,7 @@ int MetaTrimPollCR::operate(const DoutPrefixProvider *dpp)
       // prevent others from trimming for our entire wait interval
       set_status("acquiring trim lock");
 
-      yield call(new RGWSimpleRadosLockCR(store->svc()->rados->get_async_processor(), store,
+      yield call(new RGWSimpleRadosLockCR(store->svc()->async_processor, store,
                                           obj, name, cookie, 
                                           // interval is a small number and unlikely to overflow
                                           // coverity[store_truncates_time_t:SUPPRESS]
@@ -685,7 +685,7 @@ int MetaTrimPollCR::operate(const DoutPrefixProvider *dpp)
       if (retcode < 0) {
         // on errors, unlock so other gateways can try
         set_status("unlocking");
-        yield call(new RGWSimpleRadosUnlockCR(store->svc()->rados->get_async_processor(), store,
+        yield call(new RGWSimpleRadosUnlockCR(store->svc()->async_processor, store,
                                               obj, name, cookie));
       }
     }
index c19d44c9e5366acf72d2198af9cc6753824fd3e7..f4a2d1480bdad0e03744008d35e855196afa05ba 100644 (file)
@@ -2098,7 +2098,7 @@ stringstream& push_ss(stringstream& ss, list<string>& l, int tab = 0)
 
 static void get_md_sync_status(list<string>& status)
 {
-  RGWMetaSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->rados->get_async_processor());
+  RGWMetaSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->async_processor);
 
   int ret = sync.init(dpp());
   if (ret < 0) {
@@ -2254,7 +2254,7 @@ static void get_data_sync_status(const rgw_zone_id& source_zone, list<string>& s
     flush_ss(ss, status);
     return;
   }
-  RGWDataSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->rados->get_async_processor(), source_zone, nullptr);
+  RGWDataSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->async_processor, source_zone, nullptr);
 
   int ret = sync.init(dpp());
   if (ret < 0) {
@@ -8995,7 +8995,7 @@ next:
   }
 
   if (opt_cmd == OPT::METADATA_SYNC_STATUS) {
-    RGWMetaSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->rados->get_async_processor());
+    RGWMetaSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->async_processor);
 
     int ret = sync.init(dpp());
     if (ret < 0) {
@@ -9039,7 +9039,7 @@ next:
   }
 
   if (opt_cmd == OPT::METADATA_SYNC_INIT) {
-    RGWMetaSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->rados->get_async_processor());
+    RGWMetaSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->async_processor);
 
     int ret = sync.init(dpp());
     if (ret < 0) {
@@ -9055,7 +9055,7 @@ next:
 
 
   if (opt_cmd == OPT::METADATA_SYNC_RUN) {
-    RGWMetaSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->rados->get_async_processor());
+    RGWMetaSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->async_processor);
 
     int ret = sync.init(dpp());
     if (ret < 0) {
@@ -9075,7 +9075,7 @@ next:
       cerr << "ERROR: source zone not specified" << std::endl;
       return EINVAL;
     }
-    RGWDataSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->rados->get_async_processor(), source_zone, nullptr);
+    RGWDataSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->async_processor, source_zone, nullptr);
 
     int ret = sync.init(dpp());
     if (ret < 0) {
@@ -9145,7 +9145,7 @@ next:
       return EINVAL;
     }
 
-    RGWDataSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->rados->get_async_processor(), source_zone, nullptr);
+    RGWDataSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->async_processor, source_zone, nullptr);
 
     int ret = sync.init(dpp());
     if (ret < 0) {
@@ -9174,7 +9174,7 @@ next:
       return ret;
     }
 
-    RGWDataSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->rados->get_async_processor(), source_zone, nullptr, sync_module);
+    RGWDataSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->async_processor, source_zone, nullptr, sync_module);
 
     ret = sync.init(dpp());
     if (ret < 0) {
index da723f3e1295dd8df95eaf796b3cdea3a7f86e2d..603718dc96e5133bd469541cd8043fb54c0e8ddc 100644 (file)
@@ -30,13 +30,16 @@ RGWSI_MDLog::RGWSI_MDLog(CephContext *cct, bool _run_sync) : RGWServiceInstance(
 RGWSI_MDLog::~RGWSI_MDLog() {
 }
 
-int RGWSI_MDLog::init(RGWSI_RADOS *_rados_svc, RGWSI_Zone *_zone_svc, RGWSI_SysObj *_sysobj_svc, RGWSI_Cls *_cls_svc)
+int RGWSI_MDLog::init(librados::Rados* rados_, RGWSI_Zone *_zone_svc,
+                     RGWSI_SysObj *_sysobj_svc, RGWSI_Cls *_cls_svc,
+                     RGWAsyncRadosProcessor* async_processor_)
 {
   svc.zone = _zone_svc;
   svc.sysobj = _sysobj_svc;
   svc.mdlog = this;
-  svc.rados = _rados_svc;
+  rados = rados_;
   svc.cls = _cls_svc;
+  async_processor = async_processor_;
 
   return 0;
 }
@@ -262,11 +265,12 @@ class ReadHistoryCR : public RGWCoroutine {
   ReadHistoryCR(const DoutPrefixProvider *dpp, 
                 const Svc& svc,
                 Cursor *cursor,
-                RGWObjVersionTracker *objv_tracker)
+                RGWObjVersionTracker *objv_tracker,
+               RGWAsyncRadosProcessor* async_processor)
     : RGWCoroutine(svc.zone->ctx()), dpp(dpp), svc(svc),
       cursor(cursor),
       objv_tracker(objv_tracker),
-      async_processor(svc.rados->get_async_processor())
+      async_processor(async_processor)
   {}
 
   int operate(const DoutPrefixProvider *dpp) {
@@ -312,10 +316,11 @@ class WriteHistoryCR : public RGWCoroutine {
   WriteHistoryCR(const DoutPrefixProvider *dpp, 
                  Svc& svc,
                  const Cursor& cursor,
-                 RGWObjVersionTracker *objv)
+                 RGWObjVersionTracker *objv,
+                RGWAsyncRadosProcessor* async_processor)
     : RGWCoroutine(svc.zone->ctx()), dpp(dpp), svc(svc),
       cursor(cursor), objv(objv),
-      async_processor(svc.rados->get_async_processor())
+      async_processor(async_processor)
   {}
 
   int operate(const DoutPrefixProvider *dpp) {
@@ -353,18 +358,22 @@ class TrimHistoryCR : public RGWCoroutine {
   RGWObjVersionTracker *objv; //< to prevent racing updates
   Cursor next; //< target cursor for oldest log period
   Cursor existing; //< existing cursor read from disk
+  RGWAsyncRadosProcessor* async_processor;
 
  public:
-  TrimHistoryCR(const DoutPrefixProvider *dpp, const Svc& svc, Cursor cursor, RGWObjVersionTracker *objv)
+  TrimHistoryCR(const DoutPrefixProvider *dpp, const Svc& svc, Cursor cursor,
+               RGWObjVersionTracker *objv,
+               RGWAsyncRadosProcessor* async_processor)
     : RGWCoroutine(svc.zone->ctx()), dpp(dpp), svc(svc),
-      cursor(cursor), objv(objv), next(cursor) {
+      cursor(cursor), objv(objv), next(cursor),
+      async_processor(async_processor) {
     next.next(); // advance past cursor
   }
 
   int operate(const DoutPrefixProvider *dpp) {
     reenter(this) {
       // read an existing history, and write the new history if it's newer
-      yield call(new ReadHistoryCR(dpp, svc, &existing, objv));
+      yield call(new ReadHistoryCR(dpp, svc, &existing, objv, async_processor));
       if (retcode < 0) {
         return set_cr_error(retcode);
       }
@@ -375,7 +384,7 @@ class TrimHistoryCR : public RGWCoroutine {
         return set_cr_error(-ECANCELED);
       }
       // overwrite with updated history
-      yield call(new WriteHistoryCR(dpp, svc, next, objv));
+      yield call(new WriteHistoryCR(dpp, svc, next, objv, async_processor));
       if (retcode < 0) {
         return set_cr_error(retcode);
       }
@@ -512,13 +521,13 @@ Cursor RGWSI_MDLog::read_oldest_log_period(optional_yield y, const DoutPrefixPro
 RGWCoroutine* RGWSI_MDLog::read_oldest_log_period_cr(const DoutPrefixProvider *dpp, 
         Cursor *period, RGWObjVersionTracker *objv) const
 {
-  return new mdlog::ReadHistoryCR(dpp, svc, period, objv);
+  return new mdlog::ReadHistoryCR(dpp, svc, period, objv, async_processor);
 }
 
 RGWCoroutine* RGWSI_MDLog::trim_log_period_cr(const DoutPrefixProvider *dpp, 
         Cursor period, RGWObjVersionTracker *objv) const
 {
-  return new mdlog::TrimHistoryCR(dpp, svc, period, objv);
+  return new mdlog::TrimHistoryCR(dpp, svc, period, objv, async_processor);
 }
 
 RGWMetadataLog* RGWSI_MDLog::get_log(const std::string& period)
index 8b37ba11e565922f4f92c5e95f770ed20cb08ef0..f169ee88db9bc66b82510d33f6d156c7fc2186d2 100644 (file)
@@ -29,7 +29,6 @@ class RGWCoroutine;
 
 class RGWSI_Zone;
 class RGWSI_SysObj;
-class RGWSI_RADOS;
 
 namespace mdlog {
   class ReadHistoryCR;
@@ -58,18 +57,21 @@ public:
   RGWSI_MDLog(CephContext *cct, bool run_sync);
   virtual ~RGWSI_MDLog();
 
+  librados::Rados* rados{nullptr};
+  RGWAsyncRadosProcessor* async_processor{nullptr};
+
   struct Svc {
-    RGWSI_RADOS *rados{nullptr};
     RGWSI_Zone *zone{nullptr};
     RGWSI_SysObj *sysobj{nullptr};
     RGWSI_MDLog *mdlog{nullptr};
     RGWSI_Cls *cls{nullptr};
   } svc;
 
-  int init(RGWSI_RADOS *_rados_svc,
+  int init(librados::Rados* rados_,
            RGWSI_Zone *_zone_svc,
            RGWSI_SysObj *_sysobj_svc,
-           RGWSI_Cls *_cls_svc);
+           RGWSI_Cls *_cls_svc,
+          RGWAsyncRadosProcessor* async_processor_);
 
   int do_start(optional_yield y, const DoutPrefixProvider *dpp) override;
 
index d682b87eb2c278ad2c43355d0945e01e0f402047..34de9857e19a842ec2ac4b44f14153ff0be35aa6 100644 (file)
@@ -34,27 +34,14 @@ int RGWSI_RADOS::do_start(optional_yield, const DoutPrefixProvider *dpp)
     return ret;
   }
 
-  async_processor.reset(new RGWAsyncRadosProcessor(cct, cct->_conf->rgw_num_async_rados_threads));
-  async_processor->start();
-
   return 0;
 }
 
 void RGWSI_RADOS::shutdown()
 {
-  if (async_processor) {
-    async_processor->stop();
-  }
   rados.shutdown();
 }
 
-void RGWSI_RADOS::stop_processor()
-{
-  if (async_processor) {
-    async_processor->stop();
-  }
-}
-
 librados::Rados* RGWSI_RADOS::get_rados_handle()
 {
   return &rados;
index 4e7cf672c7a739a6dbfc6d3cced90201300d1452..2a3940a34d9603215be44f464ebbc0f61a3520b0 100644 (file)
@@ -28,7 +28,6 @@ struct RGWAccessListFilterPrefix : public RGWAccessListFilter {
 class RGWSI_RADOS : public RGWServiceInstance
 {
   librados::Rados rados;
-  std::unique_ptr<RGWAsyncRadosProcessor> async_processor;
 
   int do_start(optional_yield, const DoutPrefixProvider *dpp) override;
 
@@ -66,16 +65,11 @@ public:
 
   void init() {}
   void shutdown() override;
-  void stop_processor();
 
   std::string cluster_fsid();
   uint64_t instance_id();
   bool check_secure_mon_conn(const DoutPrefixProvider *dpp) const;
 
-  RGWAsyncRadosProcessor *get_async_processor() {
-    return async_processor.get();
-  }
-
   int clog_warn(const std::string& msg);
 
   class Handle;