git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
rgw: SAL drivers take `boost::asio::io_context`
author: Adam C. Emerson <aemerson@redhat.com>
Fri, 14 Apr 2023 18:59:19 +0000 (14:59 -0400)
committer: Adam Emerson <aemerson@redhat.com>
Wed, 24 Jan 2024 20:51:46 +0000 (15:51 -0500)
`RadosDriver` needs it, and since SAL generally uses `optional_yield`,
other stores are going to need it to implement that sensibly.

Signed-off-by: Adam C. Emerson <aemerson@redhat.com>
18 files changed:
src/rgw/CMakeLists.txt
src/rgw/driver/rados/rgw_rados.cc
src/rgw/driver/rados/rgw_sal_rados.cc
src/rgw/driver/rados/rgw_sal_rados.h
src/rgw/driver/rados/rgw_service.cc
src/rgw/driver/rados/rgw_service.h
src/rgw/rgw_admin.cc
src/rgw/rgw_appmain.cc
src/rgw/rgw_main.h
src/rgw/rgw_object_expirer.cc
src/rgw/rgw_realm_reloader.cc
src/rgw/rgw_realm_reloader.h
src/rgw/rgw_sal.cc
src/rgw/rgw_sal.h
src/test/rgw/rgw_cr_test.cc
src/test/rgw/test_d4n_filter.cc
src/test/rgw/test_rgw_iam_policy.cc
src/test/rgw/test_rgw_lua.cc

index a308c833ddce8fa54c6800f7c0714b30e3c97859..3c2f1423808b2639cc3fdfd4fda8e2a5ab7b025a 100644 (file)
@@ -270,6 +270,7 @@ target_link_libraries(rgw_common
     cls_user_client
     cls_version_client
     librados
+    libneorados
     rt
     ICU::uc
     OATH::OATH
index 207ed5c919c0909ff0788646fddbd7cb9c946724..fe0d14d1029ae182c9c96c4a20b7234aae7423e9 100644 (file)
@@ -1365,11 +1365,10 @@ int RGWRados::init_complete(const DoutPrefixProvider *dpp, optional_yield y)
 int RGWRados::init_svc(bool raw, const DoutPrefixProvider *dpp)
 {
   if (raw) {
-    return svc.init_raw(cct, use_cache, get_rados_handle(), null_yield, dpp);
+    return svc.init_raw(cct, driver, use_cache, null_yield, dpp);
   }
 
-  return svc.init(cct, use_cache, run_sync_thread, get_rados_handle(),
-                 null_yield, dpp);
+  return svc.init(cct, driver, use_cache, run_sync_thread, null_yield, dpp);
 }
 
 int RGWRados::init_ctl(const DoutPrefixProvider *dpp)
@@ -1383,9 +1382,16 @@ int RGWRados::init_ctl(const DoutPrefixProvider *dpp)
  */
 int RGWRados::init_begin(const DoutPrefixProvider *dpp)
 {
-  int ret = init_rados();
+  int ret;
+
+  ret = driver->init_neorados(dpp);
+  if (ret < 0) {
+    ldpp_dout(dpp, 0) << "ERROR: failed to initialize neorados (ret=" << cpp_strerror(-ret) << ")" << dendl;
+    return ret;
+  }
+  ret = init_rados();
   if (ret < 0) {
-    ldpp_dout(dpp, 0) << "ERROR: failed to init rados (ret=" << cpp_strerror(-ret) << ")" << dendl;
+    ldpp_dout(dpp, 0) << "ERROR: failed to initialize librados (ret=" << cpp_strerror(-ret) << ")" << dendl;
     return ret;
   }
 
index 32d11a151b5eef89722d9e6059e5e1bec5f1fdb6..600e54d97d50adf44af735d28bb30ba5ae0a46a2 100644 (file)
@@ -22,6 +22,8 @@
 #include <boost/algorithm/string.hpp>
 #include <boost/process.hpp>
 
+#include "common/async/blocked_completion.h"
+
 #include "common/Clock.h"
 #include "common/errno.h"
 
@@ -1124,12 +1126,25 @@ int RadosStore::get_raw_chunk_size(const DoutPrefixProvider* dpp, const rgw_raw_
   return rados->get_max_chunk_size(obj.pool, chunk_size, dpp);
 }
 
+int RadosStore::init_neorados(const DoutPrefixProvider* dpp) {
+  if (!neorados) try {
+      neorados = neorados::RADOS::make_with_cct(dpp->get_cct(), io_context,
+                                               ceph::async::use_blocked);
+    } catch (const boost::system::system_error& e) {
+      ldpp_dout(dpp, 0) << "ERROR: creating neorados handle failed: "
+                       << e.what() << dendl;
+      return ceph::from_error_code(e.code());
+    }
+  return 0;
+}
+
 int RadosStore::initialize(CephContext *cct, const DoutPrefixProvider *dpp)
 {
   std::unique_ptr<ZoneGroup> zg =
     std::make_unique<RadosZoneGroup>(this, svc()->zone->get_zonegroup());
   zone = make_unique<RadosZone>(this, std::move(zg));
-  return 0;
+
+  return init_neorados(dpp);
 }
 
 int RadosStore::log_usage(const DoutPrefixProvider *dpp, map<rgw_user_bucket, RGWUsageBatch>& usage_info, optional_yield y)
@@ -3718,9 +3733,10 @@ int RadosRole::delete_obj(const DoutPrefixProvider *dpp, optional_yield y)
 
 extern "C" {
 
-void* newRadosStore(void)
+void* newRadosStore(void* io_context)
 {
-  rgw::sal::RadosStore* store = new rgw::sal::RadosStore();
+  rgw::sal::RadosStore* store = new rgw::sal::RadosStore(
+    *static_cast<boost::asio::io_context*>(io_context));
   if (store) {
     RGWRados* rados = new RGWRados();
 
index 7b810504457b4b99f3c3a151b2c549b487f25b45..d5174f39bc8f6f4ecc9dc3e7f1821bbe42bf9c98 100644 (file)
 
 #pragma once
 
+#include "include/neorados/RADOS.hpp"
+
+#include <boost/asio/io_context.hpp>
+
 #include "rgw_sal_store.h"
 #include "rgw_rados.h"
 #include "rgw_notify.h"
@@ -112,19 +116,22 @@ class RadosZone : public StoreZone {
 
 class RadosStore : public StoreDriver {
   private:
+    boost::asio::io_context& io_context;
     RGWRados* rados;
     RGWUserCtl* user_ctl;
     std::unique_ptr<RadosZone> zone;
+    std::optional<neorados::RADOS> neorados;
     std::string topics_oid(const std::string& tenant) const;
 
   public:
-    RadosStore()
-      : rados(nullptr) {
+    RadosStore(boost::asio::io_context& io_context)
+      : io_context(io_context), rados(nullptr) {
       }
     ~RadosStore() {
       delete rados;
     }
 
+    int init_neorados(const DoutPrefixProvider* dpp);
     virtual int initialize(CephContext *cct, const DoutPrefixProvider *dpp) override;
     virtual const std::string get_name() const override {
       return "rados";
@@ -240,6 +247,8 @@ class RadosStore : public StoreDriver {
 
     void setRados(RGWRados * st) { rados = st; }
     RGWRados* getRados(void) { return rados; }
+    boost::asio::io_context& get_io_context() { return io_context; }
+    neorados::RADOS& get_neorados() { return *neorados; }
 
     RGWServices* svc() { return &rados->svc; }
     const RGWServices* svc() const { return &rados->svc; }
index 5b78472dfe3bab981e50902ad4853498eb435b8d..4be0738bae2bf0ab0e24eaabd7e24b606463b9a9 100644 (file)
@@ -34,6 +34,7 @@
 #include "rgw_datalog.h"
 #include "rgw_metadata.h"
 #include "rgw_otp.h"
+#include "rgw_sal_rados.h"
 #include "rgw_user.h"
 #include "rgw_role.h"
 
@@ -48,10 +49,10 @@ RGWServices_Def::~RGWServices_Def()
 }
 
 int RGWServices_Def::init(CephContext *cct,
+                         rgw::sal::RadosStore* driver,
                          bool have_cache,
                           bool raw,
                          bool run_sync,
-                         librados::Rados* rados,
                          optional_yield y,
                           const DoutPrefixProvider *dpp)
 {
@@ -88,7 +89,8 @@ int RGWServices_Def::init(CephContext *cct,
 
   async_processor->start();
   finisher->init();
-  bi_rados->init(zone.get(), rados, bilog_rados.get(), datalog_rados.get());
+  bi_rados->init(zone.get(), driver->getRados()->get_rados_handle(),
+                bilog_rados.get(), datalog_rados.get());
   bilog_rados->init(bi_rados.get());
   bucket_sobj->init(zone.get(), sysobj.get(), sysobj_cache.get(),
                     bi_rados.get(), meta.get(), meta_be_sobj.get(),
@@ -97,27 +99,29 @@ int RGWServices_Def::init(CephContext *cct,
                          sysobj.get(),
                          sysobj_cache.get(),
                          bucket_sobj.get());
-  cls->init(zone.get(), rados);
-  config_key_rados->init(rados);
-  mdlog->init(rados, zone.get(), sysobj.get(), cls.get(),
-             async_processor.get());
+  cls->init(zone.get(), driver->getRados()->get_rados_handle());
+  config_key_rados->init(driver->getRados()->get_rados_handle());
+  mdlog->init(driver->getRados()->get_rados_handle(), zone.get(), sysobj.get(),
+             cls.get(), async_processor.get());
   meta->init(sysobj.get(), mdlog.get(), meta_bes);
   meta_be_sobj->init(sysobj.get(), mdlog.get());
   meta_be_otp->init(sysobj.get(), mdlog.get(), cls.get());
-  notify->init(zone.get(), rados, finisher.get());
+  notify->init(zone.get(), driver->getRados()->get_rados_handle(),
+              finisher.get());
   otp->init(zone.get(), meta.get(), meta_be_otp.get());
-  zone->init(sysobj.get(), rados, sync_modules.get(), bucket_sync_sobj.get());
-  zone_utils->init(rados, zone.get());
+  zone->init(sysobj.get(), driver->getRados()->get_rados_handle(),
+            sync_modules.get(), bucket_sync_sobj.get());
+  zone_utils->init(driver->getRados()->get_rados_handle(), zone.get());
   quota->init(zone.get());
   sync_modules->init(zone.get());
-  sysobj_core->core_init(rados, zone.get());
+  sysobj_core->core_init(driver->getRados()->get_rados_handle(), zone.get());
   if (have_cache) {
-    sysobj_cache->init(rados, zone.get(), notify.get());
-    sysobj->init(rados, sysobj_cache.get());
+    sysobj_cache->init(driver->getRados()->get_rados_handle(), zone.get(), notify.get());
+    sysobj->init(driver->getRados()->get_rados_handle(), sysobj_cache.get());
   } else {
-    sysobj->init(rados, sysobj_core.get());
+    sysobj->init(driver->getRados()->get_rados_handle(), sysobj_core.get());
   }
-  user_rados->init(rados, zone.get(), sysobj.get(), sysobj_cache.get(),
+  user_rados->init(driver->getRados()->get_rados_handle(), zone.get(), sysobj.get(), sysobj_cache.get(),
                    meta.get(), meta_be_sobj.get(), sync_modules.get());
   role_rados->init(zone.get(), meta.get(), meta_be_sobj.get(), sysobj.get());
 
@@ -146,7 +150,7 @@ int RGWServices_Def::init(CephContext *cct,
 
     r = datalog_rados->start(dpp, &zone->get_zone(),
                             zone->get_zone_params(),
-                            rados);
+                            driver->getRados()->get_rados_handle());
     if (r < 0) {
       ldpp_dout(dpp, 0) << "ERROR: failed to start datalog_rados service (" << cpp_strerror(-r) << dendl;
       return r;
@@ -301,13 +305,11 @@ void RGWServices_Def::shutdown()
   has_shutdown = true;
 }
 
-int RGWServices::do_init(CephContext *_cct, bool have_cache, bool raw,
-                        bool run_sync, librados::Rados* rados,
-                        optional_yield y, const DoutPrefixProvider *dpp)
+int RGWServices::do_init(CephContext *_cct, rgw::sal::RadosStore* driver, bool have_cache, bool raw, bool run_sync, optional_yield y, const DoutPrefixProvider *dpp)
 {
   cct = _cct;
 
-  int r = _svc.init(cct, have_cache, raw, run_sync, rados, y, dpp);
+  int r = _svc.init(cct, driver, have_cache, raw, run_sync, y, dpp);
   if (r < 0) {
     return r;
   }
index 7c05f043a47c723104f7ce45304b3f62d3604fdd..9996b42e2514563aaf7afebdec1375f7d0659062 100644 (file)
 
 #include "rgw_common.h"
 
+namespace rgw::sal {
+class RadosStore;
+}
+
 struct RGWServices_Def;
 
 class RGWServiceInstance
@@ -108,8 +112,8 @@ struct RGWServices_Def
   RGWServices_Def();
   ~RGWServices_Def();
 
-  int init(CephContext *cct, bool have_cache, bool raw_storage, bool run_sync,
-          librados::Rados* rados, optional_yield y,
+  int init(CephContext *cct, rgw::sal::RadosStore* store, bool have_cache,
+          bool raw_storage, bool run_sync, optional_yield y,
           const DoutPrefixProvider *dpp);
   void shutdown();
 };
@@ -150,19 +154,19 @@ struct RGWServices
   RGWSI_Role_RADOS *role{nullptr};
   RGWAsyncRadosProcessor* async_processor;
 
-  int do_init(CephContext *cct, bool have_cache, bool raw_storage,
-             bool run_sync, librados::Rados* rados, optional_yield y,
+  int do_init(CephContext *cct, rgw::sal::RadosStore* store, bool have_cache,
+             bool raw_storage, bool run_sync, optional_yield y,
              const DoutPrefixProvider *dpp);
 
-  int init(CephContext *cct, bool have_cache, bool run_sync,
-          librados::Rados* rados, optional_yield y,
-          const DoutPrefixProvider *dpp) {
-    return do_init(cct, have_cache, false, run_sync, rados, y, dpp);
+  int init(CephContext *cct, rgw::sal::RadosStore* store, bool have_cache,
+          bool run_sync, optional_yield y, const DoutPrefixProvider *dpp) {
+    return do_init(cct, store, have_cache, false, run_sync, y, dpp);
   }
 
-  int init_raw(CephContext *cct, bool have_cache, librados::Rados* rados,
-              optional_yield y, const DoutPrefixProvider *dpp) {
-    return do_init(cct, have_cache, true, false, rados, y, dpp);
+  int init_raw(CephContext *cct, rgw::sal::RadosStore* store,
+              bool have_cache, optional_yield y,
+              const DoutPrefixProvider *dpp) {
+    return do_init(cct, store, have_cache, true, false, y, dpp);
   }
   void shutdown() {
     _svc.shutdown();
index 3d817c6039bf85bf97c31b65595c65bb9cc8ced7..5a3e5102bf7f64e049d65480c6e541f2d32abad3 100644 (file)
@@ -17,6 +17,8 @@ extern "C" {
 #include "auth/Crypto.h"
 #include "compressor/Compressor.h"
 
+#include "common/async/context_pool.h"
+
 #include "common/armor.h"
 #include "common/ceph_json.h"
 #include "common/config.h"
@@ -3309,6 +3311,7 @@ int main(int argc, const char **argv)
 
   auto cct = rgw_global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
                             CODE_ENVIRONMENT_UTILITY, 0);
+  ceph::async::io_context_pool context_pool(cct->_conf->rgw_thread_pool_size);
 
   // for region -> zonegroup conversion (must happen before common_init_finish())
   if (!g_conf()->rgw_region.empty() && g_conf()->rgw_zonegroup.empty()) {
@@ -4256,13 +4259,13 @@ int main(int argc, const char **argv)
     }
 
     if (raw_storage_op) {
-      driver = DriverManager::get_raw_storage(dpp(),
-                                           g_ceph_context,
-                                           cfg);
+      driver = DriverManager::get_raw_storage(dpp(), g_ceph_context,
+                                             cfg, context_pool);
     } else {
       driver = DriverManager::get_storage(dpp(),
                                        g_ceph_context,
                                        cfg,
+                                       context_pool,
                                        false,
                                        false,
                                        false,
index 57a1a16783a9ff862637c153a8cd01c802d94fd5..d37721ef4b9de597a8598c0e659ac1de7e933049 100644 (file)
@@ -237,6 +237,7 @@ int rgw::AppMain::init_storage()
   DriverManager::Config cfg = DriverManager::get_config(false, g_ceph_context);
   env.driver = DriverManager::get_storage(dpp, dpp->get_cct(),
           cfg,
+         context_pool,
           run_gc,
           run_lc,
           run_quota,
@@ -515,7 +516,7 @@ int rgw::AppMain::init_frontends2(RGWLib* rgwlib)
       rgw_pauser->add_pauser(env.lua.background);
     }
     reloader = std::make_unique<RGWRealmReloader>(
-        env, *implicit_tenant_context, service_map_meta, rgw_pauser.get());
+      env, *implicit_tenant_context, service_map_meta, rgw_pauser.get(), context_pool);
     realm_watcher = std::make_unique<RGWRealmWatcher>(dpp, g_ceph_context,
                                  static_cast<rgw::sal::RadosStore*>(env.driver)->svc()->zone->get_realm());
     realm_watcher->add_watcher(RGWRealmNotify::Reload, *reloader);
index 49846c5d595a01a7c93d4f9b797639f08b758039..9bdea60e2860df75f5047cb98e6247c7c312bcdc 100644 (file)
@@ -18,6 +18,9 @@
 #include <vector>
 #include <map>
 #include <string>
+
+#include "common/async/context_pool.h"
+
 #include "rgw_common.h"
 #include "rgw_rest.h"
 #include "rgw_frontend.h"
@@ -81,7 +84,8 @@ class AppMain {
   SiteConfig site;
   const DoutPrefixProvider* dpp;
   RGWProcessEnv env;
-
+  ceph::async::io_context_pool context_pool{
+    dpp->get_cct()->_conf->rgw_thread_pool_size};
 public:
   AppMain(const DoutPrefixProvider* dpp);
   ~AppMain();
index 0470b1a6e6fc28bf394054c40087913e4fb8b9df..e3a816865f80c517ef87bbedc2e56f69a17255fb 100644 (file)
@@ -9,6 +9,8 @@
 
 #include "auth/Crypto.h"
 
+#include "common/async/context_pool.h"
+
 #include "common/armor.h"
 #include "common/ceph_json.h"
 #include "common/config.h"
@@ -81,12 +83,13 @@ int main(const int argc, const char **argv)
   }
 
   common_init_finish(g_ceph_context);
+  ceph::async::io_context_pool context_pool{cct->_conf->rgw_thread_pool_size};
 
   const DoutPrefix dp(cct.get(), dout_subsys, "rgw object expirer: ");
   DriverManager::Config cfg;
   cfg.store_name = "rados";
   cfg.filter_name = "none";
-  driver = DriverManager::get_storage(&dp, g_ceph_context, cfg, false, false, false, false, false, false, null_yield);
+  driver = DriverManager::get_storage(&dp, g_ceph_context, cfg, context_pool, false, false, false, false, false, false, null_yield);
   if (!driver) {
     std::cerr << "couldn't init storage provider" << std::endl;
     return EIO;
index b3a511becb685b92d61ec486fb2a83b72a2e3c42..26dffa99cf4f7ccd7c46e095d3f026283f31709c 100644 (file)
@@ -31,11 +31,13 @@ static constexpr bool USE_SAFE_TIMER_CALLBACKS = false;
 RGWRealmReloader::RGWRealmReloader(RGWProcessEnv& env,
                                    const rgw::auth::ImplicitTenants& implicit_tenants,
                                    std::map<std::string, std::string>& service_map_meta,
-                                   Pauser* frontends)
+                                   Pauser* frontends,
+                                  boost::asio::io_context& io_context)
   : env(env),
     implicit_tenants(implicit_tenants),
     service_map_meta(service_map_meta),
     frontends(frontends),
+    io_context(io_context),
     timer(env.driver->ctx(), mutex, USE_SAFE_TIMER_CALLBACKS),
     mutex(ceph::make_mutex("RGWRealmReloader")),
     reload_scheduled(nullptr)
@@ -118,7 +120,7 @@ void RGWRealmReloader::reload()
       DriverManager::Config cfg;
       cfg.store_name = "rados";
       cfg.filter_name = "none";
-      env.driver = DriverManager::get_storage(&dp, cct, cfg,
+      env.driver = DriverManager::get_storage(&dp, cct, cfg, io_context,
           cct->_conf->rgw_enable_gc_threads,
           cct->_conf->rgw_enable_lc_threads,
           cct->_conf->rgw_enable_quota_threads,
index 25082a2e490d78cdaf8ac4f3a8bdaa0d854a392f..6cf969da55add980b6f48f45e6d31029cfcfe1ec 100644 (file)
@@ -3,6 +3,8 @@
 
 #pragma once
 
+#include <boost/asio/io_context.hpp>
+
 #include "rgw_realm_watcher.h"
 #include "common/Cond.h"
 #include "rgw_sal_fwd.h"
@@ -36,7 +38,7 @@ class RGWRealmReloader : public RGWRealmWatcher::Watcher {
   RGWRealmReloader(RGWProcessEnv& env,
                    const rgw::auth::ImplicitTenants& implicit_tenants,
                    std::map<std::string, std::string>& service_map_meta,
-                   Pauser* frontends);
+                   Pauser* frontends, boost::asio::io_context& io_context);
   ~RGWRealmReloader() override;
 
   /// respond to realm notifications by scheduling a reload()
@@ -52,6 +54,7 @@ class RGWRealmReloader : public RGWRealmWatcher::Watcher {
   const rgw::auth::ImplicitTenants& implicit_tenants;
   std::map<std::string, std::string>& service_map_meta;
   Pauser *const frontends;
+  boost::asio::io_context& io_context;
 
   /// reload() takes a significant amount of time, so we don't want to run
   /// it in the handle_notify() thread. we choose a timer thread instead of a
index 6c0212dae81793b41302ecf3b79041f1f98c9afd..a70813435bc019536df75997b6a6016e9d5f216f 100644 (file)
@@ -46,7 +46,7 @@
 #define dout_subsys ceph_subsys_rgw
 
 extern "C" {
-extern rgw::sal::Driver* newRadosStore(void);
+extern rgw::sal::Driver* newRadosStore(boost::asio::io_context* io_context);
 #ifdef WITH_RADOSGW_DBSTORE
 extern rgw::sal::Driver* newDBStore(CephContext *cct);
 #endif
@@ -103,6 +103,7 @@ RGWObjState::RGWObjState(const RGWObjState& rhs) : obj (rhs.obj) {
 rgw::sal::Driver* DriverManager::init_storage_provider(const DoutPrefixProvider* dpp,
                                                     CephContext* cct,
                                                     const Config& cfg,
+                                                    boost::asio::io_context& io_context,
                                                     bool use_gc_thread,
                                                     bool use_lc_thread,
                                                     bool quota_threads,
@@ -115,7 +116,7 @@ rgw::sal::Driver* DriverManager::init_storage_provider(const DoutPrefixProvider*
   rgw::sal::Driver* driver{nullptr};
 
   if (cfg.store_name.compare("rados") == 0) {
-    driver = newRadosStore();
+    driver = newRadosStore(&io_context);
     RGWRados* rados = static_cast<rgw::sal::RadosStore* >(driver)->getRados();
 
     if ((*rados).set_use_cache(use_cache)
@@ -141,7 +142,7 @@ rgw::sal::Driver* DriverManager::init_storage_provider(const DoutPrefixProvider*
     }
   }
   else if (cfg.store_name.compare("d3n") == 0) {
-    driver = new rgw::sal::RadosStore();
+    driver = new rgw::sal::RadosStore(io_context);
     RGWRados* rados = new D3nRGWDataCache<RGWRados>;
     dynamic_cast<rgw::sal::RadosStore*>(driver)->setRados(rados);
     rados->set_store(static_cast<rgw::sal::RadosStore* >(driver));
@@ -261,11 +262,11 @@ rgw::sal::Driver* DriverManager::init_storage_provider(const DoutPrefixProvider*
   return driver;
 }
 
-rgw::sal::Driver* DriverManager::init_raw_storage_provider(const DoutPrefixProvider* dpp, CephContext* cct, const Config& cfg)
+rgw::sal::Driver* DriverManager::init_raw_storage_provider(const DoutPrefixProvider* dpp, CephContext* cct, const Config& cfg, boost::asio::io_context& io_context)
 {
   rgw::sal::Driver* driver = nullptr;
   if (cfg.store_name.compare("rados") == 0) {
-    driver = newRadosStore();
+    driver = newRadosStore(&io_context);
     RGWRados* rados = static_cast<rgw::sal::RadosStore* >(driver)->getRados();
 
     rados->set_context(cct);
index 943905af17a4f1121bb1800e1cc820105118a419..fbff4f60645cb0d1f9c11c5a2b809f6251195b82 100644 (file)
@@ -1536,6 +1536,7 @@ public:
   static rgw::sal::Driver* get_storage(const DoutPrefixProvider* dpp,
                                      CephContext* cct,
                                      const Config& cfg,
+                                     boost::asio::io_context& io_context,
                                      bool use_gc_thread,
                                      bool use_lc_thread,
                                      bool quota_threads,
@@ -1544,7 +1545,7 @@ public:
                                      bool run_notification_thread, optional_yield y,
                                      bool use_cache = true,
                                      bool use_gc = true) {
-    rgw::sal::Driver* driver = init_storage_provider(dpp, cct, cfg, use_gc_thread,
+    rgw::sal::Driver* driver = init_storage_provider(dpp, cct, cfg, io_context, use_gc_thread,
                                                   use_lc_thread,
                                                   quota_threads,
                                                   run_sync_thread,
@@ -1555,14 +1556,16 @@ public:
   }
   /** Get a stripped down driver by service name */
   static rgw::sal::Driver* get_raw_storage(const DoutPrefixProvider* dpp,
-                                         CephContext* cct, const Config& cfg) {
-    rgw::sal::Driver* driver = init_raw_storage_provider(dpp, cct, cfg);
+                                         CephContext* cct, const Config& cfg,
+                                         boost::asio::io_context& io_context) {
+    rgw::sal::Driver* driver = init_raw_storage_provider(dpp, cct, cfg, io_context);
     return driver;
   }
   /** Initialize a new full Driver */
   static rgw::sal::Driver* init_storage_provider(const DoutPrefixProvider* dpp,
                                                CephContext* cct,
                                                const Config& cfg,
+                                               boost::asio::io_context& io_context,
                                                bool use_gc_thread,
                                                bool use_lc_thread,
                                                bool quota_threads,
@@ -1574,7 +1577,8 @@ public:
   /** Initialize a new raw Driver */
   static rgw::sal::Driver* init_raw_storage_provider(const DoutPrefixProvider* dpp,
                                                    CephContext* cct,
-                                                   const Config& cfg);
+                                                   const Config& cfg,
+                                                   boost::asio::io_context& io_context);
   /** Close a Driver when it's no longer needed */
   static void close_storage(rgw::sal::Driver* driver);
 
index 2c250b390a986f6ba8f9117a754284e80a5f0582..db59dd1841b38487914963befc4e947448be3143 100644 (file)
@@ -10,7 +10,7 @@
 
 #include "include/rados/librados.hpp"
 
-#include "rgw_tools.h"
+#include "common/async/context_pool.h"
 
 #include "common/common_init.h"
 #include "common/config.h"
@@ -321,12 +321,14 @@ int main(int argc, const char **argv)
   common_init_finish(g_ceph_context);
 
 
+  ceph::async::io_context_pool context_pool{cct->_conf->rgw_thread_pool_size};
   DriverManager::Config cfg = DriverManager::get_config(true, g_ceph_context);
 
   store = static_cast<rgw::sal::RadosStore*>(
     DriverManager::get_storage(dpp(),
                              g_ceph_context,
                              cfg,
+                             context_pool,
                              false,
                              false,
                              false,
index b1483f26e0db379b9675f4bb1e57b776bb18d4a5..00f7f24e00f802461e074d298b26affb0091405a 100644 (file)
@@ -12,6 +12,8 @@
 #include "rgw_auth.h"
 #include "rgw_auth_registry.h"
 
+#include <boost/asio/io_context.hpp>
+
 #define dout_subsys ceph_subsys_rgw
 
 #define METADATA_LENGTH 22
@@ -33,6 +35,7 @@ class StoreObject : public rgw::sal::StoreObject {
 };
 
 class Environment : public ::testing::Environment {
+  boost::asio::io_context ioc;
   public:
     Environment() {}
     
@@ -61,6 +64,7 @@ class Environment : public ::testing::Environment {
       
       driver = DriverManager::get_storage(dpp, dpp->get_cct(),
               cfg,
+              ioc,
               false,
               false,
               false,
index 9c5f8d9caa3f655f756137aeed2f38289967e8f8..b7cd66f6ea5ade11654a964f4b72a047bbf31cc0 100644 (file)
@@ -20,6 +20,7 @@
 #include <gtest/gtest.h>
 
 #include "include/stringify.h"
+#include "common/async/context_pool.h"
 #include "common/code_environment.h"
 #include "common/ceph_context.h"
 #include "global/global_init.h"
@@ -912,7 +913,8 @@ TEST_F(IPPolicyTest, IPEnvironment) {
   RGWProcessEnv penv;
   // Unfortunately RGWCivetWeb is too tightly tied to civetweb to test RGWCivetWeb::init_env.
   RGWEnv rgw_env;
-  rgw::sal::RadosStore store;
+  ceph::async::io_context_pool context_pool(cct->_conf->rgw_thread_pool_size); \
+  rgw::sal::RadosStore store(context_pool);
   std::unique_ptr<rgw::sal::User> user = store.get_user(rgw_user());
   rgw_env.set("REMOTE_ADDR", "192.168.1.1");
   rgw_env.set("HTTP_HOST", "1.2.3.4");
index 0485e71ede37694fc7bff9f8f0f01bef4950b310..1ae7976c7944df09fafe2b67dd9bdbaaa6e6ba23 100644 (file)
@@ -1,4 +1,5 @@
 #include <gtest/gtest.h>
+#include "common/async/context_pool.h"
 #include "common/ceph_context.h"
 #include "rgw_common.h"
 #include "rgw_auth_registry.h"
@@ -159,8 +160,10 @@ CctCleaner cleaner(g_cct);
 
 tracing::Tracer tracer;
 
-#define MAKE_STORE auto store = std::unique_ptr<sal::RadosStore>(new sal::RadosStore); \
-                        store->setRados(new RGWRados);
+#define MAKE_STORE \
+  ceph::async::io_context_pool context_pool(g_cct->_conf->rgw_thread_pool_size); \
+  auto store = std::unique_ptr<sal::RadosStore>(new sal::RadosStore(context_pool)); \
+  store->setRados(new RGWRados);
 
 #define DEFINE_REQ_STATE RGWProcessEnv pe; \
   MAKE_STORE; \