Project Zipper Part 1 - Framework and RGWRadosStore 28824/head
author     Daniel Gryniewicz <dang@redhat.com>
           Thu, 6 Jun 2019 18:41:55 +0000 (14:41 -0400)
committer  Daniel Gryniewicz <dang@redhat.com>
           Thu, 15 Aug 2019 12:48:13 +0000 (08:48 -0400)
This is the first part of Project Zipper, the Store Abstraction Layer (SAL).
It introduces the basic framework and wraps RGWRados in RGWRadosStore.
The goal over the next few weeks is to do the same for user, bucket, and
object.  This will wrap most of the remaining users of RGWRados in SAL
classes, allowing RGWRados to be completely absorbed into the private
implementation of RGWRadosStore.  It will also expose all the APIs that
need to be pushed up to higher layers in the SAL.

Signed-off-by: Daniel Gryniewicz <dang@redhat.com>
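
For readers skimming the conversion below, the shape of the new interface can be
inferred from the converted call sites (getRados(), svc(), ctl(), ctx()). What
follows is a minimal, hedged sketch of the framework rgw_sal.h introduces,
reconstructed from those call sites rather than copied from the actual header:

// Sketch only -- not the actual rgw_sal.h. RGWRadosStore wraps RGWRados and
// exposes it through accessors while callers migrate to the SAL.
class RGWRados;     // existing RADOS-backed store being wrapped
class RGWServices;  // service handle (zone, sysobj, datalog_rados, ...)
class RGWCtl;       // control handle (user, meta.mgr, ...)
class CephContext;

namespace rgw::sal {

class RGWRadosStore {
  RGWRados* rados{nullptr};   // intended to end up as a fully private detail
public:
  RGWRados* getRados() { return rados; }  // migration escape hatch
  RGWServices* svc();                     // e.g. store->svc()->zone->get_zonegroup()
  RGWCtl* ctl();                          // e.g. store->ctl()->user->get_info_by_uid(...)
  CephContext* ctx();                     // pass-through to the underlying RGWRados
};

} // namespace rgw::sal

Call sites convert mechanically: direct member access such as store->svc.sysobj
becomes store->svc()->sysobj, and calls on RGWRados itself go through
store->getRados().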
98 files changed:
src/rgw/CMakeLists.txt
src/rgw/librgw.cc
src/rgw/rgw_admin.cc
src/rgw/rgw_admin_user.h
src/rgw/rgw_asio_frontend.cc
src/rgw/rgw_asio_frontend.h
src/rgw/rgw_bucket.cc
src/rgw/rgw_bucket.h
src/rgw/rgw_civetweb_frontend.cc
src/rgw/rgw_cr_rados.cc
src/rgw/rgw_cr_rados.h
src/rgw/rgw_cr_tools.cc
src/rgw/rgw_data_sync.cc
src/rgw/rgw_data_sync.h
src/rgw/rgw_file.cc
src/rgw/rgw_file.h
src/rgw/rgw_frontend.h
src/rgw/rgw_lc.cc
src/rgw/rgw_lc.h
src/rgw/rgw_lib.h
src/rgw/rgw_loadgen_process.cc
src/rgw/rgw_main.cc
src/rgw/rgw_metadata.cc
src/rgw/rgw_metadata.h
src/rgw/rgw_multi.cc
src/rgw/rgw_multi.h
src/rgw/rgw_obj_manifest.h
src/rgw/rgw_object_expirer.cc
src/rgw/rgw_object_expirer_core.cc
src/rgw/rgw_object_expirer_core.h
src/rgw/rgw_op.cc
src/rgw/rgw_op.h
src/rgw/rgw_orphan.cc
src/rgw/rgw_orphan.h
src/rgw/rgw_otp.h
src/rgw/rgw_period_pusher.cc
src/rgw/rgw_period_pusher.h
src/rgw/rgw_process.cc
src/rgw/rgw_process.h
src/rgw/rgw_pubsub.cc
src/rgw/rgw_pubsub.h
src/rgw/rgw_putobj_processor.cc
src/rgw/rgw_putobj_processor.h
src/rgw/rgw_quota.cc
src/rgw/rgw_quota.h
src/rgw/rgw_rados.cc
src/rgw/rgw_rados.h
src/rgw/rgw_realm_reloader.cc
src/rgw/rgw_realm_reloader.h
src/rgw/rgw_reshard.cc
src/rgw/rgw_reshard.h
src/rgw/rgw_rest.cc
src/rgw/rgw_rest.h
src/rgw/rgw_rest_bucket.cc
src/rgw/rgw_rest_config.cc
src/rgw/rgw_rest_iam.cc
src/rgw/rgw_rest_iam.h
src/rgw/rgw_rest_log.cc
src/rgw/rgw_rest_metadata.cc
src/rgw/rgw_rest_realm.cc
src/rgw/rgw_rest_role.cc
src/rgw/rgw_rest_s3.cc
src/rgw/rgw_rest_s3.h
src/rgw/rgw_rest_s3website.h
src/rgw/rgw_rest_sts.cc
src/rgw/rgw_rest_sts.h
src/rgw/rgw_rest_swift.cc
src/rgw/rgw_rest_swift.h
src/rgw/rgw_rest_usage.cc
src/rgw/rgw_rest_user_policy.cc
src/rgw/rgw_sal.cc [new file with mode: 0644]
src/rgw/rgw_sal.h [new file with mode: 0644]
src/rgw/rgw_sts.cc
src/rgw/rgw_sts.h
src/rgw/rgw_swift_auth.cc
src/rgw/rgw_swift_auth.h
src/rgw/rgw_sync.cc
src/rgw/rgw_sync.h
src/rgw/rgw_sync_module_aws.cc
src/rgw/rgw_sync_module_es.cc
src/rgw/rgw_sync_module_es_rest.cc
src/rgw/rgw_sync_module_pubsub.cc
src/rgw/rgw_sync_module_pubsub_rest.cc
src/rgw/rgw_tools.cc
src/rgw/rgw_tools.h
src/rgw/rgw_torrent.cc
src/rgw/rgw_torrent.h
src/rgw/rgw_trim_bilog.cc
src/rgw/rgw_trim_bilog.h
src/rgw/rgw_trim_datalog.cc
src/rgw/rgw_trim_datalog.h
src/rgw/rgw_trim_mdlog.cc
src/rgw/rgw_trim_mdlog.h
src/rgw/rgw_user.cc
src/rgw/rgw_user.h
src/rgw/rgw_zone.cc
src/rgw/rgw_zone.h
src/test/rgw/test_rgw_iam_policy.cc

diff --git a/src/rgw/CMakeLists.txt b/src/rgw/CMakeLists.txt
index e5f3b6f17126c01aff83f884834b2236f92dea55..c891c4a3fdf865a9277519b90d1a32df10edcac9 100644 (file)
@@ -117,6 +117,7 @@ set(librgw_common_srcs
   rgw_rest_role.cc
   rgw_rest_s3.cc
   rgw_role.cc
+  rgw_sal.cc
   rgw_string.cc
   rgw_tag.cc
   rgw_tag_s3.cc
diff --git a/src/rgw/librgw.cc b/src/rgw/librgw.cc
index 6de594fc31796711cf095e36276b9ee3caf2bec4..b216663d3fbe4f7d0c082902870367c3c55bd0fb 100644 (file)
@@ -236,7 +236,7 @@ namespace rgw {
 
     RGWObjectCtx rados_ctx(store, s); // XXX holds std::map
 
-    auto sysobj_ctx = store->svc.sysobj->init_obj_ctx();
+    auto sysobj_ctx = store->svc()->sysobj->init_obj_ctx();
     s->sysobj_ctx = &sysobj_ctx;
 
     /* XXX and -then- stash req_state pointers everywhere they are needed */
@@ -336,7 +336,7 @@ namespace rgw {
               << e.what() << dendl;
     }
     if (should_log) {
-      rgw_log_op(store, nullptr /* !rest */, s,
+      rgw_log_op(store->getRados(), nullptr /* !rest */, s,
                 (op ? op->name() : "unknown"), olog);
     }
 
@@ -527,7 +527,7 @@ namespace rgw {
 
     r = rgw_perf_start(g_ceph_context);
 
-    rgw_rest_init(g_ceph_context, store, store->svc.zone->get_zonegroup());
+    rgw_rest_init(g_ceph_context, store->svc()->zone->get_zonegroup());
 
     mutex.lock();
     init_timer.cancel_all_events();
@@ -550,7 +550,7 @@ namespace rgw {
     ldh->init();
     ldh->bind();
 
-    rgw_log_usage_init(g_ceph_context, store);
+    rgw_log_usage_init(g_ceph_context, store->getRados());
 
     // XXX ex-RGWRESTMgr_lib, mgr->set_logging(true)
 
@@ -582,7 +582,7 @@ namespace rgw {
 
     fe->run();
 
-    r = store->register_to_service_map("rgw-nfs", service_map_meta);
+    r = store->getRados()->register_to_service_map("rgw-nfs", service_map_meta);
     if (r < 0) {
       derr << "ERROR: failed to register to service map: " << cpp_strerror(-r) << dendl;
       /* ignore error */
@@ -625,9 +625,9 @@ namespace rgw {
     return 0;
   } /* RGWLib::stop() */
 
-  int RGWLibIO::set_uid(RGWRados *store, const rgw_user& uid)
+  int RGWLibIO::set_uid(rgw::sal::RGWRadosStore *store, const rgw_user& uid)
   {
-    int ret = store->ctl.user->get_info_by_uid(uid, &user_info, null_yield);
+    int ret = store->ctl()->user->get_info_by_uid(uid, &user_info, null_yield);
     if (ret < 0) {
       derr << "ERROR: failed reading user info: uid=" << uid << " ret="
           << ret << dendl;
diff --git a/src/rgw/rgw_admin.cc b/src/rgw/rgw_admin.cc
index 23bb53d9a84caac37be74a0f872bd4be47ef1c01..c983e5e59d97e591ae04de31863829e0b8398740 100644 (file)
@@ -70,7 +70,7 @@ extern "C" {
 #define SECRET_KEY_LEN 40
 #define PUBLIC_ID_LEN 20
 
-static RGWRados *store = NULL;
+static rgw::sal::RGWRadosStore *store = NULL;
 
 static const DoutPrefixProvider* dpp() {
   struct GlobalPrefix : public DoutPrefixProvider {
@@ -1188,9 +1188,9 @@ static void show_reshard_status(
 }
 
 class StoreDestructor {
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
 public:
-  explicit StoreDestructor(RGWRados *_s) : store(_s) {}
+  explicit StoreDestructor(rgw::sal::RGWRadosStore *_s) : store(_s) {}
   ~StoreDestructor() {
     RGWStoreManager::close_storage(store);
     rgw_http_client_cleanup();
@@ -1201,13 +1201,13 @@ static int init_bucket(const string& tenant_name, const string& bucket_name, con
                        RGWBucketInfo& bucket_info, rgw_bucket& bucket, map<string, bufferlist> *pattrs = nullptr)
 {
   if (!bucket_name.empty()) {
-    auto obj_ctx = store->svc.sysobj->init_obj_ctx();
+    auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
     int r;
     if (bucket_id.empty()) {
-      r = store->get_bucket_info(obj_ctx, tenant_name, bucket_name, bucket_info, nullptr, null_yield, pattrs);
+      r = store->getRados()->get_bucket_info(obj_ctx, tenant_name, bucket_name, bucket_info, nullptr, null_yield, pattrs);
     } else {
       string bucket_instance_id = bucket_name + ":" + bucket_id;
-      r = store->get_bucket_instance_info(obj_ctx, bucket_instance_id, bucket_info, NULL, pattrs, null_yield);
+      r = store->getRados()->get_bucket_instance_info(obj_ctx, bucket_instance_id, bucket_info, NULL, pattrs, null_yield);
     }
     if (r < 0) {
       cerr << "could not get bucket info for bucket=" << bucket_name << std::endl;
@@ -1380,15 +1380,15 @@ void set_quota_info(RGWQuotaInfo& quota, int opt_cmd, int64_t max_size, int64_t
   }
 }
 
-int set_bucket_quota(RGWRados *store, int opt_cmd,
+int set_bucket_quota(rgw::sal::RGWRadosStore *store, int opt_cmd,
                      const string& tenant_name, const string& bucket_name,
                      int64_t max_size, int64_t max_objects,
                      bool have_max_size, bool have_max_objects)
 {
   RGWBucketInfo bucket_info;
   map<string, bufferlist> attrs;
-  auto obj_ctx = store->svc.sysobj->init_obj_ctx();
-  int r = store->get_bucket_info(obj_ctx, tenant_name, bucket_name, bucket_info, NULL, null_yield, &attrs);
+  auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
+  int r = store->getRados()->get_bucket_info(obj_ctx, tenant_name, bucket_name, bucket_info, NULL, null_yield, &attrs);
   if (r < 0) {
     cerr << "could not get bucket info for bucket=" << bucket_name << ": " << cpp_strerror(-r) << std::endl;
     return -r;
@@ -1396,7 +1396,7 @@ int set_bucket_quota(RGWRados *store, int opt_cmd,
 
   set_quota_info(bucket_info.quota, opt_cmd, max_size, max_objects, have_max_size, have_max_objects);
 
-   r = store->put_bucket_instance_info(bucket_info, false, real_time(), &attrs);
+   r = store->getRados()->put_bucket_instance_info(bucket_info, false, real_time(), &attrs);
   if (r < 0) {
     cerr << "ERROR: failed writing bucket instance info: " << cpp_strerror(-r) << std::endl;
     return -r;
@@ -1447,13 +1447,13 @@ static bool bucket_object_check_filter(const string& name)
   return rgw_obj_key::oid_to_key_in_ns(name, &k, ns);
 }
 
-int check_min_obj_stripe_size(RGWRados *store, RGWBucketInfo& bucket_info, rgw_obj& obj, uint64_t min_stripe_size, bool *need_rewrite)
+int check_min_obj_stripe_size(rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket_info, rgw_obj& obj, uint64_t min_stripe_size, bool *need_rewrite)
 {
   map<string, bufferlist> attrs;
   uint64_t obj_size;
 
   RGWObjectCtx obj_ctx(store);
-  RGWRados::Object op_target(store, bucket_info, obj_ctx, obj);
+  RGWRados::Object op_target(store->getRados(), bucket_info, obj_ctx, obj);
   RGWRados::Object::Read read_op(&op_target);
 
   read_op.params.attrs = &attrs;
@@ -1518,7 +1518,7 @@ int check_obj_locator_underscore(RGWBucketInfo& bucket_info, rgw_obj& obj, rgw_o
 
   RGWObjectCtx obj_ctx(store);
 
-  RGWRados::Object op_target(store, bucket_info, obj_ctx, obj);
+  RGWRados::Object op_target(store->getRados(), bucket_info, obj_ctx, obj);
   RGWRados::Object::Read read_op(&op_target);
 
   int ret = read_op.prepare(null_yield);
@@ -1529,7 +1529,7 @@ int check_obj_locator_underscore(RGWBucketInfo& bucket_info, rgw_obj& obj, rgw_o
   string status = (needs_fixing ? "needs_fixing" : "ok");
 
   if ((needs_fixing || remove_bad) && fix) {
-    ret = store->fix_head_obj_locator(bucket_info, needs_fixing, remove_bad, key);
+    ret = store->getRados()->fix_head_obj_locator(bucket_info, needs_fixing, remove_bad, key);
     if (ret < 0) {
       cerr << "ERROR: fix_head_object_locator() returned ret=" << ret << std::endl;
       goto done;
@@ -1556,7 +1556,7 @@ int check_obj_tail_locator_underscore(RGWBucketInfo& bucket_info, rgw_obj& obj,
   bool needs_fixing;
   string status;
 
-  int ret = store->fix_tail_obj_locator(bucket_info, key, fix, &needs_fixing, null_yield);
+  int ret = store->getRados()->fix_tail_obj_locator(bucket_info, key, fix, &needs_fixing, null_yield);
   if (ret < 0) {
     cerr << "ERROR: fix_tail_object_locator_underscore() returned ret=" << ret << std::endl;
     status = "failed";
@@ -1602,7 +1602,7 @@ int do_check_object_locator(const string& tenant_name, const string& bucket_name
   map<string, bool> common_prefixes;
   string ns;
 
-  RGWRados::Bucket target(store, bucket_info);
+  RGWRados::Bucket target(store->getRados(), bucket_info);
   RGWRados::Bucket::List list_op(&target);
 
   string marker;
@@ -1650,13 +1650,13 @@ int do_check_object_locator(const string& tenant_name, const string& bucket_name
   return 0;
 }
 
-int set_bucket_sync_enabled(RGWRados *store, int opt_cmd, const string& tenant_name, const string& bucket_name)
+int set_bucket_sync_enabled(rgw::sal::RGWRadosStore *store, int opt_cmd, const string& tenant_name, const string& bucket_name)
 {
   RGWBucketInfo bucket_info;
   map<string, bufferlist> attrs;
-  auto obj_ctx = store->svc.sysobj->init_obj_ctx();
+  auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
 
-  int r = store->get_bucket_info(obj_ctx, tenant_name, bucket_name, bucket_info, NULL, null_yield, &attrs);
+  int r = store->getRados()->get_bucket_info(obj_ctx, tenant_name, bucket_name, bucket_info, NULL, null_yield, &attrs);
   if (r < 0) {
     cerr << "could not get bucket info for bucket=" << bucket_name << ": " << cpp_strerror(-r) << std::endl;
     return -r;
@@ -1668,7 +1668,7 @@ int set_bucket_sync_enabled(RGWRados *store, int opt_cmd, const string& tenant_n
     bucket_info.flags |= BUCKET_DATASYNC_DISABLED;
   }
 
-  r = store->put_bucket_instance_info(bucket_info, false, real_time(), &attrs);
+  r = store->getRados()->put_bucket_instance_info(bucket_info, false, real_time(), &attrs);
   if (r < 0) {
     cerr << "ERROR: failed writing bucket instance info: " << cpp_strerror(-r) << std::endl;
     return -r;
@@ -1678,13 +1678,13 @@ int set_bucket_sync_enabled(RGWRados *store, int opt_cmd, const string& tenant_n
   int shard_id = bucket_info.num_shards? 0 : -1;
 
   if (opt_cmd == OPT_BUCKET_SYNC_DISABLE) {
-    r = store->svc.bilog_rados->log_stop(bucket_info, -1);
+    r = store->svc()->bilog_rados->log_stop(bucket_info, -1);
     if (r < 0) {
       lderr(store->ctx()) << "ERROR: failed writing stop bilog" << dendl;
       return r;
     }
   } else {
-    r = store->svc.bilog_rados->log_start(bucket_info, -1);
+    r = store->svc()->bilog_rados->log_start(bucket_info, -1);
     if (r < 0) {
       lderr(store->ctx()) << "ERROR: failed writing resync bilog" << dendl;
       return r;
@@ -1692,7 +1692,7 @@ int set_bucket_sync_enabled(RGWRados *store, int opt_cmd, const string& tenant_n
   }
 
   for (int i = 0; i < shards_num; ++i, ++shard_id) {
-    r = store->svc.datalog_rados->add_entry(bucket_info.bucket, shard_id);
+    r = store->svc()->datalog_rados->add_entry(bucket_info.bucket, shard_id);
     if (r < 0) {
       lderr(store->ctx()) << "ERROR: failed writing data log" << dendl;
       return r;
@@ -1704,18 +1704,18 @@ int set_bucket_sync_enabled(RGWRados *store, int opt_cmd, const string& tenant_n
 
 
 /// search for a matching zone/zonegroup id and return a connection if found
-static boost::optional<RGWRESTConn> get_remote_conn(RGWRados *store,
+static boost::optional<RGWRESTConn> get_remote_conn(rgw::sal::RGWRadosStore *store,
                                                     const RGWZoneGroup& zonegroup,
                                                     const std::string& remote)
 {
   boost::optional<RGWRESTConn> conn;
   if (remote == zonegroup.get_id()) {
-    conn.emplace(store->ctx(), store->svc.zone, remote, zonegroup.endpoints);
+    conn.emplace(store->ctx(), store->svc()->zone, remote, zonegroup.endpoints);
   } else {
     for (const auto& z : zonegroup.zones) {
       const auto& zone = z.second;
       if (remote == zone.id) {
-        conn.emplace(store->ctx(), store->svc.zone, remote, zone.endpoints);
+        conn.emplace(store->ctx(), store->svc()->zone, remote, zone.endpoints);
         break;
       }
     }
@@ -1724,7 +1724,7 @@ static boost::optional<RGWRESTConn> get_remote_conn(RGWRados *store,
 }
 
 /// search each zonegroup for a connection
-static boost::optional<RGWRESTConn> get_remote_conn(RGWRados *store,
+static boost::optional<RGWRESTConn> get_remote_conn(rgw::sal::RGWRadosStore *store,
                                                     const RGWPeriodMap& period_map,
                                                     const std::string& remote)
 {
@@ -1808,10 +1808,10 @@ static int commit_period(RGWRealm& realm, RGWPeriod& period,
     return -EINVAL;
   }
   // are we the period's master zone?
-  if (store->svc.zone->get_zone_params().get_id() == master_zone) {
+  if (store->svc()->zone->get_zone_params().get_id() == master_zone) {
     // read the current period
     RGWPeriod current_period;
-    int ret = current_period.init(g_ceph_context, store->svc.sysobj, realm.get_id());
+    int ret = current_period.init(g_ceph_context, store->svc()->sysobj, realm.get_id());
     if (ret < 0) {
       cerr << "Error initializing current period: "
           << cpp_strerror(-ret) << std::endl;
@@ -1909,7 +1909,7 @@ static int update_period(const string& realm_id, const string& realm_name,
                          Formatter *formatter, bool force)
 {
   RGWRealm realm(realm_id, realm_name);
-  int ret = realm.init(g_ceph_context, store->svc.sysobj);
+  int ret = realm.init(g_ceph_context, store->svc()->sysobj);
   if (ret < 0 ) {
     cerr << "Error initializing realm " << cpp_strerror(-ret) << std::endl;
     return ret;
@@ -1919,7 +1919,7 @@ static int update_period(const string& realm_id, const string& realm_name,
     epoch = atoi(period_epoch.c_str());
   }
   RGWPeriod period(period_id, epoch);
-  ret = period.init(g_ceph_context, store->svc.sysobj, realm.get_id());
+  ret = period.init(g_ceph_context, store->svc()->sysobj, realm.get_id());
   if (ret < 0) {
     cerr << "period init failed: " << cpp_strerror(-ret) << std::endl;
     return ret;
@@ -1991,7 +1991,7 @@ static int do_period_pull(RGWRESTConn *remote_conn, const string& url,
     cerr << "request failed: " << cpp_strerror(-ret) << std::endl;
     return ret;
   }
-  ret = period->init(g_ceph_context, store->svc.sysobj, false);
+  ret = period->init(g_ceph_context, store->svc()->sysobj, false);
   if (ret < 0) {
     cerr << "faile to init period " << cpp_strerror(-ret) << std::endl;
     return ret;
@@ -2011,12 +2011,12 @@ static int do_period_pull(RGWRESTConn *remote_conn, const string& url,
   return 0;
 }
 
-static int read_current_period_id(RGWRados* store, const std::string& realm_id,
+static int read_current_period_id(rgw::sal::RGWRadosStore* store, const std::string& realm_id,
                                   const std::string& realm_name,
                                   std::string* period_id)
 {
   RGWRealm realm(realm_id, realm_name);
-  int ret = realm.init(g_ceph_context, store->svc.sysobj);
+  int ret = realm.init(g_ceph_context, store->svc()->sysobj);
   if (ret < 0) {
     std::cerr << "failed to read realm: " << cpp_strerror(-ret) << std::endl;
     return ret;
@@ -2044,7 +2044,7 @@ stringstream& push_ss(stringstream& ss, list<string>& l, int tab = 0)
 
 static void get_md_sync_status(list<string>& status)
 {
-  RGWMetaSyncStatusManager sync(store, store->svc.rados->get_async_processor());
+  RGWMetaSyncStatusManager sync(store, store->svc()->rados->get_async_processor());
 
   int ret = sync.init();
   if (ret < 0) {
@@ -2110,7 +2110,7 @@ static void get_md_sync_status(list<string>& status)
   push_ss(ss, status) << "incremental sync: " << num_inc << "/" << total_shards << " shards";
 
   map<int, RGWMetadataLogInfo> master_shards_info;
-  string master_period = store->svc.zone->get_current_period_id();
+  string master_period = store->svc()->zone->get_current_period_id();
 
   ret = sync.read_master_log_shards_info(master_period, &master_shards_info);
   if (ret < 0) {
@@ -2183,18 +2183,18 @@ static void get_data_sync_status(const string& source_zone, list<string>& status
 
   RGWZone *sz;
 
-  if (!store->svc.zone->find_zone_by_id(source_zone, &sz)) {
+  if (!store->svc()->zone->find_zone_by_id(source_zone, &sz)) {
     push_ss(ss, status, tab) << string("zone not found");
     flush_ss(ss, status);
     return;
   }
 
-  if (!store->svc.zone->zone_syncs_from(store->svc.zone->get_zone(), *sz)) {
+  if (!store->svc()->zone->zone_syncs_from(store->svc()->zone->get_zone(), *sz)) {
     push_ss(ss, status, tab) << string("not syncing from zone");
     flush_ss(ss, status);
     return;
   }
-  RGWDataSyncStatusManager sync(store, store->svc.rados->get_async_processor(), source_zone, nullptr);
+  RGWDataSyncStatusManager sync(store, store->svc()->rados->get_async_processor(), source_zone, nullptr);
 
   int ret = sync.init();
   if (ret < 0) {
@@ -2348,9 +2348,9 @@ static void tab_dump(const string& header, int width, const list<string>& entrie
 
 static void sync_status(Formatter *formatter)
 {
-  const RGWRealm& realm = store->svc.zone->get_realm();
-  const RGWZoneGroup& zonegroup = store->svc.zone->get_zonegroup();
-  const RGWZone& zone = store->svc.zone->get_zone();
+  const RGWRealm& realm = store->svc()->zone->get_realm();
+  const RGWZoneGroup& zonegroup = store->svc()->zone->get_zonegroup();
+  const RGWZone& zone = store->svc()->zone->get_zone();
 
   int width = 15;
 
@@ -2360,7 +2360,7 @@ static void sync_status(Formatter *formatter)
 
   list<string> md_status;
 
-  if (store->svc.zone->is_meta_master()) {
+  if (store->svc()->zone->is_meta_master()) {
     md_status.push_back("no sync (zone is master)");
   } else {
     get_md_sync_status(md_status);
@@ -2370,14 +2370,14 @@ static void sync_status(Formatter *formatter)
 
   list<string> data_status;
 
-  auto& zone_conn_map = store->svc.zone->get_zone_conn_map();
+  auto& zone_conn_map = store->svc()->zone->get_zone_conn_map();
 
   for (auto iter : zone_conn_map) {
     const string& source_id = iter.first;
     string source_str = "source: ";
     string s = source_str + source_id;
     RGWZone *sz;
-    if (store->svc.zone->find_zone_by_id(source_id, &sz)) {
+    if (store->svc()->zone->find_zone_by_id(source_id, &sz)) {
       s += string(" (") + sz->name + ")";
     }
     data_status.push_back(s);
@@ -2396,7 +2396,7 @@ std::ostream& operator<<(std::ostream& out, const indented& h) {
   return out << std::setw(h.w) << h.header << std::setw(1) << ' ';
 }
 
-static int remote_bilog_markers(RGWRados *store, const RGWZone& source,
+static int remote_bilog_markers(rgw::sal::RGWRadosStore *store, const RGWZone& source,
                                 RGWRESTConn *conn, const RGWBucketInfo& info,
                                 BucketIndexShardsManager *markers)
 {
@@ -2421,7 +2421,7 @@ static int remote_bilog_markers(RGWRados *store, const RGWZone& source,
   return 0;
 }
 
-static int bucket_source_sync_status(RGWRados *store, const RGWZone& zone,
+static int bucket_source_sync_status(rgw::sal::RGWRadosStore *store, const RGWZone& zone,
                                      const RGWZone& source, RGWRESTConn *conn,
                                      const RGWBucketInfo& bucket_info,
                                      int width, std::ostream& out)
@@ -2490,13 +2490,13 @@ static int bucket_source_sync_status(RGWRados *store, const RGWZone& zone,
   return 0;
 }
 
-static int bucket_sync_status(RGWRados *store, const RGWBucketInfo& info,
+static int bucket_sync_status(rgw::sal::RGWRadosStore *store, const RGWBucketInfo& info,
                               const std::string& source_zone_id,
                               std::ostream& out)
 {
-  const RGWRealm& realm = store->svc.zone->get_realm();
-  const RGWZoneGroup& zonegroup = store->svc.zone->get_zonegroup();
-  const RGWZone& zone = store->svc.zone->get_zone();
+  const RGWRealm& realm = store->svc()->zone->get_realm();
+  const RGWZoneGroup& zonegroup = store->svc()->zone->get_zonegroup();
+  const RGWZone& zone = store->svc()->zone->get_zone();
   constexpr int width = 15;
 
   out << indented{width, "realm"} << realm.get_id() << " (" << realm.get_name() << ")\n";
@@ -2509,7 +2509,7 @@ static int bucket_sync_status(RGWRados *store, const RGWBucketInfo& info,
     return 0;
   }
 
-  auto& zone_conn_map = store->svc.zone->get_zone_conn_map();
+  auto& zone_conn_map = store->svc()->zone->get_zone_conn_map();
   if (!source_zone_id.empty()) {
     auto z = zonegroup.zones.find(source_zone_id);
     if (z == zonegroup.zones.end()) {
@@ -2573,7 +2573,7 @@ static void parse_tier_config_param(const string& s, map<string, string, ltstr_n
 static int check_pool_support_omap(const rgw_pool& pool)
 {
   librados::IoCtx io_ctx;
-  int ret = store->get_rados_handle()->ioctx_create(pool.to_str().c_str(), io_ctx);
+  int ret = store->getRados()->get_rados_handle()->ioctx_create(pool.to_str().c_str(), io_ctx);
   if (ret < 0) {
      // the pool may not exist at this moment, we have no way to check if it supports omap.
      return 0;
@@ -2588,7 +2588,7 @@ static int check_pool_support_omap(const rgw_pool& pool)
   return 0;
 }
 
-int check_reshard_bucket_params(RGWRados *store,
+int check_reshard_bucket_params(rgw::sal::RGWRadosStore *store,
                                const string& bucket_name,
                                const string& tenant,
                                const string& bucket_id,
@@ -2609,8 +2609,8 @@ int check_reshard_bucket_params(RGWRados *store,
     return -EINVAL;
   }
 
-  if (num_shards > (int)store->get_max_bucket_shards()) {
-    cerr << "ERROR: num_shards too high, max value: " << store->get_max_bucket_shards() << std::endl;
+  if (num_shards > (int)store->getRados()->get_max_bucket_shards()) {
+    cerr << "ERROR: num_shards too high, max value: " << store->getRados()->get_max_bucket_shards() << std::endl;
     return -EINVAL;
   }
 
@@ -2630,25 +2630,25 @@ int check_reshard_bucket_params(RGWRados *store,
   return 0;
 }
 
-int create_new_bucket_instance(RGWRados *store,
+int create_new_bucket_instance(rgw::sal::RGWRadosStore *store,
                               int new_num_shards,
                               const RGWBucketInfo& bucket_info,
                               map<string, bufferlist>& attrs,
                               RGWBucketInfo& new_bucket_info)
 {
 
-  store->create_bucket_id(&new_bucket_info.bucket.bucket_id);
+  store->getRados()->create_bucket_id(&new_bucket_info.bucket.bucket_id);
 
   new_bucket_info.num_shards = new_num_shards;
   new_bucket_info.objv_tracker.clear();
 
-  int ret = store->svc.bi->init_index(new_bucket_info);
+  int ret = store->svc()->bi->init_index(new_bucket_info);
   if (ret < 0) {
     cerr << "ERROR: failed to init new bucket indexes: " << cpp_strerror(-ret) << std::endl;
     return -ret;
   }
 
-  ret = store->put_bucket_instance_info(new_bucket_info, true, real_time(), &attrs);
+  ret = store->getRados()->put_bucket_instance_info(new_bucket_info, true, real_time(), &attrs);
   if (ret < 0) {
     cerr << "ERROR: failed to store new bucket instance info: " << cpp_strerror(-ret) << std::endl;
     return -ret;
@@ -2713,7 +2713,7 @@ static int trim_sync_error_log(int shard_id, const ceph::real_time& start_time,
                                                shard_id);
   // call cls_log_trim() until it returns -ENODATA
   for (;;) {
-    int ret = store->svc.cls->timelog.trim(oid, start_time, end_time,
+    int ret = store->svc()->cls->timelog.trim(oid, start_time, end_time,
                                            start_marker, end_marker, nullptr,
                                            null_yield);
     if (ret == -ENODATA) {
@@ -2729,8 +2729,8 @@ static int trim_sync_error_log(int shard_id, const ceph::real_time& start_time,
   // unreachable
 }
 
-const string& get_tier_type(RGWRados *store) {
-  return store->svc.zone->get_zone().tier_type;
+const string& get_tier_type(rgw::sal::RGWRadosStore *store) {
+  return store->svc()->zone->get_zone().tier_type;
 }
 
 int main(int argc, const char **argv)
@@ -3459,7 +3459,7 @@ int main(int argc, const char **argv)
   }
 
   if (!source_zone_name.empty()) {
-    if (!store->svc.zone->find_zone_id_by_name(source_zone_name, &source_zone)) {
+    if (!store->svc()->zone->find_zone_id_by_name(source_zone_name, &source_zone)) {
       cerr << "WARNING: cannot find source zone id for name=" << source_zone_name << std::endl;
       source_zone = source_zone_name;
     }
@@ -3489,7 +3489,7 @@ int main(int argc, const char **argv)
          return EINVAL;
        }
        RGWPeriod period(period_id);
-       int ret = period.init(g_ceph_context, store->svc.sysobj);
+       int ret = period.init(g_ceph_context, store->svc()->sysobj);
        if (ret < 0) {
          cerr << "period.init failed: " << cpp_strerror(-ret) << std::endl;
          return -ret;
@@ -3510,7 +3510,7 @@ int main(int argc, const char **argv)
        }
         if (staging) {
           RGWRealm realm(realm_id, realm_name);
-          int ret = realm.init(g_ceph_context, store->svc.sysobj);
+          int ret = realm.init(g_ceph_context, store->svc()->sysobj);
           if (ret < 0 ) {
             cerr << "Error initializing realm " << cpp_strerror(-ret) << std::endl;
             return -ret;
@@ -3521,7 +3521,7 @@ int main(int argc, const char **argv)
           epoch = 1;
         }
        RGWPeriod period(period_id, epoch);
-       int ret = period.init(g_ceph_context, store->svc.sysobj, realm_id, realm_name);
+       int ret = period.init(g_ceph_context, store->svc()->sysobj, realm_id, realm_name);
        if (ret < 0) {
          cerr << "period init failed: " << cpp_strerror(-ret) << std::endl;
          return -ret;
@@ -3545,7 +3545,7 @@ int main(int argc, const char **argv)
     case OPT_PERIOD_LIST:
       {
        list<string> periods;
-       int ret = store->svc.zone->list_periods(periods);
+       int ret = store->svc()->zone->list_periods(periods);
        if (ret < 0) {
          cerr << "failed to list periods: " << cpp_strerror(-ret) << std::endl;
          return -ret;
@@ -3573,13 +3573,13 @@ int main(int argc, const char **argv)
         if (url.empty()) {
           // load current period for endpoints
           RGWRealm realm(realm_id, realm_name);
-          int ret = realm.init(g_ceph_context, store->svc.sysobj);
+          int ret = realm.init(g_ceph_context, store->svc()->sysobj);
           if (ret < 0) {
             cerr << "failed to init realm: " << cpp_strerror(-ret) << std::endl;
             return -ret;
           }
           RGWPeriod current_period(realm.get_current_period());
-          ret = current_period.init(g_ceph_context, store->svc.sysobj);
+          ret = current_period.init(g_ceph_context, store->svc()->sysobj);
           if (ret < 0) {
             cerr << "failed to init current period: " << cpp_strerror(-ret) << std::endl;
             return -ret;
@@ -3616,7 +3616,7 @@ int main(int argc, const char **argv)
     case OPT_GLOBAL_QUOTA_DISABLE:
       {
         if (realm_id.empty()) {
-          RGWRealm realm(g_ceph_context, store->svc.sysobj);
+          RGWRealm realm(g_ceph_context, store->svc()->sysobj);
           if (!realm_name.empty()) {
             // look up realm_id for the given realm_name
             int ret = realm.read_id(realm_name, realm_id);
@@ -3637,7 +3637,7 @@ int main(int argc, const char **argv)
         }
 
         RGWPeriodConfig period_config;
-        int ret = period_config.read(store->svc.sysobj, realm_id);
+        int ret = period_config.read(store->svc()->sysobj, realm_id);
         if (ret < 0 && ret != -ENOENT) {
           cerr << "ERROR: failed to read period config: "
               << cpp_strerror(-ret) << std::endl;
@@ -3668,7 +3668,7 @@ int main(int argc, const char **argv)
 
         if (opt_cmd != OPT_GLOBAL_QUOTA_GET) {
           // write the modified period config
-          ret = period_config.write(store->svc.sysobj, realm_id);
+          ret = period_config.write(store->svc()->sysobj, realm_id);
           if (ret < 0) {
             cerr << "ERROR: failed to write period config: "
                 << cpp_strerror(-ret) << std::endl;
@@ -3694,7 +3694,7 @@ int main(int argc, const char **argv)
          return EINVAL;
        }
 
-       RGWRealm realm(realm_name, g_ceph_context, store->svc.sysobj);
+       RGWRealm realm(realm_name, g_ceph_context, store->svc()->sysobj);
        int ret = realm.create();
        if (ret < 0) {
          cerr << "ERROR: couldn't create realm " << realm_name << ": " << cpp_strerror(-ret) << std::endl;
@@ -3719,7 +3719,7 @@ int main(int argc, const char **argv)
          cerr << "missing realm name or id" << std::endl;
          return EINVAL;
        }
-       int ret = realm.init(g_ceph_context, store->svc.sysobj);
+       int ret = realm.init(g_ceph_context, store->svc()->sysobj);
        if (ret < 0) {
          cerr << "realm.init failed: " << cpp_strerror(-ret) << std::endl;
          return -ret;
@@ -3735,7 +3735,7 @@ int main(int argc, const char **argv)
     case OPT_REALM_GET:
       {
        RGWRealm realm(realm_id, realm_name);
-       int ret = realm.init(g_ceph_context, store->svc.sysobj);
+       int ret = realm.init(g_ceph_context, store->svc()->sysobj);
        if (ret < 0) {
          if (ret == -ENOENT && realm_name.empty() && realm_id.empty()) {
            cerr << "missing realm name or id, or default realm not found" << std::endl;
@@ -3750,7 +3750,7 @@ int main(int argc, const char **argv)
       break;
     case OPT_REALM_GET_DEFAULT:
       {
-       RGWRealm realm(g_ceph_context, store->svc.sysobj);
+       RGWRealm realm(g_ceph_context, store->svc()->sysobj);
        string default_id;
        int ret = realm.read_default_id(default_id);
        if (ret == -ENOENT) {
@@ -3765,14 +3765,14 @@ int main(int argc, const char **argv)
       break;
     case OPT_REALM_LIST:
       {
-       RGWRealm realm(g_ceph_context, store->svc.sysobj);
+       RGWRealm realm(g_ceph_context, store->svc()->sysobj);
        string default_id;
        int ret = realm.read_default_id(default_id);
        if (ret < 0 && ret != -ENOENT) {
          cerr << "could not determine default realm: " << cpp_strerror(-ret) << std::endl;
        }
        list<string> realms;
-       ret = store->svc.zone->list_realms(realms);
+       ret = store->svc()->zone->list_realms(realms);
        if (ret < 0) {
          cerr << "failed to list realms: " << cpp_strerror(-ret) << std::endl;
          return -ret;
@@ -3791,7 +3791,7 @@ int main(int argc, const char **argv)
          return -ret;
        }
        list<string> periods;
-       ret = store->svc.zone->list_periods(period_id, periods);
+       ret = store->svc()->zone->list_periods(period_id, periods);
        if (ret < 0) {
          cerr << "list periods failed: " << cpp_strerror(-ret) << std::endl;
          return -ret;
@@ -3815,7 +3815,7 @@ int main(int argc, const char **argv)
          cerr << "missing realm name or id" << std::endl;
          return EINVAL;
        }
-       int ret = realm.init(g_ceph_context, store->svc.sysobj);
+       int ret = realm.init(g_ceph_context, store->svc()->sysobj);
        if (ret < 0) {
          cerr << "realm.init failed: " << cpp_strerror(-ret) << std::endl;
          return -ret;
@@ -3838,7 +3838,7 @@ int main(int argc, const char **argv)
        }
        RGWRealm realm(realm_id, realm_name);
        bool new_realm = false;
-       int ret = realm.init(g_ceph_context, store->svc.sysobj);
+       int ret = realm.init(g_ceph_context, store->svc()->sysobj);
        if (ret < 0 && ret != -ENOENT) {
          cerr << "failed to init realm: " << cpp_strerror(-ret) << std::endl;
          return -ret;
@@ -3885,7 +3885,7 @@ int main(int argc, const char **argv)
     case OPT_REALM_DEFAULT:
       {
        RGWRealm realm(realm_id, realm_name);
-       int ret = realm.init(g_ceph_context, store->svc.sysobj);
+       int ret = realm.init(g_ceph_context, store->svc()->sysobj);
        if (ret < 0) {
          cerr << "failed to init realm: " << cpp_strerror(-ret) << std::endl;
          return -ret;
@@ -3927,7 +3927,7 @@ int main(int argc, const char **argv)
           return -ret;
         }
         RGWRealm realm;
-        realm.init(g_ceph_context, store->svc.sysobj, false);
+        realm.init(g_ceph_context, store->svc()->sysobj, false);
         try {
           decode_json_obj(realm, &p);
         } catch (const JSONDecoder::err& e) {
@@ -3979,14 +3979,14 @@ int main(int argc, const char **argv)
        }
 
        RGWZoneGroup zonegroup(zonegroup_id,zonegroup_name);
-       int ret = zonegroup.init(g_ceph_context, store->svc.sysobj);
+       int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj);
        if (ret < 0) {
          cerr << "failed to initialize zonegroup " << zonegroup_name << " id " << zonegroup_id << " :"
               << cpp_strerror(-ret) << std::endl;
          return -ret;
        }
        RGWZoneParams zone(zone_id, zone_name);
-       ret = zone.init(g_ceph_context, store->svc.sysobj);
+       ret = zone.init(g_ceph_context, store->svc()->sysobj);
        if (ret < 0) {
          cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl;
          return -ret;
@@ -4019,7 +4019,7 @@ int main(int argc, const char **argv)
                                  endpoints, ptier_type,
                                  psync_from_all, sync_from, sync_from_rm,
                                  predirect_zone,
-                                store->svc.sync_modules->get_manager());
+                                store->svc()->sync_modules->get_manager());
        if (ret < 0) {
          cerr << "failed to add zone " << zone_name << " to zonegroup " << zonegroup.get_name() << ": "
               << cpp_strerror(-ret) << std::endl;
@@ -4037,13 +4037,13 @@ int main(int argc, const char **argv)
          return EINVAL;
        }
        RGWRealm realm(realm_id, realm_name);
-       int ret = realm.init(g_ceph_context, store->svc.sysobj);
+       int ret = realm.init(g_ceph_context, store->svc()->sysobj);
        if (ret < 0) {
          cerr << "failed to init realm: " << cpp_strerror(-ret) << std::endl;
          return -ret;
        }
 
-       RGWZoneGroup zonegroup(zonegroup_name, is_master, g_ceph_context, store->svc.sysobj, realm.get_id(), endpoints);
+       RGWZoneGroup zonegroup(zonegroup_name, is_master, g_ceph_context, store->svc()->sysobj, realm.get_id(), endpoints);
         zonegroup.api_name = (api_name.empty() ? zonegroup_name : api_name);
        ret = zonegroup.create();
        if (ret < 0) {
@@ -4070,7 +4070,7 @@ int main(int argc, const char **argv)
        }
 
        RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
-       int ret = zonegroup.init(g_ceph_context, store->svc.sysobj);
+       int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj);
        if (ret < 0) {
          cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
          return -ret;
@@ -4090,7 +4090,7 @@ int main(int argc, const char **argv)
          return EINVAL;
        }
        RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
-       int ret = zonegroup.init(g_ceph_context, store->svc.sysobj);
+       int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj);
        if (ret < 0) {
          cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
          return -ret;
@@ -4105,7 +4105,7 @@ int main(int argc, const char **argv)
     case OPT_ZONEGROUP_GET:
       {
        RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
-       int ret = zonegroup.init(g_ceph_context, store->svc.sysobj);
+       int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj);
        if (ret < 0) {
          cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
          return -ret;
@@ -4118,14 +4118,14 @@ int main(int argc, const char **argv)
     case OPT_ZONEGROUP_LIST:
       {
        RGWZoneGroup zonegroup;
-       int ret = zonegroup.init(g_ceph_context, store->svc.sysobj, false);
+       int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj, false);
        if (ret < 0) {
          cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
          return -ret;
        }
 
        list<string> zonegroups;
-       ret = store->svc.zone->list_zonegroups(zonegroups);
+       ret = store->svc()->zone->list_zonegroups(zonegroups);
        if (ret < 0) {
          cerr << "failed to list zonegroups: " << cpp_strerror(-ret) << std::endl;
          return -ret;
@@ -4145,14 +4145,14 @@ int main(int argc, const char **argv)
     case OPT_ZONEGROUP_MODIFY:
       {
        RGWRealm realm(realm_id, realm_name);
-       int ret = realm.init(g_ceph_context, store->svc.sysobj);
+       int ret = realm.init(g_ceph_context, store->svc()->sysobj);
        if (ret < 0) {
          cerr << "failed to init realm: " << cpp_strerror(-ret) << std::endl;
          return -ret;
        }
 
        RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
-       ret = zonegroup.init(g_ceph_context, store->svc.sysobj);
+       ret = zonegroup.init(g_ceph_context, store->svc()->sysobj);
        if (ret < 0) {
          cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
          return -ret;
@@ -4185,7 +4185,7 @@ int main(int argc, const char **argv)
           need_update = true;
         } else if (!realm_name.empty()) {
           // get realm id from name
-          RGWRealm realm{g_ceph_context, store->svc.sysobj};
+          RGWRealm realm{g_ceph_context, store->svc()->sysobj};
           ret = realm.read_id(realm_name, zonegroup.realm_id);
           if (ret < 0) {
             cerr << "failed to find realm by name " << realm_name << std::endl;
@@ -4216,7 +4216,7 @@ int main(int argc, const char **argv)
     case OPT_ZONEGROUP_SET:
       {
        RGWRealm realm(realm_id, realm_name);
-       int ret = realm.init(g_ceph_context, store->svc.sysobj);
+       int ret = realm.init(g_ceph_context, store->svc()->sysobj);
        bool default_realm_not_exist = (ret == -ENOENT && realm_id.empty() && realm_name.empty());
 
        if (ret < 0 && !default_realm_not_exist ) {
@@ -4225,7 +4225,7 @@ int main(int argc, const char **argv)
        }
 
        RGWZoneGroup zonegroup;
-       ret = zonegroup.init(g_ceph_context, store->svc.sysobj, false);
+       ret = zonegroup.init(g_ceph_context, store->svc()->sysobj, false);
        if (ret < 0) {
          cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
          return -ret;
@@ -4263,7 +4263,7 @@ int main(int argc, const char **argv)
     case OPT_ZONEGROUP_REMOVE:
       {
         RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
-        int ret = zonegroup.init(g_ceph_context, store->svc.sysobj);
+        int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj);
         if (ret < 0) {
           cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
           return -ret;
@@ -4309,7 +4309,7 @@ int main(int argc, const char **argv)
          return EINVAL;
        }
        RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
-       int ret = zonegroup.init(g_ceph_context, store->svc.sysobj);
+       int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj);
        if (ret < 0) {
          cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
          return -ret;
@@ -4324,7 +4324,7 @@ int main(int argc, const char **argv)
     case OPT_ZONEGROUP_PLACEMENT_LIST:
       {
        RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
-       int ret = zonegroup.init(g_ceph_context, store->svc.sysobj);
+       int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj);
        if (ret < 0) {
          cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
          return -ret;
@@ -4356,7 +4356,7 @@ int main(int argc, const char **argv)
         }
 
        RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
-       int ret = zonegroup.init(g_ceph_context, store->svc.sysobj);
+       int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj);
        if (ret < 0) {
          cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
          return -ret;
@@ -4411,7 +4411,7 @@ int main(int argc, const char **argv)
        RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
        /* if the user didn't provide zonegroup info , create stand alone zone */
        if (!zonegroup_id.empty() || !zonegroup_name.empty()) {
-         ret = zonegroup.init(g_ceph_context, store->svc.sysobj);
+         ret = zonegroup.init(g_ceph_context, store->svc()->sysobj);
          if (ret < 0) {
            cerr << "unable to initialize zonegroup " << zonegroup_name << ": " << cpp_strerror(-ret) << std::endl;
            return -ret;
@@ -4422,7 +4422,7 @@ int main(int argc, const char **argv)
        }
 
        RGWZoneParams zone(zone_id, zone_name);
-       ret = zone.init(g_ceph_context, store->svc.sysobj, false);
+       ret = zone.init(g_ceph_context, store->svc()->sysobj, false);
        if (ret < 0) {
          cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl;
          return -ret;
@@ -4457,7 +4457,7 @@ int main(int argc, const char **argv)
                                    psync_from_all,
                                    sync_from, sync_from_rm,
                                    predirect_zone,
-                                  store->svc.sync_modules->get_manager());
+                                  store->svc()->sync_modules->get_manager());
          if (ret < 0) {
            cerr << "failed to add zone " << zone_name << " to zonegroup " << zonegroup.get_name()
                 << ": " << cpp_strerror(-ret) << std::endl;
@@ -4479,7 +4479,7 @@ int main(int argc, const char **argv)
     case OPT_ZONE_DEFAULT:
       {
        RGWZoneGroup zonegroup(zonegroup_id,zonegroup_name);
-       int ret = zonegroup.init(g_ceph_context, store->svc.sysobj);
+       int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj);
        if (ret < 0) {
          cerr << "WARNING: failed to initialize zonegroup " << zonegroup_name << std::endl;
        }
@@ -4488,7 +4488,7 @@ int main(int argc, const char **argv)
          return EINVAL;
        }
        RGWZoneParams zone(zone_id, zone_name);
-       ret = zone.init(g_ceph_context, store->svc.sysobj);
+       ret = zone.init(g_ceph_context, store->svc()->sysobj);
        if (ret < 0) {
          cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl;
          return -ret;
@@ -4507,14 +4507,14 @@ int main(int argc, const char **argv)
          return EINVAL;
        }
        RGWZoneParams zone(zone_id, zone_name);
-       int ret = zone.init(g_ceph_context, store->svc.sysobj);
+       int ret = zone.init(g_ceph_context, store->svc()->sysobj);
        if (ret < 0) {
          cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl;
          return -ret;
        }
 
         list<string> zonegroups;
-       ret = store->svc.zone->list_zonegroups(zonegroups);
+       ret = store->svc()->zone->list_zonegroups(zonegroups);
        if (ret < 0) {
          cerr << "failed to list zonegroups: " << cpp_strerror(-ret) << std::endl;
          return -ret;
@@ -4522,7 +4522,7 @@ int main(int argc, const char **argv)
 
         for (list<string>::iterator iter = zonegroups.begin(); iter != zonegroups.end(); ++iter) {
           RGWZoneGroup zonegroup(string(), *iter);
-          int ret = zonegroup.init(g_ceph_context, store->svc.sysobj);
+          int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj);
           if (ret < 0) {
             cerr << "WARNING: failed to initialize zonegroup " << zonegroup_name << std::endl;
             continue;
@@ -4544,7 +4544,7 @@ int main(int argc, const char **argv)
     case OPT_ZONE_GET:
       {
        RGWZoneParams zone(zone_id, zone_name);
-       int ret = zone.init(g_ceph_context, store->svc.sysobj);
+       int ret = zone.init(g_ceph_context, store->svc()->sysobj);
        if (ret < 0) {
          cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl;
          return -ret;
@@ -4556,7 +4556,7 @@ int main(int argc, const char **argv)
     case OPT_ZONE_SET:
       {
        RGWZoneParams zone(zone_name);
-       int ret = zone.init(g_ceph_context, store->svc.sysobj, false);
+       int ret = zone.init(g_ceph_context, store->svc()->sysobj, false);
        if (ret < 0) {
          return -ret;
        }
@@ -4576,7 +4576,7 @@ int main(int argc, const char **argv)
 
        if(zone.realm_id.empty()) {
          RGWRealm realm(realm_id, realm_name);
-         int ret = realm.init(g_ceph_context, store->svc.sysobj);
+         int ret = realm.init(g_ceph_context, store->svc()->sysobj);
          if (ret < 0 && ret != -ENOENT) {
            cerr << "failed to init realm: " << cpp_strerror(-ret) << std::endl;
            return -ret;
@@ -4639,14 +4639,14 @@ int main(int argc, const char **argv)
     case OPT_ZONE_LIST:
       {
        list<string> zones;
-       int ret = store->svc.zone->list_zones(zones);
+       int ret = store->svc()->zone->list_zones(zones);
        if (ret < 0) {
          cerr << "failed to list zones: " << cpp_strerror(-ret) << std::endl;
          return -ret;
        }
 
        RGWZoneParams zone;
-       ret = zone.init(g_ceph_context, store->svc.sysobj, false);
+       ret = zone.init(g_ceph_context, store->svc()->sysobj, false);
        if (ret < 0) {
          cerr << "failed to init zone: " << cpp_strerror(-ret) << std::endl;
          return -ret;
@@ -4666,7 +4666,7 @@ int main(int argc, const char **argv)
     case OPT_ZONE_MODIFY:
       {
        RGWZoneParams zone(zone_id, zone_name);
-       int ret = zone.init(g_ceph_context, store->svc.sysobj);
+       int ret = zone.init(g_ceph_context, store->svc()->sysobj);
         if (ret < 0) {
          cerr << "failed to init zone: " << cpp_strerror(-ret) << std::endl;
          return -ret;
@@ -4688,7 +4688,7 @@ int main(int argc, const char **argv)
           need_zone_update = true;
         } else if (!realm_name.empty()) {
           // get realm id from name
-          RGWRealm realm{g_ceph_context, store->svc.sysobj};
+          RGWRealm realm{g_ceph_context, store->svc()->sysobj};
           ret = realm.read_id(realm_name, zone.realm_id);
           if (ret < 0) {
             cerr << "failed to find realm by name " << realm_name << std::endl;
@@ -4724,7 +4724,7 @@ int main(int argc, const char **argv)
         }
 
        RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
-       ret = zonegroup.init(g_ceph_context, store->svc.sysobj);
+       ret = zonegroup.init(g_ceph_context, store->svc()->sysobj);
        if (ret < 0) {
          cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
          return -ret;
@@ -4740,7 +4740,7 @@ int main(int argc, const char **argv)
                                  endpoints, ptier_type,
                                  psync_from_all, sync_from, sync_from_rm,
                                  predirect_zone,
-                                store->svc.sync_modules->get_manager());
+                                store->svc()->sync_modules->get_manager());
        if (ret < 0) {
          cerr << "failed to update zonegroup: " << cpp_strerror(-ret) << std::endl;
          return -ret;
@@ -4774,7 +4774,7 @@ int main(int argc, const char **argv)
          return EINVAL;
        }
        RGWZoneParams zone(zone_id,zone_name);
-       int ret = zone.init(g_ceph_context, store->svc.sysobj);
+       int ret = zone.init(g_ceph_context, store->svc()->sysobj);
        if (ret < 0) {
          cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl;
          return -ret;
@@ -4786,7 +4786,7 @@ int main(int argc, const char **argv)
          return -ret;
        }
        RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
-       ret = zonegroup.init(g_ceph_context, store->svc.sysobj);
+       ret = zonegroup.init(g_ceph_context, store->svc()->sysobj);
        if (ret < 0) {
          cerr << "WARNING: failed to initialize zonegroup " << zonegroup_name << std::endl;
        } else {
@@ -4814,7 +4814,7 @@ int main(int argc, const char **argv)
         }
 
        RGWZoneParams zone(zone_id, zone_name);
-       int ret = zone.init(g_ceph_context, store->svc.sysobj);
+       int ret = zone.init(g_ceph_context, store->svc()->sysobj);
         if (ret < 0) {
          cerr << "failed to init zone: " << cpp_strerror(-ret) << std::endl;
          return -ret;
@@ -4823,7 +4823,7 @@ int main(int argc, const char **argv)
         if (opt_cmd == OPT_ZONE_PLACEMENT_ADD ||
            opt_cmd == OPT_ZONE_PLACEMENT_MODIFY) {
          RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
-         ret = zonegroup.init(g_ceph_context, store->svc.sysobj);
+         ret = zonegroup.init(g_ceph_context, store->svc()->sysobj);
          if (ret < 0) {
            cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
            return -ret;
@@ -4901,7 +4901,7 @@ int main(int argc, const char **argv)
     case OPT_ZONE_PLACEMENT_LIST:
       {
        RGWZoneParams zone(zone_id, zone_name);
-       int ret = zone.init(g_ceph_context, store->svc.sysobj);
+       int ret = zone.init(g_ceph_context, store->svc()->sysobj);
        if (ret < 0) {
          cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl;
          return -ret;
@@ -4914,7 +4914,7 @@ int main(int argc, const char **argv)
     return 0;
   }
 
-  bool non_master_cmd = (!store->svc.zone->is_meta_master() && !yes_i_really_mean_it);
+  bool non_master_cmd = (!store->svc()->zone->is_meta_master() && !yes_i_really_mean_it);
   std::set<int> non_master_ops_list = {OPT_USER_CREATE, OPT_USER_RM, 
                                         OPT_USER_MODIFY, OPT_USER_ENABLE,
                                         OPT_USER_SUSPEND, OPT_SUBUSER_CREATE,
@@ -5171,7 +5171,7 @@ int main(int argc, const char **argv)
 
       // load the period
       RGWPeriod period(period_id);
-      int ret = period.init(g_ceph_context, store->svc.sysobj);
+      int ret = period.init(g_ceph_context, store->svc()->sysobj);
       if (ret < 0) {
         cerr << "period init failed: " << cpp_strerror(-ret) << std::endl;
         return -ret;
@@ -5205,13 +5205,13 @@ int main(int argc, const char **argv)
     {
       // read realm and staging period
       RGWRealm realm(realm_id, realm_name);
-      int ret = realm.init(g_ceph_context, store->svc.sysobj);
+      int ret = realm.init(g_ceph_context, store->svc()->sysobj);
       if (ret < 0) {
         cerr << "Error initializing realm: " << cpp_strerror(-ret) << std::endl;
         return -ret;
       }
       RGWPeriod period(RGWPeriod::get_staging_id(realm.get_id()), 1);
-      ret = period.init(g_ceph_context, store->svc.sysobj, realm.get_id());
+      ret = period.init(g_ceph_context, store->svc()->sysobj, realm.get_id());
       if (ret < 0) {
         cerr << "period init failed: " << cpp_strerror(-ret) << std::endl;
         return -ret;
@@ -5245,7 +5245,7 @@ int main(int argc, const char **argv)
         cerr << "failed to parse policy: " << e.what() << std::endl;
         return -EINVAL;
       }
-      RGWRole role(g_ceph_context, store->pctl, role_name, path, assume_role_doc, tenant);
+      RGWRole role(g_ceph_context, store->getRados()->pctl, role_name, path, assume_role_doc, tenant);
       ret = role.create(true);
       if (ret < 0) {
         return -ret;
@@ -5259,7 +5259,7 @@ int main(int argc, const char **argv)
         cerr << "ERROR: empty role name" << std::endl;
         return -EINVAL;
       }
-      RGWRole role(g_ceph_context, store->pctl, role_name, tenant);
+      RGWRole role(g_ceph_context, store->getRados()->pctl, role_name, tenant);
       ret = role.delete_obj();
       if (ret < 0) {
         return -ret;
@@ -5273,7 +5273,7 @@ int main(int argc, const char **argv)
         cerr << "ERROR: empty role name" << std::endl;
         return -EINVAL;
       }
-      RGWRole role(g_ceph_context, store->pctl, role_name, tenant);
+      RGWRole role(g_ceph_context, store->getRados()->pctl, role_name, tenant);
       ret = role.get();
       if (ret < 0) {
         return -ret;
@@ -5301,7 +5301,7 @@ int main(int argc, const char **argv)
         return -EINVAL;
       }
 
-      RGWRole role(g_ceph_context, store->pctl, role_name, tenant);
+      RGWRole role(g_ceph_context, store->getRados()->pctl, role_name, tenant);
       ret = role.get();
       if (ret < 0) {
         return -ret;
@@ -5317,7 +5317,7 @@ int main(int argc, const char **argv)
   case OPT_ROLE_LIST:
     {
       vector<RGWRole> result;
-      ret = RGWRole::get_roles_by_path_prefix(store, g_ceph_context, path_prefix, tenant, result);
+      ret = RGWRole::get_roles_by_path_prefix(store->getRados(), g_ceph_context, path_prefix, tenant, result);
       if (ret < 0) {
         return -ret;
       }
@@ -5349,7 +5349,7 @@ int main(int argc, const char **argv)
         return -EINVAL;
       }
 
-      RGWRole role(g_ceph_context, store->pctl, role_name, tenant);
+      RGWRole role(g_ceph_context, store->getRados()->pctl, role_name, tenant);
       ret = role.get();
       if (ret < 0) {
         return -ret;
@@ -5368,7 +5368,7 @@ int main(int argc, const char **argv)
         cerr << "ERROR: Role name is empty" << std::endl;
         return -EINVAL;
       }
-      RGWRole role(g_ceph_context, store->pctl, role_name, tenant);
+      RGWRole role(g_ceph_context, store->getRados()->pctl, role_name, tenant);
       ret = role.get();
       if (ret < 0) {
         return -ret;
@@ -5388,7 +5388,7 @@ int main(int argc, const char **argv)
         cerr << "ERROR: policy name is empty" << std::endl;
         return -EINVAL;
       }
-      RGWRole role(g_ceph_context, store->pctl, role_name, tenant);
+      RGWRole role(g_ceph_context, store->getRados()->pctl, role_name, tenant);
       int ret = role.get();
       if (ret < 0) {
         return -ret;
@@ -5412,7 +5412,7 @@ int main(int argc, const char **argv)
         cerr << "ERROR: policy name is empty" << std::endl;
         return -EINVAL;
       }
-      RGWRole role(g_ceph_context, store->pctl, role_name, tenant);
+      RGWRole role(g_ceph_context, store->getRados()->pctl, role_name, tenant);
       ret = role.get();
       if (ret < 0) {
         return -ret;
@@ -5475,7 +5475,7 @@ int main(int argc, const char **argv)
     } else {
       /* list users in groups of max-keys, then perform user-bucket
        * limit-check on each group */
-     ret = store->ctl.meta.mgr->list_keys_init(metadata_key, &handle);
+     ret = store->ctl()->meta.mgr->list_keys_init(metadata_key, &handle);
       if (ret < 0) {
        cerr << "ERROR: buckets limit check can't get user metadata_key: "
             << cpp_strerror(-ret) << std::endl;
@@ -5483,7 +5483,7 @@ int main(int argc, const char **argv)
       }
 
       do {
-       ret = store->ctl.meta.mgr->list_keys_next(handle, max, user_ids,
+       ret = store->ctl()->meta.mgr->list_keys_next(handle, max, user_ids,
                                              &truncated);
        if (ret < 0 && ret != -ENOENT) {
          cerr << "ERROR: buckets limit check lists_keys_next(): "
@@ -5499,7 +5499,7 @@ int main(int argc, const char **argv)
        }
        user_ids.clear();
       } while (truncated);
-      store->ctl.meta.mgr->list_keys_complete(handle);
+      store->ctl()->meta.mgr->list_keys_complete(handle);
     }
     return -ret;
   } /* OPT_BUCKET_LIMIT_CHECK */
@@ -5526,7 +5526,7 @@ int main(int argc, const char **argv)
       map<string, bool> common_prefixes;
       string ns;
 
-      RGWRados::Bucket target(store, bucket_info);
+      RGWRados::Bucket target(store->getRados(), bucket_info);
       RGWRados::Bucket::List list_op(&target);
 
       list_op.params.prefix = prefix;
@@ -5611,7 +5611,7 @@ int main(int argc, const char **argv)
     formatter->reset();
     formatter->open_array_section("logs");
     RGWAccessHandle h;
-    int r = store->log_list_init(date, &h);
+    int r = store->getRados()->log_list_init(date, &h);
     if (r == -ENOENT) {
       // no logs.
     } else {
@@ -5621,7 +5621,7 @@ int main(int argc, const char **argv)
       }
       while (true) {
         string name;
-        int r = store->log_list_next(h, &name);
+        int r = store->getRados()->log_list_next(h, &name);
         if (r == -ENOENT)
           break;
         if (r < 0) {
@@ -5656,7 +5656,7 @@ int main(int argc, const char **argv)
     if (opt_cmd == OPT_LOG_SHOW) {
       RGWAccessHandle h;
 
-      int r = store->log_show_init(oid, &h);
+      int r = store->getRados()->log_show_init(oid, &h);
       if (r < 0) {
        cerr << "error opening log " << oid << ": " << cpp_strerror(-r) << std::endl;
        return -r;
@@ -5668,7 +5668,7 @@ int main(int argc, const char **argv)
       struct rgw_log_entry entry;
 
       // peek at first entry to get bucket metadata
-      r = store->log_show_next(h, &entry);
+      r = store->getRados()->log_show_next(h, &entry);
       if (r < 0) {
        cerr << "error reading log " << oid << ": " << cpp_strerror(-r) << std::endl;
        return -r;
@@ -5704,7 +5704,7 @@ int main(int argc, const char **argv)
          formatter->flush(cout);
         }
 next:
-       r = store->log_show_next(h, &entry);
+       r = store->getRados()->log_show_next(h, &entry);
       } while (r > 0);
 
       if (r < 0) {
@@ -5727,7 +5727,7 @@ next:
       cout << std::endl;
     }
     if (opt_cmd == OPT_LOG_RM) {
-      int r = store->log_remove(oid);
+      int r = store->getRados()->log_remove(oid);
       if (r < 0) {
        cerr << "error removing log " << oid << ": " << cpp_strerror(-r) << std::endl;
        return -r;
@@ -5741,7 +5741,7 @@ next:
       exit(1);
     }
 
-    int ret = store->svc.zone->add_bucket_placement(pool);
+    int ret = store->svc()->zone->add_bucket_placement(pool);
     if (ret < 0)
       cerr << "failed to add bucket placement: " << cpp_strerror(-ret) << std::endl;
   }
@@ -5752,14 +5752,14 @@ next:
       exit(1);
     }
 
-    int ret = store->svc.zone->remove_bucket_placement(pool);
+    int ret = store->svc()->zone->remove_bucket_placement(pool);
     if (ret < 0)
       cerr << "failed to remove bucket placement: " << cpp_strerror(-ret) << std::endl;
   }
 
   if (opt_cmd == OPT_POOLS_LIST) {
     set<rgw_pool> pools;
-    int ret = store->svc.zone->list_placement_set(pools);
+    int ret = store->svc()->zone->list_placement_set(pools);
     if (ret < 0) {
       cerr << "could not list placement set: " << cpp_strerror(-ret) << std::endl;
       return -ret;
@@ -5798,7 +5798,7 @@ next:
     }
 
 
-    ret = RGWUsage::show(store, user_id, bucket_name, start_epoch, end_epoch,
+    ret = RGWUsage::show(store->getRados(), user_id, bucket_name, start_epoch, end_epoch,
                         show_log_entries, show_log_sum, &categories,
                         f);
     if (ret < 0) {
@@ -5835,7 +5835,7 @@ next:
       }
     }
 
-    ret = RGWUsage::trim(store, user_id, bucket_name, start_epoch, end_epoch);
+    ret = RGWUsage::trim(store->getRados(), user_id, bucket_name, start_epoch, end_epoch);
     if (ret < 0) {
       cerr << "ERROR: read_usage() returned ret=" << ret << std::endl;
       return 1;
@@ -5849,7 +5849,7 @@ next:
       return 1;
     }
 
-    ret = RGWUsage::clear(store);
+    ret = RGWUsage::clear(store->getRados());
     if (ret < 0) {
       return ret;
     }
@@ -5876,7 +5876,7 @@ next:
     }
     RGWOLHInfo olh;
     rgw_obj obj(bucket, object);
-    ret = store->get_olh(bucket_info, obj, &olh);
+    ret = store->getRados()->get_olh(bucket_info, obj, &olh);
     if (ret < 0) {
       cerr << "ERROR: failed reading olh: " << cpp_strerror(-ret) << std::endl;
       return -ret;
@@ -5900,12 +5900,12 @@ next:
 
     RGWObjState *state;
 
-    ret = store->get_obj_state(&rctx, bucket_info, obj, &state, false, null_yield); /* don't follow olh */
+    ret = store->getRados()->get_obj_state(&rctx, bucket_info, obj, &state, false, null_yield); /* don't follow olh */
     if (ret < 0) {
       return -ret;
     }
 
-    ret = store->bucket_index_read_olh_log(bucket_info, *state, obj, 0, &log, &is_truncated);
+    ret = store->getRados()->bucket_index_read_olh_log(bucket_info, *state, obj, 0, &log, &is_truncated);
     if (ret < 0) {
       cerr << "ERROR: failed reading olh: " << cpp_strerror(-ret) << std::endl;
       return -ret;
@@ -5939,7 +5939,7 @@ next:
 
     rgw_cls_bi_entry entry;
 
-    ret = store->bi_get(bucket_info, obj, bi_index_type, &entry);
+    ret = store->getRados()->bi_get(bucket_info, obj, bi_index_type, &entry);
     if (ret < 0) {
       cerr << "ERROR: bi_get(): " << cpp_strerror(-ret) << std::endl;
       return -ret;
@@ -5970,7 +5970,7 @@ next:
 
     rgw_obj obj(bucket, key);
 
-    ret = store->bi_put(bucket, obj, entry);
+    ret = store->getRados()->bi_put(bucket, obj, entry);
     if (ret < 0) {
       cerr << "ERROR: bi_put(): " << cpp_strerror(-ret) << std::endl;
       return -ret;
@@ -6001,7 +6001,7 @@ next:
 
     int i = (specified_shard_id ? shard_id : 0);
     for (; i < max_shards; i++) {
-      RGWRados::BucketShard bs(store);
+      RGWRados::BucketShard bs(store->getRados());
       int shard_id = (bucket_info.num_shards > 0  ? i : -1);
       int ret = bs.init(bucket, shard_id, nullptr /* no RGWBucketInfo */);
       marker.clear();
@@ -6013,7 +6013,7 @@ next:
 
       do {
         entries.clear();
-        ret = store->bi_list(bs, object, marker, max_entries, &entries, &is_truncated);
+        ret = store->getRados()->bi_list(bs, object, marker, max_entries, &entries, &is_truncated);
         if (ret < 0) {
           cerr << "ERROR: bi_list(): " << cpp_strerror(-ret) << std::endl;
           return -ret;
@@ -6065,7 +6065,7 @@ next:
     int max_shards = (bucket_info.num_shards > 0 ? bucket_info.num_shards : 1);
 
     for (int i = 0; i < max_shards; i++) {
-      RGWRados::BucketShard bs(store);
+      RGWRados::BucketShard bs(store->getRados());
       int shard_id = (bucket_info.num_shards > 0  ? i : -1);
       int ret = bs.init(bucket, shard_id, nullptr /* no RGWBucketInfo */);
       if (ret < 0) {
@@ -6073,7 +6073,7 @@ next:
         return -ret;
       }
 
-      ret = store->bi_remove(bs);
+      ret = store->getRados()->bi_remove(bs);
       if (ret < 0) {
         cerr << "ERROR: failed to remove bucket index object: " << cpp_strerror(-ret) << std::endl;
         return -ret;
@@ -6165,7 +6165,7 @@ next:
       }
     }
     if (need_rewrite) {
-      ret = store->rewrite_obj(bucket_info, obj, dpp(), null_yield);
+      ret = store->getRados()->rewrite_obj(bucket_info, obj, dpp(), null_yield);
       if (ret < 0) {
         cerr << "ERROR: object rewrite returned: " << cpp_strerror(-ret) << std::endl;
         return -ret;
@@ -6176,7 +6176,7 @@ next:
   }
 
   if (opt_cmd == OPT_OBJECTS_EXPIRE) {
-    if (!store->process_expire_objects()) {
+    if (!store->getRados()->process_expire_objects()) {
       cerr << "ERROR: process_expire_objects() processing returned error." << std::endl;
       return 1;
     }
@@ -6240,7 +6240,7 @@ next:
     while (is_truncated) {
       RGWRados::ent_map_t result;
       int r =
-       store->cls_bucket_list_ordered(bucket_info, RGW_NO_SHARD, marker,
+       store->getRados()->cls_bucket_list_ordered(bucket_info, RGW_NO_SHARD, marker,
                                       prefix, 1000, true,
                                       result, &is_truncated, &marker,
                                        null_yield,
@@ -6282,7 +6282,7 @@ next:
           if (!need_rewrite) {
             formatter->dump_string("status", "Skipped");
           } else {
-            r = store->rewrite_obj(bucket_info, obj, dpp(), null_yield);
+            r = store->getRados()->rewrite_obj(bucket_info, obj, dpp(), null_yield);
             if (r == 0) {
               formatter->dump_string("status", "Success");
             } else {
@@ -6499,7 +6499,7 @@ next:
     rgw_obj_index_key index_key;
     key.get_index_key(&index_key);
     oid_list.push_back(index_key);
-    ret = store->remove_objs_from_index(bucket_info, oid_list);
+    ret = store->getRados()->remove_objs_from_index(bucket_info, oid_list);
     if (ret < 0) {
       cerr << "ERROR: remove_obj_from_index() returned error: " << cpp_strerror(-ret) << std::endl;
       return 1;
@@ -6519,7 +6519,7 @@ next:
     uint64_t obj_size;
     map<string, bufferlist> attrs;
     RGWObjectCtx obj_ctx(store);
-    RGWRados::Object op_target(store, bucket_info, obj_ctx, obj);
+    RGWRados::Object op_target(store->getRados(), bucket_info, obj_ctx, obj);
     RGWRados::Object::Read read_op(&op_target);
 
     read_op.params.attrs = &attrs;
@@ -6598,7 +6598,7 @@ next:
 
     do {
       list<cls_rgw_gc_obj_info> result;
-      int ret = store->list_gc_objs(&index, marker, 1000, !include_all, result, &truncated);
+      int ret = store->getRados()->list_gc_objs(&index, marker, 1000, !include_all, result, &truncated);
       if (ret < 0) {
        cerr << "ERROR: failed to list objs: " << cpp_strerror(-ret) << std::endl;
        return 1;
@@ -6628,7 +6628,7 @@ next:
   }
 
   if (opt_cmd == OPT_GC_PROCESS) {
-    int ret = store->process_gc(!include_all);
+    int ret = store->getRados()->process_gc(!include_all);
     if (ret < 0) {
       cerr << "ERROR: gc processing returned error: " << cpp_strerror(-ret) << std::endl;
       return 1;
@@ -6644,7 +6644,7 @@ next:
       max_entries = MAX_LC_LIST_ENTRIES;
     }
     do {
-      int ret = store->list_lc_progress(marker, max_entries, &bucket_lc_map);
+      int ret = store->getRados()->list_lc_progress(marker, max_entries, &bucket_lc_map);
       if (ret < 0) {
         cerr << "ERROR: failed to list objs: " << cpp_strerror(-ret) << std::endl;
         return 1;
@@ -6700,7 +6700,7 @@ next:
   }
 
   if (opt_cmd == OPT_LC_PROCESS) {
-    int ret = store->process_lc();
+    int ret = store->getRados()->process_lc();
     if (ret < 0) {
       cerr << "ERROR: lc processing returned error: " << cpp_strerror(-ret) << std::endl;
       return 1;
@@ -6813,7 +6813,7 @@ next:
        cerr << "ERROR: recalculate doesn't work on buckets" << std::endl;
        return EINVAL;
       }
-      ret = store->ctl.user->reset_stats(user_id);
+      ret = store->ctl()->user->reset_stats(user_id);
       if (ret < 0) {
        cerr << "ERROR: could not clear user stats: " << cpp_strerror(-ret) << std::endl;
        return -ret;
@@ -6828,7 +6828,7 @@ next:
           cerr << "ERROR: could not init bucket: " << cpp_strerror(-ret) << std::endl;
           return -ret;
         }
-        ret = store->ctl.bucket->sync_user_stats(user_id, bucket_info);
+        ret = store->ctl()->bucket->sync_user_stats(user_id, bucket_info);
         if (ret < 0) {
           cerr << "ERROR: could not sync bucket stats: " << cpp_strerror(-ret) << std::endl;
           return -ret;
@@ -6845,7 +6845,7 @@ next:
     RGWStorageStats stats;
     ceph::real_time last_stats_sync;
     ceph::real_time last_stats_update;
-    int ret = store->ctl.user->read_stats(user_id, &stats, &last_stats_sync, &last_stats_update);
+    int ret = store->ctl()->user->read_stats(user_id, &stats, &last_stats_sync, &last_stats_update);
     if (ret < 0) {
       if (ret == -ENOENT) { /* in case of ENOENT */
         cerr << "User has not been initialized or user does not exist" << std::endl;
@@ -6867,7 +6867,7 @@ next:
   }
 
   if (opt_cmd == OPT_METADATA_GET) {
-    int ret = store->ctl.meta.mgr->get(metadata_key, formatter, null_yield);
+    int ret = store->ctl()->meta.mgr->get(metadata_key, formatter, null_yield);
     if (ret < 0) {
       cerr << "ERROR: can't get key: " << cpp_strerror(-ret) << std::endl;
       return -ret;
@@ -6883,7 +6883,7 @@ next:
       cerr << "ERROR: failed to read input: " << cpp_strerror(-ret) << std::endl;
       return -ret;
     }
-    ret = store->ctl.meta.mgr->put(metadata_key, bl, null_yield, RGWMDLogSyncType::APPLY_ALWAYS);
+    ret = store->ctl()->meta.mgr->put(metadata_key, bl, null_yield, RGWMDLogSyncType::APPLY_ALWAYS);
     if (ret < 0) {
       cerr << "ERROR: can't put key: " << cpp_strerror(-ret) << std::endl;
       return -ret;
@@ -6891,7 +6891,7 @@ next:
   }
 
   if (opt_cmd == OPT_METADATA_RM) {
-    int ret = store->ctl.meta.mgr->remove(metadata_key, null_yield);
+    int ret = store->ctl()->meta.mgr->remove(metadata_key, null_yield);
     if (ret < 0) {
       cerr << "ERROR: can't remove key: " << cpp_strerror(-ret) << std::endl;
       return -ret;
@@ -6904,7 +6904,7 @@ next:
     }
     void *handle;
     int max = 1000;
-    int ret = store->ctl.meta.mgr->list_keys_init(metadata_key, marker, &handle);
+    int ret = store->ctl()->meta.mgr->list_keys_init(metadata_key, marker, &handle);
     if (ret < 0) {
       cerr << "ERROR: can't get key: " << cpp_strerror(-ret) << std::endl;
       return -ret;
@@ -6922,7 +6922,7 @@ next:
     do {
       list<string> keys;
       left = (max_entries_specified ? max_entries - count : max);
-      ret = store->ctl.meta.mgr->list_keys_next(handle, left, keys, &truncated);
+      ret = store->ctl()->meta.mgr->list_keys_next(handle, left, keys, &truncated);
       if (ret < 0 && ret != -ENOENT) {
         cerr << "ERROR: lists_keys_next(): " << cpp_strerror(-ret) << std::endl;
         return -ret;
@@ -6941,13 +6941,13 @@ next:
       encode_json("truncated", truncated, formatter);
       encode_json("count", count, formatter);
       if (truncated) {
-        encode_json("marker", store->ctl.meta.mgr->get_marker(handle), formatter);
+        encode_json("marker", store->ctl()->meta.mgr->get_marker(handle), formatter);
       }
       formatter->close_section();
     }
     formatter->flush(cout);
 
-    store->ctl.meta.mgr->list_keys_complete(handle);
+    store->ctl()->meta.mgr->list_keys_complete(handle);
   }
 
   if (opt_cmd == OPT_MDLOG_LIST) {
@@ -6971,7 +6971,7 @@ next:
       std::cerr << "No --period given, using current period="
           << period_id << std::endl;
     }
-    RGWMetadataLog *meta_log = store->svc.mdlog->get_log(period_id);
+    RGWMetadataLog *meta_log = store->svc()->mdlog->get_log(period_id);
 
     formatter->open_array_section("entries");
     for (; i < g_ceph_context->_conf->rgw_md_log_max_shards; i++) {
@@ -6990,7 +6990,7 @@ next:
 
         for (list<cls_log_entry>::iterator iter = entries.begin(); iter != entries.end(); ++iter) {
           cls_log_entry& entry = *iter;
-          store->ctl.meta.mgr->dump_log_entry(entry, formatter);
+          store->ctl()->meta.mgr->dump_log_entry(entry, formatter);
         }
         formatter->flush(cout);
       } while (truncated);
@@ -7017,7 +7017,7 @@ next:
       std::cerr << "No --period given, using current period="
           << period_id << std::endl;
     }
-    RGWMetadataLog *meta_log = store->svc.mdlog->get_log(period_id);
+    RGWMetadataLog *meta_log = store->svc()->mdlog->get_log(period_id);
 
     formatter->open_array_section("entries");
 
@@ -7038,9 +7038,9 @@ next:
 
   if (opt_cmd == OPT_MDLOG_AUTOTRIM) {
     // need a full history for purging old mdlog periods
-    store->svc.mdlog->init_oldest_log_period();
+    store->svc()->mdlog->init_oldest_log_period();
 
-    RGWCoroutinesManager crs(store->ctx(), store->get_cr_registry());
+    RGWCoroutinesManager crs(store->ctx(), store->getRados()->get_cr_registry());
     RGWHTTPManager http(store->ctx(), crs.get_completion_mgr());
     int ret = http.start();
     if (ret < 0) {
@@ -7076,7 +7076,7 @@ next:
       std::cerr << "missing --period argument" << std::endl;
       return EINVAL;
     }
-    RGWMetadataLog *meta_log = store->svc.mdlog->get_log(period_id);
+    RGWMetadataLog *meta_log = store->svc()->mdlog->get_log(period_id);
 
     ret = meta_log->trim(shard_id, start_time.to_real_time(), end_time.to_real_time(), start_marker, end_marker);
     if (ret < 0) {
@@ -7090,7 +7090,7 @@ next:
   }
 
   if (opt_cmd == OPT_METADATA_SYNC_STATUS) {
-    RGWMetaSyncStatusManager sync(store, store->svc.rados->get_async_processor());
+    RGWMetaSyncStatusManager sync(store, store->svc()->rados->get_async_processor());
 
     int ret = sync.init();
     if (ret < 0) {
@@ -7131,7 +7131,7 @@ next:
   }
 
   if (opt_cmd == OPT_METADATA_SYNC_INIT) {
-    RGWMetaSyncStatusManager sync(store, store->svc.rados->get_async_processor());
+    RGWMetaSyncStatusManager sync(store, store->svc()->rados->get_async_processor());
 
     int ret = sync.init();
     if (ret < 0) {
@@ -7147,7 +7147,7 @@ next:
 
 
   if (opt_cmd == OPT_METADATA_SYNC_RUN) {
-    RGWMetaSyncStatusManager sync(store, store->svc.rados->get_async_processor());
+    RGWMetaSyncStatusManager sync(store, store->svc()->rados->get_async_processor());
 
     int ret = sync.init();
     if (ret < 0) {
@@ -7167,7 +7167,7 @@ next:
       cerr << "ERROR: source zone not specified" << std::endl;
       return EINVAL;
     }
-    RGWDataSyncStatusManager sync(store, store->svc.rados->get_async_processor(), source_zone, nullptr);
+    RGWDataSyncStatusManager sync(store, store->svc()->rados->get_async_processor(), source_zone, nullptr);
 
     int ret = sync.init();
     if (ret < 0) {
@@ -7231,7 +7231,7 @@ next:
       return EINVAL;
     }
 
-    RGWDataSyncStatusManager sync(store, store->svc.rados->get_async_processor(), source_zone, nullptr);
+    RGWDataSyncStatusManager sync(store, store->svc()->rados->get_async_processor(), source_zone, nullptr);
 
     int ret = sync.init();
     if (ret < 0) {
@@ -7253,14 +7253,14 @@ next:
     }
 
     RGWSyncModuleInstanceRef sync_module;
-    int ret = store->svc.sync_modules->get_manager()->create_instance(g_ceph_context, store->svc.zone->get_zone().tier_type, 
-        store->svc.zone->get_zone_params().tier_config, &sync_module);
+    int ret = store->svc()->sync_modules->get_manager()->create_instance(g_ceph_context, store->svc()->zone->get_zone().tier_type,
+        store->svc()->zone->get_zone_params().tier_config, &sync_module);
     if (ret < 0) {
       lderr(cct) << "ERROR: failed to init sync module instance, ret=" << ret << dendl;
       return ret;
     }
 
-    RGWDataSyncStatusManager sync(store, store->svc.rados->get_async_processor(), source_zone, nullptr, sync_module);
+    RGWDataSyncStatusManager sync(store, store->svc()->rados->get_async_processor(), source_zone, nullptr, sync_module);
 
     ret = sync.init();
     if (ret < 0) {
@@ -7314,13 +7314,13 @@ next:
       return ret;
     }
     RGWPeriod period;
-    ret = period.init(g_ceph_context, store->svc.sysobj, realm_id, realm_name, true);
+    ret = period.init(g_ceph_context, store->svc()->sysobj, realm_id, realm_name, true);
     if (ret < 0) {
       cerr << "failed to init period " << ": " << cpp_strerror(-ret) << std::endl;
       return ret;
     }
 
-    if (!store->svc.zone->is_meta_master()) {
+    if (!store->svc()->zone->is_meta_master()) {
       cerr << "failed to update bucket sync: only allowed on meta master zone "  << std::endl;
       cerr << period.get_master_zone() << " | " << period.get_realm() << std::endl;
       return EINVAL;
@@ -7427,7 +7427,7 @@ next:
 
     do {
       list<rgw_bi_log_entry> entries;
-      ret = store->svc.bilog_rados->log_list(bucket_info, shard_id, marker, max_entries - count, entries, &truncated);
+      ret = store->svc()->bilog_rados->log_list(bucket_info, shard_id, marker, max_entries - count, entries, &truncated);
       if (ret < 0) {
         cerr << "ERROR: list_bi_log_entries(): " << cpp_strerror(-ret) << std::endl;
         return -ret;
@@ -7480,7 +7480,7 @@ next:
 
       do {
         list<cls_log_entry> entries;
-        ret = store->svc.cls->timelog.list(oid, start_time.to_real_time(), end_time.to_real_time(),
+        ret = store->svc()->cls->timelog.list(oid, start_time.to_real_time(), end_time.to_real_time(),
                                            max_entries - count, entries, marker, &marker, &truncated,
                                            null_yield);
         if (ret == -ENOENT) {
@@ -7565,7 +7565,7 @@ next:
       cerr << "ERROR: could not init bucket: " << cpp_strerror(-ret) << std::endl;
       return -ret;
     }
-    ret = store->svc.bilog_rados->log_trim(bucket_info, shard_id, start_marker, end_marker);
+    ret = store->svc()->bilog_rados->log_trim(bucket_info, shard_id, start_marker, end_marker);
     if (ret < 0) {
       cerr << "ERROR: trim_bi_log_entries(): " << cpp_strerror(-ret) << std::endl;
       return -ret;
@@ -7584,7 +7584,7 @@ next:
       return -ret;
     }
     map<int, string> markers;
-    ret = store->svc.bilog_rados->get_log_status(bucket_info, shard_id, &markers);
+    ret = store->svc()->bilog_rados->get_log_status(bucket_info, shard_id, &markers);
     if (ret < 0) {
       cerr << "ERROR: get_bi_log_status(): " << cpp_strerror(-ret) << std::endl;
       return -ret;
@@ -7596,7 +7596,7 @@ next:
   }
 
   if (opt_cmd == OPT_BILOG_AUTOTRIM) {
-    RGWCoroutinesManager crs(store->ctx(), store->get_cr_registry());
+    RGWCoroutinesManager crs(store->ctx(), store->getRados()->get_cr_registry());
     RGWHTTPManager http(store->ctx(), crs.get_completion_mgr());
     int ret = http.start();
     if (ret < 0) {
@@ -7637,7 +7637,7 @@ next:
     if (ret < 0)
       return -ret;
 
-    auto datalog_svc = store->svc.datalog_rados;
+    auto datalog_svc = store->svc()->datalog_rados;
     RGWDataChangesLog::LogMarker log_marker;
 
     do {
@@ -7677,7 +7677,7 @@ next:
       list<cls_log_entry> entries;
 
       RGWDataChangesLogInfo info;
-      store->svc.datalog_rados->get_info(i, &info);
+      store->svc()->datalog_rados->get_info(i, &info);
 
       ::encode_json("info", info, formatter);
 
@@ -7690,7 +7690,7 @@ next:
   }
 
   if (opt_cmd == OPT_DATALOG_AUTOTRIM) {
-    RGWCoroutinesManager crs(store->ctx(), store->get_cr_registry());
+    RGWCoroutinesManager crs(store->ctx(), store->getRados()->get_cr_registry());
     RGWHTTPManager http(store->ctx(), crs.get_completion_mgr());
     int ret = http.start();
     if (ret < 0) {
@@ -7718,7 +7718,7 @@ next:
     if (ret < 0)
       return -ret;
 
-    ret = store->svc.datalog_rados->trim_entries(start_time.to_real_time(), end_time.to_real_time(), start_marker, end_marker);
+    ret = store->svc()->datalog_rados->trim_entries(start_time.to_real_time(), end_time.to_real_time(), start_marker, end_marker);
     if (ret < 0) {
       cerr << "ERROR: trim_entries(): " << cpp_strerror(-ret) << std::endl;
       return -ret;
@@ -7794,14 +7794,14 @@ next:
     }
 
     real_time mtime = real_clock::now();
-    string oid = store->svc.cls->mfa.get_mfa_oid(user_id);
-
-    int ret = store->ctl.meta.mgr->mutate(RGWSI_MetaBackend_OTP::get_meta_key(user_id),
-                                         mtime, &objv_tracker,
-                                         null_yield,
-                                         MDLOG_STATUS_WRITE,
-                                         [&] {
-      return store->svc.cls->mfa.create_mfa(user_id, config, &objv_tracker, mtime, null_yield);
+    string oid = store->svc()->cls->mfa.get_mfa_oid(user_id);
+
+    int ret = store->ctl()->meta.mgr->mutate(RGWSI_MetaBackend_OTP::get_meta_key(user_id),
+                                            mtime, &objv_tracker,
+                                            null_yield,
+                                            MDLOG_STATUS_WRITE,
+                                            [&] {
+      return store->svc()->cls->mfa.create_mfa(user_id, config, &objv_tracker, mtime, null_yield);
     });
     if (ret < 0) {
       cerr << "MFA creation failed, error: " << cpp_strerror(-ret) << std::endl;
@@ -7832,12 +7832,12 @@ next:
 
     real_time mtime = real_clock::now();
 
-    int ret = store->ctl.meta.mgr->mutate(RGWSI_MetaBackend_OTP::get_meta_key(user_id),
-                                         mtime, &objv_tracker,
-                                         null_yield,
-                                         MDLOG_STATUS_WRITE,
-                                         [&] {
-      return store->svc.cls->mfa.remove_mfa(user_id, totp_serial, &objv_tracker, mtime, null_yield);
+    int ret = store->ctl()->meta.mgr->mutate(RGWSI_MetaBackend_OTP::get_meta_key(user_id),
+                                            mtime, &objv_tracker,
+                                            null_yield,
+                                            MDLOG_STATUS_WRITE,
+                                            [&] {
+      return store->svc()->cls->mfa.remove_mfa(user_id, totp_serial, &objv_tracker, mtime, null_yield);
     });
     if (ret < 0) {
       cerr << "MFA removal failed, error: " << cpp_strerror(-ret) << std::endl;
@@ -7867,7 +7867,7 @@ next:
     }
 
     rados::cls::otp::otp_info_t result;
-    int ret = store->svc.cls->mfa.get_mfa(user_id, totp_serial, &result, null_yield);
+    int ret = store->svc()->cls->mfa.get_mfa(user_id, totp_serial, &result, null_yield);
     if (ret < 0) {
       if (ret == -ENOENT || ret == -ENODATA) {
         cerr << "MFA serial id not found" << std::endl;
@@ -7889,7 +7889,7 @@ next:
     }
 
     list<rados::cls::otp::otp_info_t> result;
-    int ret = store->svc.cls->mfa.list_mfa(user_id, &result, null_yield);
+    int ret = store->svc()->cls->mfa.list_mfa(user_id, &result, null_yield);
     if (ret < 0) {
       cerr << "MFA listing failed, error: " << cpp_strerror(-ret) << std::endl;
       return -ret;
@@ -7917,7 +7917,7 @@ next:
     }
 
     list<rados::cls::otp::otp_info_t> result;
-    int ret = store->svc.cls->mfa.check_mfa(user_id, totp_serial, totp_pin.front(), null_yield);
+    int ret = store->svc()->cls->mfa.check_mfa(user_id, totp_serial, totp_pin.front(), null_yield);
     if (ret < 0) {
       cerr << "MFA check failed, error: " << cpp_strerror(-ret) << std::endl;
       return -ret;
@@ -7942,7 +7942,7 @@ next:
     }
 
     rados::cls::otp::otp_info_t config;
-    int ret = store->svc.cls->mfa.get_mfa(user_id, totp_serial, &config, null_yield);
+    int ret = store->svc()->cls->mfa.get_mfa(user_id, totp_serial, &config, null_yield);
     if (ret < 0) {
       if (ret == -ENOENT || ret == -ENODATA) {
         cerr << "MFA serial id not found" << std::endl;
@@ -7954,7 +7954,7 @@ next:
 
     ceph::real_time now;
 
-    ret = store->svc.cls->mfa.otp_get_current_time(user_id, &now, null_yield);
+    ret = store->svc()->cls->mfa.otp_get_current_time(user_id, &now, null_yield);
     if (ret < 0) {
       cerr << "ERROR: failed to fetch current time from osd: " << cpp_strerror(-ret) << std::endl;
       return -ret;
@@ -7976,12 +7976,12 @@ next:
     /* now update the backend */
     real_time mtime = real_clock::now();
 
-    ret = store->ctl.meta.mgr->mutate(RGWSI_MetaBackend_OTP::get_meta_key(user_id),
-                                     mtime, &objv_tracker,
-                                     null_yield,
-                                     MDLOG_STATUS_WRITE,
-                                     [&] {
-      return store->svc.cls->mfa.create_mfa(user_id, config, &objv_tracker, mtime, null_yield);
+    ret = store->ctl()->meta.mgr->mutate(RGWSI_MetaBackend_OTP::get_meta_key(user_id),
+                                        mtime, &objv_tracker,
+                                        null_yield,
+                                        MDLOG_STATUS_WRITE,
+                                        [&] {
+      return store->svc()->cls->mfa.create_mfa(user_id, config, &objv_tracker, mtime, null_yield);
     });
     if (ret < 0) {
       cerr << "MFA update failed, error: " << cpp_strerror(-ret) << std::endl;
@@ -7991,7 +7991,7 @@ next:
  }
 
  if (opt_cmd == OPT_RESHARD_STALE_INSTANCES_LIST) {
-   if (!store->svc.zone->can_reshard() && !yes_i_really_mean_it) {
+   if (!store->svc()->zone->can_reshard() && !yes_i_really_mean_it) {
      cerr << "Resharding disabled in a multisite env, stale instances unlikely from resharding" << std::endl;
      cerr << "These instances may not be safe to delete." << std::endl;
      cerr << "Use --yes-i-really-mean-it to force displaying these instances." << std::endl;
@@ -8005,7 +8005,7 @@ next:
  }
 
  if (opt_cmd == OPT_RESHARD_STALE_INSTANCES_DELETE) {
-   if (!store->svc.zone->can_reshard()) {
+   if (!store->svc()->zone->can_reshard()) {
      cerr << "Resharding disabled in a multisite env. Stale instances are not safe to be deleted." << std::endl;
      return EINVAL;
    }
@@ -8265,7 +8265,7 @@ next:
     dest_config.oid_prefix = sub_oid_prefix;
     dest_config.push_endpoint = sub_push_endpoint;
 
-    auto psmodule = static_cast<RGWPSSyncModuleInstance *>(store->get_sync_module().get());
+    auto psmodule = static_cast<RGWPSSyncModuleInstance *>(store->getRados()->get_sync_module().get());
     auto conf = psmodule->get_effective_conf();
 
     if (dest_config.bucket_name.empty()) {
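
Every hunk above makes the same mechanical substitution in the radosgw-admin command handling: call sites that used to reach into the backend directly (store->svc.*, store->ctl.*, store->pctl, or raw RGWRados methods such as process_gc() and rewrite_obj()) now go through accessors on the new rgw::sal::RGWRadosStore wrapper declared in rgw_sal.h. A minimal, self-contained sketch of the accessor surface those call sites assume is below; it is not the commit's actual rgw_sal.h, and the member names are inferred only from the calls visible in the hunks above.

// Hedged sketch only -- member names inferred from the call sites above,
// wrapped types reduced to forward declarations.
class RGWRados;      // legacy backend being wrapped
class RGWServices;   // bundle formerly reached as store->svc.*
class RGWCtl;        // bundle formerly reached as store->ctl.*
class CephContext;

namespace rgw { namespace sal {

class RGWRadosStore {
  RGWRados*    rados = nullptr;
  RGWServices* _svc  = nullptr;
  RGWCtl*      _ctl  = nullptr;
  CephContext* cct   = nullptr;
public:
  RGWRados*    getRados() const { return rados; } // store->getRados()->process_gc(...)
  RGWServices* svc()      const { return _svc; }  // store->svc()->zone->can_reshard()
  RGWCtl*      ctl()      const { return _ctl; }  // store->ctl()->meta.mgr->get(...)
  CephContext* ctx()      const { return cct; }   // store->ctx() call sites are unchanged
};

}} // namespace rgw::sal
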
index 5f839d2679ea9e4b384c8010edee836a88c8b1a8..54289e9a7c119e4bcfcd141a7408b0ba2dc94b97 100644 (file)
 #include "common/config.h"
 
 #include <boost/intrusive_ptr.hpp>
-
-class RGWRados;
+#include "rgw_sal.h"
 
 namespace rgw {
 
   class RGWLibAdmin
   {
-    RGWRados *store;
+    rgw::sal::RGWRadosStore *store;
     boost::intrusive_ptr<CephContext> cct;
 
   public:
-    RGWRados* get_store()
+    rgw::sal::RGWRadosStore* get_store()
     {
       return store;
     }
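
The header hunk above swaps the stored pointer from the raw RGWRados to the rgw::sal::RGWRadosStore wrapper (and pulls in rgw_sal.h instead of forward-declaring RGWRados), so callers that still need the legacy backend take one extra hop. A hedged fragment of the call-site adjustment this implies; admin is a hypothetical instance of the class shown above:

// Hedged fragment, not from the commit: 'admin' is hypothetical.
rgw::sal::RGWRadosStore* store = admin.get_store();
RGWRados* rados = store->getRados();   // only where the raw backend is still required
CephContext* cct = store->ctx();       // context access is unchanged
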
index c70abf3a354c06fc5cc4fa272da35987e3003015..82dec1e7ecb9f0f04ba172ed96d9acc3314c9705 100644 (file)
@@ -143,7 +143,7 @@ void handle_connection(boost::asio::io_context& context,
       }
 
       // process the request
-      RGWRequest req{env.store->get_new_req_id()};
+      RGWRequest req{env.store->getRados()->get_new_req_id()};
 
       auto& socket = stream.lowest_layer();
       StreamIO real_client{cct, stream, parser, yield, buffer, is_ssl,
@@ -292,7 +292,7 @@ class AsioFrontend {
   void stop();
   void join();
   void pause();
-  void unpause(RGWRados* store, rgw_auth_registry_ptr_t);
+  void unpause(rgw::sal::RGWRadosStore* store, rgw_auth_registry_ptr_t);
 };
 
 unsigned short parse_port(const char *input, boost::system::error_code& ec)
@@ -695,7 +695,7 @@ void AsioFrontend::pause()
   }
 }
 
-void AsioFrontend::unpause(RGWRados* const store,
+void AsioFrontend::unpause(rgw::sal::RGWRadosStore* const store,
                            rgw_auth_registry_ptr_t auth_registry)
 {
   env.store = store;
@@ -759,7 +759,7 @@ void RGWAsioFrontend::pause_for_new_config()
 }
 
 void RGWAsioFrontend::unpause_with_new_config(
-  RGWRados* const store,
+  rgw::sal::RGWRadosStore* const store,
   rgw_auth_registry_ptr_t auth_registry
 ) {
   impl->unpause(store, std::move(auth_registry));
index 857910bbd9b0bf1faa8bfd7f6542bf94c1079d16..df130a36a1ae9a85936e8bee71ed4954b87457a2 100644 (file)
@@ -21,7 +21,7 @@ public:
   void join() override;
 
   void pause_for_new_config() override;
-  void unpause_with_new_config(RGWRados *store,
+  void unpause_with_new_config(rgw::sal::RGWRadosStore *store,
                                rgw_auth_registry_ptr_t auth_registry) override;
 };
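
The two frontend sections above are pure type plumbing: the per-request id is now allocated through getRados()->get_new_req_id(), and the pause/unpause API hands the rgw::sal::RGWRadosStore wrapper around instead of a raw RGWRados. A hedged usage example of the reload cycle a caller would drive through the interface declared above; front, new_store, and new_registry are hypothetical names:

// Hedged usage example; only the method signatures are taken from the hunk above.
front->pause_for_new_config();
// ... configuration reloaded, a fresh rgw::sal::RGWRadosStore built ...
front->unpause_with_new_config(new_store, std::move(new_registry));
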
 
index 6856720bc78b61d8b6971e7f94712b7a0d8db184..ece514095b4bd85d896d4088dacf261408d91f52 100644 (file)
 
 #include "cls/user/cls_user_types.h"
 
+#include "rgw_sal.h"
+
 #define dout_context g_ceph_context
 #define dout_subsys ceph_subsys_rgw
 
 #define BUCKET_TAG_TIMEOUT 30
 
+using namespace rgw::sal;
+
 
 /*
  * The tenant_name is always returned on purpose. May be empty, of course.
@@ -141,7 +145,7 @@ void rgw_parse_url_bucket(const string &bucket, const string& auth_tenant,
  * Get all the buckets owned by a user and fill up an RGWUserBuckets with them.
  * Returns: 0 on success, -ERR# on failure.
  */
-int rgw_read_user_buckets(RGWRados * store,
+int rgw_read_user_buckets(RGWRadosStore * store,
                           const rgw_user& user_id,
                           RGWUserBuckets& buckets,
                           const string& marker,
@@ -151,7 +155,7 @@ int rgw_read_user_buckets(RGWRados * store,
                          bool *is_truncated,
                          uint64_t default_amount)
 {
-  return store->ctl.user->list_buckets(user_id, marker, end_marker,
+  return store->ctl()->user->list_buckets(user_id, marker, end_marker,
                                        max, need_stats, &buckets,
                                        is_truncated, default_amount);
 }
@@ -246,7 +250,7 @@ static void dump_mulipart_index_results(list<rgw_obj_index_key>& objs_to_unlink,
   }
 }
 
-void check_bad_user_bucket_mapping(RGWRados *store, const rgw_user& user_id,
+void check_bad_user_bucket_mapping(RGWRadosStore *store, const rgw_user& user_id,
                                   bool fix)
 {
   RGWUserBuckets user_buckets;
@@ -278,8 +282,8 @@ void check_bad_user_bucket_mapping(RGWRados *store, const rgw_user& user_id,
 
       RGWBucketInfo bucket_info;
       real_time mtime;
-      RGWSysObjectCtx obj_ctx = store->svc.sysobj->init_obj_ctx();
-      int r = store->get_bucket_info(obj_ctx, user_id.tenant, bucket.name, bucket_info, &mtime, null_yield);
+      RGWSysObjectCtx obj_ctx = store->svc()->sysobj->init_obj_ctx();
+      int r = store->getRados()->get_bucket_info(obj_ctx, user_id.tenant, bucket.name, bucket_info, &mtime, null_yield);
       if (r < 0) {
         ldout(store->ctx(), 0) << "could not get bucket info for bucket=" << bucket << dendl;
         continue;
@@ -294,7 +298,7 @@ void check_bad_user_bucket_mapping(RGWRados *store, const rgw_user& user_id,
         cout << "bucket info mismatch: expected " << actual_bucket << " got " << bucket << std::endl;
         if (fix) {
           cout << "fixing" << std::endl;
-          r = store->ctl.bucket->link_bucket(user_id, actual_bucket,
+          r = store->ctl()->bucket->link_bucket(user_id, actual_bucket,
                                              bucket_info.creation_time,
                                             null_yield);
           if (r < 0) {
@@ -313,7 +317,7 @@ static bool bucket_object_check_filter(const string& oid)
   return rgw_obj_key::oid_to_key_in_ns(oid, &key, ns);
 }
 
-int rgw_remove_object(RGWRados *store, const RGWBucketInfo& bucket_info, const rgw_bucket& bucket, rgw_obj_key& key)
+int rgw_remove_object(RGWRadosStore *store, const RGWBucketInfo& bucket_info, const rgw_bucket& bucket, rgw_obj_key& key)
 {
   RGWObjectCtx rctx(store);
 
@@ -323,29 +327,29 @@ int rgw_remove_object(RGWRados *store, const RGWBucketInfo& bucket_info, const r
 
   rgw_obj obj(bucket, key);
 
-  return store->delete_obj(rctx, bucket_info, obj, bucket_info.versioning_status());
+  return store->getRados()->delete_obj(rctx, bucket_info, obj, bucket_info.versioning_status());
 }
 
-int rgw_remove_bucket(RGWRados *store, rgw_bucket& bucket, bool delete_children, optional_yield y)
+int rgw_remove_bucket(RGWRadosStore *store, rgw_bucket& bucket, bool delete_children, optional_yield y)
 {
   int ret;
   map<RGWObjCategory, RGWStorageStats> stats;
   std::vector<rgw_bucket_dir_entry> objs;
   map<string, bool> common_prefixes;
   RGWBucketInfo info;
-  RGWSysObjectCtx obj_ctx = store->svc.sysobj->init_obj_ctx();
+  RGWSysObjectCtx obj_ctx = store->svc()->sysobj->init_obj_ctx();
 
   string bucket_ver, master_ver;
 
-  ret = store->get_bucket_info(obj_ctx, bucket.tenant, bucket.name, info, NULL, null_yield);
+  ret = store->getRados()->get_bucket_info(obj_ctx, bucket.tenant, bucket.name, info, NULL, null_yield);
   if (ret < 0)
     return ret;
 
-  ret = store->get_bucket_stats(info, RGW_NO_SHARD, &bucket_ver, &master_ver, stats, NULL);
+  ret = store->getRados()->get_bucket_stats(info, RGW_NO_SHARD, &bucket_ver, &master_ver, stats, NULL);
   if (ret < 0)
     return ret;
 
-  RGWRados::Bucket target(store, info);
+  RGWRados::Bucket target(store->getRados(), info);
   RGWRados::Bucket::List list_op(&target);
   CephContext *cct = store->ctx();
   int max = 1000;
@@ -382,7 +386,7 @@ int rgw_remove_bucket(RGWRados *store, rgw_bucket& bucket, bool delete_children,
     return ret;
   }
 
-  ret = store->ctl.bucket->sync_user_stats(info.owner, info);
+  ret = store->ctl()->bucket->sync_user_stats(info.owner, info);
   if ( ret < 0) {
      dout(1) << "WARNING: failed sync user stats before bucket delete. ret=" <<  ret << dendl;
   }
@@ -391,14 +395,14 @@ int rgw_remove_bucket(RGWRados *store, rgw_bucket& bucket, bool delete_children,
 
   // if we deleted children above we will force delete, as any that
   // remain is detrius from a prior bug
-  ret = store->delete_bucket(info, objv_tracker, null_yield, !delete_children);
+  ret = store->getRados()->delete_bucket(info, objv_tracker, null_yield, !delete_children);
   if (ret < 0) {
     lderr(store->ctx()) << "ERROR: could not remove bucket " <<
       bucket.name << dendl;
     return ret;
   }
 
-  ret = store->ctl.bucket->unlink_bucket(info.owner, bucket, null_yield, false);
+  ret = store->ctl()->bucket->unlink_bucket(info.owner, bucket, null_yield, false);
   if (ret < 0) {
     lderr(store->ctx()) << "ERROR: unable to remove user bucket information" << dendl;
   }
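
The bucket-removal hunks above settle into a consistent split: data-path work (listing, object and bucket deletion, index operations) is routed through getRados(), while ownership, linkage, and stats go through ctl(). A hedged sketch of that convention, mirroring only the calls visible above (objv_tracker's type is left generic because its declaration sits outside the hunk):

// Hedged sketch of the calling convention adopted above, not code from the commit.
template <typename ObjVTracker>
int remove_bucket_sketch(rgw::sal::RGWRadosStore* store, RGWBucketInfo& info,
                         rgw_bucket& bucket, ObjVTracker& objv_tracker)
{
  // stats sync first; the code above only logs a warning on failure
  int r = store->ctl()->bucket->sync_user_stats(info.owner, info);
  if (r < 0) {
    // warn and continue
  }
  // data-path delete through the wrapped RGWRados
  r = store->getRados()->delete_bucket(info, objv_tracker, null_yield, false);
  if (r < 0)
    return r;
  // metadata/ownership cleanup through the controllers
  return store->ctl()->bucket->unlink_bucket(info.owner, bucket, null_yield, false);
}
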
@@ -429,7 +433,7 @@ static int drain_handles(list<librados::AioCompletion *>& pending)
   return ret;
 }
 
-int rgw_remove_bucket_bypass_gc(RGWRados *store, rgw_bucket& bucket,
+int rgw_remove_bucket_bypass_gc(RGWRadosStore *store, rgw_bucket& bucket,
                                 int concurrent_max, bool keep_index_consistent,
                                 optional_yield y)
 {
@@ -439,16 +443,16 @@ int rgw_remove_bucket_bypass_gc(RGWRados *store, rgw_bucket& bucket,
   map<string, bool> common_prefixes;
   RGWBucketInfo info;
   RGWObjectCtx obj_ctx(store);
-  RGWSysObjectCtx sysobj_ctx = store->svc.sysobj->init_obj_ctx();
+  RGWSysObjectCtx sysobj_ctx = store->svc()->sysobj->init_obj_ctx();
   CephContext *cct = store->ctx();
 
   string bucket_ver, master_ver;
 
-  ret = store->get_bucket_info(sysobj_ctx, bucket.tenant, bucket.name, info, NULL, null_yield);
+  ret = store->getRados()->get_bucket_info(sysobj_ctx, bucket.tenant, bucket.name, info, NULL, null_yield);
   if (ret < 0)
     return ret;
 
-  ret = store->get_bucket_stats(info, RGW_NO_SHARD, &bucket_ver, &master_ver, stats, NULL);
+  ret = store->getRados()->get_bucket_stats(info, RGW_NO_SHARD, &bucket_ver, &master_ver, stats, NULL);
   if (ret < 0)
     return ret;
 
@@ -459,7 +463,7 @@ int rgw_remove_bucket_bypass_gc(RGWRados *store, rgw_bucket& bucket,
     return ret;
   }
 
-  RGWRados::Bucket target(store, info);
+  RGWRados::Bucket target(store->getRados(), info);
   RGWRados::Bucket::List list_op(&target);
 
   list_op.params.list_versions = true;
@@ -482,7 +486,7 @@ int rgw_remove_bucket_bypass_gc(RGWRados *store, rgw_bucket& bucket,
       RGWObjState *astate = NULL;
       rgw_obj obj(bucket, (*it).key);
 
-      ret = store->get_obj_state(&obj_ctx, info, obj, &astate, false, y);
+      ret = store->getRados()->get_obj_state(&obj_ctx, info, obj, &astate, false, y);
       if (ret == -ENOENT) {
         dout(1) << "WARNING: cannot find obj state for obj " << obj.get_oid() << dendl;
         continue;
@@ -497,7 +501,7 @@ int rgw_remove_bucket_bypass_gc(RGWRados *store, rgw_bucket& bucket,
         RGWObjManifest::obj_iterator miter = manifest.obj_begin();
         rgw_obj head_obj = manifest.get_obj();
         rgw_raw_obj raw_head_obj;
-        store->obj_to_raw(info.placement_rule, head_obj, &raw_head_obj);
+        store->getRados()->obj_to_raw(info.placement_rule, head_obj, &raw_head_obj);
 
 
         for (; miter != manifest.obj_end() && max_aio--; ++miter) {
@@ -510,20 +514,20 @@ int rgw_remove_bucket_bypass_gc(RGWRados *store, rgw_bucket& bucket,
             max_aio = concurrent_max;
           }
 
-          rgw_raw_obj last_obj = miter.get_location().get_raw_obj(store);
+          rgw_raw_obj last_obj = miter.get_location().get_raw_obj(store->getRados());
           if (last_obj == raw_head_obj) {
             // have the head obj deleted at the end
             continue;
           }
 
-          ret = store->delete_raw_obj_aio(last_obj, handles);
+          ret = store->getRados()->delete_raw_obj_aio(last_obj, handles);
           if (ret < 0) {
             lderr(store->ctx()) << "ERROR: delete obj aio failed with " << ret << dendl;
             return ret;
           }
         } // for all shadow objs
 
-        ret = store->delete_obj_aio(head_obj, info, astate, handles, keep_index_consistent, null_yield);
+        ret = store->getRados()->delete_obj_aio(head_obj, info, astate, handles, keep_index_consistent, null_yield);
         if (ret < 0) {
           lderr(store->ctx()) << "ERROR: delete obj aio failed with " << ret << dendl;
           return ret;
@@ -547,7 +551,7 @@ int rgw_remove_bucket_bypass_gc(RGWRados *store, rgw_bucket& bucket,
     return ret;
   }
 
-  ret = store->ctl.bucket->sync_user_stats(info.owner, info);
+  ret = store->ctl()->bucket->sync_user_stats(info.owner, info);
   if (ret < 0) {
      dout(1) << "WARNING: failed sync user stats before bucket delete. ret=" <<  ret << dendl;
   }
@@ -557,13 +561,13 @@ int rgw_remove_bucket_bypass_gc(RGWRados *store, rgw_bucket& bucket,
   // this function can only be run if caller wanted children to be
   // deleted, so we can ignore the check for children as any that
   // remain are detritus from a prior bug
-  ret = store->delete_bucket(info, objv_tracker, y, false);
+  ret = store->getRados()->delete_bucket(info, objv_tracker, y, false);
   if (ret < 0) {
     lderr(store->ctx()) << "ERROR: could not remove bucket " << bucket.name << dendl;
     return ret;
   }
 
-  ret = store->ctl.bucket->unlink_bucket(info.owner, bucket, null_yield, false);
+  ret = store->ctl()->bucket->unlink_bucket(info.owner, bucket, null_yield, false);
   if (ret < 0) {
     lderr(store->ctx()) << "ERROR: unable to remove user bucket information" << dendl;
   }
@@ -577,7 +581,7 @@ static void set_err_msg(std::string *sink, std::string msg)
     *sink = msg;
 }
 
-int RGWBucket::init(RGWRados *storage, RGWBucketAdminOpState& op_state,
+int RGWBucket::init(RGWRadosStore *storage, RGWBucketAdminOpState& op_state,
                     optional_yield y, std::string *err_msg,
                     map<string, bufferlist> *pattrs)
 {
@@ -605,7 +609,7 @@ int RGWBucket::init(RGWRados *storage, RGWBucketAdminOpState& op_state,
 
   if (!bucket.name.empty()) {
     ceph::real_time mtime;
-    int r = store->ctl.bucket->read_bucket_info(
+    int r = store->ctl()->bucket->read_bucket_info(
         bucket, &bucket_info, y,
         RGWBucketCtl::BucketInstance::GetParams().set_attrs(pattrs),
         &ep_objv);
@@ -618,7 +622,7 @@ int RGWBucket::init(RGWRados *storage, RGWBucketAdminOpState& op_state,
   }
 
   if (!user_id.empty()) {
-    int r = store->ctl.user->get_info_by_uid(user_id, &user_info, y);
+    int r = store->ctl()->user->get_info_by_uid(user_id, &user_info, y);
     if (r < 0) {
       set_err_msg(err_msg, "failed to fetch user info");
       return r;
@@ -683,7 +687,7 @@ int RGWBucket::link(RGWBucketAdminOpState& op_state, optional_yield y,
     return -EIO;
   }
 
-  auto bucket_ctl = store->ctl.bucket;
+  auto bucket_ctl = store->ctl()->bucket;
   int r = bucket_ctl->unlink_bucket(owner.get_id(), old_bucket, y, false);
   if (r < 0) {
     set_err_msg(err_msg, "could not unlink policy from user " + owner.get_id().to_str());
@@ -726,7 +730,7 @@ int RGWBucket::link(RGWBucketAdminOpState& op_state, optional_yield y,
   rgw_ep_info ep_data{ep, ep_attrs};
 
   /* link to user */
-  r = store->ctl.bucket->link_bucket(user_info.user_id,
+  r = store->ctl()->bucket->link_bucket(user_info.user_id,
                                      bucket_info.bucket,
                                      ceph::real_time(),
                                      y, true, &ep_data);
@@ -760,7 +764,7 @@ int RGWBucket::link(RGWBucketAdminOpState& op_state, optional_yield y,
 int RGWBucket::chown(RGWBucketAdminOpState& op_state, const string& marker,
                      optional_yield y, std::string *err_msg)
 {
-  int ret = store->ctl.bucket->chown(store, bucket_info, user_info.user_id,
+  int ret = store->ctl()->bucket->chown(store, bucket_info, user_info.user_id,
                                      user_info.display_name, marker, y);
   if (ret < 0) {
     set_err_msg(err_msg, "Failed to change object ownership: " + cpp_strerror(-ret));
@@ -778,7 +782,7 @@ int RGWBucket::unlink(RGWBucketAdminOpState& op_state, optional_yield y, std::st
     return -EINVAL;
   }
 
-  int r = store->ctl.bucket->unlink_bucket(user_info.user_id, bucket, y);
+  int r = store->ctl()->bucket->unlink_bucket(user_info.user_id, bucket, y);
   if (r < 0) {
     set_err_msg(err_msg, "error unlinking bucket" + cpp_strerror(-r));
   }
@@ -791,15 +795,15 @@ int RGWBucket::set_quota(RGWBucketAdminOpState& op_state, std::string *err_msg)
   rgw_bucket bucket = op_state.get_bucket();
   RGWBucketInfo bucket_info;
   map<string, bufferlist> attrs;
-  auto obj_ctx = store->svc.sysobj->init_obj_ctx();
-  int r = store->get_bucket_info(obj_ctx, bucket.tenant, bucket.name, bucket_info, NULL, null_yield, &attrs);
+  auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
+  int r = store->getRados()->get_bucket_info(obj_ctx, bucket.tenant, bucket.name, bucket_info, NULL, null_yield, &attrs);
   if (r < 0) {
     set_err_msg(err_msg, "could not get bucket info for bucket=" + bucket.name + ": " + cpp_strerror(-r));
     return r;
   }
 
   bucket_info.quota = op_state.quota;
-  r = store->put_bucket_instance_info(bucket_info, false, real_time(), &attrs);
+  r = store->getRados()->put_bucket_instance_info(bucket_info, false, real_time(), &attrs);
   if (r < 0) {
     set_err_msg(err_msg, "ERROR: failed writing bucket instance info: " + cpp_strerror(-r));
     return r;
@@ -900,14 +904,14 @@ int RGWBucket::check_bad_index_multipart(RGWBucketAdminOpState& op_state,
   map<rgw_obj_index_key, string> all_objs;
 
   RGWBucketInfo bucket_info;
-  auto obj_ctx = store->svc.sysobj->init_obj_ctx();
-  int r = store->get_bucket_instance_info(obj_ctx, bucket, bucket_info, nullptr, nullptr, null_yield);
+  auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
+  int r = store->getRados()->get_bucket_instance_info(obj_ctx, bucket, bucket_info, nullptr, nullptr, null_yield);
   if (r < 0) {
     ldout(store->ctx(), 0) << "ERROR: " << __func__ << "(): get_bucket_instance_info(bucket=" << bucket << ") returned r=" << r << dendl;
     return r;
   }
 
-  RGWRados::Bucket target(store, bucket_info);
+  RGWRados::Bucket target(store->getRados(), bucket_info);
   RGWRados::Bucket::List list_op(&target);
 
   list_op.params.list_versions = true;
@@ -962,7 +966,7 @@ int RGWBucket::check_bad_index_multipart(RGWBucketAdminOpState& op_state,
 
     if (objs_to_unlink.size() > max) {
       if (fix_index) {
-       int r = store->remove_objs_from_index(bucket_info, objs_to_unlink);
+       int r = store->getRados()->remove_objs_from_index(bucket_info, objs_to_unlink);
        if (r < 0) {
          set_err_msg(err_msg, "ERROR: remove_obj_from_index() returned error: " +
                      cpp_strerror(-r));
@@ -977,7 +981,7 @@ int RGWBucket::check_bad_index_multipart(RGWBucketAdminOpState& op_state,
   }
 
   if (fix_index) {
-    int r = store->remove_objs_from_index(bucket_info, objs_to_unlink);
+    int r = store->getRados()->remove_objs_from_index(bucket_info, objs_to_unlink);
     if (r < 0) {
       set_err_msg(err_msg, "ERROR: remove_obj_from_index() returned error: " +
               cpp_strerror(-r));
@@ -1006,7 +1010,7 @@ int RGWBucket::check_object_index(RGWBucketAdminOpState& op_state,
     return -EINVAL;
   }
 
-  store->cls_obj_set_bucket_tag_timeout(bucket_info, BUCKET_TAG_TIMEOUT);
+  store->getRados()->cls_obj_set_bucket_tag_timeout(bucket_info, BUCKET_TAG_TIMEOUT);
 
   string prefix;
   rgw_obj_index_key marker;
@@ -1017,7 +1021,7 @@ int RGWBucket::check_object_index(RGWBucketAdminOpState& op_state,
   while (is_truncated) {
     RGWRados::ent_map_t result;
 
-    int r = store->cls_bucket_list_ordered(bucket_info, RGW_NO_SHARD,
+    int r = store->getRados()->cls_bucket_list_ordered(bucket_info, RGW_NO_SHARD,
                                           marker, prefix, 1000, true,
                                           result, &is_truncated, &marker,
                                            y,
@@ -1034,7 +1038,7 @@ int RGWBucket::check_object_index(RGWBucketAdminOpState& op_state,
 
   formatter->close_section();
 
-  store->cls_obj_set_bucket_tag_timeout(bucket_info, 0);
+  store->getRados()->cls_obj_set_bucket_tag_timeout(bucket_info, 0);
 
   return 0;
 }
@@ -1047,14 +1051,14 @@ int RGWBucket::check_index(RGWBucketAdminOpState& op_state,
 {
   bool fix_index = op_state.will_fix_index();
 
-  int r = store->bucket_check_index(bucket_info, &existing_stats, &calculated_stats);
+  int r = store->getRados()->bucket_check_index(bucket_info, &existing_stats, &calculated_stats);
   if (r < 0) {
     set_err_msg(err_msg, "failed to check index error=" + cpp_strerror(-r));
     return r;
   }
 
   if (fix_index) {
-    r = store->bucket_rebuild_index(bucket_info);
+    r = store->getRados()->bucket_rebuild_index(bucket_info);
     if (r < 0) {
       set_err_msg(err_msg, "failed to rebuild index err=" + cpp_strerror(-r));
       return r;
@@ -1075,12 +1079,12 @@ int RGWBucket::policy_bl_to_stream(bufferlist& bl, ostream& o)
   return 0;
 }
 
-int rgw_object_get_attr(RGWRados* store, const RGWBucketInfo& bucket_info,
+int rgw_object_get_attr(rgw::sal::RGWRadosStore* store, const RGWBucketInfo& bucket_info,
                        const rgw_obj& obj, const char* attr_name,
                        bufferlist& out_bl, optional_yield y)
 {
   RGWObjectCtx obj_ctx(store);
-  RGWRados::Object op_target(store, bucket_info, obj_ctx, obj);
+  RGWRados::Object op_target(store->getRados(), bucket_info, obj_ctx, obj);
   RGWRados::Object::Read rop(&op_target);
 
   return rop.get_attr(attr_name, out_bl, y);
@@ -1090,11 +1094,11 @@ int RGWBucket::get_policy(RGWBucketAdminOpState& op_state, RGWAccessControlPolic
 {
   std::string object_name = op_state.get_object_name();
   rgw_bucket bucket = op_state.get_bucket();
-  auto sysobj_ctx = store->svc.sysobj->init_obj_ctx();
+  auto sysobj_ctx = store->svc()->sysobj->init_obj_ctx();
 
   RGWBucketInfo bucket_info;
   map<string, bufferlist> attrs;
-  int ret = store->get_bucket_info(sysobj_ctx, bucket.tenant, bucket.name, bucket_info, NULL, null_yield, &attrs);
+  int ret = store->getRados()->get_bucket_info(sysobj_ctx, bucket.tenant, bucket.name, bucket_info, NULL, null_yield, &attrs);
   if (ret < 0) {
     return ret;
   }
@@ -1129,7 +1133,7 @@ int RGWBucket::get_policy(RGWBucketAdminOpState& op_state, RGWAccessControlPolic
 }
 
 
-int RGWBucketAdminOp::get_policy(RGWRados *store, RGWBucketAdminOpState& op_state,
+int RGWBucketAdminOp::get_policy(RGWRadosStore *store, RGWBucketAdminOpState& op_state,
                   RGWAccessControlPolicy& policy)
 {
   RGWBucket bucket;
@@ -1148,7 +1152,7 @@ int RGWBucketAdminOp::get_policy(RGWRados *store, RGWBucketAdminOpState& op_stat
 /* Wrappers to facilitate RESTful interface */
 
 
-int RGWBucketAdminOp::get_policy(RGWRados *store, RGWBucketAdminOpState& op_state,
+int RGWBucketAdminOp::get_policy(RGWRadosStore *store, RGWBucketAdminOpState& op_state,
                   RGWFormatterFlusher& flusher)
 {
   RGWAccessControlPolicy policy(store->ctx());
@@ -1170,7 +1174,7 @@ int RGWBucketAdminOp::get_policy(RGWRados *store, RGWBucketAdminOpState& op_stat
   return 0;
 }
 
-int RGWBucketAdminOp::dump_s3_policy(RGWRados *store, RGWBucketAdminOpState& op_state,
+int RGWBucketAdminOp::dump_s3_policy(RGWRadosStore *store, RGWBucketAdminOpState& op_state,
                   ostream& os)
 {
   RGWAccessControlPolicy_S3 policy(store->ctx());
@@ -1184,7 +1188,7 @@ int RGWBucketAdminOp::dump_s3_policy(RGWRados *store, RGWBucketAdminOpState& op_
   return 0;
 }
 
-int RGWBucketAdminOp::unlink(RGWRados *store, RGWBucketAdminOpState& op_state)
+int RGWBucketAdminOp::unlink(RGWRadosStore *store, RGWBucketAdminOpState& op_state)
 {
   RGWBucket bucket;
 
@@ -1195,7 +1199,7 @@ int RGWBucketAdminOp::unlink(RGWRados *store, RGWBucketAdminOpState& op_state)
   return bucket.unlink(op_state, null_yield);
 }
 
-int RGWBucketAdminOp::link(RGWRados *store, RGWBucketAdminOpState& op_state, string *err)
+int RGWBucketAdminOp::link(RGWRadosStore *store, RGWBucketAdminOpState& op_state, string *err)
 {
   RGWBucket bucket;
   map<string, bufferlist> attrs;
@@ -1208,7 +1212,7 @@ int RGWBucketAdminOp::link(RGWRados *store, RGWBucketAdminOpState& op_state, str
 
 }
 
-int RGWBucketAdminOp::chown(RGWRados *store, RGWBucketAdminOpState& op_state, const string& marker, string *err)
+int RGWBucketAdminOp::chown(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, const string& marker, string *err)
 {
   RGWBucket bucket;
   map<string, bufferlist> attrs;
@@ -1225,7 +1229,7 @@ int RGWBucketAdminOp::chown(RGWRados *store, RGWBucketAdminOpState& op_state, co
 
 }
 
-int RGWBucketAdminOp::check_index(RGWRados *store, RGWBucketAdminOpState& op_state,
+int RGWBucketAdminOp::check_index(RGWRadosStore *store, RGWBucketAdminOpState& op_state,
                   RGWFormatterFlusher& flusher, optional_yield y)
 {
   int ret;
@@ -1260,7 +1264,7 @@ int RGWBucketAdminOp::check_index(RGWRados *store, RGWBucketAdminOpState& op_sta
   return 0;
 }
 
-int RGWBucketAdminOp::remove_bucket(RGWRados *store, RGWBucketAdminOpState& op_state,
+int RGWBucketAdminOp::remove_bucket(RGWRadosStore *store, RGWBucketAdminOpState& op_state,
                                     optional_yield y, bool bypass_gc, bool keep_index_consistent)
 {
   RGWBucket bucket;
@@ -1277,7 +1281,7 @@ int RGWBucketAdminOp::remove_bucket(RGWRados *store, RGWBucketAdminOpState& op_s
   return ret;
 }
 
-int RGWBucketAdminOp::remove_object(RGWRados *store, RGWBucketAdminOpState& op_state)
+int RGWBucketAdminOp::remove_object(RGWRadosStore *store, RGWBucketAdminOpState& op_state)
 {
   RGWBucket bucket;
 
@@ -1288,15 +1292,15 @@ int RGWBucketAdminOp::remove_object(RGWRados *store, RGWBucketAdminOpState& op_s
   return bucket.remove_object(op_state);
 }
 
-static int bucket_stats(RGWRados *store, const std::string& tenant_name, std::string&  bucket_name, Formatter *formatter)
+static int bucket_stats(RGWRadosStore *store, const std::string& tenant_name, std::string&  bucket_name, Formatter *formatter)
 {
   RGWBucketInfo bucket_info;
   map<RGWObjCategory, RGWStorageStats> stats;
   map<string, bufferlist> attrs;
 
   real_time mtime;
-  auto obj_ctx = store->svc.sysobj->init_obj_ctx();
-  int r = store->get_bucket_info(obj_ctx, tenant_name, bucket_name, bucket_info, &mtime, null_yield, &attrs);
+  auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
+  int r = store->getRados()->get_bucket_info(obj_ctx, tenant_name, bucket_name, bucket_info, &mtime, null_yield, &attrs);
   if (r < 0)
     return r;
 
@@ -1304,7 +1308,7 @@ static int bucket_stats(RGWRados *store, const std::string& tenant_name, std::st
 
   string bucket_ver, master_ver;
   string max_marker;
-  int ret = store->get_bucket_stats(bucket_info, RGW_NO_SHARD, &bucket_ver, &master_ver, stats, &max_marker);
+  int ret = store->getRados()->get_bucket_stats(bucket_info, RGW_NO_SHARD, &bucket_ver, &master_ver, stats, &max_marker);
   if (ret < 0) {
     cerr << "error getting bucket stats ret=" << ret << std::endl;
     return ret;
@@ -1349,7 +1353,7 @@ static int bucket_stats(RGWRados *store, const std::string& tenant_name, std::st
   return 0;
 }
 
-int RGWBucketAdminOp::limit_check(RGWRados *store,
+int RGWBucketAdminOp::limit_check(RGWRadosStore *store,
                                  RGWBucketAdminOpState& op_state,
                                  const std::list<std::string>& user_ids,
                                  RGWFormatterFlusher& flusher,
@@ -1398,13 +1402,13 @@ int RGWBucketAdminOp::limit_check(RGWRados *store,
 
        /* need info for num_shards */
        RGWBucketInfo info;
-       auto obj_ctx = store->svc.sysobj->init_obj_ctx();
+       auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
 
        marker = bucket.name; /* Casey's location for marker update,
                               * as we may now not reach the end of
                               * the loop body */
 
-       ret = store->get_bucket_info(obj_ctx, bucket.tenant, bucket.name,
+       ret = store->getRados()->get_bucket_info(obj_ctx, bucket.tenant, bucket.name,
                                     info, nullptr, null_yield);
        if (ret < 0)
          continue;
@@ -1412,7 +1416,7 @@ int RGWBucketAdminOp::limit_check(RGWRados *store,
        /* need stats for num_entries */
        string bucket_ver, master_ver;
        std::map<RGWObjCategory, RGWStorageStats> stats;
-       ret = store->get_bucket_stats(info, RGW_NO_SHARD, &bucket_ver,
+       ret = store->getRados()->get_bucket_stats(info, RGW_NO_SHARD, &bucket_ver,
                                      &master_ver, stats, nullptr);
 
        if (ret < 0)
@@ -1471,7 +1475,7 @@ int RGWBucketAdminOp::limit_check(RGWRados *store,
   return ret;
 } /* RGWBucketAdminOp::limit_check */
 
-int RGWBucketAdminOp::info(RGWRados *store, RGWBucketAdminOpState& op_state,
+int RGWBucketAdminOp::info(RGWRadosStore *store, RGWBucketAdminOpState& op_state,
                   RGWFormatterFlusher& flusher)
 {
   int ret = 0;
@@ -1530,11 +1534,11 @@ int RGWBucketAdminOp::info(RGWRados *store, RGWBucketAdminOpState& op_state,
     bool truncated = true;
 
     formatter->open_array_section("buckets");
-    ret = store->ctl.meta.mgr->list_keys_init("bucket", &handle);
+    ret = store->ctl()->meta.mgr->list_keys_init("bucket", &handle);
     while (ret == 0 && truncated) {
       std::list<std::string> buckets;
       const int max_keys = 1000;
-      ret = store->ctl.meta.mgr->list_keys_next(handle, max_keys, buckets,
+      ret = store->ctl()->meta.mgr->list_keys_next(handle, max_keys, buckets,
                                             &truncated);
       for (auto& bucket_name : buckets) {
         if (show_stats)
@@ -1552,7 +1556,7 @@ int RGWBucketAdminOp::info(RGWRados *store, RGWBucketAdminOpState& op_state,
   return 0;
 }
 
-int RGWBucketAdminOp::set_quota(RGWRados *store, RGWBucketAdminOpState& op_state)
+int RGWBucketAdminOp::set_quota(RGWRadosStore *store, RGWBucketAdminOpState& op_state)
 {
   RGWBucket bucket;
 
@@ -1562,11 +1566,11 @@ int RGWBucketAdminOp::set_quota(RGWRados *store, RGWBucketAdminOpState& op_state
   return bucket.set_quota(op_state);
 }
 
-static int purge_bucket_instance(RGWRados *store, const RGWBucketInfo& bucket_info)
+static int purge_bucket_instance(RGWRadosStore *store, const RGWBucketInfo& bucket_info)
 {
   int max_shards = (bucket_info.num_shards > 0 ? bucket_info.num_shards : 1);
   for (int i = 0; i < max_shards; i++) {
-    RGWRados::BucketShard bs(store);
+    RGWRados::BucketShard bs(store->getRados());
     int shard_id = (bucket_info.num_shards > 0  ? i : -1);
     int ret = bs.init(bucket_info.bucket, shard_id, nullptr);
     if (ret < 0) {
@@ -1574,7 +1578,7 @@ static int purge_bucket_instance(RGWRados *store, const RGWBucketInfo& bucket_in
            << "): " << cpp_strerror(-ret) << std::endl;
       return ret;
     }
-    ret = store->bi_remove(bs);
+    ret = store->getRados()->bi_remove(bs);
     if (ret < 0) {
       cerr << "ERROR: failed to remove bucket index object: "
            << cpp_strerror(-ret) << std::endl;
@@ -1593,19 +1597,19 @@ inline auto split_tenant(const std::string& bucket_name){
 }
 
 using bucket_instance_ls = std::vector<RGWBucketInfo>;
-void get_stale_instances(RGWRados *store, const std::string& bucket_name,
+void get_stale_instances(RGWRadosStore *store, const std::string& bucket_name,
                          const vector<std::string>& lst,
                          bucket_instance_ls& stale_instances)
 {
 
-  auto obj_ctx = store->svc.sysobj->init_obj_ctx();
+  auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
 
   bucket_instance_ls other_instances;
 // first iterate over the entries, and pick up the done buckets; these
 // are guaranteed to be stale
   for (const auto& bucket_instance : lst){
     RGWBucketInfo binfo;
-    int r = store->get_bucket_instance_info(obj_ctx, bucket_instance,
+    int r = store->getRados()->get_bucket_instance_info(obj_ctx, bucket_instance,
                                             binfo, nullptr,nullptr, null_yield);
     if (r < 0){
       // this can only happen if someone deletes us right when we're processing
@@ -1624,7 +1628,7 @@ void get_stale_instances(RGWRados *store, const std::string& bucket_name,
   // all the instances
   auto [tenant, bucket] = split_tenant(bucket_name);
   RGWBucketInfo cur_bucket_info;
-  int r = store->get_bucket_info(obj_ctx, tenant, bucket, cur_bucket_info, nullptr, null_yield);
+  int r = store->getRados()->get_bucket_info(obj_ctx, tenant, bucket, cur_bucket_info, nullptr, null_yield);
   if (r < 0) {
     if (r == -ENOENT) {
       // bucket doesn't exist, everything is stale then
@@ -1679,18 +1683,18 @@ void get_stale_instances(RGWRados *store, const std::string& bucket_name,
   return;
 }
 
-static int process_stale_instances(RGWRados *store, RGWBucketAdminOpState& op_state,
+static int process_stale_instances(RGWRadosStore *store, RGWBucketAdminOpState& op_state,
                                    RGWFormatterFlusher& flusher,
                                    std::function<void(const bucket_instance_ls&,
                                                       Formatter *,
-                                                      RGWRados*)> process_f)
+                                                      RGWRadosStore*)> process_f)
 {
   std::string marker;
   void *handle;
   Formatter *formatter = flusher.get_formatter();
   static constexpr auto default_max_keys = 1000;
 
-  int ret = store->ctl.meta.mgr->list_keys_init("bucket.instance", marker, &handle);
+  int ret = store->ctl()->meta.mgr->list_keys_init("bucket.instance", marker, &handle);
   if (ret < 0) {
     cerr << "ERROR: can't get key: " << cpp_strerror(-ret) << std::endl;
     return ret;
@@ -1703,7 +1707,7 @@ static int process_stale_instances(RGWRados *store, RGWBucketAdminOpState& op_st
   do {
     list<std::string> keys;
 
-    ret = store->ctl.meta.mgr->list_keys_next(handle, default_max_keys, keys, &truncated);
+    ret = store->ctl()->meta.mgr->list_keys_next(handle, default_max_keys, keys, &truncated);
     if (ret < 0 && ret != -ENOENT) {
       cerr << "ERROR: lists_keys_next(): " << cpp_strerror(-ret) << std::endl;
       return ret;
@@ -1729,13 +1733,13 @@ static int process_stale_instances(RGWRados *store, RGWBucketAdminOpState& op_st
   return 0;
 }
 
-int RGWBucketAdminOp::list_stale_instances(RGWRados *store,
+int RGWBucketAdminOp::list_stale_instances(RGWRadosStore *store,
                                            RGWBucketAdminOpState& op_state,
                                            RGWFormatterFlusher& flusher)
 {
   auto process_f = [](const bucket_instance_ls& lst,
                       Formatter *formatter,
-                      RGWRados*){
+                      RGWRadosStore*){
                      for (const auto& binfo: lst)
                        formatter->dump_string("key", binfo.bucket.get_key());
                    };
@@ -1743,18 +1747,18 @@ int RGWBucketAdminOp::list_stale_instances(RGWRados *store,
 }
 
 
-int RGWBucketAdminOp::clear_stale_instances(RGWRados *store,
+int RGWBucketAdminOp::clear_stale_instances(RGWRadosStore *store,
                                             RGWBucketAdminOpState& op_state,
                                             RGWFormatterFlusher& flusher)
 {
   auto process_f = [](const bucket_instance_ls& lst,
                       Formatter *formatter,
-                      RGWRados *store){
+                      RGWRadosStore *store){
                      for (const auto &binfo: lst) {
                        int ret = purge_bucket_instance(store, binfo);
                        if (ret == 0){
                          auto md_key = "bucket.instance:" + binfo.bucket.get_key();
-                         ret = store->ctl.meta.mgr->remove(md_key, null_yield);
+                         ret = store->ctl()->meta.mgr->remove(md_key, null_yield);
                        }
                        formatter->open_object_section("delete_status");
                        formatter->dump_string("bucket_instance", binfo.bucket.get_key());
@@ -1766,14 +1770,14 @@ int RGWBucketAdminOp::clear_stale_instances(RGWRados *store,
   return process_stale_instances(store, op_state, flusher, process_f);
 }
 
-static int fix_single_bucket_lc(RGWRados *store,
+static int fix_single_bucket_lc(rgw::sal::RGWRadosStore *store,
                                 const std::string& tenant_name,
                                 const std::string& bucket_name)
 {
-  auto obj_ctx = store->svc.sysobj->init_obj_ctx();
+  auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
   RGWBucketInfo bucket_info;
   map <std::string, bufferlist> bucket_attrs;
-  int ret = store->get_bucket_info(obj_ctx, tenant_name, bucket_name,
+  int ret = store->getRados()->get_bucket_info(obj_ctx, tenant_name, bucket_name,
                                    bucket_info, nullptr, null_yield, &bucket_attrs);
   if (ret < 0) {
     // TODO: Should we handle the case where the bucket could've been removed between
@@ -1796,7 +1800,8 @@ static void format_lc_status(Formatter* formatter,
   formatter->close_section(); // bucket_entry
 }
 
-static void process_single_lc_entry(RGWRados *store, Formatter *formatter,
+static void process_single_lc_entry(rgw::sal::RGWRadosStore *store,
+                                   Formatter *formatter,
                                     const std::string& tenant_name,
                                     const std::string& bucket_name)
 {
@@ -1804,7 +1809,7 @@ static void process_single_lc_entry(RGWRados *store, Formatter *formatter,
   format_lc_status(formatter, tenant_name, bucket_name, -ret);
 }
 
-int RGWBucketAdminOp::fix_lc_shards(RGWRados *store,
+int RGWBucketAdminOp::fix_lc_shards(rgw::sal::RGWRadosStore *store,
                                     RGWBucketAdminOpState& op_state,
                                     RGWFormatterFlusher& flusher)
 {
@@ -1820,7 +1825,7 @@ int RGWBucketAdminOp::fix_lc_shards(RGWRados *store,
     process_single_lc_entry(store, formatter, user_id.tenant, bucket_name);
     formatter->flush(cout);
   } else {
-    int ret = store->ctl.meta.mgr->list_keys_init("bucket", marker, &handle);
+    int ret = store->ctl()->meta.mgr->list_keys_init("bucket", marker, &handle);
     if (ret < 0) {
       std::cerr << "ERROR: can't get key: " << cpp_strerror(-ret) << std::endl;
       return ret;
@@ -1829,13 +1834,13 @@ int RGWBucketAdminOp::fix_lc_shards(RGWRados *store,
     {
       formatter->open_array_section("lc_fix_status");
       auto sg = make_scope_guard([&store, &handle, &formatter](){
-                                   store->ctl.meta.mgr->list_keys_complete(handle);
+                                   store->ctl()->meta.mgr->list_keys_complete(handle);
                                    formatter->close_section(); // lc_fix_status
                                    formatter->flush(cout);
                                  });
       do {
         list<std::string> keys;
-        ret = store->ctl.meta.mgr->list_keys_next(handle, default_max_keys, keys, &truncated);
+        ret = store->ctl()->meta.mgr->list_keys_next(handle, default_max_keys, keys, &truncated);
         if (ret < 0 && ret != -ENOENT) {
           std::cerr << "ERROR: lists_keys_next(): " << cpp_strerror(-ret) << std::endl;
           return ret;
@@ -1854,7 +1859,8 @@ int RGWBucketAdminOp::fix_lc_shards(RGWRados *store,
 
 }
 
-static bool has_object_expired(RGWRados *store, const RGWBucketInfo& bucket_info,
+static bool has_object_expired(rgw::sal::RGWRadosStore *store,
+                              const RGWBucketInfo& bucket_info,
                               const rgw_obj_key& key, utime_t& delete_at)
 {
   rgw_obj obj(bucket_info.bucket, key);
@@ -1877,7 +1883,8 @@ static bool has_object_expired(RGWRados *store, const RGWBucketInfo& bucket_info
   return false;
 }
 
-static int fix_bucket_obj_expiry(RGWRados *store, const RGWBucketInfo& bucket_info,
+static int fix_bucket_obj_expiry(rgw::sal::RGWRadosStore *store,
+                                const RGWBucketInfo& bucket_info,
                                 RGWFormatterFlusher& flusher, bool dry_run)
 {
   if (bucket_info.bucket.bucket_id == bucket_info.bucket.marker) {
@@ -1892,7 +1899,7 @@ static int fix_bucket_obj_expiry(RGWRados *store, const RGWBucketInfo& bucket_in
                               formatter->flush(std::cout);
                             });
 
-  RGWRados::Bucket target(store, bucket_info);
+  RGWRados::Bucket target(store->getRados(), bucket_info);
   RGWRados::Bucket::List list_op(&target);
 
   list_op.params.list_versions = bucket_info.versioned();
@@ -1930,7 +1937,8 @@ static int fix_bucket_obj_expiry(RGWRados *store, const RGWBucketInfo& bucket_in
   return 0;
 }
 
-int RGWBucketAdminOp::fix_obj_expiry(RGWRados *store, RGWBucketAdminOpState& op_state,
+int RGWBucketAdminOp::fix_obj_expiry(rgw::sal::RGWRadosStore *store,
+                                    RGWBucketAdminOpState& op_state,
                                     RGWFormatterFlusher& flusher, bool dry_run)
 {
   RGWBucket admin_bucket;
@@ -3478,7 +3486,7 @@ int RGWBucketCtl::set_acl(ACLOwner& owner, rgw_bucket& bucket,
 }
 
 // TODO: remove RGWRados dependency for bucket listing
-int RGWBucketCtl::chown(RGWRados *store, RGWBucketInfo& bucket_info,
+int RGWBucketCtl::chown(rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket_info,
                         const rgw_user& user_id, const std::string& display_name,
                         const std::string& marker, optional_yield y)
 {
@@ -3486,7 +3494,7 @@ int RGWBucketCtl::chown(RGWRados *store, RGWBucketInfo& bucket_info,
   std::vector<rgw_bucket_dir_entry> objs;
   map<string, bool> common_prefixes;
 
-  RGWRados::Bucket target(store, bucket_info);
+  RGWRados::Bucket target(store->getRados(), bucket_info);
   RGWRados::Bucket::List list_op(&target);
 
   list_op.params.list_versions = true;
@@ -3513,7 +3521,7 @@ int RGWBucketCtl::chown(RGWRados *store, RGWBucketInfo& bucket_info,
     for (const auto& obj : objs) {
 
       rgw_obj r_obj(bucket_info.bucket, obj.key);
-      RGWRados::Object op_target(store, bucket_info, obj_ctx, r_obj);
+      RGWRados::Object op_target(store->getRados(), bucket_info, obj_ctx, r_obj);
       RGWRados::Object::Read read_op(&op_target);
 
       map<string, bufferlist> attrs;
@@ -3559,7 +3567,7 @@ int RGWBucketCtl::chown(RGWRados *store, RGWBucketInfo& bucket_info,
         encode(policy, bl);
 
         obj_ctx.set_atomic(r_obj);
-        ret = store->set_attr(&obj_ctx, bucket_info, r_obj, RGW_ATTR_ACL, bl);
+        ret = store->getRados()->set_attr(&obj_ctx, bucket_info, r_obj, RGW_ATTR_ACL, bl);
         if (ret < 0) {
           ldout(store->ctx(), 0) << "ERROR: modify attr failed " << cpp_strerror(-ret) << dendl;
           return ret;
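
The rgw_bucket.cc hunks above stop touching RGWRados directly and instead go through the new wrapper: store->getRados() for the raw RADOS layer, store->svc() for the services (svc()->sysobj and friends), store->ctl() for the controllers (ctl()->meta.mgr), and store->ctx() as before. rgw_sal.h is a new file in this commit and its body is not reproduced on this page, so the sketch below is only an illustration of the accessor surface these call sites imply; the member name, the setRados() hook and the exact RGWServices/RGWCtl return types are assumptions inferred from usage, not copied from the header.

// Illustrative sketch inferred from the call sites in this diff -- not the actual rgw_sal.h.
namespace rgw { namespace sal {

class RGWRadosStore {
  RGWRados *rados{nullptr};                    // wrapped legacy store (assumed member name)

public:
  void setRados(RGWRados *r) { rados = r; }    // assumed initialization hook
  RGWRados *getRados() { return rados; }       // e.g. store->getRados()->get_bucket_info(...)
  RGWServices *svc() { return &rados->svc; }   // e.g. store->svc()->sysobj->init_obj_ctx()
  RGWCtl *ctl() { return &rados->ctl; }        // e.g. store->ctl()->meta.mgr->list_keys_init(...)
  CephContext *ctx() { return rados->ctx(); }  // spelling unchanged at the call sites
};

} } // namespace rgw::sal
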
index 6179776c8a32029598d9be43a31bd3d11c71b56b..aabccafa544d70ebdc8c03878e7122065d77abf8 100644 (file)
@@ -26,6 +26,7 @@ class RGWSI_Meta;
 class RGWBucketMetadataHandler;
 class RGWBucketInstanceMetadataHandler;
 class RGWUserCtl;
+namespace rgw { namespace sal { class RGWRadosStore; } }
 
 extern int rgw_bucket_parse_bucket_instance(const string& bucket_instance, string *bucket_name, string *bucket_id, int *shard_id);
 extern int rgw_bucket_parse_bucket_key(CephContext *cct, const string& key,
@@ -208,7 +209,7 @@ public:
  * Get all the buckets owned by a user and fill up an RGWUserBuckets with them.
  * Returns: 0 on success, -ERR# on failure.
  */
-extern int rgw_read_user_buckets(RGWRados *store,
+extern int rgw_read_user_buckets(rgw::sal::RGWRadosStore *store,
                                  const rgw_user& user_id,
                                  RGWUserBuckets& buckets,
                                  const string& marker,
@@ -218,15 +219,15 @@ extern int rgw_read_user_buckets(RGWRados *store,
                                 bool* is_truncated,
                                  uint64_t default_amount = 1000);
 
-extern int rgw_remove_object(RGWRados *store, const RGWBucketInfo& bucket_info, const rgw_bucket& bucket, rgw_obj_key& key);
-extern int rgw_remove_bucket(RGWRados *store, rgw_bucket& bucket, bool delete_children, optional_yield y);
-extern int rgw_remove_bucket_bypass_gc(RGWRados *store, rgw_bucket& bucket, int concurrent_max, optional_yield y);
+extern int rgw_remove_object(rgw::sal::RGWRadosStore *store, const RGWBucketInfo& bucket_info, const rgw_bucket& bucket, rgw_obj_key& key);
+extern int rgw_remove_bucket(rgw::sal::RGWRadosStore *store, rgw_bucket& bucket, bool delete_children, optional_yield y);
+extern int rgw_remove_bucket_bypass_gc(rgw::sal::RGWRadosStore *store, rgw_bucket& bucket, int concurrent_max, optional_yield y);
 
-extern int rgw_object_get_attr(RGWRados* store, const RGWBucketInfo& bucket_info,
+extern int rgw_object_get_attr(rgw::sal::RGWRadosStore* store, const RGWBucketInfo& bucket_info,
                                const rgw_obj& obj, const char* attr_name,
                                bufferlist& out_bl, optional_yield y);
 
-extern void check_bad_user_bucket_mapping(RGWRados *store, const rgw_user& user_id, bool fix);
+extern void check_bad_user_bucket_mapping(rgw::sal::RGWRadosStore *store, const rgw_user& user_id, bool fix);
 
 struct RGWBucketAdminOpState {
   rgw_user uid;
@@ -310,7 +311,7 @@ struct RGWBucketAdminOpState {
 class RGWBucket
 {
   RGWUserBuckets buckets;
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   RGWAccessHandle handle;
 
   RGWUserInfo user_info;
@@ -323,7 +324,7 @@ class RGWBucket
 
 public:
   RGWBucket() : store(NULL), handle(NULL), failure(false) {}
-  int init(RGWRados *storage, RGWBucketAdminOpState& op_state, optional_yield y,
+  int init(rgw::sal::RGWRadosStore *storage, RGWBucketAdminOpState& op_state, optional_yield y,
               std::string *err_msg = NULL, map<string, bufferlist> *pattrs = NULL);
 
   int check_bad_index_multipart(RGWBucketAdminOpState& op_state,
@@ -359,37 +360,37 @@ public:
 class RGWBucketAdminOp
 {
 public:
-  static int get_policy(RGWRados *store, RGWBucketAdminOpState& op_state,
+  static int get_policy(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state,
                   RGWFormatterFlusher& flusher);
-  static int get_policy(RGWRados *store, RGWBucketAdminOpState& op_state,
+  static int get_policy(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state,
                   RGWAccessControlPolicy& policy);
-  static int dump_s3_policy(RGWRados *store, RGWBucketAdminOpState& op_state,
+  static int dump_s3_policy(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state,
                   ostream& os);
 
-  static int unlink(RGWRados *store, RGWBucketAdminOpState& op_state);
-  static int link(RGWRados *store, RGWBucketAdminOpState& op_state, string *err_msg = NULL);
-  static int chown(RGWRados *store, RGWBucketAdminOpState& op_state, const string& marker, string *err_msg = NULL);
+  static int unlink(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state);
+  static int link(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, string *err_msg = NULL);
+  static int chown(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, const string& marker, string *err_msg = NULL);
 
-  static int check_index(RGWRados *store, RGWBucketAdminOpState& op_state,
+  static int check_index(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state,
                   RGWFormatterFlusher& flusher, optional_yield y);
 
-  static int remove_bucket(RGWRados *store, RGWBucketAdminOpState& op_state, optional_yield y, bool bypass_gc = false, bool keep_index_consistent = true);
-  static int remove_object(RGWRados *store, RGWBucketAdminOpState& op_state);
-  static int info(RGWRados *store, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher);
-  static int limit_check(RGWRados *store, RGWBucketAdminOpState& op_state,
+  static int remove_bucket(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, optional_yield y, bool bypass_gc = false, bool keep_index_consistent = true);
+  static int remove_object(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state);
+  static int info(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher);
+  static int limit_check(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state,
                         const std::list<std::string>& user_ids,
                         RGWFormatterFlusher& flusher,
                         bool warnings_only = false);
-  static int set_quota(RGWRados *store, RGWBucketAdminOpState& op_state);
+  static int set_quota(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state);
 
-  static int list_stale_instances(RGWRados *store, RGWBucketAdminOpState& op_state,
+  static int list_stale_instances(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state,
                                  RGWFormatterFlusher& flusher);
 
-  static int clear_stale_instances(RGWRados *store, RGWBucketAdminOpState& op_state,
+  static int clear_stale_instances(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state,
                                   RGWFormatterFlusher& flusher);
-  static int fix_lc_shards(RGWRados *store, RGWBucketAdminOpState& op_state,
+  static int fix_lc_shards(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state,
                            RGWFormatterFlusher& flusher);
-  static int fix_obj_expiry(RGWRados *store, RGWBucketAdminOpState& op_state,
+  static int fix_obj_expiry(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state,
                            RGWFormatterFlusher& flusher, bool dry_run = false);
 };
 
@@ -830,7 +831,7 @@ public:
                    optional_yield y,
                     bool update_entrypoint = true);
 
-  int chown(RGWRados *store, RGWBucketInfo& bucket_info,
+  int chown(rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket_info,
             const rgw_user& user_id, const std::string& display_name,
             const std::string& marker, optional_yield y);
 
index b2cb2b5ff6cc106c6bb29a5e96367986d8c1b0d7..b27de40c354c8ef4a2fbf3a55f2c4f7b474477f8 100644 (file)
@@ -65,7 +65,7 @@ int RGWCivetWebFrontend::process(struct mg_connection*  const conn)
                                 &cw_client))));
   RGWRestfulIO client_io(dout_context, &real_client_io);
 
-  RGWRequest req(env.store->get_new_req_id());
+  RGWRequest req(env.store->getRados()->get_new_req_id());
   int http_ret = 0;
   //assert (scheduler != nullptr);
   int ret = process_request(env.store, env.rest, &req, env.uri_prefix,
index 11494b975b3333d0f66c7383251abc8a005194f9..42e0e20a527785b1ad048bf30571e42299af740b 100644 (file)
@@ -2,7 +2,7 @@
 // vim: ts=8 sw=2 smarttab
 
 #include "include/compat.h"
-#include "rgw_rados.h"
+#include "rgw_sal.h"
 #include "rgw_zone.h"
 #include "rgw_coroutine.h"
 #include "rgw_cr_rados.h"
@@ -181,7 +181,7 @@ RGWAsyncPutSystemObjAttrs::RGWAsyncPutSystemObjAttrs(RGWCoroutine *caller, RGWAi
 }
 
 
-RGWOmapAppend::RGWOmapAppend(RGWAsyncRadosProcessor *_async_rados, RGWRados *_store, const rgw_raw_obj& _obj,
+RGWOmapAppend::RGWOmapAppend(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RGWRadosStore *_store, const rgw_raw_obj& _obj,
                              uint64_t _window_size)
                       : RGWConsumerCR<string>(_store->ctx()), async_rados(_async_rados),
                         store(_store), obj(_obj), going_down(false), num_pending_entries(0), window_size(_window_size), total_entries(0)
@@ -191,7 +191,7 @@ RGWOmapAppend::RGWOmapAppend(RGWAsyncRadosProcessor *_async_rados, RGWRados *_st
 int RGWAsyncLockSystemObj::_send_request()
 {
   rgw_rados_ref ref;
-  int r = store->get_raw_obj_ref(obj, &ref);
+  int r = store->getRados()->get_raw_obj_ref(obj, &ref);
   if (r < 0) {
     lderr(store->ctx()) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
     return r;
@@ -206,7 +206,7 @@ int RGWAsyncLockSystemObj::_send_request()
   return l.lock_exclusive(&ref.pool.ioctx(), ref.obj.oid);
 }
 
-RGWAsyncLockSystemObj::RGWAsyncLockSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWRados *_store,
+RGWAsyncLockSystemObj::RGWAsyncLockSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *_store,
                       RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj,
                        const string& _name, const string& _cookie, uint32_t _duration_secs) : RGWAsyncRadosRequest(caller, cn), store(_store),
                                                               obj(_obj),
@@ -219,7 +219,7 @@ RGWAsyncLockSystemObj::RGWAsyncLockSystemObj(RGWCoroutine *caller, RGWAioComplet
 int RGWAsyncUnlockSystemObj::_send_request()
 {
   rgw_rados_ref ref;
-  int r = store->get_raw_obj_ref(obj, &ref);
+  int r = store->getRados()->get_raw_obj_ref(obj, &ref);
   if (r < 0) {
     lderr(store->ctx()) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
     return r;
@@ -232,7 +232,7 @@ int RGWAsyncUnlockSystemObj::_send_request()
   return l.unlock(&ref.pool.ioctx(), ref.obj.oid);
 }
 
-RGWAsyncUnlockSystemObj::RGWAsyncUnlockSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWRados *_store,
+RGWAsyncUnlockSystemObj::RGWAsyncUnlockSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *_store,
                                                  RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj,
                                                  const string& _name, const string& _cookie) : RGWAsyncRadosRequest(caller, cn), store(_store),
   obj(_obj),
@@ -240,7 +240,7 @@ RGWAsyncUnlockSystemObj::RGWAsyncUnlockSystemObj(RGWCoroutine *caller, RGWAioCom
 {
 }
 
-RGWRadosSetOmapKeysCR::RGWRadosSetOmapKeysCR(RGWRados *_store,
+RGWRadosSetOmapKeysCR::RGWRadosSetOmapKeysCR(rgw::sal::RGWRadosStore *_store,
                       const rgw_raw_obj& _obj,
                       map<string, bufferlist>& _entries) : RGWSimpleCoroutine(_store->ctx()),
                                                 store(_store),
@@ -260,7 +260,7 @@ RGWRadosSetOmapKeysCR::RGWRadosSetOmapKeysCR(RGWRados *_store,
 
 int RGWRadosSetOmapKeysCR::send_request()
 {
-  int r = store->get_raw_obj_ref(obj, &ref);
+  int r = store->getRados()->get_raw_obj_ref(obj, &ref);
   if (r < 0) {
     lderr(store->ctx()) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
     return r;
@@ -284,7 +284,7 @@ int RGWRadosSetOmapKeysCR::request_complete()
   return r;
 }
 
-RGWRadosGetOmapKeysCR::RGWRadosGetOmapKeysCR(RGWRados *_store,
+RGWRadosGetOmapKeysCR::RGWRadosGetOmapKeysCR(rgw::sal::RGWRadosStore *_store,
                       const rgw_raw_obj& _obj,
                       const string& _marker,
                       int _max_entries,
@@ -298,7 +298,7 @@ RGWRadosGetOmapKeysCR::RGWRadosGetOmapKeysCR(RGWRados *_store,
 }
 
 int RGWRadosGetOmapKeysCR::send_request() {
-  int r = store->get_raw_obj_ref(obj, &result->ref);
+  int r = store->getRados()->get_raw_obj_ref(obj, &result->ref);
   if (r < 0) {
     lderr(store->ctx()) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
     return r;
@@ -322,7 +322,7 @@ int RGWRadosGetOmapKeysCR::request_complete()
   return r;
 }
 
-RGWRadosRemoveOmapKeysCR::RGWRadosRemoveOmapKeysCR(RGWRados *_store,
+RGWRadosRemoveOmapKeysCR::RGWRadosRemoveOmapKeysCR(rgw::sal::RGWRadosStore *_store,
                       const rgw_raw_obj& _obj,
                       const set<string>& _keys) : RGWSimpleCoroutine(_store->ctx()),
                                                 store(_store),
@@ -333,7 +333,7 @@ RGWRadosRemoveOmapKeysCR::RGWRadosRemoveOmapKeysCR(RGWRados *_store,
 }
 
 int RGWRadosRemoveOmapKeysCR::send_request() {
-  int r = store->get_raw_obj_ref(obj, &ref);
+  int r = store->getRados()->get_raw_obj_ref(obj, &ref);
   if (r < 0) {
     lderr(store->ctx()) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
     return r;
@@ -357,7 +357,7 @@ int RGWRadosRemoveOmapKeysCR::request_complete()
   return r;
 }
 
-RGWRadosRemoveCR::RGWRadosRemoveCR(RGWRados *store, const rgw_raw_obj& obj)
+RGWRadosRemoveCR::RGWRadosRemoveCR(rgw::sal::RGWRadosStore *store, const rgw_raw_obj& obj)
   : RGWSimpleCoroutine(store->ctx()), store(store), obj(obj)
 {
   set_description() << "remove dest=" << obj;
@@ -365,7 +365,7 @@ RGWRadosRemoveCR::RGWRadosRemoveCR(RGWRados *store, const rgw_raw_obj& obj)
 
 int RGWRadosRemoveCR::send_request()
 {
-  auto rados = store->get_rados_handle();
+  auto rados = store->getRados()->get_rados_handle();
   int r = rados->ioctx_create(obj.pool.name.c_str(), ioctx);
   if (r < 0) {
     lderr(cct) << "ERROR: failed to open pool (" << obj.pool.name << ") ret=" << r << dendl;
@@ -391,7 +391,7 @@ int RGWRadosRemoveCR::request_complete()
   return r;
 }
 
-RGWSimpleRadosLockCR::RGWSimpleRadosLockCR(RGWAsyncRadosProcessor *_async_rados, RGWRados *_store,
+RGWSimpleRadosLockCR::RGWSimpleRadosLockCR(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RGWRadosStore *_store,
                       const rgw_raw_obj& _obj,
                       const string& _lock_name,
                       const string& _cookie,
@@ -430,7 +430,7 @@ int RGWSimpleRadosLockCR::request_complete()
   return req->get_ret_status();
 }
 
-RGWSimpleRadosUnlockCR::RGWSimpleRadosUnlockCR(RGWAsyncRadosProcessor *_async_rados, RGWRados *_store,
+RGWSimpleRadosUnlockCR::RGWSimpleRadosUnlockCR(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RGWRadosStore *_store,
                       const rgw_raw_obj& _obj,
                       const string& _lock_name,
                       const string& _cookie) : RGWSimpleCoroutine(_store->ctx()),
@@ -529,8 +529,8 @@ bool RGWOmapAppend::finish() {
 
 int RGWAsyncGetBucketInstanceInfo::_send_request()
 {
-  RGWSysObjectCtx obj_ctx = store->svc.sysobj->init_obj_ctx();
-  int r = store->get_bucket_instance_info(obj_ctx, bucket, bucket_info, nullptr, nullptr, null_yield);
+  RGWSysObjectCtx obj_ctx = store->svc()->sysobj->init_obj_ctx();
+  int r = store->getRados()->get_bucket_instance_info(obj_ctx, bucket, bucket_info, nullptr, nullptr, null_yield);
   if (r < 0) {
     ldout(store->ctx(), 0) << "ERROR: failed to get bucket instance info for "
         << bucket << dendl;
@@ -540,12 +540,12 @@ int RGWAsyncGetBucketInstanceInfo::_send_request()
   return 0;
 }
 
-RGWRadosBILogTrimCR::RGWRadosBILogTrimCR(RGWRados *store,
+RGWRadosBILogTrimCR::RGWRadosBILogTrimCR(rgw::sal::RGWRadosStore *store,
                                          const RGWBucketInfo& bucket_info,
                                          int shard_id,
                                          const std::string& start_marker,
                                          const std::string& end_marker)
-  : RGWSimpleCoroutine(store->ctx()), bs(store),
+  : RGWSimpleCoroutine(store->ctx()), bs(store->getRados()),
     start_marker(BucketIndexShardsManager::get_shard_marker(start_marker)),
     end_marker(BucketIndexShardsManager::get_shard_marker(end_marker))
 {
@@ -580,7 +580,7 @@ int RGWAsyncFetchRemoteObj::_send_request()
 
   string user_id;
   char buf[16];
-  snprintf(buf, sizeof(buf), ".%lld", (long long)store->instance_id());
+  snprintf(buf, sizeof(buf), ".%lld", (long long)store->getRados()->instance_id());
   map<string, bufferlist> attrs;
 
   rgw_obj src_obj(bucket_info.bucket, key);
@@ -588,7 +588,7 @@ int RGWAsyncFetchRemoteObj::_send_request()
   rgw_obj dest_obj(bucket_info.bucket, dest_key.value_or(key));
 
   std::optional<uint64_t> bytes_transferred;
-  int r = store->fetch_remote_obj(obj_ctx,
+  int r = store->getRados()->fetch_remote_obj(obj_ctx,
                        user_id,
                        NULL, /* req_info */
                        source_zone,
@@ -639,13 +639,13 @@ int RGWAsyncStatRemoteObj::_send_request()
 
   string user_id;
   char buf[16];
-  snprintf(buf, sizeof(buf), ".%lld", (long long)store->instance_id());
+  snprintf(buf, sizeof(buf), ".%lld", (long long)store->getRados()->instance_id());
 
   rgw_obj src_obj(bucket_info.bucket, key);
 
   rgw_obj dest_obj(src_obj);
 
-  int r = store->stat_remote_obj(obj_ctx,
+  int r = store->getRados()->stat_remote_obj(obj_ctx,
                        user_id,
                        nullptr, /* req_info */
                        source_zone,
@@ -683,7 +683,7 @@ int RGWAsyncRemoveObj::_send_request()
 
   RGWObjState *state;
 
-  int ret = store->get_obj_state(&obj_ctx, bucket_info, obj, &state, null_yield);
+  int ret = store->getRados()->get_obj_state(&obj_ctx, bucket_info, obj, &state, null_yield);
   if (ret < 0) {
     ldout(store->ctx(), 20) << __func__ << "(): get_obj_state() obj=" << obj << " returned ret=" << ret << dendl;
     return ret;
@@ -709,7 +709,7 @@ int RGWAsyncRemoveObj::_send_request()
     }
   }
 
-  RGWRados::Object del_target(store, bucket_info, obj_ctx, obj);
+  RGWRados::Object del_target(store->getRados(), bucket_info, obj_ctx, obj);
   RGWRados::Object::Delete del_op(&del_target);
 
   del_op.params.bucket_owner = bucket_info.owner;
@@ -761,7 +761,7 @@ int RGWContinuousLeaseCR::operate()
   return 0;
 }
 
-RGWRadosTimelogAddCR::RGWRadosTimelogAddCR(RGWRados *_store, const string& _oid,
+RGWRadosTimelogAddCR::RGWRadosTimelogAddCR(rgw::sal::RGWRadosStore *_store, const string& _oid,
                       const cls_log_entry& entry) : RGWSimpleCoroutine(_store->ctx()),
                                                 store(_store),
                                                 oid(_oid), cn(NULL)
@@ -776,7 +776,7 @@ int RGWRadosTimelogAddCR::send_request()
   set_status() << "sending request";
 
   cn = stack->create_completion_notifier();
-  return store->svc.cls->timelog.add(oid, entries, cn->completion(), true, null_yield);
+  return store->svc()->cls->timelog.add(oid, entries, cn->completion(), true, null_yield);
 }
 
 int RGWRadosTimelogAddCR::request_complete()
@@ -788,7 +788,7 @@ int RGWRadosTimelogAddCR::request_complete()
   return r;
 }
 
-RGWRadosTimelogTrimCR::RGWRadosTimelogTrimCR(RGWRados *store,
+RGWRadosTimelogTrimCR::RGWRadosTimelogTrimCR(rgw::sal::RGWRadosStore *store,
                                              const std::string& oid,
                                              const real_time& start_time,
                                              const real_time& end_time,
@@ -808,7 +808,7 @@ int RGWRadosTimelogTrimCR::send_request()
   set_status() << "sending request";
 
   cn = stack->create_completion_notifier();
-  return store->svc.cls->timelog.trim(oid, start_time, end_time, from_marker,
+  return store->svc()->cls->timelog.trim(oid, start_time, end_time, from_marker,
                                       to_marker, cn->completion(),
                                       null_yield);
 }
@@ -823,7 +823,7 @@ int RGWRadosTimelogTrimCR::request_complete()
 }
 
 
-RGWSyncLogTrimCR::RGWSyncLogTrimCR(RGWRados *store, const std::string& oid,
+RGWSyncLogTrimCR::RGWSyncLogTrimCR(rgw::sal::RGWRadosStore *store, const std::string& oid,
                                    const std::string& to_marker,
                                    std::string *last_trim_marker)
   : RGWRadosTimelogTrimCR(store, oid, real_time{}, real_time{},
@@ -849,12 +849,12 @@ int RGWSyncLogTrimCR::request_complete()
 int RGWAsyncStatObj::_send_request()
 {
   rgw_raw_obj raw_obj;
-  store->obj_to_raw(bucket_info.placement_rule, obj, &raw_obj);
-  return store->raw_obj_stat(raw_obj, psize, pmtime, pepoch,
+  store->getRados()->obj_to_raw(bucket_info.placement_rule, obj, &raw_obj);
+  return store->getRados()->raw_obj_stat(raw_obj, psize, pmtime, pepoch,
                              nullptr, nullptr, objv_tracker, null_yield);
 }
 
-RGWStatObjCR::RGWStatObjCR(RGWAsyncRadosProcessor *async_rados, RGWRados *store,
+RGWStatObjCR::RGWStatObjCR(RGWAsyncRadosProcessor *async_rados, rgw::sal::RGWRadosStore *store,
                            const RGWBucketInfo& _bucket_info, const rgw_obj& obj, uint64_t *psize,
                            real_time* pmtime, uint64_t *pepoch,
                            RGWObjVersionTracker *objv_tracker)
@@ -885,7 +885,7 @@ int RGWStatObjCR::request_complete()
   return req->get_ret_status();
 }
 
-RGWRadosNotifyCR::RGWRadosNotifyCR(RGWRados *store, const rgw_raw_obj& obj,
+RGWRadosNotifyCR::RGWRadosNotifyCR(rgw::sal::RGWRadosStore *store, const rgw_raw_obj& obj,
                                    bufferlist& request, uint64_t timeout_ms,
                                    bufferlist *response)
   : RGWSimpleCoroutine(store->ctx()), store(store), obj(obj),
@@ -896,7 +896,7 @@ RGWRadosNotifyCR::RGWRadosNotifyCR(RGWRados *store, const rgw_raw_obj& obj,
 
 int RGWRadosNotifyCR::send_request()
 {
-  int r = store->get_raw_obj_ref(obj, &ref);
+  int r = store->getRados()->get_raw_obj_ref(obj, &ref);
   if (r < 0) {
     lderr(store->ctx()) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
     return r;
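
Two ways of taking the new type show up in these files, depending on whether a header actually dereferences the store. rgw_bucket.h above only passes RGWRadosStore pointers through its declarations, so a forward declaration is enough; rgw_cr_rados.cc above (and rgw_cr_rados.h just below) swap their rgw_rados.h include for rgw_sal.h because inline code there calls through the pointer, e.g. _store->ctx() in constructor initializer lists. A minimal sketch of the two patterns, using declarations taken from the hunks and a hypothetical ExampleCR standing in for the coroutines in that header:

// Pattern 1: the header never dereferences the store -- forward-declare it,
// as rgw_bucket.h does, and keep taking plain pointers.
namespace rgw { namespace sal { class RGWRadosStore; } }

extern int rgw_remove_bucket(rgw::sal::RGWRadosStore *store, rgw_bucket& bucket,
                             bool delete_children, optional_yield y);

// Pattern 2: inline code needs the complete type -- include the SAL header,
// as rgw_cr_rados.h does in place of its former rgw_rados.h include.
// ExampleCR is a made-up class used only to illustrate the pattern.
#include "rgw_sal.h"

class ExampleCR : public RGWSimpleCoroutine {
  rgw::sal::RGWRadosStore *store;
public:
  explicit ExampleCR(rgw::sal::RGWRadosStore *_store)
    : RGWSimpleCoroutine(_store->ctx()),   // dereferences the store inline
      store(_store) {}
};
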
index cf2789f1fee9ce532934841b3edc27f70263853e..84546845cbeadb0ed48ef8379da0184e5ec06c8f 100644 (file)
@@ -7,7 +7,7 @@
 #include <boost/intrusive_ptr.hpp>
 #include "include/ceph_assert.h"
 #include "rgw_coroutine.h"
-#include "rgw_rados.h"
+#include "rgw_sal.h"
 #include "common/WorkQueue.h"
 #include "common/Throttle.h"
 
@@ -108,13 +108,13 @@ public:
 template <class P>
 class RGWSimpleWriteOnlyAsyncCR : public RGWSimpleCoroutine {
   RGWAsyncRadosProcessor *async_rados;
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
 
   P params;
   const DoutPrefixProvider *dpp;
 
   class Request : public RGWAsyncRadosRequest {
-    RGWRados *store;
+    rgw::sal::RGWRadosStore *store;
     P params;
     const DoutPrefixProvider *dpp;
   protected:
@@ -122,7 +122,7 @@ class RGWSimpleWriteOnlyAsyncCR : public RGWSimpleCoroutine {
   public:
     Request(RGWCoroutine *caller,
             RGWAioCompletionNotifier *cn,
-            RGWRados *store,
+            rgw::sal::RGWRadosStore *store,
             const P& _params,
             const DoutPrefixProvider *dpp) : RGWAsyncRadosRequest(caller, cn),
                                 store(store),
@@ -132,7 +132,7 @@ class RGWSimpleWriteOnlyAsyncCR : public RGWSimpleCoroutine {
 
  public:
   RGWSimpleWriteOnlyAsyncCR(RGWAsyncRadosProcessor *_async_rados,
-                           RGWRados *_store,
+                           rgw::sal::RGWRadosStore *_store,
                            const P& _params,
                             const DoutPrefixProvider *_dpp) : RGWSimpleCoroutine(_store->ctx()),
                                                 async_rados(_async_rados),
@@ -169,13 +169,13 @@ class RGWSimpleWriteOnlyAsyncCR : public RGWSimpleCoroutine {
 template <class P, class R>
 class RGWSimpleAsyncCR : public RGWSimpleCoroutine {
   RGWAsyncRadosProcessor *async_rados;
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
 
   P params;
   std::shared_ptr<R> result;
 
   class Request : public RGWAsyncRadosRequest {
-    RGWRados *store;
+    rgw::sal::RGWRadosStore *store;
     P params;
     std::shared_ptr<R> result;
   protected:
@@ -183,7 +183,7 @@ class RGWSimpleAsyncCR : public RGWSimpleCoroutine {
   public:
     Request(RGWCoroutine *caller,
             RGWAioCompletionNotifier *cn,
-            RGWRados *_store,
+            rgw::sal::RGWRadosStore *_store,
             const P& _params,
             std::shared_ptr<R>& _result) : RGWAsyncRadosRequest(caller, cn),
                                            store(_store),
@@ -193,7 +193,7 @@ class RGWSimpleAsyncCR : public RGWSimpleCoroutine {
 
  public:
   RGWSimpleAsyncCR(RGWAsyncRadosProcessor *_async_rados,
-                   RGWRados *_store,
+                   rgw::sal::RGWRadosStore *_store,
                    const P& _params,
                    std::shared_ptr<R>& _result) : RGWSimpleCoroutine(_store->ctx()),
                                                   async_rados(_async_rados),
@@ -276,7 +276,7 @@ public:
 };
 
 class RGWAsyncLockSystemObj : public RGWAsyncRadosRequest {
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   rgw_raw_obj obj;
   string lock_name;
   string cookie;
@@ -285,13 +285,13 @@ class RGWAsyncLockSystemObj : public RGWAsyncRadosRequest {
 protected:
   int _send_request() override;
 public:
-  RGWAsyncLockSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWRados *_store,
+  RGWAsyncLockSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *_store,
                         RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj,
                        const string& _name, const string& _cookie, uint32_t _duration_secs);
 };
 
 class RGWAsyncUnlockSystemObj : public RGWAsyncRadosRequest {
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   rgw_raw_obj obj;
   string lock_name;
   string cookie;
@@ -299,7 +299,7 @@ class RGWAsyncUnlockSystemObj : public RGWAsyncRadosRequest {
 protected:
   int _send_request() override;
 public:
-  RGWAsyncUnlockSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWRados *_store,
+  RGWAsyncUnlockSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *_store,
                         RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj,
                        const string& _name, const string& _cookie);
 };
@@ -504,7 +504,7 @@ public:
 };
 
 class RGWRadosSetOmapKeysCR : public RGWSimpleCoroutine {
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   map<string, bufferlist> entries;
 
   rgw_rados_ref ref;
@@ -514,7 +514,7 @@ class RGWRadosSetOmapKeysCR : public RGWSimpleCoroutine {
   boost::intrusive_ptr<RGWAioCompletionNotifier> cn;
 
 public:
-  RGWRadosSetOmapKeysCR(RGWRados *_store,
+  RGWRadosSetOmapKeysCR(rgw::sal::RGWRadosStore *_store,
                      const rgw_raw_obj& _obj,
                      map<string, bufferlist>& _entries);
 
@@ -531,7 +531,7 @@ class RGWRadosGetOmapKeysCR : public RGWSimpleCoroutine {
   };
   using ResultPtr = std::shared_ptr<Result>;
 
-  RGWRadosGetOmapKeysCR(RGWRados *_store, const rgw_raw_obj& _obj,
+  RGWRadosGetOmapKeysCR(rgw::sal::RGWRadosStore *_store, const rgw_raw_obj& _obj,
                         const string& _marker, int _max_entries,
                         ResultPtr result);
 
@@ -539,7 +539,7 @@ class RGWRadosGetOmapKeysCR : public RGWSimpleCoroutine {
   int request_complete() override;
 
  private:
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   rgw_raw_obj obj;
   string marker;
   int max_entries;
@@ -548,7 +548,7 @@ class RGWRadosGetOmapKeysCR : public RGWSimpleCoroutine {
 };
 
 class RGWRadosRemoveOmapKeysCR : public RGWSimpleCoroutine {
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
 
   rgw_rados_ref ref;
 
@@ -559,7 +559,7 @@ class RGWRadosRemoveOmapKeysCR : public RGWSimpleCoroutine {
   boost::intrusive_ptr<RGWAioCompletionNotifier> cn;
 
 public:
-  RGWRadosRemoveOmapKeysCR(RGWRados *_store,
+  RGWRadosRemoveOmapKeysCR(rgw::sal::RGWRadosStore *_store,
                      const rgw_raw_obj& _obj,
                      const set<string>& _keys);
 
@@ -569,13 +569,13 @@ public:
 };
 
 class RGWRadosRemoveCR : public RGWSimpleCoroutine {
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   librados::IoCtx ioctx;
   const rgw_raw_obj obj;
   boost::intrusive_ptr<RGWAioCompletionNotifier> cn;
 
 public:
-  RGWRadosRemoveCR(RGWRados *store, const rgw_raw_obj& obj);
+  RGWRadosRemoveCR(rgw::sal::RGWRadosStore *store, const rgw_raw_obj& obj);
 
   int send_request() override;
   int request_complete() override;
@@ -583,7 +583,7 @@ public:
 
 class RGWSimpleRadosLockCR : public RGWSimpleCoroutine {
   RGWAsyncRadosProcessor *async_rados;
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   string lock_name;
   string cookie;
   uint32_t duration;
@@ -593,7 +593,7 @@ class RGWSimpleRadosLockCR : public RGWSimpleCoroutine {
   RGWAsyncLockSystemObj *req;
 
 public:
-  RGWSimpleRadosLockCR(RGWAsyncRadosProcessor *_async_rados, RGWRados *_store,
+  RGWSimpleRadosLockCR(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RGWRadosStore *_store,
                      const rgw_raw_obj& _obj,
                       const string& _lock_name,
                      const string& _cookie,
@@ -616,7 +616,7 @@ public:
 
 class RGWSimpleRadosUnlockCR : public RGWSimpleCoroutine {
   RGWAsyncRadosProcessor *async_rados;
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   string lock_name;
   string cookie;
 
@@ -625,7 +625,7 @@ class RGWSimpleRadosUnlockCR : public RGWSimpleCoroutine {
   RGWAsyncUnlockSystemObj *req;
 
 public:
-  RGWSimpleRadosUnlockCR(RGWAsyncRadosProcessor *_async_rados, RGWRados *_store,
+  RGWSimpleRadosUnlockCR(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RGWRadosStore *_store,
                      const rgw_raw_obj& _obj, 
                       const string& _lock_name,
                      const string& _cookie);
@@ -642,7 +642,7 @@ public:
 
 class RGWOmapAppend : public RGWConsumerCR<string> {
   RGWAsyncRadosProcessor *async_rados;
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
 
   rgw_raw_obj obj;
 
@@ -656,7 +656,7 @@ class RGWOmapAppend : public RGWConsumerCR<string> {
   uint64_t window_size;
   uint64_t total_entries;
 public:
-  RGWOmapAppend(RGWAsyncRadosProcessor *_async_rados, RGWRados *_store,
+  RGWOmapAppend(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RGWRadosStore *_store,
                 const rgw_raw_obj& _obj,
                 uint64_t _window_size = OMAP_APPEND_MAX_ENTRIES_DEFAULT);
   int operate() override;
@@ -741,14 +741,14 @@ public:
 
 class RGWShardedOmapCRManager {
   RGWAsyncRadosProcessor *async_rados;
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   RGWCoroutine *op;
 
   int num_shards;
 
   vector<RGWOmapAppend *> shards;
 public:
-  RGWShardedOmapCRManager(RGWAsyncRadosProcessor *_async_rados, RGWRados *_store, RGWCoroutine *_op, int _num_shards, const rgw_pool& pool, const string& oid_prefix)
+  RGWShardedOmapCRManager(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RGWRadosStore *_store, RGWCoroutine *_op, int _num_shards, const rgw_pool& pool, const string& oid_prefix)
                       : async_rados(_async_rados),
                        store(_store), op(_op), num_shards(_num_shards) {
     shards.reserve(num_shards);
@@ -785,14 +785,14 @@ public:
 };
 
 class RGWAsyncGetBucketInstanceInfo : public RGWAsyncRadosRequest {
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   rgw_bucket bucket;
 
 protected:
   int _send_request() override;
 public:
   RGWAsyncGetBucketInstanceInfo(RGWCoroutine *caller, RGWAioCompletionNotifier *cn,
-                                RGWRados *_store, const rgw_bucket& bucket)
+                                rgw::sal::RGWRadosStore *_store, const rgw_bucket& bucket)
     : RGWAsyncRadosRequest(caller, cn), store(_store), bucket(bucket) {}
 
   RGWBucketInfo bucket_info;
@@ -800,7 +800,7 @@ public:
 
 class RGWGetBucketInstanceInfoCR : public RGWSimpleCoroutine {
   RGWAsyncRadosProcessor *async_rados;
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   rgw_bucket bucket;
   RGWBucketInfo *bucket_info;
 
@@ -808,7 +808,7 @@ class RGWGetBucketInstanceInfoCR : public RGWSimpleCoroutine {
   
 public:
   // rgw_bucket constructor
-  RGWGetBucketInstanceInfoCR(RGWAsyncRadosProcessor *_async_rados, RGWRados *_store,
+  RGWGetBucketInstanceInfoCR(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RGWRadosStore *_store,
                              const rgw_bucket& _bucket, RGWBucketInfo *_bucket_info)
     : RGWSimpleCoroutine(_store->ctx()), async_rados(_async_rados), store(_store),
       bucket(_bucket), bucket_info(_bucket_info) {}
@@ -841,7 +841,7 @@ class RGWRadosBILogTrimCR : public RGWSimpleCoroutine {
   std::string end_marker;
   boost::intrusive_ptr<RGWAioCompletionNotifier> cn;
  public:
-  RGWRadosBILogTrimCR(RGWRados *store, const RGWBucketInfo& bucket_info,
+  RGWRadosBILogTrimCR(rgw::sal::RGWRadosStore *store, const RGWBucketInfo& bucket_info,
                       int shard_id, const std::string& start_marker,
                       const std::string& end_marker);
 
@@ -850,7 +850,7 @@ class RGWRadosBILogTrimCR : public RGWSimpleCoroutine {
 };
 
 class RGWAsyncFetchRemoteObj : public RGWAsyncRadosRequest {
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   string source_zone;
 
   RGWBucketInfo bucket_info;
@@ -870,7 +870,7 @@ class RGWAsyncFetchRemoteObj : public RGWAsyncRadosRequest {
 protected:
   int _send_request() override;
 public:
-  RGWAsyncFetchRemoteObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWRados *_store,
+  RGWAsyncFetchRemoteObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *_store,
                          const string& _source_zone,
                          RGWBucketInfo& _bucket_info,
                         std::optional<rgw_placement_rule> _dest_placement_rule,
@@ -898,7 +898,7 @@ public:
 class RGWFetchRemoteObjCR : public RGWSimpleCoroutine {
   CephContext *cct;
   RGWAsyncRadosProcessor *async_rados;
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   string source_zone;
 
   RGWBucketInfo bucket_info;
@@ -918,7 +918,7 @@ class RGWFetchRemoteObjCR : public RGWSimpleCoroutine {
   const DoutPrefixProvider *dpp;
 
 public:
-  RGWFetchRemoteObjCR(RGWAsyncRadosProcessor *_async_rados, RGWRados *_store,
+  RGWFetchRemoteObjCR(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RGWRadosStore *_store,
                       const string& _source_zone,
                       RGWBucketInfo& _bucket_info,
                      std::optional<rgw_placement_rule> _dest_placement_rule,
@@ -965,7 +965,7 @@ public:
 };
 
 class RGWAsyncStatRemoteObj : public RGWAsyncRadosRequest {
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   string source_zone;
 
   RGWBucketInfo bucket_info;
@@ -981,7 +981,7 @@ class RGWAsyncStatRemoteObj : public RGWAsyncRadosRequest {
 protected:
   int _send_request() override;
 public:
-  RGWAsyncStatRemoteObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWRados *_store,
+  RGWAsyncStatRemoteObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *_store,
                          const string& _source_zone,
                          RGWBucketInfo& _bucket_info,
                          const rgw_obj_key& _key,
@@ -1003,7 +1003,7 @@ public:
 class RGWStatRemoteObjCR : public RGWSimpleCoroutine {
   CephContext *cct;
   RGWAsyncRadosProcessor *async_rados;
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   string source_zone;
 
   RGWBucketInfo bucket_info;
@@ -1019,7 +1019,7 @@ class RGWStatRemoteObjCR : public RGWSimpleCoroutine {
   RGWAsyncStatRemoteObj *req;
 
 public:
-  RGWStatRemoteObjCR(RGWAsyncRadosProcessor *_async_rados, RGWRados *_store,
+  RGWStatRemoteObjCR(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RGWRadosStore *_store,
                       const string& _source_zone,
                       RGWBucketInfo& _bucket_info,
                       const rgw_obj_key& _key,
@@ -1064,7 +1064,7 @@ public:
 };
 
 class RGWAsyncRemoveObj : public RGWAsyncRadosRequest {
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   string source_zone;
 
   RGWBucketInfo bucket_info;
@@ -1083,7 +1083,7 @@ class RGWAsyncRemoveObj : public RGWAsyncRadosRequest {
 protected:
   int _send_request() override;
 public:
-  RGWAsyncRemoveObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWRados *_store,
+  RGWAsyncRemoveObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *_store,
                          const string& _source_zone,
                          RGWBucketInfo& _bucket_info,
                          const rgw_obj_key& _key,
@@ -1117,7 +1117,7 @@ public:
 class RGWRemoveObjCR : public RGWSimpleCoroutine {
   CephContext *cct;
   RGWAsyncRadosProcessor *async_rados;
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   string source_zone;
 
   RGWBucketInfo bucket_info;
@@ -1137,7 +1137,7 @@ class RGWRemoveObjCR : public RGWSimpleCoroutine {
   rgw_zone_set *zones_trace;
 
 public:
-  RGWRemoveObjCR(RGWAsyncRadosProcessor *_async_rados, RGWRados *_store,
+  RGWRemoveObjCR(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RGWRadosStore *_store,
                       const string& _source_zone,
                       RGWBucketInfo& _bucket_info,
                       const rgw_obj_key& _key,
@@ -1194,7 +1194,7 @@ public:
 
 class RGWContinuousLeaseCR : public RGWCoroutine {
   RGWAsyncRadosProcessor *async_rados;
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
 
   const rgw_raw_obj obj;
 
@@ -1212,7 +1212,7 @@ class RGWContinuousLeaseCR : public RGWCoroutine {
   bool aborted{false};
 
 public:
-  RGWContinuousLeaseCR(RGWAsyncRadosProcessor *_async_rados, RGWRados *_store,
+  RGWContinuousLeaseCR(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RGWRadosStore *_store,
                        const rgw_raw_obj& _obj,
                        const string& _lock_name, int _interval, RGWCoroutine *_caller)
     : RGWCoroutine(_store->ctx()), async_rados(_async_rados), store(_store),
@@ -1244,7 +1244,7 @@ public:
 };
 
 class RGWRadosTimelogAddCR : public RGWSimpleCoroutine {
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   list<cls_log_entry> entries;
 
   string oid;
@@ -1252,7 +1252,7 @@ class RGWRadosTimelogAddCR : public RGWSimpleCoroutine {
   boost::intrusive_ptr<RGWAioCompletionNotifier> cn;
 
 public:
-  RGWRadosTimelogAddCR(RGWRados *_store, const string& _oid,
+  RGWRadosTimelogAddCR(rgw::sal::RGWRadosStore *_store, const string& _oid,
                        const cls_log_entry& entry);
 
   int send_request() override;
@@ -1260,7 +1260,7 @@ public:
 };
 
 class RGWRadosTimelogTrimCR : public RGWSimpleCoroutine {
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   boost::intrusive_ptr<RGWAioCompletionNotifier> cn;
  protected:
   std::string oid;
@@ -1270,7 +1270,7 @@ class RGWRadosTimelogTrimCR : public RGWSimpleCoroutine {
   std::string to_marker;
 
  public:
-  RGWRadosTimelogTrimCR(RGWRados *store, const std::string& oid,
+  RGWRadosTimelogTrimCR(rgw::sal::RGWRadosStore *store, const std::string& oid,
                         const real_time& start_time, const real_time& end_time,
                         const std::string& from_marker,
                         const std::string& to_marker);
@@ -1287,13 +1287,13 @@ class RGWSyncLogTrimCR : public RGWRadosTimelogTrimCR {
   // a marker that compares greater than any timestamp-based index
   static constexpr const char* max_marker = "99999999";
 
-  RGWSyncLogTrimCR(RGWRados *store, const std::string& oid,
+  RGWSyncLogTrimCR(rgw::sal::RGWRadosStore *store, const std::string& oid,
                    const std::string& to_marker, std::string *last_trim_marker);
   int request_complete() override;
 };
 
 class RGWAsyncStatObj : public RGWAsyncRadosRequest {
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   RGWBucketInfo bucket_info;
   rgw_obj obj;
   uint64_t *psize;
@@ -1303,7 +1303,7 @@ class RGWAsyncStatObj : public RGWAsyncRadosRequest {
 protected:
   int _send_request() override;
 public:
-  RGWAsyncStatObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWRados *store,
+  RGWAsyncStatObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *store,
                   const RGWBucketInfo& _bucket_info, const rgw_obj& obj, uint64_t *psize = nullptr,
                   real_time *pmtime = nullptr, uint64_t *pepoch = nullptr,
                   RGWObjVersionTracker *objv_tracker = nullptr)
@@ -1312,7 +1312,7 @@ public:
 };
 
 class RGWStatObjCR : public RGWSimpleCoroutine {
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   RGWAsyncRadosProcessor *async_rados;
   RGWBucketInfo bucket_info;
   rgw_obj obj;
@@ -1322,7 +1322,7 @@ class RGWStatObjCR : public RGWSimpleCoroutine {
   RGWObjVersionTracker *objv_tracker;
   RGWAsyncStatObj *req = nullptr;
  public:
-  RGWStatObjCR(RGWAsyncRadosProcessor *async_rados, RGWRados *store,
+  RGWStatObjCR(RGWAsyncRadosProcessor *async_rados, rgw::sal::RGWRadosStore *store,
          const RGWBucketInfo& _bucket_info, const rgw_obj& obj, uint64_t *psize = nullptr,
          real_time* pmtime = nullptr, uint64_t *pepoch = nullptr,
          RGWObjVersionTracker *objv_tracker = nullptr);
@@ -1337,7 +1337,7 @@ class RGWStatObjCR : public RGWSimpleCoroutine {
 
 /// coroutine wrapper for IoCtx::aio_notify()
 class RGWRadosNotifyCR : public RGWSimpleCoroutine {
-  RGWRados *const store;
+  rgw::sal::RGWRadosStore *const store;
   const rgw_raw_obj obj;
   bufferlist request;
   const uint64_t timeout_ms;
@@ -1346,7 +1346,7 @@ class RGWRadosNotifyCR : public RGWSimpleCoroutine {
   boost::intrusive_ptr<RGWAioCompletionNotifier> cn;
 
 public:
-  RGWRadosNotifyCR(RGWRados *store, const rgw_raw_obj& obj,
+  RGWRadosNotifyCR(rgw::sal::RGWRadosStore *store, const rgw_raw_obj& obj,
                    bufferlist& request, uint64_t timeout_ms,
                    bufferlist *response);
 
index 090418bbaa4fa1acdec8ce4e56a01ab254b86313..98bce05297299130a930714124d41ae0cd4cadf3 100644 (file)
@@ -89,14 +89,14 @@ int RGWUserCreateCR::Request::_send_request()
 template<>
 int RGWGetUserInfoCR::Request::_send_request()
 {
-  return store->ctl.user->get_info_by_uid(params.user, result.get(), null_yield);
+  return store->ctl()->user->get_info_by_uid(params.user, result.get(), null_yield);
 }
 
 template<>
 int RGWGetBucketInfoCR::Request::_send_request()
 {
-  RGWSysObjectCtx obj_ctx(store->svc.sysobj->init_obj_ctx());
-  return store->get_bucket_info(obj_ctx, params.tenant, params.bucket_name,
+  RGWSysObjectCtx obj_ctx(store->svc()->sysobj->init_obj_ctx());
+  return store->getRados()->get_bucket_info(obj_ctx, params.tenant, params.bucket_name,
                                 result->bucket_info, &result->mtime, null_yield, &result->attrs);
 }
 
@@ -104,8 +104,8 @@ template<>
 int RGWBucketCreateLocalCR::Request::_send_request()
 {
   CephContext *cct = store->ctx();
-  auto& zone_svc = store->svc.zone;
-  auto& sysobj_svc = store->svc.sysobj;
+  auto& zone_svc = store->svc()->zone;
+  auto& sysobj_svc = store->svc()->sysobj;
 
   const auto& user_info = params.user_info.get();
   const auto& user = user_info->user_id;
@@ -126,7 +126,7 @@ int RGWBucketCreateLocalCR::Request::_send_request()
   RGWBucketInfo bucket_info;
   map<string, bufferlist> bucket_attrs;
 
-  int ret = store->get_bucket_info(sysobj_ctx, user.tenant, bucket_name,
+  int ret = store->getRados()->get_bucket_info(sysobj_ctx, user.tenant, bucket_name,
                                  bucket_info, nullptr, null_yield, &bucket_attrs);
   if (ret < 0 && ret != -ENOENT)
     return ret;
@@ -137,7 +137,7 @@ int RGWBucketCreateLocalCR::Request::_send_request()
   bucket_owner.set_id(user);
   bucket_owner.set_name(user_info->display_name);
   if (bucket_exists) {
-    ret = rgw_op_get_bucket_policy_from_attr(cct, store->ctl.user, bucket_info,
+    ret = rgw_op_get_bucket_policy_from_attr(cct, store->ctl()->user, bucket_info,
                                              bucket_attrs, &old_policy);
     if (ret >= 0)  {
       if (old_policy.get_owner().get_id().compare(user) != 0) {
@@ -189,7 +189,7 @@ int RGWBucketCreateLocalCR::Request::_send_request()
   RGWBucketInfo info;
   obj_version ep_objv;
 
-  ret = store->create_bucket(*user_info, bucket, zonegroup_id,
+  ret = store->getRados()->create_bucket(*user_info, bucket, zonegroup_id,
                                 placement_rule, bucket_info.swift_ver_location,
                                 pquota_info, attrs,
                                 info, nullptr, &ep_objv, creation_time,
@@ -209,10 +209,10 @@ int RGWBucketCreateLocalCR::Request::_send_request()
     bucket = info.bucket;
   }
 
-  ret = store->ctl.bucket->link_bucket(user, bucket, info.creation_time, null_yield, false);
+  ret = store->ctl()->bucket->link_bucket(user, bucket, info.creation_time, null_yield, false);
   if (ret && !existed && ret != -EEXIST) {
     /* if it exists (or previously existed), don't remove it! */
-    int r = store->ctl.bucket->unlink_bucket(user, bucket, null_yield);
+    int r = store->ctl()->bucket->unlink_bucket(user, bucket, null_yield);
     if (r < 0) {
       ldout(cct, 0) << "WARNING: failed to unlink bucket: ret=" << r << dendl;
     }
@@ -257,7 +257,7 @@ int RGWBucketLifecycleConfigCR::Request::_send_request()
 {
   CephContext *cct = store->ctx();
 
-  RGWLC *lc = store->get_lc();
+  RGWLC *lc = store->getRados()->get_lc();
   if (!lc) {
     lderr(cct) << "ERROR: lifecycle object is not initialized!" << dendl;
     return -EIO;
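
The hunks above and below repeat one mechanical pattern: direct member access on the raw store (store->svc.x, store->ctl.x, store->get_lc(), store->check_quota(...)) becomes a call through the new wrapper's accessors (store->svc()->x, store->ctl()->x, store->getRados()->get_lc(), ...). A minimal sketch of that facade shape, using simplified stand-in types rather than the real RGWRados/RGWRadosStore classes:

// Illustrative sketch only -- Services, Ctls and RadosBackend are stand-ins,
// not the actual RGWSI_* / RGWCtl / RGWRados types.
struct Services { /* e.g. zone, sysobj, datalog services */ };
struct Ctls     { /* e.g. user, bucket controllers */ };
struct RadosBackend {
  Services svc;   // previously reached as store->svc.*
  Ctls ctl;       // previously reached as store->ctl.*
  int get_bucket_info(/* ... */) { return 0; }   // stand-in for a raw-store call
};

class RadosStoreFacade {                 // plays the role of rgw::sal::RGWRadosStore
  RadosBackend rados;
public:
  RadosBackend* getRados() { return &rados; }    // raw backend, for not-yet-abstracted calls
  Services* svc() { return &rados.svc; }         // store->svc()->zone, store->svc()->sysobj, ...
  Ctls* ctl() { return &rados.ctl; }             // store->ctl()->user, store->ctl()->bucket, ...
};

// Callers migrate from store->svc.zone / store->get_bucket_info(...) to
// store->svc()->zone and store->getRados()->get_bucket_info(...).
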
index ec16585618e59402c5af85d0a61a52cac6ce87d2..c3e874a6e27b26d18982bfec385055a368f4587d 100644 (file)
@@ -23,6 +23,7 @@
 #include "rgw_metadata.h"
 #include "rgw_sync_counters.h"
 #include "rgw_sync_module.h"
+#include "rgw_sal.h"
 
 #include "cls/lock/cls_lock_client.h"
 
@@ -87,8 +88,8 @@ bool RGWReadDataSyncStatusMarkersCR::spawn_next()
     return false;
   }
   using CR = RGWSimpleRadosReadCR<rgw_data_sync_marker>;
-  spawn(new CR(env->async_rados, env->store->svc.sysobj,
-               rgw_raw_obj(env->store->svc.zone->get_zone_params().log_pool, RGWDataSyncStatusManager::shard_obj_name(env->source_zone, shard_id)),
+  spawn(new CR(env->async_rados, env->store->svc()->sysobj,
+               rgw_raw_obj(env->store->svc()->zone->get_zone_params().log_pool, RGWDataSyncStatusManager::shard_obj_name(env->source_zone, shard_id)),
                &markers[shard_id]),
         false);
   shard_id++;
@@ -124,7 +125,7 @@ bool RGWReadDataSyncRecoveringShardsCR::spawn_next()
   string error_oid = RGWDataSyncStatusManager::shard_obj_name(env->source_zone, shard_id) + ".retry";
   auto& shard_keys = omapkeys[shard_id];
   shard_keys = std::make_shared<RGWRadosGetOmapKeysCR::Result>();
-  spawn(new RGWRadosGetOmapKeysCR(env->store, rgw_raw_obj(env->store->svc.zone->get_zone_params().log_pool, error_oid),
+  spawn(new RGWRadosGetOmapKeysCR(env->store, rgw_raw_obj(env->store->svc()->zone->get_zone_params().log_pool, error_oid),
                                   marker, max_entries, shard_keys), false);
 
   ++shard_id;
@@ -150,8 +151,8 @@ int RGWReadDataSyncStatusCoroutine::operate()
     using ReadInfoCR = RGWSimpleRadosReadCR<rgw_data_sync_info>;
     yield {
       bool empty_on_enoent = false; // fail on ENOENT
-      call(new ReadInfoCR(sync_env->async_rados, sync_env->store->svc.sysobj,
-                          rgw_raw_obj(sync_env->store->svc.zone->get_zone_params().log_pool, RGWDataSyncStatusManager::sync_status_oid(sync_env->source_zone)),
+      call(new ReadInfoCR(sync_env->async_rados, sync_env->store->svc()->sysobj,
+                          rgw_raw_obj(sync_env->store->svc()->zone->get_zone_params().log_pool, RGWDataSyncStatusManager::sync_status_oid(sync_env->source_zone)),
                           &sync_status->sync_info, empty_on_enoent));
     }
     if (retcode < 0) {
@@ -373,7 +374,6 @@ public:
 
   int send_request() override {
     RGWRESTConn *conn = sync_env->conn;
-    RGWRados *store = sync_env->store;
 
     char buf[32];
     snprintf(buf, sizeof(buf), "%d", shard_id);
@@ -396,7 +396,7 @@ public:
 
     int ret = http_op->aio_read();
     if (ret < 0) {
-      ldout(store->ctx(), 0) << "ERROR: failed to read from " << p << dendl;
+      ldout(sync_env->store->ctx(), 0) << "ERROR: failed to read from " << p << dendl;
       log_error() << "failed to send http operation: " << http_op->to_str() << " ret=" << ret << std::endl;
       http_op->put();
       return ret;
@@ -452,7 +452,7 @@ bool RGWListRemoteDataLogCR::spawn_next() {
 class RGWInitDataSyncStatusCoroutine : public RGWCoroutine {
   static constexpr uint32_t lock_duration = 30;
   RGWDataSyncEnv *sync_env;
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   const rgw_pool& pool;
   const uint32_t num_shards;
 
@@ -470,7 +470,7 @@ public:
                                  RGWSyncTraceNodeRef& _tn_parent,
                                  rgw_data_sync_status *status)
     : RGWCoroutine(_sync_env->cct), sync_env(_sync_env), store(sync_env->store),
-      pool(store->svc.zone->get_zone_params().log_pool),
+      pool(store->svc()->zone->get_zone_params().log_pool),
       num_shards(num_shards), status(status),
       tn(sync_env->sync_tracer->add_node(_tn_parent, "init_data_sync_status")) {
     lock_name = "sync_lock";
@@ -499,7 +499,7 @@ public:
         return set_cr_error(retcode);
       }
       using WriteInfoCR = RGWSimpleRadosWriteCR<rgw_data_sync_info>;
-      yield call(new WriteInfoCR(sync_env->async_rados, store->svc.sysobj,
+      yield call(new WriteInfoCR(sync_env->async_rados, store->svc()->sysobj,
                                  rgw_raw_obj{pool, sync_status_oid},
                                  status->sync_info));
       if (retcode < 0) {
@@ -520,7 +520,7 @@ public:
 
       /* fetch current position in logs */
       yield {
-        RGWRESTConn *conn = store->svc.zone->get_zone_conn_by_id(sync_env->source_zone);
+        RGWRESTConn *conn = store->svc()->zone->get_zone_conn_by_id(sync_env->source_zone);
         if (!conn) {
           tn->log(0, SSTR("ERROR: connection to zone " << sync_env->source_zone << " does not exist!"));
           return set_cr_error(-EIO);
@@ -544,7 +544,7 @@ public:
           marker.timestamp = info.last_update;
           const auto& oid = RGWDataSyncStatusManager::shard_obj_name(sync_env->source_zone, i);
           using WriteMarkerCR = RGWSimpleRadosWriteCR<rgw_data_sync_marker>;
-          spawn(new WriteMarkerCR(sync_env->async_rados, store->svc.sysobj,
+          spawn(new WriteMarkerCR(sync_env->async_rados, store->svc()->sysobj,
                                   rgw_raw_obj{pool, oid}, marker), true);
         }
       }
@@ -557,7 +557,7 @@ public:
       }
 
       status->sync_info.state = rgw_data_sync_info::StateBuildingFullSyncMaps;
-      yield call(new WriteInfoCR(sync_env->async_rados, store->svc.sysobj,
+      yield call(new WriteInfoCR(sync_env->async_rados, store->svc()->sysobj,
                                  rgw_raw_obj{pool, sync_status_oid},
                                  status->sync_info));
       if (retcode < 0) {
@@ -573,9 +573,9 @@ public:
   }
 };
 
-RGWRemoteDataLog::RGWRemoteDataLog(const DoutPrefixProvider *dpp, RGWRados *_store,
+RGWRemoteDataLog::RGWRemoteDataLog(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *_store,
                                    RGWAsyncRadosProcessor *async_rados)
-  : RGWCoroutinesManager(_store->ctx(), _store->get_cr_registry()),
+  : RGWCoroutinesManager(_store->ctx(), _store->getRados()->get_cr_registry()),
       dpp(dpp), store(_store), async_rados(async_rados),
       http_manager(store->ctx(), completion_mgr),
       data_sync_cr(NULL),
@@ -647,7 +647,7 @@ void RGWRemoteDataLog::finish()
 int RGWRemoteDataLog::read_sync_status(rgw_data_sync_status *sync_status)
 {
   // cannot run concurrently with run_sync(), so run in a separate manager
-  RGWCoroutinesManager crs(store->ctx(), store->get_cr_registry());
+  RGWCoroutinesManager crs(store->ctx(), store->getRados()->get_cr_registry());
   RGWHTTPManager http_manager(store->ctx(), crs.get_completion_mgr());
   int ret = http_manager.start();
   if (ret < 0) {
@@ -664,7 +664,7 @@ int RGWRemoteDataLog::read_sync_status(rgw_data_sync_status *sync_status)
 int RGWRemoteDataLog::read_recovering_shards(const int num_shards, set<int>& recovering_shards)
 {
   // cannot run concurrently with run_sync(), so run in a separate manager
-  RGWCoroutinesManager crs(store->ctx(), store->get_cr_registry());
+  RGWCoroutinesManager crs(store->ctx(), store->getRados()->get_cr_registry());
   RGWHTTPManager http_manager(store->ctx(), crs.get_completion_mgr());
   int ret = http_manager.start();
   if (ret < 0) {
@@ -695,7 +695,7 @@ int RGWRemoteDataLog::init_sync_status(int num_shards)
   rgw_data_sync_status sync_status;
   sync_status.sync_info.num_shards = num_shards;
 
-  RGWCoroutinesManager crs(store->ctx(), store->get_cr_registry());
+  RGWCoroutinesManager crs(store->ctx(), store->getRados()->get_cr_registry());
   RGWHTTPManager http_manager(store->ctx(), crs.get_completion_mgr());
   int ret = http_manager.start();
   if (ret < 0) {
@@ -752,7 +752,7 @@ struct bucket_instance_meta_info {
 class RGWListBucketIndexesCR : public RGWCoroutine {
   RGWDataSyncEnv *sync_env;
 
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
 
   rgw_data_sync_status *sync_status;
   int num_shards;
@@ -792,7 +792,7 @@ public:
   int operate() override {
     reenter(this) {
       entries_index = new RGWShardedOmapCRManager(sync_env->async_rados, store, this, num_shards,
-                                                 store->svc.zone->get_zone_params().log_pool,
+                                                 store->svc()->zone->get_zone_params().log_pool,
                                                   oid_prefix);
       yield; // yield so OmapAppendCRs can start
 
@@ -829,10 +829,10 @@ public:
               char buf[16];
               snprintf(buf, sizeof(buf), ":%d", i);
               s = key + buf;
-              yield entries_index->append(s, store->svc.datalog_rados->get_log_shard_id(meta_info.data.get_bucket_info().bucket, i));
+              yield entries_index->append(s, store->svc()->datalog_rados->get_log_shard_id(meta_info.data.get_bucket_info().bucket, i));
             }
           } else {
-            yield entries_index->append(key, store->svc.datalog_rados->get_log_shard_id(meta_info.data.get_bucket_info().bucket, -1));
+            yield entries_index->append(key, store->svc()->datalog_rados->get_log_shard_id(meta_info.data.get_bucket_info().bucket, -1));
           }
         }
         truncated = result.truncated;
@@ -848,8 +848,8 @@ public:
           int shard_id = (int)iter->first;
           rgw_data_sync_marker& marker = iter->second;
           marker.total_entries = entries_index->get_total_entries(shard_id);
-          spawn(new RGWSimpleRadosWriteCR<rgw_data_sync_marker>(sync_env->async_rados, store->svc.sysobj,
-                                                                rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, RGWDataSyncStatusManager::shard_obj_name(sync_env->source_zone, shard_id)),
+          spawn(new RGWSimpleRadosWriteCR<rgw_data_sync_marker>(sync_env->async_rados, store->svc()->sysobj,
+                                                                rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, RGWDataSyncStatusManager::shard_obj_name(sync_env->source_zone, shard_id)),
                                                                 marker),
                 true);
         }
@@ -914,10 +914,10 @@ public:
     sync_marker.pos = index_pos;
 
     tn->log(20, SSTR("updating marker marker_oid=" << marker_oid << " marker=" << new_marker));
-    RGWRados *store = sync_env->store;
+    RGWRados *rados = sync_env->store->getRados();
 
-    return new RGWSimpleRadosWriteCR<rgw_data_sync_marker>(sync_env->async_rados, store->svc.sysobj,
-                                                           rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, marker_oid),
+    return new RGWSimpleRadosWriteCR<rgw_data_sync_marker>(sync_env->async_rados, rados->svc.sysobj,
+                                                           rgw_raw_obj(rados->svc.zone->get_zone_params().log_pool, marker_oid),
                                                            sync_marker);
   }
 
@@ -1251,9 +1251,8 @@ public:
     if (lease_cr) {
       lease_cr->abort();
     }
-    RGWRados *store = sync_env->store;
-    lease_cr.reset(new RGWContinuousLeaseCR(sync_env->async_rados, store,
-                                            rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, status_oid),
+    lease_cr.reset(new RGWContinuousLeaseCR(sync_env->async_rados, sync_env->store,
+                                            rgw_raw_obj(sync_env->store->svc()->zone->get_zone_params().log_pool, status_oid),
                                             lock_name, lock_duration, this));
     lease_stack.reset(spawn(lease_cr.get(), false));
   }
@@ -1333,9 +1332,9 @@ public:
         sync_marker.state = rgw_data_sync_marker::IncrementalSync;
         sync_marker.marker = sync_marker.next_step_marker;
         sync_marker.next_step_marker.clear();
-        RGWRados *store = sync_env->store;
-        call(new RGWSimpleRadosWriteCR<rgw_data_sync_marker>(sync_env->async_rados, store->svc.sysobj,
-                                                             rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, status_oid),
+        RGWRados *rados = sync_env->store->getRados();
+        call(new RGWSimpleRadosWriteCR<rgw_data_sync_marker>(sync_env->async_rados, rados->svc.sysobj,
+                                                             rgw_raw_obj(rados->svc.zone->get_zone_params().log_pool, status_oid),
                                                              sync_marker));
       }
       if (retcode < 0) {
@@ -1547,9 +1546,9 @@ public:
   }
 
   RGWCoroutine *alloc_finisher_cr() override {
-    RGWRados *store = sync_env->store;
-    return new RGWSimpleRadosReadCR<rgw_data_sync_marker>(sync_env->async_rados, store->svc.sysobj,
-                                                          rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, RGWDataSyncStatusManager::shard_obj_name(sync_env->source_zone, shard_id)),
+    RGWRados *rados = sync_env->store->getRados();
+    return new RGWSimpleRadosReadCR<rgw_data_sync_marker>(sync_env->async_rados, rados->svc.sysobj,
+                                                          rgw_raw_obj(rados->svc.zone->get_zone_params().log_pool, RGWDataSyncStatusManager::shard_obj_name(sync_env->source_zone, shard_id)),
                                                           &sync_marker);
   }
 
@@ -1662,7 +1661,7 @@ public:
           tn->log(10, SSTR("spawning " << num_shards << " shards sync"));
           for (map<uint32_t, rgw_data_sync_marker>::iterator iter = sync_status.sync_markers.begin();
                iter != sync_status.sync_markers.end(); ++iter) {
-            RGWDataSyncShardControlCR *cr = new RGWDataSyncShardControlCR(sync_env, sync_env->store->svc.zone->get_zone_params().log_pool,
+            RGWDataSyncShardControlCR *cr = new RGWDataSyncShardControlCR(sync_env, sync_env->store->svc()->zone->get_zone_params().log_pool,
                                                                           iter->first, iter->second, tn);
             cr->get();
             shard_crs_lock.lock();
@@ -1679,9 +1678,9 @@ public:
   }
 
   RGWCoroutine *set_sync_info_cr() {
-    RGWRados *store = sync_env->store;
-    return new RGWSimpleRadosWriteCR<rgw_data_sync_info>(sync_env->async_rados, store->svc.sysobj,
-                                                         rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, RGWDataSyncStatusManager::sync_status_oid(sync_env->source_zone)),
+    RGWRados *rados = sync_env->store->getRados();
+    return new RGWSimpleRadosWriteCR<rgw_data_sync_info>(sync_env->async_rados, rados->svc.sysobj,
+                                                         rgw_raw_obj(rados->svc.zone->get_zone_params().log_pool, RGWDataSyncStatusManager::sync_status_oid(sync_env->source_zone)),
                                                          sync_status.sync_info);
   }
 
@@ -1786,7 +1785,7 @@ RGWCoroutine *RGWArchiveDataSyncModule::sync_object(RGWDataSyncEnv *sync_env, RG
      (bucket_info.flags & BUCKET_VERSIONS_SUSPENDED)) {
       ldout(sync_env->cct, 0) << "SYNC_ARCHIVE: sync_object: enabling object versioning for archive bucket" << dendl;
       bucket_info.flags = (bucket_info.flags & ~BUCKET_VERSIONS_SUSPENDED) | BUCKET_VERSIONED;
-      int op_ret = sync_env->store->put_bucket_instance_info(bucket_info, false, real_time(), NULL);
+      int op_ret = sync_env->store->getRados()->put_bucket_instance_info(bucket_info, false, real_time(), NULL);
       if (op_ret < 0) {
          ldout(sync_env->cct, 0) << "SYNC_ARCHIVE: sync_object: error versioning archive bucket" << dendl;
          return NULL;
@@ -1799,7 +1798,7 @@ RGWCoroutine *RGWArchiveDataSyncModule::sync_object(RGWDataSyncEnv *sync_env, RG
     versioned_epoch = 0;
     dest_key = key;
     if (key.instance.empty()) {
-      sync_env->store->gen_rand_obj_instance_name(&(*dest_key));
+      sync_env->store->getRados()->gen_rand_obj_instance_name(&(*dest_key));
     }
   }
 
@@ -1905,22 +1904,22 @@ int RGWDataSyncStatusManager::init()
 {
   RGWZone *zone_def;
 
-  if (!store->svc.zone->find_zone_by_id(source_zone, &zone_def)) {
+  if (!store->svc()->zone->find_zone_by_id(source_zone, &zone_def)) {
     ldpp_dout(this, 0) << "ERROR: failed to find zone config info for zone=" << source_zone << dendl;
     return -EIO;
   }
 
-  if (!store->svc.sync_modules->get_manager()->supports_data_export(zone_def->tier_type)) {
+  if (!store->svc()->sync_modules->get_manager()->supports_data_export(zone_def->tier_type)) {
     return -ENOTSUP;
   }
 
-  const RGWZoneParams& zone_params = store->svc.zone->get_zone_params();
+  const RGWZoneParams& zone_params = store->svc()->zone->get_zone_params();
 
   if (sync_module == nullptr) { 
-    sync_module = store->get_sync_module();
+    sync_module = store->getRados()->get_sync_module();
   }
 
-  conn = store->svc.zone->get_zone_conn_by_id(source_zone);
+  conn = store->svc()->zone->get_zone_conn_by_id(source_zone);
   if (!conn) {
     ldpp_dout(this, 0) << "connection object to zone " << source_zone << " does not exist" << dendl;
     return -EINVAL;
@@ -1928,7 +1927,7 @@ int RGWDataSyncStatusManager::init()
 
   error_logger = new RGWSyncErrorLogger(store, RGW_SYNC_ERROR_LOG_SHARD_PREFIX, ERROR_LOGGER_SHARDS);
 
-  int r = source_log.init(source_zone, conn, error_logger, store->get_sync_tracer(),
+  int r = source_log.init(source_zone, conn, error_logger, store->getRados()->get_sync_tracer(),
                           sync_module, counters);
   if (r < 0) {
     ldpp_dout(this, 0) << "ERROR: failed to init remote log, r=" << r << dendl;
@@ -1986,11 +1985,12 @@ string RGWDataSyncStatusManager::shard_obj_name(const string& source_zone, int s
   return string(buf);
 }
 
-RGWRemoteBucketLog::RGWRemoteBucketLog(const DoutPrefixProvider *_dpp, RGWRados *_store,
+RGWRemoteBucketLog::RGWRemoteBucketLog(const DoutPrefixProvider *_dpp,
+                                      rgw::sal::RGWRadosStore *_store,
                                        RGWBucketSyncStatusManager *_sm,
                                        RGWAsyncRadosProcessor *_async_rados,
                                        RGWHTTPManager *_http_manager)
-    : RGWCoroutinesManager(_store->ctx(), _store->get_cr_registry()),
+    : RGWCoroutinesManager(_store->ctx(), _store->getRados()->get_cr_registry()),
       dpp(_dpp), store(_store), status_manager(_sm),
       async_rados(_async_rados), http_manager(_http_manager)
 {
@@ -2074,7 +2074,7 @@ public:
       }
       yield {
         auto store = sync_env->store;
-        rgw_raw_obj obj(store->svc.zone->get_zone_params().log_pool, sync_status_oid);
+        rgw_raw_obj obj(store->svc()->zone->get_zone_params().log_pool, sync_status_oid);
 
         if (info.syncstopped) {
           call(new RGWRadosRemoveCR(store, obj));
@@ -2089,7 +2089,7 @@ public:
           }
           map<string, bufferlist> attrs;
           status.encode_all_attrs(attrs);
-          call(new RGWSimpleRadosWriteAttrsCR(sync_env->async_rados, store->svc.sysobj, obj, attrs));
+          call(new RGWSimpleRadosWriteAttrsCR(sync_env->async_rados, store->svc()->sysobj, obj, attrs));
         }
       }
       if (info.syncstopped) {
@@ -2187,8 +2187,8 @@ public:
 int RGWReadBucketSyncStatusCoroutine::operate()
 {
   reenter(this) {
-    yield call(new RGWSimpleRadosReadAttrsCR(sync_env->async_rados, sync_env->store->svc.sysobj,
-                                             rgw_raw_obj(sync_env->store->svc.zone->get_zone_params().log_pool, oid),
+    yield call(new RGWSimpleRadosReadAttrsCR(sync_env->async_rados, sync_env->store->svc()->sysobj,
+                                             rgw_raw_obj(sync_env->store->svc()->zone->get_zone_params().log_pool, oid),
                                              &attrs, true));
     if (retcode == -ENOENT) {
       *status = rgw_bucket_shard_sync_info();
@@ -2207,7 +2207,7 @@ int RGWReadBucketSyncStatusCoroutine::operate()
 #define OMAP_READ_MAX_ENTRIES 10
 class RGWReadRecoveringBucketShardsCoroutine : public RGWCoroutine {
   RGWDataSyncEnv *sync_env;
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   
   const int shard_id;
   int max_entries;
@@ -2241,7 +2241,7 @@ int RGWReadRecoveringBucketShardsCoroutine::operate()
     count = 0;
     do {
       omapkeys = std::make_shared<RGWRadosGetOmapKeysCR::Result>();
-      yield call(new RGWRadosGetOmapKeysCR(store, rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, error_oid), 
+      yield call(new RGWRadosGetOmapKeysCR(store, rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, error_oid),
             marker, max_omap_entries, omapkeys));
 
       if (retcode == -ENOENT) {
@@ -2273,7 +2273,7 @@ int RGWReadRecoveringBucketShardsCoroutine::operate()
 
 class RGWReadPendingBucketShardsCoroutine : public RGWCoroutine {
   RGWDataSyncEnv *sync_env;
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   
   const int shard_id;
   int max_entries;
@@ -2308,8 +2308,8 @@ int RGWReadPendingBucketShardsCoroutine::operate()
   reenter(this){
     //read sync status marker
     using CR = RGWSimpleRadosReadCR<rgw_data_sync_marker>;
-    yield call(new CR(sync_env->async_rados, store->svc.sysobj,
-                      rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, status_oid),
+    yield call(new CR(sync_env->async_rados, store->svc()->sysobj,
+                      rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, status_oid),
                       sync_marker));
     if (retcode < 0) {
       ldout(sync_env->cct,0) << "failed to read sync status marker with " 
@@ -2353,7 +2353,7 @@ int RGWReadPendingBucketShardsCoroutine::operate()
 int RGWRemoteDataLog::read_shard_status(int shard_id, set<string>& pending_buckets, set<string>& recovering_buckets, rgw_data_sync_marker *sync_marker, const int max_entries)
 {
   // cannot run concurrently with run_sync(), so run in a separate manager
-  RGWCoroutinesManager crs(store->ctx(), store->get_cr_registry());
+  RGWCoroutinesManager crs(store->ctx(), store->getRados()->get_cr_registry());
   RGWHTTPManager http_manager(store->ctx(), crs.get_completion_mgr());
   int ret = http_manager.start();
   if (ret < 0) {
@@ -2379,14 +2379,14 @@ RGWCoroutine *RGWRemoteBucketLog::read_sync_status_cr(rgw_bucket_shard_sync_info
   return new RGWReadBucketSyncStatusCoroutine(&sync_env, bs, sync_status);
 }
 
-RGWBucketSyncStatusManager::RGWBucketSyncStatusManager(RGWRados *_store, const string& _source_zone,
-                                                       const rgw_bucket& bucket) : store(_store),
-                                                                                     cr_mgr(_store->ctx(), _store->get_cr_registry()),
-                                                                                     http_manager(store->ctx(), cr_mgr.get_completion_mgr()),
-                                                                                     source_zone(_source_zone),
-                                                                                     conn(NULL), error_logger(NULL),
-                                                                                     bucket(bucket),
-                                                                                     num_shards(0)
+RGWBucketSyncStatusManager::RGWBucketSyncStatusManager(rgw::sal::RGWRadosStore *_store, const string& _source_zone,
+                                                      const rgw_bucket& bucket) : store(_store),
+                                                                                   cr_mgr(_store->ctx(), _store->getRados()->get_cr_registry()),
+                                                                                   http_manager(store->ctx(), cr_mgr.get_completion_mgr()),
+                                                                                   source_zone(_source_zone),
+                                                                                   conn(NULL), error_logger(NULL),
+                                                                                   bucket(bucket),
+                                                                                   num_shards(0)
 {
 }
 
@@ -2591,11 +2591,11 @@ public:
     map<string, bufferlist> attrs;
     sync_marker.encode_attr(attrs);
 
-    RGWRados *store = sync_env->store;
+    RGWRados *rados = sync_env->store->getRados();
 
     tn->log(20, SSTR("updating marker marker_oid=" << marker_oid << " marker=" << new_marker));
-    return new RGWSimpleRadosWriteAttrsCR(sync_env->async_rados, store->svc.sysobj,
-                                          rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, marker_oid),
+    return new RGWSimpleRadosWriteAttrsCR(sync_env->async_rados, rados->svc.sysobj,
+                                          rgw_raw_obj(rados->svc.zone->get_zone_params().log_pool, marker_oid),
                                           attrs);
   }
 
@@ -2653,12 +2653,12 @@ public:
     map<string, bufferlist> attrs;
     sync_marker.encode_attr(attrs);
 
-    RGWRados *store = sync_env->store;
+    RGWRados *rados = sync_env->store->getRados();
 
     tn->log(20, SSTR("updating marker marker_oid=" << marker_oid << " marker=" << new_marker));
     return new RGWSimpleRadosWriteAttrsCR(sync_env->async_rados,
-                                          store->svc.sysobj,
-                                          rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, marker_oid),
+                                          rados->svc.sysobj,
+                                          rgw_raw_obj(rados->svc.zone->get_zone_params().log_pool, marker_oid),
                                           attrs);
   }
 
@@ -2761,7 +2761,7 @@ public:
     data_sync_module = sync_env->sync_module->get_data_handler();
     
     zones_trace = _zones_trace;
-    zones_trace.insert(sync_env->store->svc.zone->get_zone().id);
+    zones_trace.insert(sync_env->store->svc()->zone->get_zone().id);
   }
 
   int operate() override {
@@ -2970,9 +2970,9 @@ int RGWBucketShardFullSyncCR::operate()
         sync_info.state = rgw_bucket_shard_sync_info::StateIncrementalSync;
         map<string, bufferlist> attrs;
         sync_info.encode_state_attr(attrs);
-        RGWRados *store = sync_env->store;
-        call(new RGWSimpleRadosWriteAttrsCR(sync_env->async_rados, store->svc.sysobj,
-                                            rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, status_oid),
+        RGWRados *rados = sync_env->store->getRados();
+        call(new RGWSimpleRadosWriteAttrsCR(sync_env->async_rados, rados->svc.sysobj,
+                                            rgw_raw_obj(rados->svc.zone->get_zone_params().log_pool, status_oid),
                                             attrs));
       }
     } else {
@@ -3028,7 +3028,7 @@ public:
     : RGWCoroutine(_sync_env->cct), sync_env(_sync_env), bs(bs),
       bucket_info(_bucket_info), lease_cr(lease_cr), sync_info(sync_info),
       marker_tracker(sync_env, status_oid, sync_info.inc_marker),
-      status_oid(status_oid), zone_id(_sync_env->store->svc.zone->get_zone().id),
+      status_oid(status_oid), zone_id(_sync_env->store->svc()->zone->get_zone().id),
       tn(sync_env->sync_tracer->add_node(_tn_parent, "inc_sync",
                                          SSTR(bucket_shard_str{bs})))
   {
@@ -3273,9 +3273,8 @@ int RGWRunBucketSyncCoroutine::operate()
   reenter(this) {
     yield {
       set_status("acquiring sync lock");
-      auto store = sync_env->store;
-      lease_cr.reset(new RGWContinuousLeaseCR(sync_env->async_rados, store,
-                                              rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, status_oid),
+      lease_cr.reset(new RGWContinuousLeaseCR(sync_env->async_rados, sync_env->store,
+                                              rgw_raw_obj(sync_env->store->svc()->zone->get_zone_params().log_pool, status_oid),
                                               "sync_lock",
                                               cct->_conf->rgw_sync_lease_period,
                                               this));
@@ -3310,7 +3309,7 @@ int RGWRunBucketSyncCoroutine::operate()
         tn->log(10, SSTR("no local info for bucket:" << ": fetching metadata"));
         string raw_key = string("bucket.instance:") + bs.bucket.get_key();
 
-        meta_sync_env.init(sync_env->dpp, cct, sync_env->store, sync_env->store->svc.zone->get_master_conn(), sync_env->async_rados,
+        meta_sync_env.init(sync_env->dpp, cct, sync_env->store, sync_env->store->svc()->zone->get_master_conn(), sync_env->async_rados,
                            sync_env->http_manager, sync_env->error_logger, sync_env->sync_tracer);
 
         call(new RGWMetaSyncSingleEntryCR(&meta_sync_env, raw_key,
@@ -3395,7 +3394,7 @@ RGWCoroutine *RGWRemoteBucketLog::run_sync_cr()
 
 int RGWBucketSyncStatusManager::init()
 {
-  conn = store->svc.zone->get_zone_conn_by_id(source_zone);
+  conn = store->svc()->zone->get_zone_conn_by_id(source_zone);
   if (!conn) {
     ldpp_dout(this, 0) << "connection object to zone " << source_zone << " does not exist" << dendl;
     return -EINVAL;
@@ -3431,11 +3430,11 @@ int RGWBucketSyncStatusManager::init()
 
   int effective_num_shards = (num_shards ? num_shards : 1);
 
-  auto async_rados = store->svc.rados->get_async_processor();
+  auto async_rados = store->svc()->rados->get_async_processor();
 
   for (int i = 0; i < effective_num_shards; i++) {
     RGWRemoteBucketLog *l = new RGWRemoteBucketLog(this, store, this, async_rados, &http_manager);
-    ret = l->init(source_zone, conn, bucket, (num_shards ? i : -1), error_logger, store->get_sync_tracer(), sync_module);
+    ret = l->init(source_zone, conn, bucket, (num_shards ? i : -1), error_logger, store->getRados()->get_sync_tracer(), sync_module);
     if (ret < 0) {
       ldpp_dout(this, 0) << "ERROR: failed to initialize RGWRemoteBucketLog object" << dendl;
       return ret;
@@ -3532,7 +3531,7 @@ string RGWBucketSyncStatusManager::obj_status_oid(const string& source_zone,
 
 class RGWCollectBucketSyncStatusCR : public RGWShardCollectCR {
   static constexpr int max_concurrent_shards = 16;
-  RGWRados *const store;
+  rgw::sal::RGWRadosStore *const store;
   RGWDataSyncEnv *const env;
   const int num_shards;
   rgw_bucket_shard bs;
@@ -3541,7 +3540,7 @@ class RGWCollectBucketSyncStatusCR : public RGWShardCollectCR {
   Vector::iterator i, end;
 
  public:
-  RGWCollectBucketSyncStatusCR(RGWRados *store, RGWDataSyncEnv *env,
+  RGWCollectBucketSyncStatusCR(rgw::sal::RGWRadosStore *store, RGWDataSyncEnv *env,
                                int num_shards, const rgw_bucket& bucket,
                                Vector *status)
     : RGWShardCollectCR(store->ctx(), max_concurrent_shards),
@@ -3561,7 +3560,7 @@ class RGWCollectBucketSyncStatusCR : public RGWShardCollectCR {
   }
 };
 
-int rgw_bucket_sync_status(const DoutPrefixProvider *dpp, RGWRados *store, const std::string& source_zone,
+int rgw_bucket_sync_status(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, const std::string& source_zone,
                            const RGWBucketInfo& bucket_info,
                            std::vector<rgw_bucket_shard_sync_info> *status)
 {
@@ -3571,10 +3570,10 @@ int rgw_bucket_sync_status(const DoutPrefixProvider *dpp, RGWRados *store, const
 
   RGWDataSyncEnv env;
   RGWSyncModuleInstanceRef module; // null sync module
-  env.init(dpp, store->ctx(), store, nullptr, store->svc.rados->get_async_processor(),
+  env.init(dpp, store->ctx(), store, nullptr, store->svc()->rados->get_async_processor(),
            nullptr, nullptr, nullptr, source_zone, module, nullptr);
 
-  RGWCoroutinesManager crs(store->ctx(), store->get_cr_registry());
+  RGWCoroutinesManager crs(store->ctx(), store->getRados()->get_cr_registry());
   return crs.run(new RGWCollectBucketSyncStatusCR(store, &env, num_shards,
                                                   bucket_info.bucket, status));
 }
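
A second recurring change in the sync code above is in constructor signatures: coroutine managers that used to take the raw store pointer now take the wrapper, and anything previously read straight off the raw type (the CephContext, the coroutine registry) is fetched through it, either via the wrapper's ctx() or via getRados(). A simplified sketch in the same shape, again with stand-in types rather than the real RGWCoroutinesManager and friends:

// Stand-in types; illustrative only.
struct CephContextStub {};
struct CrRegistryStub {};
struct RadosBackendStub {
  CrRegistryStub registry;
  CrRegistryStub* get_cr_registry() { return &registry; }
};
struct StoreStub {                        // role of rgw::sal::RGWRadosStore
  CephContextStub cct;
  RadosBackendStub backend;
  CephContextStub* ctx() { return &cct; }
  RadosBackendStub* getRados() { return &backend; }
};

class ManagerStub {                       // role of e.g. RGWRemoteDataLog's base class setup
  StoreStub* store;
  CrRegistryStub* registry;
public:
  explicit ManagerStub(StoreStub* _store)
    : store(_store),
      registry(_store->getRados()->get_cr_registry())  // was _store->get_cr_registry() on the raw type
  {
    (void)store->ctx();                   // the wrapper still exposes the CephContext directly
  }
};

Where a single coroutine body makes many raw calls in one scope, the diff instead introduces a local handle (RGWRados *rados = sync_env->store->getRados();) and keeps using rados->svc.* there, rather than repeating the accessor on every line.
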
index 5470585fc29e308f5bcf0c9b2729897192ddd070..9ce4180adde1575567f1c7c2cc6d258852a30e7f 100644 (file)
@@ -11,6 +11,7 @@
 
 #include "rgw_coroutine.h"
 #include "rgw_http_client.h"
+#include "rgw_sal.h"
 
 #include "rgw_sync_module.h"
 #include "rgw_sync_trace.h"
@@ -234,7 +235,7 @@ class RGWRESTConn;
 struct RGWDataSyncEnv {
   const DoutPrefixProvider *dpp{nullptr};
   CephContext *cct{nullptr};
-  RGWRados *store{nullptr};
+  rgw::sal::RGWRadosStore *store{nullptr};
   RGWRESTConn *conn{nullptr};
   RGWAsyncRadosProcessor *async_rados{nullptr};
   RGWHTTPManager *http_manager{nullptr};
@@ -246,7 +247,7 @@ struct RGWDataSyncEnv {
 
   RGWDataSyncEnv() {}
 
-  void init(const DoutPrefixProvider *_dpp, CephContext *_cct, RGWRados *_store, RGWRESTConn *_conn,
+  void init(const DoutPrefixProvider *_dpp, CephContext *_cct, rgw::sal::RGWRadosStore *_store, RGWRESTConn *_conn,
             RGWAsyncRadosProcessor *_async_rados, RGWHTTPManager *_http_manager,
             RGWSyncErrorLogger *_error_logger, RGWSyncTraceManager *_sync_tracer,
             const string& _source_zone, RGWSyncModuleInstanceRef& _sync_module,
@@ -273,7 +274,7 @@ class RGWDataChangesLogInfo;
 
 class RGWRemoteDataLog : public RGWCoroutinesManager {
   const DoutPrefixProvider *dpp;
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   RGWAsyncRadosProcessor *async_rados;
   RGWHTTPManager http_manager;
 
@@ -287,7 +288,7 @@ class RGWRemoteDataLog : public RGWCoroutinesManager {
   bool initialized;
 
 public:
-  RGWRemoteDataLog(const DoutPrefixProvider *dpp, RGWRados *_store,
+  RGWRemoteDataLog(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *_store,
                    RGWAsyncRadosProcessor *async_rados);
   int init(const string& _source_zone, RGWRESTConn *_conn, RGWSyncErrorLogger *_error_logger,
            RGWSyncTraceManager *_sync_tracer, RGWSyncModuleInstanceRef& module,
@@ -307,7 +308,7 @@ public:
 };
 
 class RGWDataSyncStatusManager : public DoutPrefixProvider {
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
 
   string source_zone;
   RGWRESTConn *conn;
@@ -325,12 +326,12 @@ class RGWDataSyncStatusManager : public DoutPrefixProvider {
   int num_shards;
 
 public:
-  RGWDataSyncStatusManager(RGWRados *_store, RGWAsyncRadosProcessor *async_rados,
+  RGWDataSyncStatusManager(rgw::sal::RGWRadosStore *_store, RGWAsyncRadosProcessor *async_rados,
                            const string& _source_zone, PerfCounters* counters)
     : store(_store), source_zone(_source_zone), conn(NULL), error_logger(NULL),
       sync_module(nullptr), counters(counters),
       source_log(this, store, async_rados), num_shards(0) {}
-  RGWDataSyncStatusManager(RGWRados *_store, RGWAsyncRadosProcessor *async_rados,
+  RGWDataSyncStatusManager(rgw::sal::RGWRadosStore *_store, RGWAsyncRadosProcessor *async_rados,
                            const string& _source_zone, PerfCounters* counters,
                            const RGWSyncModuleInstanceRef& _sync_module)
     : store(_store), source_zone(_source_zone), conn(NULL), error_logger(NULL),
@@ -495,7 +496,7 @@ struct rgw_bucket_index_marker_info {
 
 class RGWRemoteBucketLog : public RGWCoroutinesManager {
   const DoutPrefixProvider *dpp;
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   RGWRESTConn *conn{nullptr};
   string source_zone;
   rgw_bucket_shard bs;
@@ -510,7 +511,7 @@ class RGWRemoteBucketLog : public RGWCoroutinesManager {
   RGWBucketSyncCR *sync_cr{nullptr};
 
 public:
-  RGWRemoteBucketLog(const DoutPrefixProvider *_dpp, RGWRados *_store,
+  RGWRemoteBucketLog(const DoutPrefixProvider *_dpp, rgw::sal::RGWRadosStore *_store,
                      RGWBucketSyncStatusManager *_sm,
                      RGWAsyncRadosProcessor *_async_rados,
                      RGWHTTPManager *_http_manager);
@@ -530,7 +531,7 @@ public:
 };
 
 class RGWBucketSyncStatusManager : public DoutPrefixProvider {
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
 
   RGWCoroutinesManager cr_mgr;
 
@@ -554,7 +555,7 @@ class RGWBucketSyncStatusManager : public DoutPrefixProvider {
   int num_shards;
 
 public:
-  RGWBucketSyncStatusManager(RGWRados *_store, const string& _source_zone,
+  RGWBucketSyncStatusManager(rgw::sal::RGWRadosStore *_store, const string& _source_zone,
                              const rgw_bucket& bucket);
   ~RGWBucketSyncStatusManager();
 
@@ -576,7 +577,7 @@ public:
 };
 
 /// read the sync status of all bucket shards from the given source zone
-int rgw_bucket_sync_status(const DoutPrefixProvider *dpp, RGWRados *store, const std::string& source_zone,
+int rgw_bucket_sync_status(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, const std::string& source_zone,
                            const RGWBucketInfo& bucket_info,
                            std::vector<rgw_bucket_shard_sync_info> *status);
 
index df533e9425d8e0df5b173af17dee4c0ce47ac158..a4414fd9dda113f4d66a2bb82d559bfccdc00fdc 100644 (file)
@@ -1515,7 +1515,7 @@ namespace rgw {
     struct req_state* s = get_state();
 
     auto compression_type =
-      get_store()->svc.zone->get_zone_params().get_compression_type(
+      get_store()->svc()->zone->get_zone_params().get_compression_type(
        s->bucket_info.placement_rule);
 
     /* not obviously supportable */
@@ -1551,7 +1551,7 @@ namespace rgw {
       if (!version_id.empty()) {
         obj.key.set_instance(version_id);
       } else {
-        get_store()->gen_rand_obj_instance_name(&obj);
+        get_store()->getRados()->gen_rand_obj_instance_name(&obj);
         version_id = obj.key.instance;
       }
     }
@@ -1597,7 +1597,7 @@ namespace rgw {
       return -EIO;
     }
 
-    op_ret = get_store()->check_quota(s->bucket_owner.get_id(), s->bucket,
+    op_ret = get_store()->getRados()->check_quota(s->bucket_owner.get_id(), s->bucket,
                                       user_quota, bucket_quota, real_ofs, true);
     /* max_size exceed */
     if (op_ret < 0)
@@ -1640,14 +1640,14 @@ namespace rgw {
       goto done;
     }
 
-    op_ret = get_store()->check_quota(s->bucket_owner.get_id(), s->bucket,
+    op_ret = get_store()->getRados()->check_quota(s->bucket_owner.get_id(), s->bucket,
                                      user_quota, bucket_quota, s->obj_size, true);
     /* max_size exceed */
     if (op_ret < 0) {
       goto done;
     }
 
-    op_ret = get_store()->check_bucket_shards(s->bucket_info, s->bucket,
+    op_ret = get_store()->getRados()->check_bucket_shards(s->bucket_info, s->bucket,
                                              bucket_quota);
     if (op_ret < 0) {
       goto done;
index f3cb3bf960aff7927482bc405b1afd30e4fd8a08..beaa2cc9b0d942437f5cdcb65980003358206f8e 100644 (file)
@@ -972,8 +972,8 @@ namespace rgw {
       (void) fh_lru.unref(fh, cohort::lru::FLAG_NONE);
     }
 
-    int authorize(RGWRados* store) {
-      int ret = store->ctl.user->get_info_by_access_key(key.id, &user, null_yield);
+    int authorize(rgw::sal::RGWRadosStore* store) {
+      int ret = store->ctl()->user->get_info_by_access_key(key.id, &user, null_yield);
       if (ret == 0) {
        RGWAccessKey* k = user.get_key(key.id);
        if (!k || (k->key != key.key))
@@ -992,8 +992,8 @@ namespace rgw {
        }
        if (token.valid() && (ldh->auth(token.id, token.key) == 0)) {
          /* try to store user if it doesn't already exist */
-         if (store->ctl.user->get_info_by_uid(token.id, &user, null_yield) < 0) {
-           int ret = store->ctl.user->store_info(user, null_yield,
+         if (store->ctl()->user->get_info_by_uid(token.id, &user, null_yield) < 0) {
+           int ret = store->ctl()->user->store_info(user, null_yield,
                                                   RGWUserCtl::PutParams()
                                                   .set_exclusive(true));
            if (ret < 0) {
@@ -1277,7 +1277,7 @@ namespace rgw {
 
     void update_user() {
       RGWUserInfo _user = user;
-      auto user_ctl = rgwlib.get_store()->ctl.user;
+      auto user_ctl = rgwlib.get_store()->ctl()->user;
       int ret = user_ctl->get_info_by_access_key(key.id, &user, null_yield);
       if (ret != 0)
         user = _user;
index b1db5adc44f05079d03da5d12d0c3e900e533ea7..7fa332839b766d58a2a59aeef51535de6f9baf9e 100644 (file)
@@ -79,7 +79,7 @@ public:
   virtual void join() = 0;
 
   virtual void pause_for_new_config() = 0;
-  virtual void unpause_with_new_config(RGWRados* store,
+  virtual void unpause_with_new_config(rgw::sal::RGWRadosStore* store,
                                        rgw_auth_registry_ptr_t auth_registry) = 0;
 };
 
@@ -142,7 +142,7 @@ public:
     env.mutex.get_write();
   }
 
-  void unpause_with_new_config(RGWRados* const store,
+  void unpause_with_new_config(rgw::sal::RGWRadosStore* const store,
                                rgw_auth_registry_ptr_t auth_registry) override {
     env.store = store;
     env.auth_registry = std::move(auth_registry);
@@ -185,7 +185,7 @@ public:
     pprocess->pause();
   }
 
-  void unpause_with_new_config(RGWRados* const store,
+  void unpause_with_new_config(rgw::sal::RGWRadosStore* const store,
                                rgw_auth_registry_ptr_t auth_registry) override {
     env.store = store;
     env.auth_registry = auth_registry;
@@ -229,7 +229,7 @@ public:
     rgw_user uid(uid_str);
 
     RGWUserInfo user_info;
-    int ret = env.store->ctl.user->get_info_by_uid(uid, &user_info, null_yield);
+    int ret = env.store->ctl()->user->get_info_by_uid(uid, &user_info, null_yield);
     if (ret < 0) {
       derr << "ERROR: failed reading user info: uid=" << uid << " ret="
           << ret << dendl;
@@ -269,11 +269,11 @@ class RGWFrontendPauser : public RGWRealmReloader::Pauser {
     if (pauser)
       pauser->pause();
   }
-  void resume(RGWRados *store) override {
+  void resume(rgw::sal::RGWRadosStore *store) override {
     /* Initialize the registry of auth strategies which will coordinate
      * the dynamic reconfiguration. */
     auto auth_registry = \
-      rgw::auth::StrategyRegistry::create(g_ceph_context, implicit_tenants, store->pctl);
+      rgw::auth::StrategyRegistry::create(g_ceph_context, implicit_tenants, store->getRados()->pctl);
 
     for (auto frontend : frontends)
       frontend->unpause_with_new_config(store, auth_registry);
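
The frontend hunks above show the wrapper type also flowing through virtual interfaces (pause/unpause, realm-reload resume), with members that still live only on the raw store reached via getRados(). A minimal sketch of that shape, with stand-in names rather than the real RGWFrontend / RGWRealmReloader classes:

// Stand-ins; illustrative only.
struct RawStore { int* pctl = nullptr; };              // raw-only member, like RGWRados::pctl
struct Store {
  RawStore raw;
  RawStore* getRados() { return &raw; }
};

struct FrontendIface {
  virtual void unpause_with_new_config(Store* store) = 0;   // signature took the raw type before
  virtual ~FrontendIface() = default;
};

struct Frontend : FrontendIface {
  Store* env_store = nullptr;
  void unpause_with_new_config(Store* store) override {
    env_store = store;                                  // the wrapper is what gets stored in env
    (void)store->getRados()->pctl;                      // anything not yet wrapped goes through getRados()
  }
};
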
index c2aad6481d4f59996916e98d19f3a4da56749a8e..b9702318d924a3feb69e441c1386ef45b98c4f4b 100644 (file)
@@ -42,6 +42,7 @@ const char* LC_STATUS[] = {
 };
 
 using namespace librados;
+using namespace rgw::sal;
 
 bool LCRule::valid() const
 {
@@ -221,7 +222,7 @@ void *RGWLC::LCWorker::entry() {
   return NULL;
 }
 
-void RGWLC::initialize(CephContext *_cct, RGWRados *_store) {
+void RGWLC::initialize(CephContext *_cct, RGWRadosStore *_store) {
   cct = _cct;
   store = _store;
   max_objs = cct->_conf->rgw_lc_max_objs;
@@ -280,13 +281,13 @@ int RGWLC::bucket_lc_prepare(int index)
 
 #define MAX_LC_LIST_ENTRIES 100
   do {
-    int ret = cls_rgw_lc_list(store->lc_pool_ctx, obj_names[index], marker, MAX_LC_LIST_ENTRIES, entries);
+    int ret = cls_rgw_lc_list(store->getRados()->lc_pool_ctx, obj_names[index], marker, MAX_LC_LIST_ENTRIES, entries);
     if (ret < 0)
       return ret;
     map<string, int>::iterator iter;
     for (iter = entries.begin(); iter != entries.end(); ++iter) {
       pair<string, int > entry(iter->first, lc_uninitial);
-      ret = cls_rgw_lc_set_entry(store->lc_pool_ctx, obj_names[index],  entry);
+      ret = cls_rgw_lc_set_entry(store->getRados()->lc_pool_ctx, obj_names[index],  entry);
       if (ret < 0) {
         ldpp_dout(this, 0) << "RGWLC::bucket_lc_prepare() failed to set entry on "
             << obj_names[index] << dendl;
@@ -468,7 +469,7 @@ static inline bool has_all_tags(const lc_op& rule_action,
 }
 
 class LCObjsLister {
-  RGWRados *store;
+  RGWRadosStore *store;
   RGWBucketInfo& bucket_info;
   RGWRados::Bucket target;
   RGWRados::Bucket::List list_op;
@@ -481,9 +482,9 @@ class LCObjsLister {
   int64_t delay_ms;
 
 public:
-  LCObjsLister(RGWRados *_store, RGWBucketInfo& _bucket_info) :
+  LCObjsLister(RGWRadosStore *_store, RGWBucketInfo& _bucket_info) :
       store(_store), bucket_info(_bucket_info),
-      target(store, bucket_info), list_op(&target) {
+      target(store->getRados(), bucket_info), list_op(&target) {
     list_op.params.list_versions = bucket_info.versioned();
     list_op.params.allow_unordered = true;
     delay_ms = store->ctx()->_conf.get_val<int64_t>("rgw_lc_thread_delay");
@@ -559,12 +560,12 @@ public:
 
 struct op_env {
   lc_op& op;
-  RGWRados *store;
+  RGWRadosStore *store;
   RGWLC *lc;
   RGWBucketInfo& bucket_info;
   LCObjsLister& ol;
 
-  op_env(lc_op& _op, RGWRados *_store, RGWLC *_lc, RGWBucketInfo& _bucket_info,
+  op_env(lc_op& _op, RGWRadosStore *_store, RGWLC *_lc, RGWBucketInfo& _bucket_info,
          LCObjsLister& _ol) : op(_op), store(_store), lc(_lc), bucket_info(_bucket_info), ol(_ol) {}
 };
 
@@ -575,7 +576,7 @@ struct lc_op_ctx {
   op_env& env;
   rgw_bucket_dir_entry& o;
 
-  RGWRados *store;
+  RGWRadosStore *store;
   RGWBucketInfo& bucket_info;
   lc_op& op;
   LCObjsLister& ol;
@@ -608,7 +609,7 @@ static int remove_expired_obj(lc_op_ctx& oc, bool remove_indeed)
   obj_owner.set_id(rgw_user {meta.owner});
   obj_owner.set_name(meta.owner_display_name);
 
-  RGWRados::Object del_target(store, bucket_info, oc.rctx, obj);
+  RGWRados::Object del_target(store->getRados(), bucket_info, oc.rctx, obj);
   RGWRados::Object::Delete del_op(&del_target);
 
   del_op.params.bucket_owner = bucket_info.owner;
@@ -678,7 +679,7 @@ static int check_tags(lc_op_ctx& oc, bool *skip)
     *skip = true;
 
     bufferlist tags_bl;
-    int ret = read_obj_tags(oc.store, oc.bucket_info, oc.obj, oc.rctx, tags_bl);
+    int ret = read_obj_tags(oc.store->getRados(), oc.bucket_info, oc.obj, oc.rctx, tags_bl);
     if (ret < 0) {
       if (ret != -ENODATA) {
         ldout(oc.cct, 5) << "ERROR: read_obj_tags returned r=" << ret << dendl;
@@ -793,7 +794,7 @@ public:
     bool is_expired = obj_has_expired(oc.cct, mtime, expiration, exp_time);
 
     ldout(oc.cct, 20) << __func__ << "(): key=" << o.key << ": is_expired=" << is_expired << dendl;
-    return is_expired && pass_object_lock_check(oc.store, oc.bucket_info, oc.obj, oc.rctx);
+    return is_expired && pass_object_lock_check(oc.store->getRados(), oc.bucket_info, oc.obj, oc.rctx);
   }
 
   int process(lc_op_ctx& oc) {
@@ -891,14 +892,14 @@ public:
     target_placement.inherit_from(oc.bucket_info.placement_rule);
     target_placement.storage_class = transition.storage_class;
 
-    if (!oc.store->svc.zone->get_zone_params().valid_placement(target_placement)) {
+    if (!oc.store->svc()->zone->get_zone_params().valid_placement(target_placement)) {
       ldpp_dout(oc.dpp, 0) << "ERROR: non existent dest placement: " << target_placement
                            << " bucket="<< oc.bucket_info.bucket
                            << " rule_id=" << oc.op.id << dendl;
       return -EINVAL;
     }
 
-    int r = oc.store->transition_obj(oc.rctx, oc.bucket_info, oc.obj,
+    int r = oc.store->getRados()->transition_obj(oc.rctx, oc.bucket_info, oc.obj,
                                      target_placement, o.meta.mtime, o.versioned_epoch, oc.dpp, null_yield);
     if (r < 0) {
       ldpp_dout(oc.dpp, 0) << "ERROR: failed to transition obj (r=" << r << ")" << dendl;
@@ -1025,13 +1026,13 @@ int RGWLC::bucket_lc_process(string& shard_id)
   map<string, bufferlist> bucket_attrs;
   string no_ns, list_versions;
   vector<rgw_bucket_dir_entry> objs;
-  auto obj_ctx = store->svc.sysobj->init_obj_ctx();
+  auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
   vector<std::string> result;
   boost::split(result, shard_id, boost::is_any_of(":"));
   string bucket_tenant = result[0];
   string bucket_name = result[1];
   string bucket_marker = result[2];
-  int ret = store->get_bucket_info(obj_ctx, bucket_tenant, bucket_name, bucket_info, NULL, null_yield, &bucket_attrs);
+  int ret = store->getRados()->get_bucket_info(obj_ctx, bucket_tenant, bucket_name, bucket_info, NULL, null_yield, &bucket_attrs);
   if (ret < 0) {
     ldpp_dout(this, 0) << "LC:get_bucket_info for " << bucket_name << " failed" << dendl;
     return ret;
@@ -1044,7 +1045,7 @@ int RGWLC::bucket_lc_process(string& shard_id)
     return -ENOENT;
   }
 
-  RGWRados::Bucket target(store, bucket_info);
+  RGWRados::Bucket target(store->getRados(), bucket_info);
 
   map<string, bufferlist>::iterator aiter = bucket_attrs.find(RGW_ATTR_LC);
   if (aiter == bucket_attrs.end())
@@ -1127,7 +1128,7 @@ int RGWLC::bucket_lc_post(int index, int max_lock_sec, pair<string, int >& entry
   l.set_duration(lock_duration);
 
   do {
-    int ret = l.lock_exclusive(&store->lc_pool_ctx, obj_names[index]);
+    int ret = l.lock_exclusive(&store->getRados()->lc_pool_ctx, obj_names[index]);
     if (ret == -EBUSY || ret == -EEXIST) { /* already locked by another lc processor */
       ldpp_dout(this, 0) << "RGWLC::bucket_lc_post() failed to acquire lock on "
           << obj_names[index] << ", sleep 5, try again" << dendl;
@@ -1138,7 +1139,7 @@ int RGWLC::bucket_lc_post(int index, int max_lock_sec, pair<string, int >& entry
       return 0;
     ldpp_dout(this, 20) << "RGWLC::bucket_lc_post() lock " << obj_names[index] << dendl;
     if (result ==  -ENOENT) {
-      ret = cls_rgw_lc_rm_entry(store->lc_pool_ctx, obj_names[index],  entry);
+      ret = cls_rgw_lc_rm_entry(store->getRados()->lc_pool_ctx, obj_names[index],  entry);
       if (ret < 0) {
         ldpp_dout(this, 0) << "RGWLC::bucket_lc_post() failed to remove entry "
             << obj_names[index] << dendl;
@@ -1150,13 +1151,13 @@ int RGWLC::bucket_lc_post(int index, int max_lock_sec, pair<string, int >& entry
       entry.second = lc_complete;
     }
 
-    ret = cls_rgw_lc_set_entry(store->lc_pool_ctx, obj_names[index],  entry);
+    ret = cls_rgw_lc_set_entry(store->getRados()->lc_pool_ctx, obj_names[index],  entry);
     if (ret < 0) {
       ldpp_dout(this, 0) << "RGWLC::process() failed to set entry on "
           << obj_names[index] << dendl;
     }
 clean:
-    l.unlock(&store->lc_pool_ctx, obj_names[index]);
+    l.unlock(&store->getRados()->lc_pool_ctx, obj_names[index]);
     ldpp_dout(this, 20) << "RGWLC::bucket_lc_post() unlock " << obj_names[index] << dendl;
     return 0;
   } while (true);
@@ -1168,7 +1169,7 @@ int RGWLC::list_lc_progress(const string& marker, uint32_t max_entries, map<stri
   progress_map->clear();
   for(; index <max_objs; index++) {
     map<string, int > entries;
-    int ret = cls_rgw_lc_list(store->lc_pool_ctx, obj_names[index], marker, max_entries, entries);
+    int ret = cls_rgw_lc_list(store->getRados()->lc_pool_ctx, obj_names[index], marker, max_entries, entries);
     if (ret < 0) {
       if (ret == -ENOENT) {
         ldpp_dout(this, 10) << __func__ << "() ignoring unfound lc object="
@@ -1214,7 +1215,7 @@ int RGWLC::process(int index, int max_lock_secs)
     utime_t time(max_lock_secs, 0);
     l.set_duration(time);
 
-    int ret = l.lock_exclusive(&store->lc_pool_ctx, obj_names[index]);
+    int ret = l.lock_exclusive(&store->getRados()->lc_pool_ctx, obj_names[index]);
     if (ret == -EBUSY || ret == -EEXIST) { /* already locked by another lc processor */
       ldpp_dout(this, 0) << "RGWLC::process() failed to acquire lock on "
           << obj_names[index] << ", sleep 5, try again" << dendl;
@@ -1225,7 +1226,7 @@ int RGWLC::process(int index, int max_lock_secs)
       return 0;
 
     cls_rgw_lc_obj_head head;
-    ret = cls_rgw_lc_get_head(store->lc_pool_ctx, obj_names[index], head);
+    ret = cls_rgw_lc_get_head(store->getRados()->lc_pool_ctx, obj_names[index], head);
     if (ret < 0) {
       ldpp_dout(this, 0) << "RGWLC::process() failed to get obj head "
           << obj_names[index] << ", ret=" << ret << dendl;
@@ -1243,7 +1244,7 @@ int RGWLC::process(int index, int max_lock_secs)
       }
     }
 
-    ret = cls_rgw_lc_get_next_entry(store->lc_pool_ctx, obj_names[index], head.marker, entry);
+    ret = cls_rgw_lc_get_next_entry(store->getRados()->lc_pool_ctx, obj_names[index], head.marker, entry);
     if (ret < 0) {
       ldpp_dout(this, 0) << "RGWLC::process() failed to get obj entry "
           << obj_names[index] << dendl;
@@ -1254,7 +1255,7 @@ int RGWLC::process(int index, int max_lock_secs)
       goto exit;
 
     entry.second = lc_processing;
-    ret = cls_rgw_lc_set_entry(store->lc_pool_ctx, obj_names[index],  entry);
+    ret = cls_rgw_lc_set_entry(store->getRados()->lc_pool_ctx, obj_names[index],  entry);
     if (ret < 0) {
       ldpp_dout(this, 0) << "RGWLC::process() failed to set obj entry " << obj_names[index]
           << " (" << entry.first << "," << entry.second << ")" << dendl;
@@ -1262,18 +1263,18 @@ int RGWLC::process(int index, int max_lock_secs)
     }
 
     head.marker = entry.first;
-    ret = cls_rgw_lc_put_head(store->lc_pool_ctx, obj_names[index],  head);
+    ret = cls_rgw_lc_put_head(store->getRados()->lc_pool_ctx, obj_names[index],  head);
     if (ret < 0) {
       ldpp_dout(this, 0) << "RGWLC::process() failed to put head " << obj_names[index] << dendl;
       goto exit;
     }
-    l.unlock(&store->lc_pool_ctx, obj_names[index]);
+    l.unlock(&store->getRados()->lc_pool_ctx, obj_names[index]);
     ret = bucket_lc_process(entry.first);
     bucket_lc_post(index, max_lock_secs, entry, ret);
   }while(1);
 
 exit:
-    l.unlock(&store->lc_pool_ctx, obj_names[index]);
+    l.unlock(&store->getRados()->lc_pool_ctx, obj_names[index]);
     return 0;
 }
 
@@ -1393,7 +1394,7 @@ static std::string get_lc_shard_name(const rgw_bucket& bucket){
 }
 
 template<typename F>
-static int guard_lc_modify(RGWRados* store, const rgw_bucket& bucket, const string& cookie, const F& f) {
+static int guard_lc_modify(RGWRadosStore* store, const rgw_bucket& bucket, const string& cookie, const F& f) {
   CephContext *cct = store->ctx();
 
   string shard_id = get_lc_shard_name(bucket);
@@ -1409,7 +1410,7 @@ static int guard_lc_modify(RGWRados* store, const rgw_bucket& bucket, const stri
   l.set_duration(time);
   l.set_cookie(cookie);
 
-  librados::IoCtx *ctx = store->get_lc_pool_ctx();
+  librados::IoCtx *ctx = store->getRados()->get_lc_pool_ctx();
   int ret;
 
   do {
@@ -1446,7 +1447,7 @@ int RGWLC::set_bucket_config(RGWBucketInfo& bucket_info,
 
   attrs[RGW_ATTR_LC] = std::move(lc_bl);
 
-  int ret = store->ctl.bucket->set_bucket_instance_attrs(bucket_info, attrs,
+  int ret = store->ctl()->bucket->set_bucket_instance_attrs(bucket_info, attrs,
                                                         &bucket_info.objv_tracker,
                                                         null_yield);
   if (ret < 0)
@@ -1468,7 +1469,7 @@ int RGWLC::remove_bucket_config(RGWBucketInfo& bucket_info,
 {
   map<string, bufferlist> attrs = bucket_attrs;
   attrs.erase(RGW_ATTR_LC);
-  int ret = store->ctl.bucket->set_bucket_instance_attrs(bucket_info, attrs,
+  int ret = store->ctl()->bucket->set_bucket_instance_attrs(bucket_info, attrs,
                                                         &bucket_info.objv_tracker,
                                                         null_yield);
 
@@ -1491,7 +1492,7 @@ int RGWLC::remove_bucket_config(RGWBucketInfo& bucket_info,
 
 namespace rgw::lc {
 
-int fix_lc_shard_entry(RGWRados* store, const RGWBucketInfo& bucket_info,
+int fix_lc_shard_entry(rgw::sal::RGWRadosStore* store, const RGWBucketInfo& bucket_info,
                       const map<std::string,bufferlist>& battrs)
 {
   if (auto aiter = battrs.find(RGW_ATTR_LC);
@@ -1509,7 +1510,7 @@ int fix_lc_shard_entry(RGWRados* store, const RGWBucketInfo& bucket_info,
   // 2. entry doesn't exist, which usually happens when reshard has happened prior to update and next LC process has already dropped the update
   // 3. entry exists matching the current bucket id which was after a reshard (needs to be updated to the marker)
   // We are not dropping the old marker here as that would be caught by the next LC process update
-  auto lc_pool_ctx = store->get_lc_pool_ctx();
+  auto lc_pool_ctx = store->getRados()->get_lc_pool_ctx();
   int ret = cls_rgw_lc_get_entry(*lc_pool_ctx,
                                 lc_oid, shard_name, entry);
   if (ret == 0) {
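
The rgw_lc.cc hunks above all apply the same mechanical change: RGWLC now holds an rgw::sal::RGWRadosStore* rather than a bare RGWRados*, so direct member access (store->lc_pool_ctx, store->svc.sysobj, store->ctl.bucket) becomes a call through an accessor (store->getRados()->..., store->svc()->..., store->ctl()->...). The following minimal, compilable sketch models only that wrapper shape; Rados, Services, Controllers and bucket_lc_process are invented stand-ins, not the real RGW types, and only the accessor names mirror the diff.

// Minimal stand-in for the SAL wrapper shape introduced by this commit.
// None of these types are the real RGW classes; only the accessor names
// (getRados/svc/ctl) mirror the diff above.
#include <iostream>
#include <string>

struct Rados {                        // stand-in for RGWRados
  std::string lc_pool_ctx{"lc-pool"};
  int get_bucket_info(const std::string& name) {
    std::cout << "rados: get_bucket_info(" << name << ")\n";
    return 0;
  }
};

struct Services {                     // stand-in for the svc_* handles
  std::string zonegroup{"default"};
};

struct Controllers {                  // stand-in for the ctl_* handles
  int set_bucket_instance_attrs() { return 0; }
};

class RadosStore {                    // stand-in for rgw::sal::RGWRadosStore
  Rados rados;
  Services services;
  Controllers controllers;
public:
  Rados* getRados() { return &rados; }        // was: direct RGWRados methods
  Services* svc() { return &services; }       // was: store->svc.<member>
  Controllers* ctl() { return &controllers; } // was: store->ctl.<member>
};

// A call site only changes in how it reaches the backend:
int bucket_lc_process(RadosStore* store, const std::string& bucket) {
  int ret = store->getRados()->get_bucket_info(bucket);
  std::cout << "zonegroup=" << store->svc()->zonegroup
            << " lc pool=" << store->getRados()->lc_pool_ctx << "\n";
  return ret ? ret : store->ctl()->set_bucket_instance_attrs();
}

int main() {
  RadosStore store;
  return bucket_lc_process(&store, "mybucket");
}
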
index 553feebfcee0a9fe70d7bcbed9628c50f7896b61..8956cc9447f2d5c16edb44fd9325419a0a031ea8 100644 (file)
@@ -20,6 +20,7 @@
 #include "rgw_rados.h"
 #include "cls/rgw/cls_rgw_types.h"
 #include "rgw_tag.h"
+#include "rgw_sal.h"
 
 #include <atomic>
 #include <tuple>
@@ -451,7 +452,7 @@ WRITE_CLASS_ENCODER(RGWLifecycleConfiguration)
 
 class RGWLC : public DoutPrefixProvider {
   CephContext *cct;
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   int max_objs{0};
   string *obj_names{nullptr};
   std::atomic<bool> down_flag = { false };
@@ -480,7 +481,7 @@ class RGWLC : public DoutPrefixProvider {
     finalize();
   }
 
-  void initialize(CephContext *_cct, RGWRados *_store);
+  void initialize(CephContext *_cct, rgw::sal::RGWRadosStore *_store);
   void finalize();
 
   int process();
@@ -511,7 +512,7 @@ class RGWLC : public DoutPrefixProvider {
 
 namespace rgw::lc {
 
-int fix_lc_shard_entry(RGWRados *store, const RGWBucketInfo& bucket_info,
+int fix_lc_shard_entry(rgw::sal::RGWRadosStore *store, const RGWBucketInfo& bucket_info,
                       const map<std::string,bufferlist>& battrs);
 
 std::string s3_expiration_header(
index 9afd8d13272df20a006178d8192b2e1e14feba73..fcb7559637b61aa61d2dbee0768054046ea40f91 100644 (file)
@@ -30,7 +30,7 @@ namespace rgw {
     OpsLogSocket* olog;
     rgw::LDAPHelper* ldh{nullptr};
     RGWREST rest; // XXX needed for RGWProcessEnv
-    RGWRados* store;
+    rgw::sal::RGWRadosStore* store;
     boost::intrusive_ptr<CephContext> cct;
 
   public:
@@ -38,7 +38,7 @@ namespace rgw {
       {}
     ~RGWLib() {}
 
-    RGWRados* get_store() { return store; }
+    rgw::sal::RGWRadosStore* get_store() { return store; }
 
     RGWLibFrontend* get_fe() { return fe; }
 
@@ -74,7 +74,7 @@ namespace rgw {
       return user_info;
     }
 
-    int set_uid(RGWRados* store, const rgw_user& uid);
+    int set_uid(rgw::sal::RGWRadosStore* store, const rgw_user& uid);
 
     int write_data(const char *buf, int len);
     int read_data(char *buf, int len);
@@ -135,7 +135,7 @@ namespace rgw {
     inline struct req_state* get_state() { return this->RGWRequest::s; }
 
     RGWLibRequest(CephContext* _cct, RGWUserInfo* _user)
-      :  RGWRequest(rgwlib.get_store()->get_new_req_id()), cct(_cct),
+      :  RGWRequest(rgwlib.get_store()->getRados()->get_new_req_id()), cct(_cct),
         user(_user)
       {}
 
@@ -161,12 +161,12 @@ namespace rgw {
       RGWRequest::init_state(_s);
       RGWHandler::init(rados_ctx->get_store(), _s, io);
 
-      sysobj_ctx.emplace(store->svc.sysobj);
+      sysobj_ctx.emplace(store->svc()->sysobj);
 
       get_state()->obj_ctx = rados_ctx;
       get_state()->sysobj_ctx = &(sysobj_ctx.get());
-      get_state()->req_id = store->svc.zone_utils->unique_id(id);
-      get_state()->trans_id = store->svc.zone_utils->unique_trans_id(id);
+      get_state()->req_id = store->svc()->zone_utils->unique_id(id);
+      get_state()->trans_id = store->svc()->zone_utils->unique_trans_id(id);
 
       ldpp_dout(_s, 2) << "initializing for trans_id = "
          << get_state()->trans_id.c_str() << dendl;
@@ -200,18 +200,18 @@ namespace rgw {
        RGWRequest::init_state(&rstate);
        RGWHandler::init(rados_ctx.get_store(), &rstate, &io_ctx);
 
-       sysobj_ctx.emplace(store->svc.sysobj);
+       sysobj_ctx.emplace(store->svc()->sysobj);
 
        get_state()->obj_ctx = &rados_ctx;
        get_state()->sysobj_ctx = &(sysobj_ctx.get());
-       get_state()->req_id = store->svc.zone_utils->unique_id(id);
-       get_state()->trans_id = store->svc.zone_utils->unique_trans_id(id);
+       get_state()->req_id = store->svc()->zone_utils->unique_id(id);
+       get_state()->trans_id = store->svc()->zone_utils->unique_trans_id(id);
 
        ldpp_dout(get_state(), 2) << "initializing for trans_id = "
            << get_state()->trans_id.c_str() << dendl;
       }
 
-    inline RGWRados* get_store() { return store; }
+    inline rgw::sal::RGWRadosStore* get_store() { return store; }
 
     virtual int execute() final { ceph_abort(); }
     virtual int exec_start() = 0;
index 677599f03fec6a9a24ea0f20efcd347ce3b8277b..1fdba116fac1f1ada8635fbb526aa943b17510a3 100644 (file)
@@ -107,7 +107,7 @@ void RGWLoadGenProcess::gen_request(const string& method,
                                    int content_length, std::atomic<bool>* fail_flag)
 {
   RGWLoadGenRequest* req =
-    new RGWLoadGenRequest(store->get_new_req_id(), method, resource,
+    new RGWLoadGenRequest(store->getRados()->get_new_req_id(), method, resource,
                          content_length, fail_flag);
   dout(10) << "allocated request req=" << hex << req << dec << dendl;
   req_throttle.get(1);
index 4ce3fe82196a75426f118136ac40a42daacd35e1..f1f958579190cdbe1324c0402444440e7384897a 100644 (file)
@@ -300,7 +300,7 @@ int main(int argc, const char **argv)
   FCGX_Init();
 #endif
 
-  RGWRados *store =
+  rgw::sal::RGWRadosStore *store =
     RGWStoreManager::get_storage(g_ceph_context,
                                 g_conf()->rgw_enable_gc_threads,
                                 g_conf()->rgw_enable_lc_threads,
@@ -323,14 +323,14 @@ int main(int argc, const char **argv)
     return -r;
   }
 
-  rgw_rest_init(g_ceph_context, store, store->svc.zone->get_zonegroup());
+  rgw_rest_init(g_ceph_context, store->svc()->zone->get_zonegroup());
 
   mutex.lock();
   init_timer.cancel_all_events();
   init_timer.shutdown();
   mutex.unlock();
 
-  rgw_log_usage_init(g_ceph_context, store);
+  rgw_log_usage_init(g_ceph_context, store->getRados());
 
   RGWREST rest;
 
@@ -357,7 +357,7 @@ int main(int argc, const char **argv)
   const bool swift_at_root = g_conf()->rgw_swift_url_prefix == "/";
   if (apis_map.count("s3") > 0 || s3website_enabled) {
     if (! swift_at_root) {
-      rest.register_default_mgr(set_logging(rest_filter(store, RGW_REST_S3,
+      rest.register_default_mgr(set_logging(rest_filter(store->getRados(), RGW_REST_S3,
                                                         new RGWRESTMgr_S3(s3website_enabled, sts_enabled, iam_enabled))));
     } else {
       derr << "Cannot have the S3 or S3 Website enabled together with "
@@ -382,10 +382,10 @@ int main(int argc, const char **argv)
 
     if (! swift_at_root) {
       rest.register_resource(g_conf()->rgw_swift_url_prefix,
-                          set_logging(rest_filter(store, RGW_REST_SWIFT,
+                          set_logging(rest_filter(store->getRados(), RGW_REST_SWIFT,
                                                   swift_resource)));
     } else {
-      if (store->svc.zone->get_zonegroup().zones.size() > 1) {
+      if (store->svc()->zone->get_zonegroup().zones.size() > 1) {
         derr << "Placing Swift API in the root of URL hierarchy while running"
              << " multi-site configuration requires another instance of RadosGW"
              << " with S3 API enabled!" << dendl;
@@ -419,7 +419,7 @@ int main(int argc, const char **argv)
   rgw::auth::ImplicitTenants implicit_tenant_context{g_conf()};
   g_conf().add_observer(&implicit_tenant_context);
   auto auth_registry = \
-    rgw::auth::StrategyRegistry::create(g_ceph_context, implicit_tenant_context, store->pctl);
+    rgw::auth::StrategyRegistry::create(g_ceph_context, implicit_tenant_context, store->getRados()->pctl);
 
   /* Header custom behavior */
   rest.register_x_headers(g_conf()->rgw_log_http_headers);
@@ -529,7 +529,7 @@ int main(int argc, const char **argv)
     fes.push_back(fe);
   }
 
-  r = store->register_to_service_map("rgw", service_map_meta);
+  r = store->getRados()->register_to_service_map("rgw", service_map_meta);
   if (r < 0) {
     derr << "ERROR: failed to register to service map: " << cpp_strerror(-r) << dendl;
 
@@ -542,7 +542,7 @@ int main(int argc, const char **argv)
   RGWFrontendPauser pauser(fes, implicit_tenant_context, &pusher);
   RGWRealmReloader reloader(store, service_map_meta, &pauser);
 
-  RGWRealmWatcher realm_watcher(g_ceph_context, store->svc.zone->get_realm());
+  RGWRealmWatcher realm_watcher(g_ceph_context, store->svc()->zone->get_realm());
   realm_watcher.add_watcher(RGWRealmNotify::Reload, reloader);
   realm_watcher.add_watcher(RGWRealmNotify::ZonesNeedPeriod, pusher);
 
index caa43d64bcabc55eb0a20ede5379976cb952d2bf..e6f11671397d93c2233f51b8a47c9607bd35316f 100644 (file)
@@ -11,6 +11,7 @@
 #include "rgw_zone.h"
 #include "rgw_tools.h"
 #include "rgw_mdlog.h"
+#include "rgw_sal.h"
 
 #include "rgw_cr_rados.h"
 
@@ -24,6 +25,8 @@
 
 #include <boost/asio/yield.hpp>
 
+using namespace rgw::sal;
+
 #define dout_subsys ceph_subsys_rgw
 
 const std::string RGWMetadataLogHistory::oid = "meta.history";
@@ -389,11 +392,11 @@ int RGWMetadataHandler::attach(RGWMetadataManager *manager)
 }
 
 RGWMetadataHandler_GenericMetaBE::Put::Put(RGWMetadataHandler_GenericMetaBE *_handler,
-                                           RGWSI_MetaBackend_Handler::Op *_op,
-                                           string& _entry, RGWMetadataObject *_obj,
-                                           RGWObjVersionTracker& _objv_tracker,
-                                           optional_yield _y,
-                                           RGWMDLogSyncType _type) :
+                                          RGWSI_MetaBackend_Handler::Op *_op,
+                                          string& _entry, RGWMetadataObject *_obj,
+                                          RGWObjVersionTracker& _objv_tracker,
+                                          optional_yield _y,
+                                          RGWMDLogSyncType _type):
   handler(_handler), op(_op),
   entry(_entry), obj(_obj),
   objv_tracker(_objv_tracker),
index 194074d5daa37e3be148832f8a7dc8ebcc3b173e..a1ef9e616ebc34c3d0034424d642b4fe4bc9a972 100644 (file)
 #include "cls/log/cls_log_types.h"
 #include "common/RefCountedObj.h"
 #include "common/ceph_time.h"
-
 #include "services/svc_meta_be.h"
 
 
-class RGWRados;
+namespace rgw { namespace sal {
+class RGWRadosStore;
+} }
 class RGWCoroutine;
 class JSONObj;
 struct RGWObjVersionTracker;
@@ -136,7 +137,7 @@ public:
       return handler->do_get(op, entry, obj, y);
     }
   public:
-    Put(RGWMetadataHandler_GenericMetaBE *handler, RGWSI_MetaBackend_Handler::Op *_op,
+    Put(RGWMetadataHandler_GenericMetaBE *_handler, RGWSI_MetaBackend_Handler::Op *_op,
         string& _entry, RGWMetadataObject *_obj,
         RGWObjVersionTracker& _objv_tracker, optional_yield _y,
         RGWMDLogSyncType _type);
index f06818155e966fa75155e97b1a0a5dd7edc20e15..b17c6c8fa0ee27e319ec7c2f8faa3685ed9fa0eb 100644 (file)
@@ -11,6 +11,7 @@
 #include "rgw_xml.h"
 #include "rgw_multi.h"
 #include "rgw_op.h"
+#include "rgw_sal.h"
 
 #include "services/svc_sys_obj.h"
 #include "services/svc_tier_rados.h"
@@ -76,7 +77,7 @@ bool is_v2_upload_id(const string& upload_id)
          (strncmp(uid, MULTIPART_UPLOAD_ID_PREFIX_LEGACY, sizeof(MULTIPART_UPLOAD_ID_PREFIX_LEGACY) - 1) == 0);
 }
 
-int list_multipart_parts(RGWRados *store, RGWBucketInfo& bucket_info,
+int list_multipart_parts(rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket_info,
                         CephContext *cct,
                         const string& upload_id,
                         const string& meta_oid, int num_parts,
@@ -92,13 +93,13 @@ int list_multipart_parts(RGWRados *store, RGWBucketInfo& bucket_info,
   obj.set_in_extra_data(true);
 
   rgw_raw_obj raw_obj;
-  store->obj_to_raw(bucket_info.placement_rule, obj, &raw_obj);
+  store->getRados()->obj_to_raw(bucket_info.placement_rule, obj, &raw_obj);
 
   bool sorted_omap = is_v2_upload_id(upload_id) && !assume_unsorted;
 
   parts.clear();
 
-  auto obj_ctx = store->svc.sysobj->init_obj_ctx();
+  auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
   auto sysobj = obj_ctx.get_obj(raw_obj);
   int ret;
   if (sorted_omap) {
@@ -186,7 +187,7 @@ int list_multipart_parts(RGWRados *store, RGWBucketInfo& bucket_info,
   return 0;
 }
 
-int list_multipart_parts(RGWRados *store, struct req_state *s,
+int list_multipart_parts(rgw::sal::RGWRadosStore *store, struct req_state *s,
                         const string& upload_id,
                         const string& meta_oid, int num_parts,
                         int marker, map<uint32_t, RGWUploadPartInfo>& parts,
@@ -198,7 +199,7 @@ int list_multipart_parts(RGWRados *store, struct req_state *s,
                              next_marker, truncated, assume_unsorted);
 }
 
-int abort_multipart_upload(RGWRados *store, CephContext *cct,
+int abort_multipart_upload(rgw::sal::RGWRadosStore *store, CephContext *cct,
                           RGWObjectCtx *obj_ctx, RGWBucketInfo& bucket_info,
                           RGWMPObj& mp_obj)
 {
@@ -232,15 +233,15 @@ int abort_multipart_upload(RGWRados *store, CephContext *cct,
         string oid = mp_obj.get_part(obj_iter->second.num);
         obj.init_ns(bucket_info.bucket, oid, RGW_OBJ_NS_MULTIPART);
         obj.index_hash_source = mp_obj.get_key();
-        ret = store->delete_obj(*obj_ctx, bucket_info, obj, 0);
+        ret = store->getRados()->delete_obj(*obj_ctx, bucket_info, obj, 0);
         if (ret < 0 && ret != -ENOENT)
           return ret;
       } else {
-        store->update_gc_chain(meta_obj, obj_part.manifest, &chain);
+        store->getRados()->update_gc_chain(meta_obj, obj_part.manifest, &chain);
         RGWObjManifest::obj_iterator oiter = obj_part.manifest.obj_begin();
         if (oiter != obj_part.manifest.obj_end()) {
           rgw_obj head;
-          rgw_raw_obj raw_head = oiter.get_location().get_raw_obj(store);
+          rgw_raw_obj raw_head = oiter.get_location().get_raw_obj(store->getRados());
           RGWSI_Tier_RADOS::raw_obj_to_obj(bucket_info.bucket, raw_head, &head);
 
           rgw_obj_index_key key;
@@ -252,13 +253,13 @@ int abort_multipart_upload(RGWRados *store, CephContext *cct,
   } while (truncated);
 
   /* use upload id as tag and do it asynchronously */
-  ret = store->send_chain_to_gc(chain, mp_obj.get_upload_id(), false);
+  ret = store->getRados()->send_chain_to_gc(chain, mp_obj.get_upload_id(), false);
   if (ret < 0) {
     ldout(cct, 5) << __func__ << ": gc->send_chain() returned " << ret << dendl;
     return (ret == -ENOENT) ? -ERR_NO_SUCH_UPLOAD : ret;
   }
 
-  RGWRados::Object del_target(store, bucket_info, *obj_ctx, meta_obj);
+  RGWRados::Object del_target(store->getRados(), bucket_info, *obj_ctx, meta_obj);
   RGWRados::Object::Delete del_op(&del_target);
   del_op.params.bucket_owner = bucket_info.owner;
   del_op.params.versioning_status = 0;
@@ -275,14 +276,14 @@ int abort_multipart_upload(RGWRados *store, CephContext *cct,
   return (ret == -ENOENT) ? -ERR_NO_SUCH_UPLOAD : ret;
 }
 
-int list_bucket_multiparts(RGWRados *store, RGWBucketInfo& bucket_info,
+int list_bucket_multiparts(rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket_info,
                           const string& prefix, const string& marker,
                           const string& delim,
                           const int& max_uploads,
                           vector<rgw_bucket_dir_entry> *objs,
                           map<string, bool> *common_prefixes, bool *is_truncated)
 {
-  RGWRados::Bucket target(store, bucket_info);
+  RGWRados::Bucket target(store->getRados(), bucket_info);
   RGWRados::Bucket::List list_op(&target);
   MultipartMetaFilter mp_filter;
 
@@ -295,7 +296,7 @@ int list_bucket_multiparts(RGWRados *store, RGWBucketInfo& bucket_info,
   return(list_op.list_objects(max_uploads, objs, common_prefixes, is_truncated, null_yield));
 }
 
-int abort_bucket_multiparts(RGWRados *store, CephContext *cct, RGWBucketInfo& bucket_info,
+int abort_bucket_multiparts(rgw::sal::RGWRadosStore *store, CephContext *cct, RGWBucketInfo& bucket_info,
                                string& prefix, string& delim)
 {
   constexpr int max = 1000;
index a9004c213c6fc7a4e1bbd2bd605bd30b96b79914..bd46484a3416cdf87fbf6aca9e49ef3d538d4b11 100644 (file)
@@ -9,6 +9,10 @@
 #include "rgw_obj_manifest.h"
 #include "rgw_compression_types.h"
 
+namespace rgw { namespace sal {
+  class RGWRadosStore;
+} }
+
 #define MULTIPART_UPLOAD_ID_PREFIX_LEGACY "2/"
 #define MULTIPART_UPLOAD_ID_PREFIX "2~" // must contain a unique char that may not come up in gen_rand_alpha()
 
@@ -104,7 +108,7 @@ public:
 
 extern bool is_v2_upload_id(const string& upload_id);
 
-extern int list_multipart_parts(RGWRados *store, RGWBucketInfo& bucket_info,
+extern int list_multipart_parts(rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket_info,
                                CephContext *cct,
                                 const string& upload_id,
                                 const string& meta_oid, int num_parts,
@@ -112,17 +116,17 @@ extern int list_multipart_parts(RGWRados *store, RGWBucketInfo& bucket_info,
                                 int *next_marker, bool *truncated,
                                 bool assume_unsorted = false);
 
-extern int list_multipart_parts(RGWRados *store, struct req_state *s,
+extern int list_multipart_parts(rgw::sal::RGWRadosStore *store, struct req_state *s,
                                 const string& upload_id,
                                 const string& meta_oid, int num_parts,
                                 int marker, map<uint32_t, RGWUploadPartInfo>& parts,
                                 int *next_marker, bool *truncated,
                                 bool assume_unsorted = false);
 
-extern int abort_multipart_upload(RGWRados *store, CephContext *cct, RGWObjectCtx *obj_ctx,
+extern int abort_multipart_upload(rgw::sal::RGWRadosStore *store, CephContext *cct, RGWObjectCtx *obj_ctx,
                                 RGWBucketInfo& bucket_info, RGWMPObj& mp_obj);
 
-extern int list_bucket_multiparts(RGWRados *store, RGWBucketInfo& bucket_info,
+extern int list_bucket_multiparts(rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket_info,
                                  const string& prefix,
                                  const string& marker,
                                  const string& delim,
@@ -130,6 +134,6 @@ extern int list_bucket_multiparts(RGWRados *store, RGWBucketInfo& bucket_info,
                                  vector<rgw_bucket_dir_entry> *objs,
                                  map<string, bool> *common_prefixes, bool *is_truncated);
 
-extern int abort_bucket_multiparts(RGWRados *store, CephContext *cct, RGWBucketInfo& bucket_info,
+extern int abort_bucket_multiparts(rgw::sal::RGWRadosStore *store, CephContext *cct, RGWBucketInfo& bucket_info,
                                 string& prefix, string& delim);
 #endif
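
Headers like the one above switch their signatures to rgw::sal::RGWRadosStore* but avoid including rgw_sal.h by forward-declaring the class inside its namespace; only translation units, and headers such as rgw_lc.h that need the full type, pull in rgw_sal.h. Below is a small self-contained sketch of that split, with invented names and the conceptual file boundaries marked in comments.

// Sketch of the forward-declaration split used by these headers. Names and
// "file" boundaries are illustrative only; in the real tree the full type
// lives in rgw_sal.h.

// ---- conceptually: the public header (only needs the pointer type) ----
namespace rgw { namespace sal {
class Store;                              // forward declaration is enough
} }

int list_parts(rgw::sal::Store* store, int num_parts);

// ---- conceptually: the SAL header with the full definition ----
#include <iostream>
namespace rgw { namespace sal {
class Store {
public:
  int list(int n) { std::cout << "listing " << n << " parts\n"; return 0; }
};
} }

// ---- conceptually: the .cc that dereferences the store ----
int list_parts(rgw::sal::Store* store, int num_parts) {
  return store->list(num_parts);          // full type needed only here
}

int main() {
  rgw::sal::Store store;
  return list_parts(&store, 3);
}
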
index 3fc0fdb041c64cf5ed8b8423aa3743191f228785..78c7a05914daea94eac8e39e64e095220700e7ea 100644 (file)
@@ -21,6 +21,7 @@
 class RGWSI_Zone;
 struct RGWZoneGroup;
 struct RGWZoneParams;
+class RGWRados;
 
 class rgw_obj_select {
   rgw_placement_rule placement_rule;
index 07b917e6e85dc9c3c908f190b48cfe7e71715210..5589c26d15496576f01691e5a7de4c9651fea318 100644 (file)
 
 #define dout_subsys ceph_subsys_rgw
 
-static RGWRados *store = NULL;
+static rgw::sal::RGWRadosStore *store = NULL;
 
 class StoreDestructor {
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
 
 public:
-  explicit StoreDestructor(RGWRados *_s) : store(_s) {}
+  explicit StoreDestructor(rgw::sal::RGWRadosStore *_s) : store(_s) {}
   ~StoreDestructor() {
     if (store) {
       RGWStoreManager::close_storage(store);
index 4952f8382d18bd4c4588e10f29857f3d7e34f783..4ee97b6f69d6590af618272c2e6136a8f3b0ada6 100644 (file)
@@ -174,7 +174,7 @@ int RGWObjectExpirer::init_bucket_info(const string& tenant_name,
                                        const string& bucket_id,
                                        RGWBucketInfo& bucket_info)
 {
-  auto obj_ctx = store->svc.sysobj->init_obj_ctx();
+  auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
 
   /*
    * XXX Here's where it gets tricky. We went to all the trouble of
@@ -186,7 +186,7 @@ int RGWObjectExpirer::init_bucket_info(const string& tenant_name,
    * are ephemeral, good call encoding tenant info!
    */
 
-  return store->get_bucket_info(obj_ctx, tenant_name, bucket_name,
+  return store->getRados()->get_bucket_info(obj_ctx, tenant_name, bucket_name,
                                bucket_info, nullptr, null_yield, nullptr);
 
 }
@@ -215,8 +215,8 @@ int RGWObjectExpirer::garbage_single_object(objexp_hint_entry& hint)
   }
 
   rgw_obj obj(bucket_info.bucket, key);
-  store->set_atomic(&rctx, obj);
-  ret = store->delete_obj(rctx, bucket_info, obj,
+  store->getRados()->set_atomic(&rctx, obj);
+  ret = store->getRados()->delete_obj(rctx, bucket_info, obj,
           bucket_info.versioning_status(), 0, hint.exp_time);
 
   return ret;
@@ -235,7 +235,7 @@ void RGWObjectExpirer::garbage_chunk(list<cls_timeindex_entry>& entries,      /*
     ldout(store->ctx(), 15) << "got removal hint for: " << iter->key_ts.sec() \
         << " - " << iter->key_ext << dendl;
 
-    int ret = objexp_hint_parse(store->ctx(), *iter, &hint);
+    int ret = objexp_hint_parse(store->getRados()->ctx(), *iter, &hint);
     if (ret < 0) {
       ldout(store->ctx(), 1) << "cannot parse removal hint for " << hint.obj_key << dendl;
       continue;
@@ -298,7 +298,7 @@ bool RGWObjectExpirer::process_single_shard(const string& shard,
   utime_t time(max_secs, 0);
   l.set_duration(time);
 
-  int ret = l.lock_exclusive(&store->objexp_pool_ctx, shard);
+  int ret = l.lock_exclusive(&store->getRados()->objexp_pool_ctx, shard);
   if (ret == -EBUSY) { /* already locked by another processor */
     dout(5) << __func__ << "(): failed to acquire lock on " << shard << dendl;
     return false;
@@ -334,7 +334,7 @@ bool RGWObjectExpirer::process_single_shard(const string& shard,
     marker = out_marker;
   } while (truncated);
 
-  l.unlock(&store->objexp_pool_ctx, shard);
+  l.unlock(&store->getRados()->objexp_pool_ctx, shard);
   return done;
 }
 
index ad0a8c8dd8c8b4b2196f5981c3455f020cc4b8c3..a543cd0b80df64c70b968c275e24c19677e25368 100644 (file)
 #include "include/utime.h"
 #include "include/str_list.h"
 
+#include "rgw_sal.h"
+
 class CephContext;
 class RGWSI_RADOS;
 class RGWSI_Zone;
-class RGWRados;
 class RGWBucketInfo;
 class cls_timeindex_entry;
 
@@ -68,7 +69,7 @@ public:
 
 class RGWObjectExpirer {
 protected:
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   RGWObjExpStore exp_store;
 
   int init_bucket_info(const std::string& tenant_name,
@@ -97,9 +98,9 @@ protected:
   std::atomic<bool> down_flag = { false };
 
 public:
-  explicit RGWObjectExpirer(RGWRados *_store)
+  explicit RGWObjectExpirer(rgw::sal::RGWRadosStore *_store)
     : store(_store),
-      exp_store(_store->ctx(), _store->svc.rados, _store->svc.zone),
+      exp_store(_store->getRados()->ctx(), _store->svc()->rados, _store->svc()->zone),
       worker(NULL) {
   }
   ~RGWObjectExpirer() {
index 98703edfb377d3782e98a805b4747a2a32275f45..5099155c9cde80dbc3db0dc0610de2354fbb8214 100644 (file)
@@ -89,7 +89,7 @@ static string mp_ns = RGW_OBJ_NS_MULTIPART;
 static string shadow_ns = RGW_OBJ_NS_SHADOW;
 
 static void forward_req_info(CephContext *cct, req_info& info, const std::string& bucket_name);
-static int forward_request_to_master(struct req_state *s, obj_version *objv, RGWRados *store,
+static int forward_request_to_master(struct req_state *s, obj_version *objv, rgw::sal::RGWRadosStore *store,
                                      bufferlist& in_data, JSONParser *jp, req_info *forward_info = nullptr);
 
 static MultipartMetaFilter mp_filter;
@@ -187,7 +187,7 @@ static int decode_policy(CephContext *cct,
 
 
 static int get_user_policy_from_attr(CephContext * const cct,
-                                    RGWRados * const store,
+                                    rgw::sal::RGWRadosStore * const store,
                                     map<string, bufferlist>& attrs,
                                     RGWAccessControlPolicy& policy    /* out */)
 {
@@ -230,7 +230,7 @@ static int get_bucket_instance_policy_from_attr(CephContext *cct,
 }
 
 static int get_obj_policy_from_attr(CephContext *cct,
-                                   RGWRados *store,
+                                   rgw::sal::RGWRadosStore *store,
                                    RGWObjectCtx& obj_ctx,
                                    RGWBucketInfo& bucket_info,
                                    map<string, bufferlist>& bucket_attrs,
@@ -242,7 +242,7 @@ static int get_obj_policy_from_attr(CephContext *cct,
   bufferlist bl;
   int ret = 0;
 
-  RGWRados::Object op_target(store, bucket_info, obj_ctx, obj);
+  RGWRados::Object op_target(store->getRados(), bucket_info, obj_ctx, obj);
   RGWRados::Object::Read rop(&op_target);
 
   ret = rop.get_attr(RGW_ATTR_ACL, bl, y);
@@ -254,7 +254,7 @@ static int get_obj_policy_from_attr(CephContext *cct,
     /* object exists, but policy is broken */
     ldout(cct, 0) << "WARNING: couldn't find acl header for object, generating default" << dendl;
     RGWUserInfo uinfo;
-    ret = store->ctl.user->get_info_by_uid(bucket_info.owner, &uinfo, y);
+    ret = store->ctl()->user->get_info_by_uid(bucket_info.owner, &uinfo, y);
     if (ret < 0)
       return ret;
 
@@ -292,7 +292,7 @@ int rgw_op_get_bucket_policy_from_attr(CephContext *cct,
 }
 
 static boost::optional<Policy> get_iam_policy_from_attr(CephContext* cct,
-                                                       RGWRados* store,
+                                                       rgw::sal::RGWRadosStore* store,
                                                        map<string, bufferlist>& attrs,
                                                        const string& tenant) {
   auto i = attrs.find(RGW_ATTR_IAM_POLICY);
@@ -304,7 +304,7 @@ static boost::optional<Policy> get_iam_policy_from_attr(CephContext* cct,
 }
 
 vector<Policy> get_iam_user_policy_from_attr(CephContext* cct,
-                        RGWRados* store,
+                        rgw::sal::RGWRadosStore* store,
                         map<string, bufferlist>& attrs,
                         const string& tenant) {
   vector<Policy> policies;
@@ -321,9 +321,9 @@ vector<Policy> get_iam_user_policy_from_attr(CephContext* cct,
   return policies;
 }
 
-static int get_obj_attrs(RGWRados *store, struct req_state *s, const rgw_obj& obj, map<string, bufferlist>& attrs, rgw_obj *target_obj = nullptr)
+static int get_obj_attrs(rgw::sal::RGWRadosStore *store, struct req_state *s, const rgw_obj& obj, map<string, bufferlist>& attrs, rgw_obj *target_obj = nullptr)
 {
-  RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
+  RGWRados::Object op_target(store->getRados(), s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
   RGWRados::Object::Read read_op(&op_target);
 
   read_op.params.attrs = &attrs;
@@ -332,14 +332,14 @@ static int get_obj_attrs(RGWRados *store, struct req_state *s, const rgw_obj& ob
   return read_op.prepare(s->yield);
 }
 
-static int get_obj_head(RGWRados *store, struct req_state *s,
+static int get_obj_head(rgw::sal::RGWRadosStore *store, struct req_state *s,
                         const rgw_obj& obj,
                         map<string, bufferlist> *attrs,
                          bufferlist *pbl)
 {
-  store->set_prefetch_data(s->obj_ctx, obj);
+  store->getRados()->set_prefetch_data(s->obj_ctx, obj);
 
-  RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
+  RGWRados::Object op_target(store->getRados(), s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
   RGWRados::Object::Read read_op(&op_target);
 
   read_op.params.attrs = attrs;
@@ -376,7 +376,7 @@ struct multipart_upload_info
 };
 WRITE_CLASS_ENCODER(multipart_upload_info)
 
-static int get_multipart_info(RGWRados *store, struct req_state *s,
+static int get_multipart_info(rgw::sal::RGWRadosStore *store, struct req_state *s,
                              const rgw_obj& obj,
                               RGWAccessControlPolicy *policy,
                              map<string, bufferlist> *attrs,
@@ -425,7 +425,7 @@ static int get_multipart_info(RGWRados *store, struct req_state *s,
   return 0;
 }
 
-static int get_multipart_info(RGWRados *store, struct req_state *s,
+static int get_multipart_info(rgw::sal::RGWRadosStore *store, struct req_state *s,
                              const string& meta_oid,
                               RGWAccessControlPolicy *policy,
                              map<string, bufferlist> *attrs,
@@ -441,10 +441,10 @@ static int get_multipart_info(RGWRados *store, struct req_state *s,
   return get_multipart_info(store, s, meta_obj, policy, attrs, upload_info);
 }
 
-static int modify_obj_attr(RGWRados *store, struct req_state *s, const rgw_obj& obj, const char* attr_name, bufferlist& attr_val)
+static int modify_obj_attr(rgw::sal::RGWRadosStore *store, struct req_state *s, const rgw_obj& obj, const char* attr_name, bufferlist& attr_val)
 {
   map<string, bufferlist> attrs;
-  RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
+  RGWRados::Object op_target(store->getRados(), s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
   RGWRados::Object::Read read_op(&op_target);
 
   read_op.params.attrs = &attrs;
@@ -453,9 +453,9 @@ static int modify_obj_attr(RGWRados *store, struct req_state *s, const rgw_obj&
   if (r < 0) {
     return r;
   }
-  store->set_atomic(s->obj_ctx, read_op.state.obj);
+  store->getRados()->set_atomic(s->obj_ctx, read_op.state.obj);
   attrs[attr_name] = attr_val;
-  return store->set_attrs(s->obj_ctx, s->bucket_info, read_op.state.obj, attrs, NULL, s->yield);
+  return store->getRados()->set_attrs(s->obj_ctx, s->bucket_info, read_op.state.obj, attrs, NULL, s->yield);
 }
 
 static int read_bucket_policy(RGWUserCtl *user_ctl,
@@ -483,7 +483,7 @@ static int read_bucket_policy(RGWUserCtl *user_ctl,
   return ret;
 }
 
-static int read_obj_policy(RGWRados *store,
+static int read_obj_policy(rgw::sal::RGWRadosStore *store,
                            struct req_state *s,
                            RGWBucketInfo& bucket_info,
                            map<string, bufferlist>& bucket_attrs,
@@ -521,7 +521,7 @@ static int read_obj_policy(RGWRados *store,
     /* object does not exist checking the bucket's ACL to make sure
        that we send a proper error code */
     RGWAccessControlPolicy bucket_policy(s->cct);
-    ret = rgw_op_get_bucket_policy_from_attr(s->cct, store->ctl.user, bucket_info, bucket_attrs, &bucket_policy);
+    ret = rgw_op_get_bucket_policy_from_attr(s->cct, store->ctl()->user, bucket_info, bucket_attrs, &bucket_policy);
     if (ret < 0) {
       return ret;
     }
@@ -553,12 +553,12 @@ static int read_obj_policy(RGWRados *store,
  * only_bucket: If true, reads the user and bucket ACLs rather than the object ACL.
  * Returns: 0 on success, -ERR# otherwise.
  */
-int rgw_build_bucket_policies(RGWRados* store, struct req_state* s)
+int rgw_build_bucket_policies(rgw::sal::RGWRadosStore* store, struct req_state* s)
 {
   int ret = 0;
   rgw_obj_key obj;
   RGWUserInfo bucket_owner_info;
-  auto obj_ctx = store->svc.sysobj->init_obj_ctx();
+  auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
 
   string bi = s->info.args.get(RGW_SYS_PARAM_PREFIX "bucket-instance");
   if (!bi.empty()) {
@@ -588,13 +588,13 @@ int rgw_build_bucket_policies(RGWRados* store, struct req_state* s)
     RGWBucketInfo source_info;
 
     if (s->bucket_instance_id.empty()) {
-      ret = store->get_bucket_info(obj_ctx, s->src_tenant_name, s->src_bucket_name, source_info, NULL, s->yield);
+      ret = store->getRados()->get_bucket_info(obj_ctx, s->src_tenant_name, s->src_bucket_name, source_info, NULL, s->yield);
     } else {
-      ret = store->get_bucket_instance_info(obj_ctx, s->bucket_instance_id, source_info, NULL, NULL, s->yield);
+      ret = store->getRados()->get_bucket_instance_info(obj_ctx, s->bucket_instance_id, source_info, NULL, NULL, s->yield);
     }
     if (ret == 0) {
       string& zonegroup = source_info.zonegroup;
-      s->local_source = store->svc.zone->get_zonegroup().equals(zonegroup);
+      s->local_source = store->svc()->zone->get_zonegroup().equals(zonegroup);
     }
   }
 
@@ -612,7 +612,7 @@ int rgw_build_bucket_policies(RGWRados* store, struct req_state* s)
     auto b = rgw_bucket(rgw_bucket_key(s->bucket_tenant, s->bucket_name, s->bucket_instance_id));
 
     RGWObjVersionTracker ep_ot;
-    ret = store->ctl.bucket->read_bucket_info(b, &s->bucket_info,
+    ret = store->ctl()->bucket->read_bucket_info(b, &s->bucket_info,
                                               s->yield,
                                              RGWBucketCtl::BucketInstance::GetParams()
                                                .set_mtime(&s->bucket_mtime)
@@ -632,7 +632,7 @@ int rgw_build_bucket_policies(RGWRados* store, struct req_state* s)
     s->bucket = s->bucket_info.bucket;
 
     if (s->bucket_exists) {
-      ret = read_bucket_policy(store->ctl.user, s, s->bucket_info, s->bucket_attrs,
+      ret = read_bucket_policy(store->ctl()->user, s, s->bucket_info, s->bucket_attrs,
                                s->bucket_acl.get(), s->bucket);
       acct_acl_user = {
         s->bucket_info.owner,
@@ -645,7 +645,7 @@ int rgw_build_bucket_policies(RGWRados* store, struct req_state* s)
     s->bucket_owner = s->bucket_acl->get_owner();
 
     RGWZoneGroup zonegroup;
-    int r = store->svc.zone->get_zonegroup(s->bucket_info.zonegroup, zonegroup);
+    int r = store->svc()->zone->get_zonegroup(s->bucket_info.zonegroup, zonegroup);
     if (!r) {
       if (!zonegroup.endpoints.empty()) {
        s->zonegroup_endpoint = zonegroup.endpoints.front();
@@ -662,14 +662,14 @@ int rgw_build_bucket_policies(RGWRados* store, struct req_state* s)
       ret = r;
     }
 
-    if (s->bucket_exists && !store->svc.zone->get_zonegroup().equals(s->bucket_info.zonegroup)) {
+    if (s->bucket_exists && !store->svc()->zone->get_zonegroup().equals(s->bucket_info.zonegroup)) {
       ldpp_dout(s, 0) << "NOTICE: request for data in a different zonegroup ("
           << s->bucket_info.zonegroup << " != "
-          << store->svc.zone->get_zonegroup().get_id() << ")" << dendl;
+          << store->svc()->zone->get_zonegroup().get_id() << ")" << dendl;
       /* we now need to make sure that the operation actually requires copy source, that is
        * it's a copy operation
        */
-      if (store->svc.zone->get_zonegroup().is_master_zonegroup() && s->system_request) {
+      if (store->svc()->zone->get_zonegroup().is_master_zonegroup() && s->system_request) {
         /*If this is the master, don't redirect*/
       } else if (s->op_type == RGW_OP_GET_BUCKET_LOCATION ) {
         /* If op is get bucket location, don't redirect */
@@ -686,7 +686,7 @@ int rgw_build_bucket_policies(RGWRados* store, struct req_state* s)
       s->dest_placement.storage_class = s->info.storage_class;
       s->dest_placement.inherit_from(s->bucket_info.placement_rule);
 
-      if (!store->svc.zone->get_zone_params().valid_placement(s->dest_placement)) {
+      if (!store->svc()->zone->get_zone_params().valid_placement(s->dest_placement)) {
         ldpp_dout(s, 0) << "NOTICE: invalid dest placement: " << s->dest_placement.to_str() << dendl;
         return -EINVAL;
       }
@@ -696,7 +696,7 @@ int rgw_build_bucket_policies(RGWRados* store, struct req_state* s)
   /* handle user ACL only for those APIs which support it */
   if (s->user_acl) {
     map<string, bufferlist> uattrs;
-    ret = store->ctl.user->get_attrs_by_uid(acct_acl_user.uid, &uattrs, s->yield);
+    ret = store->ctl()->user->get_attrs_by_uid(acct_acl_user.uid, &uattrs, s->yield);
     if (!ret) {
       ret = get_user_policy_from_attr(s->cct, store, uattrs, *s->user_acl);
     }
@@ -722,7 +722,7 @@ int rgw_build_bucket_policies(RGWRados* store, struct req_state* s)
   if (! s->user->user_id.empty() && s->auth.identity->get_identity_type() != TYPE_ROLE) {
     try {
       map<string, bufferlist> uattrs;
-      if (ret = store->ctl.user->get_attrs_by_uid(s->user->user_id, &uattrs, s->yield); ! ret) {
+      if (ret = store->ctl()->user->get_attrs_by_uid(s->user->user_id, &uattrs, s->yield); ! ret) {
         if (s->iam_user_policies.empty()) {
           s->iam_user_policies = get_iam_user_policy_from_attr(s->cct, store, uattrs, s->user->user_id.tenant);
         } else {
@@ -753,7 +753,7 @@ int rgw_build_bucket_policies(RGWRados* store, struct req_state* s)
     ret = -EACCES;
   }
 
-  bool success = store->svc.zone->get_redirect_zone_endpoint(&s->redirect_zone_endpoint);
+  bool success = store->svc()->zone->get_redirect_zone_endpoint(&s->redirect_zone_endpoint);
   if (success) {
     ldpp_dout(s, 20) << "redirect_zone_endpoint=" << s->redirect_zone_endpoint << dendl;
   }
@@ -767,7 +767,7 @@ int rgw_build_bucket_policies(RGWRados* store, struct req_state* s)
  * only_bucket: If true, reads the bucket ACL rather than the object ACL.
  * Returns: 0 on success, -ERR# otherwise.
  */
-int rgw_build_object_policies(RGWRados *store, struct req_state *s,
+int rgw_build_object_policies(rgw::sal::RGWRadosStore *store, struct req_state *s,
                              bool prefetch_data)
 {
   int ret = 0;
@@ -779,9 +779,9 @@ int rgw_build_object_policies(RGWRados *store, struct req_state *s,
     s->object_acl = std::make_unique<RGWAccessControlPolicy>(s->cct);
     rgw_obj obj(s->bucket, s->object);
       
-    store->set_atomic(s->obj_ctx, obj);
+    store->getRados()->set_atomic(s->obj_ctx, obj);
     if (prefetch_data) {
-      store->set_prefetch_data(s->obj_ctx, obj);
+      store->getRados()->set_prefetch_data(s->obj_ctx, obj);
     }
     ret = read_obj_policy(store, s, s->bucket_info, s->bucket_attrs,
                          s->object_acl.get(), nullptr, s->iam_policy, s->bucket,
@@ -814,9 +814,9 @@ static int rgw_iam_add_tags_from_bl(struct req_state* s, bufferlist& bl){
   return 0;
 }
 
-static int rgw_iam_add_existing_objtags(RGWRados* store, struct req_state* s, rgw_obj& obj, std::uint64_t action){
+static int rgw_iam_add_existing_objtags(rgw::sal::RGWRadosStore* store, struct req_state* s, rgw_obj& obj, std::uint64_t action){
   map <string, bufferlist> attrs;
-  store->set_atomic(s->obj_ctx, obj);
+  store->getRados()->set_atomic(s->obj_ctx, obj);
   int op_ret = get_obj_attrs(store, s, obj, attrs);
   if (op_ret < 0)
     return op_ret;
@@ -848,7 +848,7 @@ static void rgw_add_grant_to_iam_environment(rgw::IAM::Environment& e, struct re
   }
 }
 
-void rgw_build_iam_environment(RGWRados* store,
+void rgw_build_iam_environment(rgw::sal::RGWRadosStore* store,
                                      struct req_state* s)
 {
   const auto& m = s->info.env->get_map();
@@ -950,9 +950,9 @@ int retry_raced_bucket_write(RGWRados* g, req_state* s, const F& f) {
 int RGWGetObj::verify_permission()
 {
   obj = rgw_obj(s->bucket, s->object);
-  store->set_atomic(s->obj_ctx, obj);
+  store->getRados()->set_atomic(s->obj_ctx, obj);
   if (get_data) {
-    store->set_prefetch_data(s->obj_ctx, obj);
+    store->getRados()->set_prefetch_data(s->obj_ctx, obj);
   }
 
   if (torrent.get_flag()) {
@@ -1001,7 +1001,7 @@ int RGWOp::verify_op_mask()
     return -EPERM;
   }
 
-  if (!s->system_request && (required_mask & RGW_OP_TYPE_MODIFY) && !store->svc.zone->zone_is_writeable()) {
+  if (!s->system_request && (required_mask & RGW_OP_TYPE_MODIFY) && !store->svc()->zone->zone_is_writeable()) {
     ldpp_dout(this, 5) << "NOTICE: modify request to a read-only zone by a "
         "non-system user, permission denied"  << dendl;
     return -EPERM;
@@ -1047,7 +1047,7 @@ void RGWGetObjTags::execute()
 
   obj = rgw_obj(s->bucket, s->object);
 
-  store->set_atomic(s->obj_ctx, obj);
+  store->getRados()->set_atomic(s->obj_ctx, obj);
 
   op_ret = get_obj_attrs(store, s, obj, attrs);
   if (op_ret < 0) {
@@ -1100,7 +1100,7 @@ void RGWPutObjTags::execute()
 
   rgw_obj obj;
   obj = rgw_obj(s->bucket, s->object);
-  store->set_atomic(s->obj_ctx, obj);
+  store->getRados()->set_atomic(s->obj_ctx, obj);
   op_ret = modify_obj_attr(store, s, obj, RGW_ATTR_TAGS, tags_bl);
   if (op_ret == -ECANCELED){
     op_ret = -ERR_TAG_CONFLICT;
@@ -1145,12 +1145,12 @@ void RGWDeleteObjTags::execute()
 
   rgw_obj obj;
   obj = rgw_obj(s->bucket, s->object);
-  store->set_atomic(s->obj_ctx, obj);
+  store->getRados()->set_atomic(s->obj_ctx, obj);
   map <string, bufferlist> attrs;
   map <string, bufferlist> rmattr;
   bufferlist bl;
   rmattr[RGW_ATTR_TAGS] = bl;
-  op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, &rmattr, s->yield);
+  op_ret = store->getRados()->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, &rmattr, s->yield);
 }
 
 int RGWGetBucketTags::verify_permission()
@@ -1190,17 +1190,17 @@ void RGWPutBucketTags::execute() {
   if (op_ret < 0) 
     return;
 
-  if (!store->svc.zone->is_meta_master()) {
+  if (!store->svc()->zone->is_meta_master()) {
     op_ret = forward_request_to_master(s, nullptr, store, in_data, nullptr);
     if (op_ret < 0) {
       ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
     }
   }
 
-  op_ret = retry_raced_bucket_write(store, s, [this] {
+  op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
     map<string, bufferlist> attrs = s->bucket_attrs;
     attrs[RGW_ATTR_TAGS] = tags_bl;
-    return store->ctl.bucket->set_bucket_instance_attrs(s->bucket_info, attrs, &s->bucket_info.objv_tracker, s->yield);
+    return store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs, &s->bucket_info.objv_tracker, s->yield);
   });
 
 }
@@ -1217,7 +1217,7 @@ int RGWDeleteBucketTags::verify_permission()
 
 void RGWDeleteBucketTags::execute()
 {
-  if (!store->svc.zone->is_meta_master()) {
+  if (!store->svc()->zone->is_meta_master()) {
     bufferlist in_data;
     op_ret = forward_request_to_master(s, nullptr, store, in_data, nullptr);
     if (op_ret < 0) {
@@ -1226,10 +1226,10 @@ void RGWDeleteBucketTags::execute()
     }
   }
 
-  op_ret = retry_raced_bucket_write(store, s, [this] {
+  op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
     map<string, bufferlist> attrs = s->bucket_attrs;
     attrs.erase(RGW_ATTR_TAGS);
-    op_ret = store->ctl.bucket->set_bucket_instance_attrs(s->bucket_info, attrs, &s->bucket_info.objv_tracker, s->yield);
+    op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs, &s->bucket_info.objv_tracker, s->yield);
     if (op_ret < 0) {
       ldpp_dout(this, 0) << "RGWDeleteBucketTags() failed to remove RGW_ATTR_TAGS on bucket="
                         << s->bucket.name
@@ -1280,7 +1280,7 @@ int RGWOp::init_quota()
   if (s->user->user_id == s->bucket_owner.get_id()) {
     uinfo = s->user;
   } else {
-    int r = store->ctl.user->get_info_by_uid(s->bucket_info.owner, &owner_info, s->yield);
+    int r = store->ctl()->user->get_info_by_uid(s->bucket_info.owner, &owner_info, s->yield);
     if (r < 0)
       return r;
     uinfo = &owner_info;
@@ -1291,13 +1291,13 @@ int RGWOp::init_quota()
   } else if (uinfo->bucket_quota.enabled) {
     bucket_quota = uinfo->bucket_quota;
   } else {
-    bucket_quota = store->svc.quota->get_bucket_quota();
+    bucket_quota = store->svc()->quota->get_bucket_quota();
   }
 
   if (uinfo->user_quota.enabled) {
     user_quota = uinfo->user_quota;
   } else {
-    user_quota = store->svc.quota->get_user_quota();
+    user_quota = store->svc()->quota->get_user_quota();
   }
 
   return 0;
@@ -1488,9 +1488,9 @@ int RGWGetObj::read_user_manifest_part(rgw_bucket& bucket,
       << " end=" << cur_end << dendl;
 
   obj_ctx.set_atomic(part);
-  store->set_prefetch_data(&obj_ctx, part);
+  store->getRados()->set_prefetch_data(&obj_ctx, part);
 
-  RGWRados::Object op_target(store, s->bucket_info, obj_ctx, part);
+  RGWRados::Object op_target(store->getRados(), s->bucket_info, obj_ctx, part);
   RGWRados::Object::Read read_op(&op_target);
 
   if (!swift_slo) {
@@ -1561,7 +1561,7 @@ int RGWGetObj::read_user_manifest_part(rgw_bucket& bucket,
 }
 
 static int iterate_user_manifest_parts(CephContext * const cct,
-                                       RGWRados * const store,
+                                       rgw::sal::RGWRadosStore * const store,
                                        const off_t ofs,
                                        const off_t end,
                                        RGWBucketInfo *pbucket_info,
@@ -1590,7 +1590,7 @@ static int iterate_user_manifest_parts(CephContext * const cct,
 
   utime_t start_time = ceph_clock_now();
 
-  RGWRados::Bucket target(store, *pbucket_info);
+  RGWRados::Bucket target(store->getRados(), *pbucket_info);
   RGWRados::Bucket::List list_op(&target);
 
   list_op.params.prefix = obj_prefix;
@@ -1668,7 +1668,7 @@ struct rgw_slo_part {
 };
 
 static int iterate_slo_parts(CephContext *cct,
-                             RGWRados *store,
+                             rgw::sal::RGWRadosStore *store,
                              off_t ofs,
                              off_t end,
                              map<uint64_t, rgw_slo_part>& slo_parts,
@@ -1786,8 +1786,8 @@ int RGWGetObj::handle_user_manifest(const char *prefix)
 
   if (bucket_name.compare(s->bucket.name) != 0) {
     map<string, bufferlist> bucket_attrs;
-    auto obj_ctx = store->svc.sysobj->init_obj_ctx();
-    int r = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
+    auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
+    int r = store->getRados()->get_bucket_info(obj_ctx, s->user->user_id.tenant,
                                  bucket_name, bucket_info, NULL,
                                  s->yield, &bucket_attrs);
     if (r < 0) {
@@ -1798,7 +1798,7 @@ int RGWGetObj::handle_user_manifest(const char *prefix)
     bucket = bucket_info.bucket;
     pbucket_info = &bucket_info;
     bucket_acl = &_bucket_acl;
-    r = read_bucket_policy(store->ctl.user, s, bucket_info, bucket_attrs, bucket_acl, bucket);
+    r = read_bucket_policy(store->ctl()->user, s, bucket_info, bucket_attrs, bucket_acl, bucket);
     if (r < 0) {
       ldpp_dout(this, 0) << "failed to read bucket policy" << dendl;
       return r;
@@ -1920,8 +1920,8 @@ int RGWGetObj::handle_slo_manifest(bufferlist& bl)
 
         RGWBucketInfo bucket_info;
         map<string, bufferlist> bucket_attrs;
-        auto obj_ctx = store->svc.sysobj->init_obj_ctx();
-        int r = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
+        auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
+        int r = store->getRados()->get_bucket_info(obj_ctx, s->user->user_id.tenant,
                                        bucket_name, bucket_info, nullptr,
                                        s->yield, &bucket_attrs);
         if (r < 0) {
@@ -1931,7 +1931,7 @@ int RGWGetObj::handle_slo_manifest(bufferlist& bl)
         }
         bucket = bucket_info.bucket;
         bucket_acl = &_bucket_acl;
-        r = read_bucket_policy(store->ctl.user, s, bucket_info, bucket_attrs, bucket_acl,
+        r = read_bucket_policy(store->ctl()->user, s, bucket_info, bucket_attrs, bucket_acl,
                                bucket);
         if (r < 0) {
           ldpp_dout(this, 0) << "failed to read bucket ACL for bucket "
@@ -2000,7 +2000,7 @@ int RGWGetObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len)
   /* garbage collection related handling */
   utime_t start_time = ceph_clock_now();
   if (start_time > gc_invalidate_time) {
-    int r = store->defer_gc(s->obj_ctx, s->bucket_info, obj, s->yield);
+    int r = store->getRados()->defer_gc(s->obj_ctx, s->bucket_info, obj, s->yield);
     if (r < 0) {
       ldpp_dout(this, 0) << "WARNING: could not defer gc entry for obj" << dendl;
     }
@@ -2092,7 +2092,7 @@ void RGWGetObj::execute()
 
   perfcounter->inc(l_rgw_get);
 
-  RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
+  RGWRados::Object op_target(store->getRados(), s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
   RGWRados::Object::Read read_op(&op_target);
 
   op_ret = get_params();
@@ -2310,7 +2310,7 @@ void RGWListBuckets::execute()
   }
 
   if (supports_account_metadata()) {
-    op_ret = store->ctl.user->get_attrs_by_uid(s->user->user_id, &attrs, s->yield);
+    op_ret = store->ctl()->user->get_attrs_by_uid(s->user->user_id, &attrs, s->yield);
     if (op_ret < 0) {
       goto send_end;
     }
@@ -2341,7 +2341,7 @@ void RGWListBuckets::execute()
     /* We need to have stats for all our policies - even if a given policy
      * isn't actually used in a given account. In such situation its usage
      * stats would be simply full of zeros. */
-    for (const auto& policy : store->svc.zone->get_zonegroup().placement_targets) {
+    for (const auto& policy : store->svc()->zone->get_zonegroup().placement_targets) {
       policies_stats.emplace(policy.second.name,
                              decltype(policies_stats)::mapped_type());
     }
@@ -2418,7 +2418,7 @@ void RGWGetUsage::execute()
   RGWUsageIter usage_iter;
   
   while (is_truncated) {
-    op_ret = store->read_usage(s->user->user_id, s->bucket_name, start_epoch, end_epoch, max_entries,
+    op_ret = store->getRados()->read_usage(s->user->user_id, s->bucket_name, start_epoch, end_epoch, max_entries,
                                 &is_truncated, usage_iter, usage);
 
     if (op_ret == -ENOENT) {
@@ -2443,7 +2443,7 @@ void RGWGetUsage::execute()
     return;
   }
 
-  op_ret = store->ctl.user->read_stats(s->user->user_id, &stats);
+  op_ret = store->ctl()->user->read_stats(s->user->user_id, &stats);
   if (op_ret < 0) {
     ldpp_dout(this, 0) << "ERROR: can't read user header"  << dendl;
     return;
@@ -2482,7 +2482,7 @@ void RGWStatAccount::execute()
       /* We need to have stats for all our policies - even if a given policy
        * isn't actually used in a given account. In such situation its usage
        * stats would be simply full of zeros. */
-      for (const auto& policy : store->svc.zone->get_zonegroup().placement_targets) {
+      for (const auto& policy : store->svc()->zone->get_zonegroup().placement_targets) {
         policies_stats.emplace(policy.second.name,
                                decltype(policies_stats)::mapped_type());
       }
@@ -2557,7 +2557,7 @@ void RGWSetBucketVersioning::execute()
     return;
   }
 
-  if (!store->svc.zone->is_meta_master()) {
+  if (!store->svc()->zone->is_meta_master()) {
     op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
     if (op_ret < 0) {
       ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
@@ -2567,7 +2567,7 @@ void RGWSetBucketVersioning::execute()
 
   bool modified = mfa_set_status;
 
-  op_ret = retry_raced_bucket_write(store, s, [&] {
+  op_ret = retry_raced_bucket_write(store->getRados(), s, [&] {
       if (mfa_set_status) {
         if (mfa_status) {
           s->bucket_info.flags |= BUCKET_MFA_ENABLED;
@@ -2586,7 +2586,7 @@ void RGWSetBucketVersioning::execute()
       } else {
        return op_ret;
       }
-      return store->put_bucket_instance_info(s->bucket_info, false, real_time(),
+      return store->getRados()->put_bucket_instance_info(s->bucket_info, false, real_time(),
                                              &s->bucket_attrs);
     });
 
@@ -2635,7 +2635,7 @@ void RGWSetBucketWebsite::execute()
   if (op_ret < 0)
     return;
 
-  if (!store->svc.zone->is_meta_master()) {
+  if (!store->svc()->zone->is_meta_master()) {
     op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
     if (op_ret < 0) {
       ldpp_dout(this, 0) << " forward_request_to_master returned ret=" << op_ret << dendl;
@@ -2643,10 +2643,10 @@ void RGWSetBucketWebsite::execute()
     }
   }
 
-  op_ret = retry_raced_bucket_write(store, s, [this] {
+  op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
       s->bucket_info.has_website = true;
       s->bucket_info.website_conf = website_conf;
-      op_ret = store->put_bucket_instance_info(s->bucket_info, false,
+      op_ret = store->getRados()->put_bucket_instance_info(s->bucket_info, false,
                                               real_time(), &s->bucket_attrs);
       return op_ret;
     });
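
The hunks above (and several more below) change only the first argument of retry_raced_bucket_write from store to store->getRados(): the helper keeps operating on a raw RGWRados*, so callers that now hold an rgw::sal::RGWRadosStore* must unwrap it. The helper's definition is not part of this excerpt; what follows is only a plausible sketch of the refresh-and-retry shape its call sites imply, with the retry limit chosen purely for illustration.

// Sketch only -- not the definition from rgw_op.cc. It relies on the RGWRados
// and req_state declarations already visible in this file; <cerrno> provides
// ECANCELED. The idea: re-run the supplied bucket write after refreshing the
// cached bucket info whenever the write loses a race and returns -ECANCELED.
#include <cerrno>

template <typename F>
static int retry_raced_bucket_write(RGWRados* rados, struct req_state* s, const F& f)
{
  int r = f();
  for (int i = 0; i < 15 && r == -ECANCELED; ++i) {
    // another writer won the race; reload the bucket instance info and retry
    r = rados->try_refresh_bucket_info(s->bucket_info, nullptr, &s->bucket_attrs);
    if (r >= 0) {
      r = f();
    }
  }
  return r;
}

try_refresh_bucket_info, s->bucket_info and s->bucket_attrs are taken from the surrounding code; treat everything else here as an assumption.
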
@@ -2671,7 +2671,7 @@ void RGWDeleteBucketWebsite::pre_exec()
 void RGWDeleteBucketWebsite::execute()
 {
 
-  if (!store->svc.zone->is_meta_master()) {
+  if (!store->svc()->zone->is_meta_master()) {
     bufferlist in_data;
     op_ret = forward_request_to_master(s, nullptr, store, in_data, nullptr);
     if (op_ret < 0) {
@@ -2680,10 +2680,10 @@ void RGWDeleteBucketWebsite::execute()
       return;
     }
   }
-  op_ret = retry_raced_bucket_write(store, s, [this] {
+  op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
       s->bucket_info.has_website = false;
       s->bucket_info.website_conf = RGWBucketWebsiteConf();
-      op_ret = store->put_bucket_instance_info(s->bucket_info, false,
+      op_ret = store->getRados()->put_bucket_instance_info(s->bucket_info, false,
                                               real_time(), &s->bucket_attrs);
       return op_ret;
     });
@@ -2720,7 +2720,7 @@ void RGWStatBucket::execute()
   bucket.bucket = s->bucket;
   buckets.add(bucket);
   map<string, RGWBucketEnt>& m = buckets.get_buckets();
-  op_ret = store->update_containers_stats(m);
+  op_ret = store->getRados()->update_containers_stats(m);
   if (! op_ret)
     op_ret = -EEXIST;
   if (op_ret > 0) {
@@ -2793,13 +2793,13 @@ void RGWListBucket::execute()
     map<string, RGWBucketEnt> m;
     m[s->bucket.name] = RGWBucketEnt();
     m.begin()->second.bucket = s->bucket;
-    op_ret = store->update_containers_stats(m);
+    op_ret = store->getRados()->update_containers_stats(m);
     if (op_ret > 0) {
       bucket = m.begin()->second;
     }
   }
 
-  RGWRados::Bucket target(store, s->bucket_info);
+  RGWRados::Bucket target(store->getRados(), s->bucket_info);
   if (shard_id >= 0) {
     target.set_shard_id(shard_id);
   }
@@ -2876,10 +2876,10 @@ int RGWCreateBucket::verify_permission()
 }
 
 static int forward_request_to_master(struct req_state *s, obj_version *objv,
-                                   RGWRados *store, bufferlist& in_data,
+                                   rgw::sal::RGWRadosStore *store, bufferlist& in_data,
                                    JSONParser *jp, req_info *forward_info)
 {
-  if (!store->svc.zone->get_master_conn()) {
+  if (!store->svc()->zone->get_master_conn()) {
     ldpp_dout(s, 0) << "rest connection is invalid" << dendl;
     return -EINVAL;
   }
@@ -2887,7 +2887,7 @@ static int forward_request_to_master(struct req_state *s, obj_version *objv,
   bufferlist response;
   string uid_str = s->user->user_id.to_str();
 #define MAX_REST_RESPONSE (128 * 1024) // we expect a very small response
-  int ret = store->svc.zone->get_master_conn()->forward(uid_str, (forward_info ? *forward_info : s->info),
+  int ret = store->svc()->zone->get_master_conn()->forward(uid_str, (forward_info ? *forward_info : s->info),
                                                         objv, MAX_REST_RESPONSE, &in_data, &response);
   if (ret < 0)
     return ret;
@@ -3080,7 +3080,7 @@ void RGWCreateBucket::execute()
   bool existed;
   string bucket_name;
   rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name, bucket_name);
-  rgw_raw_obj obj(store->svc.zone->get_zone_params().domain_root, bucket_name);
+  rgw_raw_obj obj(store->svc()->zone->get_zone_params().domain_root, bucket_name);
   obj_version objv, *pobjv = NULL;
 
   op_ret = get_params();
@@ -3089,7 +3089,7 @@ void RGWCreateBucket::execute()
 
   if (!relaxed_region_enforcement &&
       !location_constraint.empty() &&
-      !store->svc.zone->has_zonegroup_api(location_constraint)) {
+      !store->svc()->zone->has_zonegroup_api(location_constraint)) {
       ldpp_dout(this, 0) << "location constraint (" << location_constraint << ")"
                        << " can't be found." << dendl;
       op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
@@ -3097,22 +3097,22 @@ void RGWCreateBucket::execute()
       return;
   }
 
-  if (!relaxed_region_enforcement && !store->svc.zone->get_zonegroup().is_master_zonegroup() && !location_constraint.empty() &&
-      store->svc.zone->get_zonegroup().api_name != location_constraint) {
+  if (!relaxed_region_enforcement && !store->svc()->zone->get_zonegroup().is_master_zonegroup() && !location_constraint.empty() &&
+      store->svc()->zone->get_zonegroup().api_name != location_constraint) {
     ldpp_dout(this, 0) << "location constraint (" << location_constraint << ")"
-                     << " doesn't match zonegroup" << " (" << store->svc.zone->get_zonegroup().api_name << ")"
+                     << " doesn't match zonegroup" << " (" << store->svc()->zone->get_zonegroup().api_name << ")"
                      << dendl;
     op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
     s->err.message = "The specified location-constraint is not valid";
     return;
   }
 
-  const auto& zonegroup = store->svc.zone->get_zonegroup();
+  const auto& zonegroup = store->svc()->zone->get_zonegroup();
   if (!placement_rule.name.empty() &&
       !zonegroup.placement_targets.count(placement_rule.name)) {
     ldpp_dout(this, 0) << "placement target (" << placement_rule.name << ")"
                      << " doesn't exist in the placement targets of zonegroup"
-                     << " (" << store->svc.zone->get_zonegroup().api_name << ")" << dendl;
+                     << " (" << store->svc()->zone->get_zonegroup().api_name << ")" << dendl;
     op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
     s->err.message = "The specified placement target does not exist";
     return;
@@ -3120,7 +3120,7 @@ void RGWCreateBucket::execute()
 
   /* we need to make sure we read bucket info, it's not read before for this
    * specific request */
-  op_ret = store->get_bucket_info(*s->sysobj_ctx, s->bucket_tenant, s->bucket_name,
+  op_ret = store->getRados()->get_bucket_info(*s->sysobj_ctx, s->bucket_tenant, s->bucket_name,
                                  s->bucket_info, nullptr, s->yield, &s->bucket_attrs);
   if (op_ret < 0 && op_ret != -ENOENT)
     return;
@@ -3129,7 +3129,7 @@ void RGWCreateBucket::execute()
   s->bucket_owner.set_id(s->user->user_id);
   s->bucket_owner.set_name(s->user->display_name);
   if (s->bucket_exists) {
-    int r = rgw_op_get_bucket_policy_from_attr(s->cct, store->ctl.user, s->bucket_info,
+    int r = rgw_op_get_bucket_policy_from_attr(s->cct, store->ctl()->user, s->bucket_info,
                                                s->bucket_attrs, &old_policy);
     if (r >= 0)  {
       if (old_policy.get_owner().get_id().compare(s->user->user_id) != 0) {
@@ -3144,7 +3144,7 @@ void RGWCreateBucket::execute()
   uint32_t *pmaster_num_shards;
   real_time creation_time;
 
-  if (!store->svc.zone->is_meta_master()) {
+  if (!store->svc()->zone->is_meta_master()) {
     JSONParser jp;
     op_ret = forward_request_to_master(s, NULL, store, in_data, &jp);
     if (op_ret < 0) {
@@ -3171,10 +3171,10 @@ void RGWCreateBucket::execute()
   if (s->system_request) {
     zonegroup_id = s->info.args.get(RGW_SYS_PARAM_PREFIX "zonegroup");
     if (zonegroup_id.empty()) {
-      zonegroup_id = store->svc.zone->get_zonegroup().get_id();
+      zonegroup_id = store->svc()->zone->get_zonegroup().get_id();
     }
   } else {
-    zonegroup_id = store->svc.zone->get_zonegroup().get_id();
+    zonegroup_id = store->svc()->zone->get_zonegroup().get_id();
   }
 
   if (s->bucket_exists) {
@@ -3182,7 +3182,7 @@ void RGWCreateBucket::execute()
     rgw_bucket bucket;
     bucket.tenant = s->bucket_tenant;
     bucket.name = s->bucket_name;
-    op_ret = store->svc.zone->select_bucket_placement(*(s->user), zonegroup_id,
+    op_ret = store->svc()->zone->select_bucket_placement(*(s->user), zonegroup_id,
                                            placement_rule,
                                            &selected_placement_rule, nullptr);
     if (selected_placement_rule != s->bucket_info.placement_rule) {
@@ -3239,7 +3239,7 @@ void RGWCreateBucket::execute()
   }
 
 
-  op_ret = store->create_bucket(*(s->user), s->bucket, zonegroup_id,
+  op_ret = store->getRados()->create_bucket(*(s->user), s->bucket, zonegroup_id,
                                 placement_rule, s->bucket_info.swift_ver_location,
                                 pquota_info, attrs,
                                 info, pobjv, &ep_objv, creation_time,
@@ -3267,11 +3267,11 @@ void RGWCreateBucket::execute()
     s->bucket = info.bucket;
   }
 
-  op_ret = store->ctl.bucket->link_bucket(s->user->user_id, s->bucket,
+  op_ret = store->ctl()->bucket->link_bucket(s->user->user_id, s->bucket,
                                           info.creation_time, s->yield, false);
   if (op_ret && !existed && op_ret != -EEXIST) {
     /* if it exists (or previously existed), don't remove it! */
-    op_ret = store->ctl.bucket->unlink_bucket(s->user->user_id, s->bucket, s->yield);
+    op_ret = store->ctl()->bucket->unlink_bucket(s->user->user_id, s->bucket, s->yield);
     if (op_ret < 0) {
       ldpp_dout(this, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret
                       << dendl;
@@ -3290,7 +3290,7 @@ void RGWCreateBucket::execute()
       RGWBucketInfo binfo;
       map<string, bufferlist> battrs;
 
-      op_ret = store->get_bucket_info(*s->sysobj_ctx, s->bucket_tenant, s->bucket_name,
+      op_ret = store->getRados()->get_bucket_info(*s->sysobj_ctx, s->bucket_tenant, s->bucket_name,
                                       binfo, nullptr, s->yield, &battrs);
       if (op_ret < 0) {
         return;
@@ -3327,7 +3327,7 @@ void RGWCreateBucket::execute()
       s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();
 
       /* This will also set the quota on the bucket. */
-      op_ret = store->ctl.bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
+      op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
                                                            &s->bucket_info.objv_tracker,
                                                            s->yield);
     } while (op_ret == -ECANCELED && tries++ < 20);
@@ -3385,17 +3385,17 @@ void RGWDeleteBucket::execute()
     }
   }
 
-  op_ret = store->ctl.bucket->sync_user_stats(s->user->user_id, s->bucket_info);
+  op_ret = store->ctl()->bucket->sync_user_stats(s->user->user_id, s->bucket_info);
   if ( op_ret < 0) {
      ldpp_dout(this, 1) << "WARNING: failed to sync user stats before bucket delete: op_ret= " << op_ret << dendl;
   }
   
-  op_ret = store->check_bucket_empty(s->bucket_info, s->yield);
+  op_ret = store->getRados()->check_bucket_empty(s->bucket_info, s->yield);
   if (op_ret < 0) {
     return;
   }
 
-  if (!store->svc.zone->is_meta_master()) {
+  if (!store->svc()->zone->is_meta_master()) {
     bufferlist in_data;
     op_ret = forward_request_to_master(s, &ot.read_version, store, in_data,
                                       NULL);
@@ -3430,7 +3430,7 @@ void RGWDeleteBucket::execute()
     return;
   }
 
-  op_ret = store->delete_bucket(s->bucket_info, ot, s->yield, false);
+  op_ret = store->getRados()->delete_bucket(s->bucket_info, ot, s->yield, false);
 
   if (op_ret == -ECANCELED) {
     // lost a race, either with mdlog sync or another delete bucket operation.
@@ -3440,7 +3440,7 @@ void RGWDeleteBucket::execute()
   }
 
   if (op_ret == 0) {
-    op_ret = store->ctl.bucket->unlink_bucket(s->bucket_info.owner,
+    op_ret = store->ctl()->bucket->unlink_bucket(s->bucket_info.owner,
                                               s->bucket, s->yield, false);
     if (op_ret < 0) {
       ldpp_dout(this, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret
@@ -3460,8 +3460,8 @@ int RGWPutObj::verify_permission()
     rgw_obj_key cs_object(copy_source_object_name, copy_source_version_id);
 
     rgw_obj obj(cs_bucket, cs_object);
-    store->set_atomic(s->obj_ctx, obj);
-    store->set_prefetch_data(s->obj_ctx, obj);
+    store->getRados()->set_atomic(s->obj_ctx, obj);
+    store->getRados()->set_prefetch_data(s->obj_ctx, obj);
 
     /* check source object permissions */
     if (read_obj_policy(store, s, copy_source_bucket_info, cs_attrs, &cs_acl, nullptr,
@@ -3614,7 +3614,7 @@ int RGWPutObj::get_data(const off_t fst, const off_t lst, bufferlist& bl)
   rgw_obj_key obj_key(copy_source_object_name, copy_source_version_id);
   rgw_obj obj(copy_source_bucket_info.bucket, obj_key);
 
-  RGWRados::Object op_target(store, copy_source_bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
+  RGWRados::Object op_target(store->getRados(), copy_source_bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
   RGWRados::Object::Read read_op(&op_target);
   read_op.params.obj_size = &obj_size;
   read_op.params.attrs = &attrs;
@@ -3744,13 +3744,13 @@ void RGWPutObj::execute()
 
   if (!chunked_upload) { /* with chunked upload we don't know how big is the upload.
                             we also check sizes at the end anyway */
-    op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
+    op_ret = store->getRados()->check_quota(s->bucket_owner.get_id(), s->bucket,
                                user_quota, bucket_quota, s->content_length);
     if (op_ret < 0) {
       ldpp_dout(this, 20) << "check_quota() returned ret=" << op_ret << dendl;
       return;
     }
-    op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
+    op_ret = store->getRados()->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
     if (op_ret < 0) {
       ldpp_dout(this, 20) << "check_bucket_shards() returned ret=" << op_ret << dendl;
       return;
@@ -3768,7 +3768,7 @@ void RGWPutObj::execute()
 
   /* Handle object versioning of Swift API. */
   if (! multipart) {
-    op_ret = store->swift_versioning_copy(obj_ctx,
+    op_ret = store->getRados()->swift_versioning_copy(obj_ctx,
                                           s->bucket_owner.get_id(),
                                           s->bucket_info,
                                           obj,
@@ -3824,7 +3824,7 @@ void RGWPutObj::execute()
       if (!version_id.empty()) {
         obj.key.set_instance(version_id);
       } else {
-        store->gen_rand_obj_instance_name(&obj);
+        store->getRados()->gen_rand_obj_instance_name(&obj);
         version_id = obj.key.instance;
       }
     }
@@ -3847,7 +3847,7 @@ void RGWPutObj::execute()
     rgw_obj obj(copy_source_bucket_info.bucket, obj_key.name);
 
     RGWObjState *astate;
-    op_ret = store->get_obj_state(&obj_ctx, copy_source_bucket_info, obj,
+    op_ret = store->getRados()->get_obj_state(&obj_ctx, copy_source_bucket_info, obj,
                                   &astate, true, s->yield, false);
     if (op_ret < 0) {
       ldpp_dout(this, 0) << "ERROR: get copy source obj state returned with error" << op_ret << dendl;
@@ -3867,7 +3867,7 @@ void RGWPutObj::execute()
   // no filters by default
   DataProcessor *filter = processor.get();
 
-  const auto& compression_type = store->svc.zone->get_zone_params().get_compression_type(*pdest_placement);
+  const auto& compression_type = store->svc()->zone->get_zone_params().get_compression_type(*pdest_placement);
   CompressorRef plugin;
   boost::optional<RGWPutObj_Compress> compressor;
 
@@ -3949,14 +3949,14 @@ void RGWPutObj::execute()
     return;
   }
 
-  op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
+  op_ret = store->getRados()->check_quota(s->bucket_owner.get_id(), s->bucket,
                               user_quota, bucket_quota, s->obj_size);
   if (op_ret < 0) {
     ldpp_dout(this, 20) << "second check_quota() returned op_ret=" << op_ret << dendl;
     return;
   }
 
-  op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
+  op_ret = store->getRados()->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
   if (op_ret < 0) {
     ldpp_dout(this, 20) << "check_bucket_shards() returned ret=" << op_ret << dendl;
     return;
@@ -4124,7 +4124,7 @@ void RGWPostObj::execute()
     ceph::buffer::list bl, aclbl;
     int len = 0;
 
-    op_ret = store->check_quota(s->bucket_owner.get_id(),
+    op_ret = store->getRados()->check_quota(s->bucket_owner.get_id(),
                                 s->bucket,
                                 user_quota,
                                 bucket_quota,
@@ -4133,7 +4133,7 @@ void RGWPostObj::execute()
       return;
     }
 
-    op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
+    op_ret = store->getRados()->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
     if (op_ret < 0) {
       return;
     }
@@ -4155,7 +4155,7 @@ void RGWPostObj::execute()
 
     rgw_obj obj(s->bucket, get_current_filename());
     if (s->bucket_info.versioning_enabled()) {
-      store->gen_rand_obj_instance_name(&obj);
+      store->getRados()->gen_rand_obj_instance_name(&obj);
     }
 
     auto aio = rgw::make_throttle(s->cct->_conf->rgw_put_obj_min_window_size,
@@ -4183,7 +4183,7 @@ void RGWPostObj::execute()
     if (encrypt != nullptr) {
       filter = encrypt.get();
     } else {
-      const auto& compression_type = store->svc.zone->get_zone_params().get_compression_type(
+      const auto& compression_type = store->svc()->zone->get_zone_params().get_compression_type(
           s->dest_placement);
       if (compression_type != "none") {
         plugin = Compressor::create(s->cct, compression_type);
@@ -4236,13 +4236,13 @@ void RGWPostObj::execute()
     s->obj_size = ofs;
 
 
-    op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
+    op_ret = store->getRados()->check_quota(s->bucket_owner.get_id(), s->bucket,
                                 user_quota, bucket_quota, s->obj_size);
     if (op_ret < 0) {
       return;
     }
 
-    op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
+    op_ret = store->getRados()->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
     if (op_ret < 0) {
       return;
     }
@@ -4335,7 +4335,7 @@ int RGWPutMetadataAccount::init_processing()
     return op_ret;
   }
 
-  op_ret = store->ctl.user->get_attrs_by_uid(s->user->user_id, &orig_attrs,
+  op_ret = store->ctl()->user->get_attrs_by_uid(s->user->user_id, &orig_attrs,
                                             s->yield,
                                              &acct_op_tracker);
   if (op_ret < 0) {
@@ -4398,7 +4398,7 @@ void RGWPutMetadataAccount::execute()
 {
   /* Params have been extracted earlier. See init_processing(). */
   RGWUserInfo new_uinfo;
-  op_ret = store->ctl.user->get_info_by_uid(s->user->user_id, &new_uinfo, s->yield,
+  op_ret = store->ctl()->user->get_info_by_uid(s->user->user_id, &new_uinfo, s->yield,
                                             RGWUserCtl::GetParams()
                                             .set_objv_tracker(&acct_op_tracker));
   if (op_ret < 0) {
@@ -4419,7 +4419,7 @@ void RGWPutMetadataAccount::execute()
 
   /* We are passing here the current (old) user info to allow the function
    * optimize-out some operations. */
-  op_ret = store->ctl.user->store_info(new_uinfo, s->yield,
+  op_ret = store->ctl()->user->store_info(new_uinfo, s->yield,
                                        RGWUserCtl::PutParams()
                                        .set_old_info(s->user)
                                        .set_objv_tracker(&acct_op_tracker)
@@ -4458,7 +4458,7 @@ void RGWPutMetadataBucket::execute()
     return;
   }
 
-  op_ret = retry_raced_bucket_write(store, s, [this] {
+  op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
       /* Encode special metadata first as we're using std::map::emplace under
        * the hood. This method will add the new items only if the map doesn't
        * contain such keys yet. */
@@ -4509,7 +4509,7 @@ void RGWPutMetadataBucket::execute()
       /* Setting attributes also stores the provided bucket info. Due
        * to this fact, the new quota settings can be serialized with
        * the same call. */
-      op_ret = store->ctl.bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
+      op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
                                                            &s->bucket_info.objv_tracker,
                                                            s->yield);
       return op_ret;
@@ -4538,7 +4538,7 @@ void RGWPutMetadataObject::execute()
   rgw_obj target_obj;
   map<string, bufferlist> attrs, orig_attrs, rmattrs;
 
-  store->set_atomic(s->obj_ctx, obj);
+  store->getRados()->set_atomic(s->obj_ctx, obj);
 
   op_ret = get_params();
   if (op_ret < 0) {
@@ -4576,7 +4576,7 @@ void RGWPutMetadataObject::execute()
     }
   }
 
-  op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, target_obj,
+  op_ret = store->getRados()->set_attrs(s->obj_ctx, s->bucket_info, target_obj,
     attrs, &rmattrs, s->yield);
 }
 
@@ -4782,7 +4782,7 @@ void RGWDeleteObj::execute()
     obj_ctx->set_atomic(obj);
 
     bool ver_restored = false;
-    op_ret = store->swift_versioning_restore(*s->sysobj_ctx, *obj_ctx, s->bucket_owner.get_id(),
+    op_ret = store->getRados()->swift_versioning_restore(*s->sysobj_ctx, *obj_ctx, s->bucket_owner.get_id(),
                                              s->bucket_info, obj, ver_restored, this);
     if (op_ret < 0) {
       return;
@@ -4792,7 +4792,7 @@ void RGWDeleteObj::execute()
       /* Swift's versioning mechanism hasn't found any previous version of
        * the object that could be restored. This means we should proceed
        * with the regular delete path. */
-      RGWRados::Object del_target(store, s->bucket_info, *obj_ctx, obj);
+      RGWRados::Object del_target(store->getRados(), s->bucket_info, *obj_ctx, obj);
       RGWRados::Object::Delete del_op(&del_target);
 
       op_ret = get_system_versioning_params(s, &del_op.params.olh_epoch,
@@ -4889,11 +4889,11 @@ int RGWCopyObj::verify_permission()
   map<string, bufferlist> src_attrs;
 
   if (s->bucket_instance_id.empty()) {
-    op_ret = store->get_bucket_info(*s->sysobj_ctx, src_tenant_name, src_bucket_name, src_bucket_info, NULL, s->yield, &src_attrs);
+    op_ret = store->getRados()->get_bucket_info(*s->sysobj_ctx, src_tenant_name, src_bucket_name, src_bucket_info, NULL, s->yield, &src_attrs);
   } else {
     /* will only happen in intra region sync where the source and dest bucket is the same */
     rgw_bucket b(rgw_bucket_key(src_tenant_name, src_bucket_name, s->bucket_instance_id));
-    op_ret = store->get_bucket_instance_info(*s->sysobj_ctx, b, src_bucket_info, NULL, &src_attrs, s->yield);
+    op_ret = store->getRados()->get_bucket_instance_info(*s->sysobj_ctx, b, src_bucket_info, NULL, &src_attrs, s->yield);
   }
   if (op_ret < 0) {
     if (op_ret == -ENOENT) {
@@ -4907,8 +4907,8 @@ int RGWCopyObj::verify_permission()
   /* get buckets info (source and dest) */
   if (s->local_source &&  source_zone.empty()) {
     rgw_obj src_obj(src_bucket, src_object);
-    store->set_atomic(s->obj_ctx, src_obj);
-    store->set_prefetch_data(s->obj_ctx, src_obj);
+    store->getRados()->set_atomic(s->obj_ctx, src_obj);
+    store->getRados()->set_prefetch_data(s->obj_ctx, src_obj);
 
     rgw_placement_rule src_placement;
 
@@ -4960,7 +4960,7 @@ int RGWCopyObj::verify_permission()
     dest_bucket_info = src_bucket_info;
     dest_attrs = src_attrs;
   } else {
-    op_ret = store->get_bucket_info(*s->sysobj_ctx, dest_tenant_name, dest_bucket_name,
+    op_ret = store->getRados()->get_bucket_info(*s->sysobj_ctx, dest_tenant_name, dest_bucket_name,
                                     dest_bucket_info, nullptr, s->yield, &dest_attrs);
     if (op_ret < 0) {
       if (op_ret == -ENOENT) {
@@ -4973,10 +4973,10 @@ int RGWCopyObj::verify_permission()
   dest_bucket = dest_bucket_info.bucket;
 
   rgw_obj dest_obj(dest_bucket, dest_object);
-  store->set_atomic(s->obj_ctx, dest_obj);
+  store->getRados()->set_atomic(s->obj_ctx, dest_obj);
 
   /* check dest bucket permissions */
-  op_ret = read_bucket_policy(store->ctl.user, s, dest_bucket_info, dest_attrs,
+  op_ret = read_bucket_policy(store->ctl()->user, s, dest_bucket_info, dest_attrs,
                               &dest_bucket_policy, dest_bucket);
   if (op_ret < 0) {
     return op_ret;
@@ -5084,7 +5084,7 @@ void RGWCopyObj::execute()
   if ( ! version_id.empty()) {
     dst_obj.key.set_instance(version_id);
   } else if (dest_bucket_info.versioning_enabled()) {
-    store->gen_rand_obj_instance_name(&dst_obj);
+    store->getRados()->gen_rand_obj_instance_name(&dst_obj);
   }
 
   obj_ctx.set_atomic(src_obj);
@@ -5096,7 +5096,7 @@ void RGWCopyObj::execute()
 
   /* Handle object versioning of Swift API. In case of copying to remote this
    * should fail gently (op_ret == 0) as the dst_obj will not exist here. */
-  op_ret = store->swift_versioning_copy(obj_ctx,
+  op_ret = store->getRados()->swift_versioning_copy(obj_ctx,
                                         dest_bucket_info.owner,
                                         dest_bucket_info,
                                         dst_obj,
@@ -5106,7 +5106,7 @@ void RGWCopyObj::execute()
     return;
   }
 
-  op_ret = store->copy_obj(obj_ctx,
+  op_ret = store->getRados()->copy_obj(obj_ctx,
                           s->user->user_id,
                           &s->info,
                           source_zone,
@@ -5340,7 +5340,7 @@ void RGWPutACLs::execute()
   }
 
   // forward bucket acl requests to meta master zone
-  if (s->object.empty() && !store->svc.zone->is_meta_master()) {
+  if (s->object.empty() && !store->svc()->zone->is_meta_master()) {
     bufferlist in_data;
     // include acl data unless it was generated from a canned_acl
     if (s->canned_acl.empty()) {
@@ -5359,7 +5359,7 @@ void RGWPutACLs::execute()
     *_dout << dendl;
   }
 
-  op_ret = policy->rebuild(store->ctl.user, &owner, new_policy);
+  op_ret = policy->rebuild(store->ctl()->user, &owner, new_policy);
   if (op_ret < 0)
     return;
 
@@ -5374,13 +5374,13 @@ void RGWPutACLs::execute()
 
   if (!s->object.empty()) {
     obj = rgw_obj(s->bucket, s->object);
-    store->set_atomic(s->obj_ctx, obj);
+    store->getRados()->set_atomic(s->obj_ctx, obj);
     //if instance is empty, we should modify the latest object
     op_ret = modify_obj_attr(store, s, obj, RGW_ATTR_ACL, bl);
   } else {
     attrs = s->bucket_attrs;
     attrs[RGW_ATTR_ACL] = bl;
-    op_ret = store->ctl.bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
+    op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
                                                          &s->bucket_info.objv_tracker,
                                                          s->yield);
   }
@@ -5456,7 +5456,7 @@ void RGWPutLC::execute()
     return;
   }
 
-  op_ret = config.rebuild(store, new_config);
+  op_ret = config.rebuild(store->getRados(), new_config);
   if (op_ret < 0)
     return;
 
@@ -5468,7 +5468,7 @@ void RGWPutLC::execute()
     ldpp_dout(this, 15) << "New LifecycleConfiguration:" << ss.str() << dendl;
   }
 
-  if (!store->svc.zone->is_meta_master()) {
+  if (!store->svc()->zone->is_meta_master()) {
     op_ret = forward_request_to_master(s, nullptr, store, data, nullptr);
     if (op_ret < 0) {
       ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
@@ -5476,7 +5476,7 @@ void RGWPutLC::execute()
     }
   }
 
-  op_ret = store->get_lc()->set_bucket_config(s->bucket_info, s->bucket_attrs, &new_config);
+  op_ret = store->getRados()->get_lc()->set_bucket_config(s->bucket_info, s->bucket_attrs, &new_config);
   if (op_ret < 0) {
     return;
   }
@@ -5485,7 +5485,7 @@ void RGWPutLC::execute()
 
 void RGWDeleteLC::execute()
 {
-  if (!store->svc.zone->is_meta_master()) {
+  if (!store->svc()->zone->is_meta_master()) {
     bufferlist data;
     op_ret = forward_request_to_master(s, nullptr, store, data, nullptr);
     if (op_ret < 0) {
@@ -5495,7 +5495,7 @@ void RGWDeleteLC::execute()
   }
   map<string, bufferlist> attrs = s->bucket_attrs;
   attrs.erase(RGW_ATTR_LC);
-  op_ret = store->ctl.bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
+  op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
                                                        &s->bucket_info.objv_tracker,
                                                        s->yield);
   if (op_ret < 0) {
@@ -5504,7 +5504,7 @@ void RGWDeleteLC::execute()
     return;
   }
 
-  op_ret = store->get_lc()->remove_bucket_config(s->bucket_info, s->bucket_attrs);
+  op_ret = store->getRados()->get_lc()->remove_bucket_config(s->bucket_info, s->bucket_attrs);
   if (op_ret < 0) {
     return;
   }
@@ -5542,7 +5542,7 @@ void RGWPutCORS::execute()
   if (op_ret < 0)
     return;
 
-  if (!store->svc.zone->is_meta_master()) {
+  if (!store->svc()->zone->is_meta_master()) {
     op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
     if (op_ret < 0) {
       ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
@@ -5550,10 +5550,10 @@ void RGWPutCORS::execute()
     }
   }
 
-  op_ret = retry_raced_bucket_write(store, s, [this] {
+  op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
       map<string, bufferlist> attrs = s->bucket_attrs;
       attrs[RGW_ATTR_CORS] = cors_bl;
-      return store->ctl.bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
+      return store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
                                                          &s->bucket_info.objv_tracker,
                                                          s->yield);
     });
@@ -5567,7 +5567,7 @@ int RGWDeleteCORS::verify_permission()
 
 void RGWDeleteCORS::execute()
 {
-  if (!store->svc.zone->is_meta_master()) {
+  if (!store->svc()->zone->is_meta_master()) {
     bufferlist data;
     op_ret = forward_request_to_master(s, nullptr, store, data, nullptr);
     if (op_ret < 0) {
@@ -5576,7 +5576,7 @@ void RGWDeleteCORS::execute()
     }
   }
 
-  op_ret = retry_raced_bucket_write(store, s, [this] {
+  op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
       op_ret = read_bucket_cors();
       if (op_ret < 0)
        return op_ret;
@@ -5589,7 +5589,7 @@ void RGWDeleteCORS::execute()
 
       map<string, bufferlist> attrs = s->bucket_attrs;
       attrs.erase(RGW_ATTR_CORS);
-      op_ret = store->ctl.bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
+      op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
                                                            &s->bucket_info.objv_tracker,
                                                            s->yield);
       if (op_ret < 0) {
@@ -5682,7 +5682,7 @@ void RGWSetRequestPayment::pre_exec()
 void RGWSetRequestPayment::execute()
 {
 
-  if (!store->svc.zone->is_meta_master()) {
+  if (!store->svc()->zone->is_meta_master()) {
     op_ret = forward_request_to_master(s, nullptr, store, in_data, nullptr);
     if (op_ret < 0) {
       ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
@@ -5696,7 +5696,7 @@ void RGWSetRequestPayment::execute()
     return;
 
   s->bucket_info.requester_pays = requester_pays;
-  op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(),
+  op_ret = store->getRados()->put_bucket_instance_info(s->bucket_info, false, real_time(),
                                           &s->bucket_attrs);
   if (op_ret < 0) {
     ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
@@ -5785,7 +5785,7 @@ void RGWInitMultipart::execute()
     obj.set_in_extra_data(true);
     obj.index_hash_source = s->object.name;
 
-    RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
+    RGWRados::Object op_target(store->getRados(), s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
     op_target.set_versioning_disabled(true); /* no versioning for multipart meta */
 
     RGWRados::Object::Write obj_op(&op_target);
@@ -5930,10 +5930,10 @@ void RGWCompleteMultipart::execute()
     s->cct->_conf.get_val<int64_t>("rgw_mp_lock_max_time");
   utime_t dur(max_lock_secs_mp, 0);
 
-  store->obj_to_raw((s->bucket_info).placement_rule, meta_obj, &raw_obj);
-  store->get_obj_data_pool((s->bucket_info).placement_rule,
+  store->getRados()->obj_to_raw((s->bucket_info).placement_rule, meta_obj, &raw_obj);
+  store->getRados()->get_obj_data_pool((s->bucket_info).placement_rule,
                           meta_obj,&meta_pool);
-  store->open_pool_ctx(meta_pool, serializer.ioctx, true);
+  store->getRados()->open_pool_ctx(meta_pool, serializer.ioctx, true);
 
   op_ret = serializer.try_lock(raw_obj.oid, dur);
   if (op_ret < 0) {
@@ -6009,7 +6009,7 @@ void RGWCompleteMultipart::execute()
         op_ret = -ERR_INVALID_PART;
         return;
       } else {
-        manifest.append(obj_part.manifest, store->svc.zone);
+        manifest.append(obj_part.manifest, store->svc()->zone);
       }
 
       bool part_compressed = (obj_part.cs_info.compression_type != "none");
@@ -6075,7 +6075,7 @@ void RGWCompleteMultipart::execute()
     if (!version_id.empty()) {
       target_obj.key.set_instance(version_id);
     } else {
-      store->gen_rand_obj_instance_name(&target_obj);
+      store->getRados()->gen_rand_obj_instance_name(&target_obj);
       version_id = target_obj.key.get_instance();
     }
   }
@@ -6084,7 +6084,7 @@ void RGWCompleteMultipart::execute()
 
   obj_ctx.set_atomic(target_obj);
 
-  RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), target_obj);
+  RGWRados::Object op_target(store->getRados(), s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), target_obj);
   RGWRados::Object::Write obj_op(&op_target);
 
   obj_op.meta.manifest = &manifest;
@@ -6101,7 +6101,7 @@ void RGWCompleteMultipart::execute()
     return;
 
   // remove the upload obj
-  int r = store->delete_obj(*static_cast<RGWObjectCtx *>(s->obj_ctx),
+  int r = store->getRados()->delete_obj(*static_cast<RGWObjectCtx *>(s->obj_ctx),
                            s->bucket_info, meta_obj, 0);
   if (r >= 0)  {
     /* serializer's exclusive lock is released */
@@ -6444,7 +6444,7 @@ void RGWDeleteMultiObj::execute()
 
     obj_ctx->set_atomic(obj);
 
-    RGWRados::Object del_target(store, s->bucket_info, *obj_ctx, obj);
+    RGWRados::Object del_target(store->getRados(), s->bucket_info, *obj_ctx, obj);
     RGWRados::Object::Delete del_op(&del_target);
 
     del_op.params.bucket_owner = s->bucket_owner.get_id();
@@ -6480,7 +6480,7 @@ bool RGWBulkDelete::Deleter::verify_permission(RGWBucketInfo& binfo,
                                                ACLOwner& bucket_owner /* out */)
 {
   RGWAccessControlPolicy bacl(store->ctx());
-  int ret = read_bucket_policy(store->ctl.user, s, binfo, battrs, &bacl, binfo.bucket);
+  int ret = read_bucket_policy(store->ctl()->user, s, binfo, battrs, &bacl, binfo.bucket);
   if (ret < 0) {
     return false;
   }
@@ -6506,7 +6506,7 @@ bool RGWBulkDelete::Deleter::delete_single(const acct_path_t& path)
 
   rgw_bucket b(rgw_bucket_key(s->user->user_id.tenant, path.bucket_name));
 
-  int ret = store->ctl.bucket->read_bucket_info(b, &binfo, s->yield,
+  int ret = store->ctl()->bucket->read_bucket_info(b, &binfo, s->yield,
                                                RGWBucketCtl::BucketInstance::GetParams()
                                                  .set_attrs(&battrs),
                                                &ot);
@@ -6523,7 +6523,7 @@ bool RGWBulkDelete::Deleter::delete_single(const acct_path_t& path)
     rgw_obj obj(binfo.bucket, path.obj_key);
     obj_ctx.set_atomic(obj);
 
-    RGWRados::Object del_target(store, binfo, obj_ctx, obj);
+    RGWRados::Object del_target(store->getRados(), binfo, obj_ctx, obj);
     RGWRados::Object::Delete del_op(&del_target);
 
     del_op.params.bucket_owner = binfo.owner;
@@ -6535,9 +6535,9 @@ bool RGWBulkDelete::Deleter::delete_single(const acct_path_t& path)
       goto delop_fail;
     }
   } else {
-    ret = store->delete_bucket(binfo, ot, s->yield);
+    ret = store->getRados()->delete_bucket(binfo, ot, s->yield);
     if (0 == ret) {
-      ret = store->ctl.bucket->unlink_bucket(binfo.owner, binfo.bucket, s->yield, false);
+      ret = store->ctl()->bucket->unlink_bucket(binfo.owner, binfo.bucket, s->yield, false);
       if (ret < 0) {
         ldpp_dout(s, 0) << "WARNING: failed to unlink bucket: ret=" << ret << dendl;
       }
@@ -6546,7 +6546,7 @@ bool RGWBulkDelete::Deleter::delete_single(const acct_path_t& path)
       goto delop_fail;
     }
 
-    if (!store->svc.zone->is_meta_master()) {
+    if (!store->svc()->zone->is_meta_master()) {
       bufferlist in_data;
       ret = forward_request_to_master(s, &ot.read_version, store, in_data,
                                       nullptr);
@@ -6761,12 +6761,12 @@ static void forward_req_info(CephContext *cct, req_info& info, const std::string
   info.effective_uri = "/" + bucket_name;
 }
 
-void RGWBulkUploadOp::init(RGWRados* const store,
+void RGWBulkUploadOp::init(rgw::sal::RGWRadosStore* const store,
                            struct req_state* const s,
                            RGWHandler* const h)
 {
   RGWOp::init(store, s, h);
-  dir_ctx.emplace(store->svc.sysobj->init_obj_ctx());
+  dir_ctx.emplace(store->svc()->sysobj->init_obj_ctx());
 }
 
 int RGWBulkUploadOp::handle_dir(const boost::string_ref path)
@@ -6782,14 +6782,14 @@ int RGWBulkUploadOp::handle_dir(const boost::string_ref path)
   rgw_obj_key object_junk;
   std::tie(bucket_name, object_junk) =  *parse_path(path);
 
-  rgw_raw_obj obj(store->svc.zone->get_zone_params().domain_root,
+  rgw_raw_obj obj(store->svc()->zone->get_zone_params().domain_root,
                   rgw_make_bucket_entry_name(s->bucket_tenant, bucket_name));
 
   /* we need to make sure we read bucket info, it's not read before for this
    * specific request */
   RGWBucketInfo binfo;
   std::map<std::string, ceph::bufferlist> battrs;
-  op_ret = store->get_bucket_info(*dir_ctx, s->bucket_tenant, bucket_name,
+  op_ret = store->getRados()->get_bucket_info(*dir_ctx, s->bucket_tenant, bucket_name,
                                   binfo, nullptr, s->yield, &battrs);
   if (op_ret < 0 && op_ret != -ENOENT) {
     return op_ret;
@@ -6798,7 +6798,7 @@ int RGWBulkUploadOp::handle_dir(const boost::string_ref path)
 
   if (bucket_exists) {
     RGWAccessControlPolicy old_policy(s->cct);
-    int r = rgw_op_get_bucket_policy_from_attr(s->cct, store->ctl.user, binfo,
+    int r = rgw_op_get_bucket_policy_from_attr(s->cct, store->ctl()->user, binfo,
                                                battrs, &old_policy);
     if (r >= 0)  {
       if (old_policy.get_owner().get_id().compare(s->user->user_id) != 0) {
@@ -6814,7 +6814,7 @@ int RGWBulkUploadOp::handle_dir(const boost::string_ref path)
   real_time creation_time;
   obj_version objv, ep_objv, *pobjv = nullptr;
 
-  if (! store->svc.zone->is_meta_master()) {
+  if (! store->svc()->zone->is_meta_master()) {
     JSONParser jp;
     ceph::bufferlist in_data;
     req_info info = s->info;
@@ -6847,8 +6847,8 @@ int RGWBulkUploadOp::handle_dir(const boost::string_ref path)
     rgw_bucket bucket;
     bucket.tenant = s->bucket_tenant;
     bucket.name = s->bucket_name;
-    op_ret = store->svc.zone->select_bucket_placement(*(s->user),
-                                            store->svc.zone->get_zonegroup().get_id(),
+    op_ret = store->svc()->zone->select_bucket_placement(*(s->user),
+                                            store->svc()->zone->get_zonegroup().get_id(),
                                             placement_rule,
                                             &selected_placement_rule,
                                             nullptr);
@@ -6876,9 +6876,9 @@ int RGWBulkUploadOp::handle_dir(const boost::string_ref path)
 
 
   RGWBucketInfo out_info;
-  op_ret = store->create_bucket(*(s->user),
+  op_ret = store->getRados()->create_bucket(*(s->user),
                                 bucket,
-                                store->svc.zone->get_zonegroup().get_id(),
+                                store->svc()->zone->get_zonegroup().get_id(),
                                 placement_rule, binfo.swift_ver_location,
                                 pquota_info, attrs,
                                 out_info, pobjv, &ep_objv, creation_time,
@@ -6908,12 +6908,12 @@ int RGWBulkUploadOp::handle_dir(const boost::string_ref path)
     bucket = out_info.bucket;
   }
 
-  op_ret = store->ctl.bucket->link_bucket(s->user->user_id, bucket,
+  op_ret = store->ctl()->bucket->link_bucket(s->user->user_id, bucket,
                                           out_info.creation_time,
                                          s->yield, false);
   if (op_ret && !existed && op_ret != -EEXIST) {
     /* if it exists (or previously existed), don't remove it! */
-    op_ret = store->ctl.bucket->unlink_bucket(s->user->user_id, bucket, s->yield);
+    op_ret = store->ctl()->bucket->unlink_bucket(s->user->user_id, bucket, s->yield);
     if (op_ret < 0) {
       ldpp_dout(this, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret << dendl;
     }
@@ -6932,7 +6932,7 @@ bool RGWBulkUploadOp::handle_file_verify_permission(RGWBucketInfo& binfo,
                                                     ACLOwner& bucket_owner /* out */)
 {
   RGWAccessControlPolicy bacl(store->ctx());
-  op_ret = read_bucket_policy(store->ctl.user, s, binfo, battrs, &bacl, binfo.bucket);
+  op_ret = read_bucket_policy(store->ctl()->user, s, binfo, battrs, &bacl, binfo.bucket);
   if (op_ret < 0) {
     ldpp_dout(this, 20) << "cannot read_policy() for bucket" << dendl;
     return false;
@@ -6983,7 +6983,7 @@ int RGWBulkUploadOp::handle_file(const boost::string_ref path,
   RGWBucketInfo binfo;
   std::map<std::string, ceph::bufferlist> battrs;
   ACLOwner bowner;
-  op_ret = store->get_bucket_info(*s->sysobj_ctx, s->user->user_id.tenant,
+  op_ret = store->getRados()->get_bucket_info(*s->sysobj_ctx, s->user->user_id.tenant,
                                   bucket_name, binfo, nullptr, s->yield, &battrs);
   if (op_ret == -ENOENT) {
     ldpp_dout(this, 20) << "non existent directory=" << bucket_name << dendl;
@@ -6999,20 +6999,20 @@ int RGWBulkUploadOp::handle_file(const boost::string_ref path,
     return op_ret;
   }
 
-  op_ret = store->check_quota(bowner.get_id(), binfo.bucket,
+  op_ret = store->getRados()->check_quota(bowner.get_id(), binfo.bucket,
                               user_quota, bucket_quota, size);
   if (op_ret < 0) {
     return op_ret;
   }
 
-  op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
+  op_ret = store->getRados()->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
   if (op_ret < 0) {
     return op_ret;
   }
 
   rgw_obj obj(binfo.bucket, object);
   if (s->bucket_info.versioning_enabled()) {
-    store->gen_rand_obj_instance_name(&obj);
+    store->getRados()->gen_rand_obj_instance_name(&obj);
   }
 
   rgw_placement_rule dest_placement = s->dest_placement;
@@ -7034,7 +7034,7 @@ int RGWBulkUploadOp::handle_file(const boost::string_ref path,
   /* No filters by default. */
   DataProcessor *filter = &processor;
 
-  const auto& compression_type = store->svc.zone->get_zone_params().get_compression_type(
+  const auto& compression_type = store->svc()->zone->get_zone_params().get_compression_type(
       dest_placement);
   CompressorRef plugin;
   boost::optional<RGWPutObj_Compress> compressor;
@@ -7086,14 +7086,14 @@ int RGWBulkUploadOp::handle_file(const boost::string_ref path,
     return op_ret;
   }
 
-  op_ret = store->check_quota(bowner.get_id(), binfo.bucket,
+  op_ret = store->getRados()->check_quota(bowner.get_id(), binfo.bucket,
                              user_quota, bucket_quota, size);
   if (op_ret < 0) {
     ldpp_dout(this, 20) << "quota exceeded for path=" << path << dendl;
     return op_ret;
   }
 
-  op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
+  op_ret = store->getRados()->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
   if (op_ret < 0) {
     return op_ret;
   }
@@ -7291,13 +7291,13 @@ void RGWSetAttrs::execute()
   rgw_obj obj(s->bucket, s->object);
 
   if (!s->object.empty()) {
-    store->set_atomic(s->obj_ctx, obj);
-    op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, nullptr, s->yield);
+    store->getRados()->set_atomic(s->obj_ctx, obj);
+    op_ret = store->getRados()->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, nullptr, s->yield);
   } else {
     for (auto& iter : attrs) {
       s->bucket_attrs[iter.first] = std::move(iter.second);
     }
-    op_ret = store->ctl.bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
+    op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
                                                          &s->bucket_info.objv_tracker,
                                                          s->yield);
   }
@@ -7311,7 +7311,7 @@ void RGWGetObjLayout::pre_exec()
 void RGWGetObjLayout::execute()
 {
   rgw_obj obj(s->bucket, s->object);
-  RGWRados::Object target(store,
+  RGWRados::Object target(store->getRados(),
                           s->bucket_info,
                           *static_cast<RGWObjectCtx *>(s->obj_ctx),
                           rgw_obj(s->bucket, s->object));
@@ -7352,7 +7352,7 @@ void RGWConfigBucketMetaSearch::execute()
 
   s->bucket_info.mdsearch_config = mdsearch_config;
 
-  op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
+  op_ret = store->getRados()->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
   if (op_ret < 0) {
     ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
         << " returned err=" << op_ret << dendl;
@@ -7392,7 +7392,7 @@ void RGWDelBucketMetaSearch::execute()
 {
   s->bucket_info.mdsearch_config.clear();
 
-  op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
+  op_ret = store->getRados()->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
   if (op_ret < 0) {
     ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
         << " returned err=" << op_ret << dendl;
@@ -7405,7 +7405,7 @@ RGWHandler::~RGWHandler()
 {
 }
 
-int RGWHandler::init(RGWRados *_store,
+int RGWHandler::init(rgw::sal::RGWRadosStore *_store,
                      struct req_state *_s,
                      rgw::io::BasicClient *cio)
 {
@@ -7506,7 +7506,7 @@ void RGWPutBucketPolicy::execute()
     return;
   }
 
-  if (!store->svc.zone->is_meta_master()) {
+  if (!store->svc()->zone->is_meta_master()) {
     op_ret = forward_request_to_master(s, NULL, store, data, nullptr);
     if (op_ret < 0) {
       ldpp_dout(this, 20) << "forward_request_to_master returned ret=" << op_ret << dendl;
@@ -7516,11 +7516,11 @@ void RGWPutBucketPolicy::execute()
 
   try {
     const Policy p(s->cct, s->bucket_tenant, data);
-    op_ret = retry_raced_bucket_write(store, s, [&p, this] {
+    op_ret = retry_raced_bucket_write(store->getRados(), s, [&p, this] {
        auto attrs = s->bucket_attrs;
        attrs[RGW_ATTR_IAM_POLICY].clear();
        attrs[RGW_ATTR_IAM_POLICY].append(p.text);
-       op_ret = store->ctl.bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
+       op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
                                                              &s->bucket_info.objv_tracker,
                                                              s->yield);
        return op_ret;
@@ -7593,10 +7593,10 @@ int RGWDeleteBucketPolicy::verify_permission()
 
 void RGWDeleteBucketPolicy::execute()
 {
-  op_ret = retry_raced_bucket_write(store, s, [this] {
+  op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
       auto attrs = s->bucket_attrs;
       attrs.erase(RGW_ATTR_IAM_POLICY);
-      op_ret = store->ctl.bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
+      op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
                                                            &s->bucket_info.objv_tracker,
                                                            s->yield);
       return op_ret;
@@ -7649,7 +7649,7 @@ void RGWPutBucketObjectLock::execute()
     return;
   }
 
-  if (!store->svc.zone->is_meta_master()) {
+  if (!store->svc()->zone->is_meta_master()) {
     op_ret = forward_request_to_master(s, NULL, store, data, nullptr);
     if (op_ret < 0) {
       ldout(s->cct, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl;
@@ -7657,9 +7657,9 @@ void RGWPutBucketObjectLock::execute()
     }
   }
 
-  op_ret = retry_raced_bucket_write(store, s, [this] {
+  op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
     s->bucket_info.obj_lock = obj_lock;
-    op_ret = store->put_bucket_instance_info(s->bucket_info, false,
+    op_ret = store->getRados()->put_bucket_instance_info(s->bucket_info, false,
                                              real_time(), &s->bucket_attrs);
     return op_ret;
   });
@@ -7914,7 +7914,7 @@ void RGWGetObjLegalHold::execute()
 
 void RGWGetClusterStat::execute()
 {
-  op_ret = this->store->get_rados_handle()->cluster_stat(stats_op);
+  op_ret = this->store->getRados()->get_rados_handle()->cluster_stat(stats_op);
 }
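
Every change in the rgw_op.cc hunks above is the same mechanical substitution: the op code now holds an rgw::sal::RGWRadosStore* and reaches the former RGWRados members through accessors (getRados() for raw RADOS operations, svc() for the service layer, ctl() for the control layer). Below is a minimal, self-contained sketch of the wrapper shape these call sites assume; the stub structs only stand in for the real RGW types, and the authoritative class is the one added in src/rgw/rgw_sal.h, which may differ in detail.

// Stand-in stubs so the sketch compiles on its own; the real definitions
// live in rgw_rados.h and related headers.
struct CephContext;
struct RGWServices { /* zone, sysobj, ... */ };
struct RGWCtl      { /* user, bucket, meta, ... */ };

struct RGWRados {
  RGWServices svc;             // previously reached directly as store->svc.zone etc.
  RGWCtl      ctl;             // previously reached directly as store->ctl.user etc.
  CephContext* cct = nullptr;
  CephContext* ctx() { return cct; }
};

namespace rgw {
namespace sal {

// Assumed accessor shape behind the new call sites:
//   store->getRados()->create_bucket(...)    raw RADOS operations
//   store->svc()->zone->is_meta_master()     services
//   store->ctl()->bucket->link_bucket(...)   controllers
class RGWRadosStore {
  RGWRados* rados = nullptr;

public:
  void setRados(RGWRados* r) { rados = r; }

  RGWRados*    getRados() { return rados; }
  RGWServices* svc()      { return &rados->svc; }
  RGWCtl*      ctl()      { return &rados->ctl; }

  CephContext* ctx()      { return rados->ctx(); }  // keeps store->ctx() call sites working
};

} // namespace sal
} // namespace rgw

Because the accessors simply forward to the wrapped RGWRados, the substitution above is behaviour-preserving; only the type that the op and handler code holds changes.
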
 
 
index 302590e6908440e64309a152d0d01704164bc52f..d9401d87677f25675d433d2b3f24b4cc00dd6096 100644 (file)
--- a/src/rgw/rgw_op.h
+++ b/src/rgw/rgw_op.h
@@ -79,7 +79,7 @@ int rgw_op_get_bucket_policy_from_attr(CephContext *cct,
 
 class RGWHandler {
 protected:
-  RGWRados *store{nullptr};
+  rgw::sal::RGWRadosStore* store{nullptr};
   struct req_state *s{nullptr};
 
   int do_init_permissions();
@@ -89,7 +89,7 @@ public:
   RGWHandler() {}
   virtual ~RGWHandler();
 
-  virtual int init(RGWRados *store,
+  virtual int init(rgw::sal::RGWRadosStore* store,
                    struct req_state* _s,
                    rgw::io::BasicClient* cio);
 
@@ -126,7 +126,7 @@ class RGWOp : public DoutPrefixProvider {
 protected:
   struct req_state *s;
   RGWHandler *dialect_handler;
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   RGWCORSConfiguration bucket_cors;
   bool cors_exist;
   RGWQuotaInfo bucket_quota;
@@ -159,7 +159,7 @@ public:
     return 0;
   }
 
-  virtual void init(RGWRados *store, struct req_state *s, RGWHandler *dialect_handler) {
+  virtual void init(rgw::sal::RGWRadosStore *store, struct req_state *s, RGWHandler *dialect_handler) {
     this->store = store;
     this->s = s;
     this->dialect_handler = dialect_handler;
@@ -483,11 +483,11 @@ public:
     unsigned int num_unfound;
     std::list<fail_desc_t> failures;
 
-    RGWRados * const store;
+    rgw::sal::RGWRadosStore * const store;
     req_state * const s;
 
   public:
-    Deleter(const DoutPrefixProvider* dpp, RGWRados * const str, req_state * const s)
+    Deleter(const DoutPrefixProvider* dpp, rgw::sal::RGWRadosStore * const str, req_state * const s)
       : dpp(dpp),
         num_deleted(0),
         num_unfound(0),
@@ -596,7 +596,7 @@ public:
     : num_created(0) {
   }
 
-  void init(RGWRados* const store,
+  void init(rgw::sal::RGWRadosStore* const store,
             struct req_state* const s,
             RGWHandler* const h) override;
 
@@ -983,7 +983,7 @@ public:
   int verify_permission() override;
   void pre_exec() override;
   void execute() override;
-  void init(RGWRados *store, struct req_state *s, RGWHandler *h) override {
+  void init(rgw::sal::RGWRadosStore *store, struct req_state *s, RGWHandler *h) override {
     RGWOp::init(store, s, h);
     policy.set_ctx(s->cct);
     relaxed_region_enforcement =
@@ -1136,7 +1136,7 @@ public:
     delete obj_legal_hold;
   }
 
-  void init(RGWRados *store, struct req_state *s, RGWHandler *h) override {
+  void init(rgw::sal::RGWRadosStore *store, struct req_state *s, RGWHandler *h) override {
     RGWOp::init(store, s, h);
     policy.set_ctx(s->cct);
   }
@@ -1206,7 +1206,7 @@ public:
     attrs.emplace(std::move(key), std::move(bl)); /* key and bl are r-value refs */
   }
 
-  void init(RGWRados *store, struct req_state *s, RGWHandler *h) override {
+  void init(rgw::sal::RGWRadosStore *store, struct req_state *s, RGWHandler *h) override {
     RGWOp::init(store, s, h);
     policy.set_ctx(s->cct);
   }
@@ -1247,7 +1247,7 @@ public:
       has_policy(false) {
   }
 
-  void init(RGWRados *store, struct req_state *s, RGWHandler *h) override {
+  void init(rgw::sal::RGWRadosStore *store, struct req_state *s, RGWHandler *h) override {
     RGWOp::init(store, s, h);
     policy.set_ctx(s->cct);
   }
@@ -1286,7 +1286,7 @@ public:
     attrs.emplace(std::move(key), std::move(bl)); /* key and bl are r-value refs */
   }
 
-  void init(RGWRados *store, struct req_state *s, RGWHandler *h) override {
+  void init(rgw::sal::RGWRadosStore *store, struct req_state *s, RGWHandler *h) override {
     RGWOp::init(store, s, h);
     policy.set_ctx(s->cct);
   }
@@ -1313,7 +1313,7 @@ public:
     : dlo_manifest(NULL)
   {}
 
-  void init(RGWRados *store, struct req_state *s, RGWHandler *h) override {
+  void init(rgw::sal::RGWRadosStore *store, struct req_state *s, RGWHandler *h) override {
     RGWOp::init(store, s, h);
     policy.set_ctx(s->cct);
   }
@@ -1435,7 +1435,7 @@ public:
     attrs.emplace(std::move(key), std::move(bl));
   }
 
-  void init(RGWRados *store, struct req_state *s, RGWHandler *h) override {
+  void init(rgw::sal::RGWRadosStore *store, struct req_state *s, RGWHandler *h) override {
     RGWOp::init(store, s, h);
     dest_policy.set_ctx(s->cct);
   }
@@ -1488,7 +1488,7 @@ public:
   void pre_exec() override;
   void execute() override;
 
-  virtual int get_policy_from_state(RGWRados *store, struct req_state *s, stringstream& ss) { return 0; }
+  virtual int get_policy_from_state(rgw::sal::RGWRadosStore *store, struct req_state *s, stringstream& ss) { return 0; }
   virtual int get_params() = 0;
   void send_response() override = 0;
   const char* name() const override { return "put_acls"; }
@@ -1525,7 +1525,7 @@ public:
   }
   ~RGWPutLC() override {}
 
-  void init(RGWRados *store, struct req_state *s, RGWHandler *dialect_handler) override {
+  void init(rgw::sal::RGWRadosStore *store, struct req_state *s, RGWHandler *dialect_handler) override {
 #define COOKIE_LEN 16
     char buf[COOKIE_LEN + 1];
 
@@ -1683,7 +1683,7 @@ protected:
 public:
   RGWInitMultipart() {}
 
-  void init(RGWRados *store, struct req_state *s, RGWHandler *h) override {
+  void init(rgw::sal::RGWRadosStore *store, struct req_state *s, RGWHandler *h) override {
     RGWOp::init(store, s, h);
     policy.set_ctx(s->cct);
   }
@@ -1773,7 +1773,7 @@ public:
     truncated = false;
   }
 
-  void init(RGWRados *store, struct req_state *s, RGWHandler *h) override {
+  void init(rgw::sal::RGWRadosStore *store, struct req_state *s, RGWHandler *h) override {
     RGWOp::init(store, s, h);
     policy = RGWAccessControlPolicy(s->cct);
   }
@@ -1812,7 +1812,7 @@ public:
     default_max = 0;
   }
 
-  void init(RGWRados *store, struct req_state *s, RGWHandler *h) override {
+  void init(rgw::sal::RGWRadosStore *store, struct req_state *s, RGWHandler *h) override {
     RGWOp::init(store, s, h);
     max_uploads = default_max;
   }
@@ -1916,13 +1916,13 @@ public:
   uint32_t op_mask() override { return RGW_OP_TYPE_READ; }
 };
 
-extern int rgw_build_bucket_policies(RGWRados* store, struct req_state* s);
-extern int rgw_build_object_policies(RGWRados *store, struct req_state *s,
+extern int rgw_build_bucket_policies(rgw::sal::RGWRadosStore* store, struct req_state* s);
+extern int rgw_build_object_policies(rgw::sal::RGWRadosStore *store, struct req_state *s,
                                     bool prefetch_data);
-extern void rgw_build_iam_environment(RGWRados* store,
+extern void rgw_build_iam_environment(rgw::sal::RGWRadosStore* store,
                                                                          struct req_state* s);
 extern vector<rgw::IAM::Policy> get_iam_user_policy_from_attr(CephContext* cct,
-                        RGWRados* store,
+                        rgw::sal::RGWRadosStore* store,
                         map<string, bufferlist>& attrs,
                         const string& tenant);
 
@@ -2338,7 +2338,7 @@ protected:
 public:
   RGWGetClusterStat() {}
 
-  void init(RGWRados *store, struct req_state *s, RGWHandler *h) override {
+  void init(rgw::sal::RGWRadosStore *store, struct req_state *s, RGWHandler *h) override {
     RGWOp::init(store, s, h);
   }
   int verify_permission() override {return 0;}
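
The rgw_op.h hunks above are purely mechanical: every init() override and helper prototype swaps RGWRados* for rgw::sal::RGWRadosStore*, while the bodies keep delegating to RGWOp::init(). A minimal sketch of the resulting shape, using a hypothetical RGWExampleOp that is not part of this change:

    // Hypothetical op, shown only to illustrate the post-SAL init() signature.
    class RGWExampleOp : public RGWOp {
      RGWAccessControlPolicy policy;
    public:
      void init(rgw::sal::RGWRadosStore *store, struct req_state *s, RGWHandler *h) override {
        RGWOp::init(store, s, h);   // base class now holds the SAL store pointer
        policy.set_ctx(s->cct);     // per-request ACL context, as in the real ops above
      }
      int verify_permission() override { return 0; }
      void execute() override {}
      void send_response() override {}
      const char* name() const override { return "example_op"; }
    };
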
index 4a9f4b3b3745874a81c16fa2d061a095013d4c3c..737ac52b571bdeb88f3bfe4892ed7dff60bac1aa 100644 (file)
@@ -146,8 +146,8 @@ int RGWOrphanStore::list_jobs(map <string,RGWOrphanSearchState>& job_list)
 
 int RGWOrphanStore::init()
 {
-  const rgw_pool& log_pool = store->svc.zone->get_zone_params().log_pool;
-  int r = rgw_init_ioctx(store->get_rados_handle(), log_pool, ioctx);
+  const rgw_pool& log_pool = store->svc()->zone->get_zone_params().log_pool;
+  int r = rgw_init_ioctx(store->getRados()->get_rados_handle(), log_pool, ioctx);
   if (r < 0) {
     cerr << "ERROR: failed to open log pool (" << log_pool << " ret=" << r << std::endl;
     return r;
@@ -293,7 +293,7 @@ int RGWOrphanSearch::build_all_oids_index()
 {
   librados::IoCtx ioctx;
 
-  int ret = rgw_init_ioctx(store->get_rados_handle(), search_info.pool, ioctx);
+  int ret = rgw_init_ioctx(store->getRados()->get_rados_handle(), search_info.pool, ioctx);
   if (ret < 0) {
     lderr(store->ctx()) << __func__ << ": rgw_init_ioctx() returned ret=" << ret << dendl;
     return ret;
@@ -372,7 +372,7 @@ int RGWOrphanSearch::build_buckets_instance_index()
   void *handle;
   int max = 1000;
   string section = "bucket.instance";
-  int ret = store->ctl.meta.mgr->list_keys_init(section, &handle);
+  int ret = store->ctl()->meta.mgr->list_keys_init(section, &handle);
   if (ret < 0) {
     lderr(store->ctx()) << "ERROR: can't get key: " << cpp_strerror(-ret) << dendl;
     return -ret;
@@ -389,7 +389,7 @@ int RGWOrphanSearch::build_buckets_instance_index()
 
   do {
     list<string> keys;
-    ret = store->ctl.meta.mgr->list_keys_next(handle, max, keys, &truncated);
+    ret = store->ctl()->meta.mgr->list_keys_next(handle, max, keys, &truncated);
     if (ret < 0) {
       lderr(store->ctx()) << "ERROR: lists_keys_next(): " << cpp_strerror(-ret) << dendl;
       return -ret;
@@ -419,7 +419,7 @@ int RGWOrphanSearch::build_buckets_instance_index()
     lderr(store->ctx()) << __func__ << ": ERROR: log_oids() returned ret=" << ret << dendl;
     return ret;
   }
-  store->ctl.meta.mgr->list_keys_complete(handle);
+  store->ctl()->meta.mgr->list_keys_complete(handle);
 
   return 0;
 }
@@ -448,7 +448,7 @@ int RGWOrphanSearch::handle_stat_result(map<int, list<string> >& oids, RGWRados:
 
     RGWObjManifest::obj_iterator miter;
     for (miter = manifest.obj_begin(); miter != manifest.obj_end(); ++miter) {
-      const rgw_raw_obj& loc = miter.get_location().get_raw_obj(store);
+      const rgw_raw_obj& loc = miter.get_location().get_raw_obj(store->getRados());
       string s = loc.oid;
       obj_oids.insert(obj_fingerprint(s));
     }
@@ -487,7 +487,7 @@ done:
 int RGWOrphanSearch::build_linked_oids_for_bucket(const string& bucket_instance_id, map<int, list<string> >& oids)
 {
   RGWObjectCtx obj_ctx(store);
-  auto sysobj_ctx = store->svc.sysobj->init_obj_ctx();
+  auto sysobj_ctx = store->svc()->sysobj->init_obj_ctx();
 
   rgw_bucket orphan_bucket;
   int shard_id;
@@ -500,7 +500,7 @@ int RGWOrphanSearch::build_linked_oids_for_bucket(const string& bucket_instance_
   }
 
   RGWBucketInfo cur_bucket_info;
-  ret = store->get_bucket_info(sysobj_ctx, orphan_bucket.tenant,
+  ret = store->getRados()->get_bucket_info(sysobj_ctx, orphan_bucket.tenant,
                               orphan_bucket.name, cur_bucket_info, nullptr, null_yield);
   if (ret < 0) {
     if (ret == -ENOENT) {
@@ -526,7 +526,7 @@ int RGWOrphanSearch::build_linked_oids_for_bucket(const string& bucket_instance_
   }
 
   RGWBucketInfo bucket_info;
-  ret = store->get_bucket_instance_info(sysobj_ctx, bucket_instance_id, bucket_info, nullptr, nullptr, null_yield);
+  ret = store->getRados()->get_bucket_instance_info(sysobj_ctx, bucket_instance_id, bucket_info, nullptr, nullptr, null_yield);
   if (ret < 0) {
     if (ret == -ENOENT) {
       /* probably raced with bucket removal */
@@ -537,7 +537,7 @@ int RGWOrphanSearch::build_linked_oids_for_bucket(const string& bucket_instance_
   }
 
   ldout(store->ctx(), 10) << "building linked oids for bucket instance: " << bucket_instance_id << dendl;
-  RGWRados::Bucket target(store, bucket_info);
+  RGWRados::Bucket target(store->getRados(), bucket_info);
   RGWRados::Bucket::List list_op(&target);
 
   string marker;
@@ -578,7 +578,7 @@ int RGWOrphanSearch::build_linked_oids_for_bucket(const string& bucket_instance_
 
       rgw_obj obj(bucket_info.bucket, entry.key);
 
-      RGWRados::Object op_target(store, bucket_info, obj_ctx, obj);
+      RGWRados::Object op_target(store->getRados(), bucket_info, obj_ctx, obj);
 
       stat_ops.push_back(RGWRados::Object::Stat(&op_target));
       RGWRados::Object::Stat& op = stat_ops.back();
@@ -737,7 +737,7 @@ int RGWOrphanSearch::compare_oid_indexes()
 
   librados::IoCtx data_ioctx;
 
-  int ret = rgw_init_ioctx(store->get_rados_handle(), search_info.pool, data_ioctx);
+  int ret = rgw_init_ioctx(store->getRados()->get_rados_handle(), search_info.pool, data_ioctx);
   if (ret < 0) {
     lderr(store->ctx()) << __func__ << ": rgw_init_ioctx() returned ret=" << ret << dendl;
     return ret;
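
rgw_orphan.cc is a good picture of the new call pattern: RADOS-level plumbing goes through store->getRados(), the service stack through store->svc(), the meta/bucket/user controllers through store->ctl(), and store->ctx() stays on the wrapper. A rough sketch of the accessor surface this implies (the real declarations live in the new rgw_sal.h and may differ in detail):

    // Sketch of the accessors implied by the call sites above; not the actual header.
    namespace rgw { namespace sal {
    class RGWRadosStore {
      RGWRados *rados{nullptr};
    public:
      RGWRados    *getRados() { return rados; }        // legacy RADOS-backed operations
      RGWServices *svc()      { return &rados->svc; }  // zone, sysobj, rados, ... services
      RGWCtl      *ctl()      { return &rados->ctl; }  // meta/user/bucket controllers
      CephContext *ctx()      { return rados->ctx(); } // same CephContext as before
    };
    } }
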
index 7d808eeda65d3ddc786effc48259881c45349871..8e97f989cc2663fa093ab184f62185b5479e6ef3 100644 (file)
@@ -20,7 +20,7 @@
 #include "common/Formatter.h"
 #include "common/errno.h"
 
-#include "rgw_rados.h"
+#include "rgw_sal.h"
 
 #define dout_subsys ceph_subsys_rgw
 
@@ -124,13 +124,13 @@ struct RGWOrphanSearchState {
 WRITE_CLASS_ENCODER(RGWOrphanSearchState)
 
 class RGWOrphanStore {
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   librados::IoCtx ioctx;
 
   string oid;
 
 public:
-  explicit RGWOrphanStore(RGWRados *_store) : store(_store), oid(RGW_ORPHAN_INDEX_OID) {}
+  explicit RGWOrphanStore(rgw::sal::RGWRadosStore *_store) : store(_store), oid(RGW_ORPHAN_INDEX_OID) {}
 
   librados::IoCtx& get_ioctx() { return ioctx; }
 
@@ -148,7 +148,7 @@ public:
 
 
 class RGWOrphanSearch {
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
 
   RGWOrphanStore orphan_store;
 
@@ -186,7 +186,7 @@ class RGWOrphanSearch {
 
   int remove_index(map<int, string>& index);
 public:
-  RGWOrphanSearch(RGWRados *_store, int _max_ios, uint64_t _stale_secs) : store(_store), orphan_store(store), max_concurrent_ios(_max_ios), stale_secs(_stale_secs) {}
+  RGWOrphanSearch(rgw::sal::RGWRadosStore *_store, int _max_ios, uint64_t _stale_secs) : store(_store), orphan_store(store), max_concurrent_ios(_max_ios), stale_secs(_stale_secs) {}
 
   int save_state() {
     RGWOrphanSearchState state;
index 6e51086b2dde546d1f81b370e9681df6bbed23d7..502a923ca8dc53f748de7ceeb7594be8dea645a2 100644 (file)
@@ -4,6 +4,10 @@
 #ifndef CEPH_RGW_OTP_H
 #define CEPH_RGW_OTP_H
 
+namespace rgw { namespace sal {
+class RGWRadosStore;
+} }
+
 #include "cls/otp/cls_otp_types.h"
 #include "services/svc_meta_be_otp.h"
 
index e3db85dfb12b79dea59a0adec973bbcbd788ffd7..80b7d442639c6063332632148f10053881758f80 100644 (file)
@@ -7,6 +7,7 @@
 #include "rgw_period_pusher.h"
 #include "rgw_cr_rest.h"
 #include "rgw_zone.h"
+#include "rgw_sal.h"
 
 #include "services/svc_zone.h"
 
@@ -156,17 +157,17 @@ class RGWPeriodPusher::CRThread {
 };
 
 
-RGWPeriodPusher::RGWPeriodPusher(RGWRados* store)
+RGWPeriodPusher::RGWPeriodPusher(rgw::sal::RGWRadosStore* store)
   : cct(store->ctx()), store(store)
 {
-  const auto& realm = store->svc.zone->get_realm();
+  const auto& realm = store->svc()->zone->get_realm();
   auto& realm_id = realm.get_id();
   if (realm_id.empty()) // no realm configuration
     return;
 
   // always send out the current period on startup
   RGWPeriod period;
-  int r = period.init(cct, store->svc.sysobj, realm_id, realm.get_name());
+  int r = period.init(cct, store->svc()->sysobj, realm_id, realm.get_name());
   if (r < 0) {
     lderr(cct) << "failed to load period for realm " << realm_id << dendl;
     return;
@@ -221,7 +222,7 @@ void RGWPeriodPusher::handle_notify(RGWZonesNeedPeriod&& period)
 
   // find our zonegroup in the new period
   auto& zonegroups = period.get_map().zonegroups;
-  auto i = zonegroups.find(store->svc.zone->get_zonegroup().get_id());
+  auto i = zonegroups.find(store->svc()->zone->get_zonegroup().get_id());
   if (i == zonegroups.end()) {
     lderr(cct) << "The new period does not contain my zonegroup!" << dendl;
     return;
@@ -229,7 +230,7 @@ void RGWPeriodPusher::handle_notify(RGWZonesNeedPeriod&& period)
   auto& my_zonegroup = i->second;
 
   // if we're not a master zone, we're not responsible for pushing any updates
-  if (my_zonegroup.master_zone != store->svc.zone->get_zone_params().get_id())
+  if (my_zonegroup.master_zone != store->svc()->zone->get_zone_params().get_id())
     return;
 
   // construct a map of the zones that need this period. the map uses the same
@@ -238,11 +239,11 @@ void RGWPeriodPusher::handle_notify(RGWZonesNeedPeriod&& period)
   auto hint = conns.end();
 
   // are we the master zonegroup in this period?
-  if (period.get_map().master_zonegroup == store->svc.zone->get_zonegroup().get_id()) {
+  if (period.get_map().master_zonegroup == store->svc()->zone->get_zonegroup().get_id()) {
     // update other zonegroup endpoints
     for (auto& zg : zonegroups) {
       auto& zonegroup = zg.second;
-      if (zonegroup.get_id() == store->svc.zone->get_zonegroup().get_id())
+      if (zonegroup.get_id() == store->svc()->zone->get_zonegroup().get_id())
         continue;
       if (zonegroup.endpoints.empty())
         continue;
@@ -250,14 +251,14 @@ void RGWPeriodPusher::handle_notify(RGWZonesNeedPeriod&& period)
       hint = conns.emplace_hint(
           hint, std::piecewise_construct,
           std::forward_as_tuple(zonegroup.get_id()),
-          std::forward_as_tuple(cct, store->svc.zone, zonegroup.get_id(), zonegroup.endpoints));
+          std::forward_as_tuple(cct, store->svc()->zone, zonegroup.get_id(), zonegroup.endpoints));
     }
   }
 
   // update other zone endpoints
   for (auto& z : my_zonegroup.zones) {
     auto& zone = z.second;
-    if (zone.id == store->svc.zone->get_zone_params().get_id())
+    if (zone.id == store->svc()->zone->get_zone_params().get_id())
       continue;
     if (zone.endpoints.empty())
       continue;
@@ -265,7 +266,7 @@ void RGWPeriodPusher::handle_notify(RGWZonesNeedPeriod&& period)
     hint = conns.emplace_hint(
         hint, std::piecewise_construct,
         std::forward_as_tuple(zone.id),
-        std::forward_as_tuple(cct, store->svc.zone, zone.id, zone.endpoints));
+        std::forward_as_tuple(cct, store->svc()->zone, zone.id, zone.endpoints));
   }
 
   if (conns.empty()) {
@@ -291,7 +292,7 @@ void RGWPeriodPusher::pause()
   store = nullptr;
 }
 
-void RGWPeriodPusher::resume(RGWRados* store)
+void RGWPeriodPusher::resume(rgw::sal::RGWRadosStore* store)
 {
   std::lock_guard<std::mutex> lock(mutex);
   this->store = store;
index fdadd226621665f0a958f8f353370be277729ddd..db55a646e4fb82d43665115ce2eac015ea40e130 100644 (file)
 
 #include "rgw_realm_reloader.h"
 
-class RGWRados;
+namespace rgw {
+namespace sal {
+class RGWRadosStore;
+}
+}
+
 class RGWPeriod;
 
 // RGWRealmNotify payload for push coordination
@@ -23,7 +28,7 @@ using RGWZonesNeedPeriod = RGWPeriod;
 class RGWPeriodPusher final : public RGWRealmWatcher::Watcher,
                               public RGWRealmReloader::Pauser {
  public:
-  explicit RGWPeriodPusher(RGWRados* store);
+  explicit RGWPeriodPusher(rgw::sal::RGWRadosStore* store);
   ~RGWPeriodPusher() override;
 
   /// respond to realm notifications by pushing new periods to other zones
@@ -34,13 +39,13 @@ class RGWPeriodPusher final : public RGWRealmWatcher::Watcher,
   void pause() override;
 
   /// continue processing notifications with a new RGWRados instance
-  void resume(RGWRados* store) override;
+  void resume(rgw::sal::RGWRadosStore* store) override;
 
  private:
   void handle_notify(RGWZonesNeedPeriod&& period);
 
   CephContext *const cct;
-  RGWRados* store;
+  rgw::sal::RGWRadosStore* store;
 
   std::mutex mutex;
   epoch_t realm_epoch{0}; //< the current realm epoch being sent
index ad43b5d33d641b74fb92e55820fbf00257dc17be..e21ab37d2c7264369c00e5e56c9d6bf4b212621c 100644 (file)
@@ -166,7 +166,7 @@ int rgw_process_authenticated(RGWHandler_REST * const handler,
   return 0;
 }
 
-int process_request(RGWRados* const store,
+int process_request(rgw::sal::RGWRadosStore* const store,
                     RGWREST* const rest,
                     RGWRequest* const req,
                     const std::string& frontend_prefix,
@@ -193,7 +193,7 @@ int process_request(RGWRados* const store,
   RGWObjectCtx rados_ctx(store, s);
   s->obj_ctx = &rados_ctx;
 
-  auto sysobj_ctx = store->svc.sysobj->init_obj_ctx();
+  auto sysobj_ctx = store->svc()->sysobj->init_obj_ctx();
   s->sysobj_ctx = &sysobj_ctx;
 
   if (ret < 0) {
@@ -202,9 +202,9 @@ int process_request(RGWRados* const store,
     return ret;
   }
 
-  s->req_id = store->svc.zone_utils->unique_id(req->id);
-  s->trans_id = store->svc.zone_utils->unique_trans_id(req->id);
-  s->host_id = store->host_id;
+  s->req_id = store->svc()->zone_utils->unique_id(req->id);
+  s->trans_id = store->svc()->zone_utils->unique_trans_id(req->id);
+  s->host_id = store->getRados()->host_id;
   s->yield = yield;
 
   ldpp_dout(s, 2) << "initializing for trans_id = " << s->trans_id << dendl;
@@ -294,7 +294,7 @@ done:
   }
 
   if (should_log) {
-    rgw_log_op(store, rest, s, (op ? op->name() : "unknown"), olog);
+    rgw_log_op(store->getRados(), rest, s, (op ? op->name() : "unknown"), olog);
   }
 
   if (http_ret != nullptr) {
index c3b27bd7d73ef303b35c0341b64bb7d8fc832c43..0230a0289345c709cf4759b89e6749b8bf3bb6d7 100644 (file)
@@ -33,7 +33,7 @@ namespace rgw::dmclock {
 }
 
 struct RGWProcessEnv {
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   RGWREST *rest;
   OpsLogSocket *olog;
   int port;
@@ -47,7 +47,7 @@ class RGWProcess {
   deque<RGWRequest*> m_req_queue;
 protected:
   CephContext *cct;
-  RGWRados* store;
+  rgw::sal::RGWRadosStore* store;
   rgw_auth_registry_ptr_t auth_registry;
   OpsLogSocket* olog;
   ThreadPool m_tp;
@@ -114,7 +114,7 @@ public:
     m_tp.pause();
   }
 
-  void unpause_with_new_config(RGWRados* const store,
+  void unpause_with_new_config(rgw::sal::RGWRadosStore* const store,
                                rgw_auth_registry_ptr_t auth_registry) {
     this->store = store;
     this->auth_registry = std::move(auth_registry);
@@ -173,7 +173,7 @@ public:
   void set_access_key(RGWAccessKey& key) { access_key = key; }
 };
 /* process stream request */
-extern int process_request(RGWRados* store,
+extern int process_request(rgw::sal::RGWRadosStore* store,
                            RGWREST* rest,
                            RGWRequest* req,
                            const std::string& frontend_prefix,
index 73872a18b920623c34d3aed5c5a4e363ccc90d4c..690dbed89bd489c6fb85763f0d7b899d404514b9 100644 (file)
@@ -2,7 +2,7 @@
 // vim: ts=8 sw=2 smarttab
 
 #include "rgw_b64.h"
-#include "rgw_rados.h"
+#include "rgw_sal.h"
 #include "rgw_pubsub.h"
 #include "rgw_tools.h"
 #include "rgw_xml.h"
@@ -162,28 +162,28 @@ void rgw_pubsub_sub_config::dump(Formatter *f) const
   encode_json("s3_id", s3_id, f);
 }
 
-RGWUserPubSub::RGWUserPubSub(RGWRados *_store, const rgw_user& _user) : store(_store),
+RGWUserPubSub::RGWUserPubSub(rgw::sal::RGWRadosStore *_store, const rgw_user& _user) : store(_store),
                                                                         user(_user),
-                                                                        obj_ctx(store->svc.sysobj->init_obj_ctx())
+                                                                        obj_ctx(store->svc()->sysobj->init_obj_ctx())
 {
   get_user_meta_obj(&user_meta_obj);
 }
 
 void RGWUserPubSub::get_user_meta_obj(rgw_raw_obj *obj) const {
-  *obj = rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, user_meta_oid());
+  *obj = rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, user_meta_oid());
 }
 
 void RGWUserPubSub::get_bucket_meta_obj(const rgw_bucket& bucket, rgw_raw_obj *obj) const {
-  *obj = rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, bucket_meta_oid(bucket));
+  *obj = rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, bucket_meta_oid(bucket));
 }
 
 void RGWUserPubSub::get_sub_meta_obj(const string& name, rgw_raw_obj *obj) const {
-  *obj = rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, sub_meta_oid(name));
+  *obj = rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sub_meta_oid(name));
 }
 
 int RGWUserPubSub::remove(const rgw_raw_obj& obj, RGWObjVersionTracker *objv_tracker)
 {
-  int ret = rgw_delete_system_obj(store->svc.sysobj, obj.pool, obj.oid, objv_tracker);
+  int ret = rgw_delete_system_obj(store->svc()->sysobj, obj.pool, obj.oid, objv_tracker);
   if (ret < 0) {
     return ret;
   }
@@ -264,7 +264,7 @@ int RGWUserPubSub::get_topic(const string& name, rgw_pubsub_topic_subs *result)
 int RGWUserPubSub::Bucket::create_notification(const string& topic_name, const EventTypeList& events)
 {
   rgw_pubsub_topic_subs user_topic_info;
-  RGWRados *store = ps->store;
+  rgw::sal::RGWRadosStore *store = ps->store;
 
   int ret = ps->get_topic(topic_name, &user_topic_info);
   if (ret < 0) {
@@ -303,7 +303,7 @@ int RGWUserPubSub::Bucket::create_notification(const string& topic_name, const E
 int RGWUserPubSub::Bucket::remove_notification(const string& topic_name)
 {
   rgw_pubsub_topic_subs user_topic_info;
-  RGWRados *store = ps->store;
+  rgw::sal::RGWRadosStore *store = ps->store;
 
   int ret = ps->get_topic(topic_name, &user_topic_info);
   if (ret < 0) {
@@ -423,7 +423,7 @@ int RGWUserPubSub::Sub::subscribe(const string& topic, const rgw_pubsub_sub_dest
 {
   RGWObjVersionTracker user_objv_tracker;
   rgw_pubsub_user_topics topics;
-  RGWRados *store = ps->store;
+  rgw::sal::RGWRadosStore *store = ps->store;
 
   int ret = ps->read_user_topics(&topics, &user_objv_tracker);
   if (ret < 0) {
@@ -467,7 +467,7 @@ int RGWUserPubSub::Sub::unsubscribe(const string& _topic)
 {
   string topic = _topic;
   RGWObjVersionTracker sobjv_tracker;
-  RGWRados *store = ps->store;
+  rgw::sal::RGWRadosStore *store = ps->store;
 
   if (topic.empty()) {
     rgw_pubsub_sub_config sub_conf;
@@ -525,7 +525,7 @@ void RGWUserPubSub::SubWithEvents<EventType>::list_events_result::dump(Formatter
 template<typename EventType>
 int RGWUserPubSub::SubWithEvents<EventType>::list_events(const string& marker, int max_events)
 {
-  RGWRados *store = ps->store;
+  RGWRados *store = ps->store->getRados();
   rgw_pubsub_sub_config sub_conf;
   int ret = get_conf(&sub_conf);
   if (ret < 0) {
@@ -591,7 +591,7 @@ int RGWUserPubSub::SubWithEvents<EventType>::list_events(const string& marker, i
 template<typename EventType>
 int RGWUserPubSub::SubWithEvents<EventType>::remove_event(const string& event_id)
 {
-  RGWRados *store = ps->store;
+  rgw::sal::RGWRadosStore *store = ps->store;
   rgw_pubsub_sub_config sub_conf;
   int ret = get_conf(&sub_conf);
   if (ret < 0) {
@@ -601,8 +601,8 @@ int RGWUserPubSub::SubWithEvents<EventType>::remove_event(const string& event_id
 
   RGWBucketInfo bucket_info;
   string tenant;
-  RGWSysObjectCtx sysobj_ctx(store->svc.sysobj->init_obj_ctx());
-  ret = store->get_bucket_info(sysobj_ctx, tenant, sub_conf.dest.bucket_name, bucket_info, nullptr, null_yield, nullptr);
+  RGWSysObjectCtx sysobj_ctx(store->svc()->sysobj->init_obj_ctx());
+  ret = store->getRados()->get_bucket_info(sysobj_ctx, tenant, sub_conf.dest.bucket_name, bucket_info, nullptr, null_yield, nullptr);
   if (ret < 0) {
     ldout(store->ctx(), 1) << "ERROR: failed to read bucket info for events bucket: bucket=" << sub_conf.dest.bucket_name << " ret=" << ret << dendl;
     return ret;
@@ -615,7 +615,7 @@ int RGWUserPubSub::SubWithEvents<EventType>::remove_event(const string& event_id
 
   obj_ctx.set_atomic(obj);
 
-  RGWRados::Object del_target(store, bucket_info, obj_ctx, obj);
+  RGWRados::Object del_target(store->getRados(), bucket_info, obj_ctx, obj);
   RGWRados::Object::Delete del_op(&del_target);
 
   del_op.params.bucket_owner = bucket_info.owner;
index e109e8c4ad87d3d59ef5b02eab951a3508dc1495..d1440b12a983e258b8a64fb022ef78d8975ff9c8 100644 (file)
@@ -422,7 +422,7 @@ class RGWUserPubSub
 {
   friend class Bucket;
 
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   rgw_user user;
   RGWSysObjectCtx obj_ctx;
 
@@ -452,7 +452,7 @@ class RGWUserPubSub
   int write_user_topics(const rgw_pubsub_user_topics& topics, RGWObjVersionTracker *objv_tracker);
 
 public:
-  RGWUserPubSub(RGWRados *_store, const rgw_user& _user);
+  RGWUserPubSub(rgw::sal::RGWRadosStore *_store, const rgw_user& _user);
 
   class Bucket {
     friend class RGWUserPubSub;
@@ -616,7 +616,7 @@ int RGWUserPubSub::write(const rgw_raw_obj& obj, const T& info, RGWObjVersionTra
   bufferlist bl;
   encode(info, bl);
 
-  auto obj_ctx = store->svc.sysobj->init_obj_ctx();
+  auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
   int ret = rgw_put_system_obj(obj_ctx, obj.pool, obj.oid,
                            bl, false, objv_tracker,
                            real_time());
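
RGWUserPubSub keeps its public interface; only the stored pointer type changes, so callers simply hand in the wrapper. A minimal usage sketch under that assumption (variable names are illustrative):

    // 'store' is the process-wide rgw::sal::RGWRadosStore*, 'uid' an rgw_user.
    RGWUserPubSub ps(store, uid);
    rgw_pubsub_topic_subs topic;
    int r = ps.get_topic("my-topic", &topic);   // get_topic() as used in the hunks above
    if (r < 0) {
      ldout(store->ctx(), 1) << "failed to read topic: r=" << r << dendl;
    }
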
index 58afda85d9f5903176608d3fb3803429b786c0c8..56a916df193cf6cab4c71546d9e2644bd26a00fe 100644 (file)
@@ -75,7 +75,7 @@ static int process_completed(const AioResultList& completed, RawObjSet *written)
 
 int RadosWriter::set_stripe_obj(const rgw_raw_obj& raw_obj)
 {
-  stripe_obj = store->svc.rados->obj(raw_obj);
+  stripe_obj = store->svc()->rados->obj(raw_obj);
   return stripe_obj.open();
 }
 
@@ -126,7 +126,7 @@ RadosWriter::~RadosWriter()
   std::optional<rgw_raw_obj> raw_head;
   if (!head_obj.empty()) {
     raw_head.emplace();
-    store->obj_to_raw(bucket_info.placement_rule, head_obj, &*raw_head);
+    store->getRados()->obj_to_raw(bucket_info.placement_rule, head_obj, &*raw_head);
   }
 
   /**
@@ -148,7 +148,7 @@ RadosWriter::~RadosWriter()
       continue;
     }
 
-    int r = store->delete_raw_obj(obj);
+    int r = store->getRados()->delete_raw_obj(obj);
     if (r < 0 && r != -ENOENT) {
       ldpp_dout(dpp, 5) << "WARNING: failed to remove obj (" << obj << "), leaked" << dendl;
     }
@@ -156,7 +156,7 @@ RadosWriter::~RadosWriter()
 
   if (need_to_remove_head) {
     ldpp_dout(dpp, 5) << "NOTE: we are going to process the head obj (" << *raw_head << ")" << dendl;
-    int r = store->delete_obj(obj_ctx, bucket_info, head_obj, 0, 0);
+    int r = store->getRados()->delete_obj(obj_ctx, bucket_info, head_obj, 0, 0);
     if (r < 0 && r != -ENOENT) {
       ldpp_dout(dpp, 0) << "WARNING: failed to remove obj (" << *raw_head << "), leaked" << dendl;
     }
@@ -173,10 +173,10 @@ int ManifestObjectProcessor::next(uint64_t offset, uint64_t *pstripe_size)
     return r;
   }
 
-  rgw_raw_obj stripe_obj = manifest_gen.get_cur_obj(store);
+  rgw_raw_obj stripe_obj = manifest_gen.get_cur_obj(store->getRados());
 
   uint64_t chunk_size = 0;
-  r = store->get_max_chunk_size(stripe_obj.pool, &chunk_size);
+  r = store->getRados()->get_max_chunk_size(stripe_obj.pool, &chunk_size);
   if (r < 0) {
     return r;
   }
@@ -208,11 +208,11 @@ int AtomicObjectProcessor::prepare(optional_yield y)
   uint64_t alignment;
   rgw_pool head_pool;
 
-  if (!store->get_obj_data_pool(bucket_info.placement_rule, head_obj, &head_pool)) {
+  if (!store->getRados()->get_obj_data_pool(bucket_info.placement_rule, head_obj, &head_pool)) {
     return -EIO;
   }
 
-  int r = store->get_max_chunk_size(head_pool, &max_head_chunk_size, &alignment);
+  int r = store->getRados()->get_max_chunk_size(head_pool, &max_head_chunk_size, &alignment);
   if (r < 0) {
     return r;
   }
@@ -221,14 +221,14 @@ int AtomicObjectProcessor::prepare(optional_yield y)
 
   if (bucket_info.placement_rule != tail_placement_rule) {
     rgw_pool tail_pool;
-    if (!store->get_obj_data_pool(tail_placement_rule, head_obj, &tail_pool)) {
+    if (!store->getRados()->get_obj_data_pool(tail_placement_rule, head_obj, &tail_pool)) {
       return -EIO;
     }
 
     if (tail_pool != head_pool) {
       same_pool = false;
 
-      r = store->get_max_chunk_size(tail_pool, &chunk_size);
+      r = store->getRados()->get_max_chunk_size(tail_pool, &chunk_size);
       if (r < 0) {
         return r;
       }
@@ -245,7 +245,7 @@ int AtomicObjectProcessor::prepare(optional_yield y)
   uint64_t stripe_size;
   const uint64_t default_stripe_size = store->ctx()->_conf->rgw_obj_stripe_size;
 
-  store->get_max_aligned_size(default_stripe_size, alignment, &stripe_size);
+  store->getRados()->get_max_aligned_size(default_stripe_size, alignment, &stripe_size);
 
   manifest.set_trivial_rule(head_max_size, stripe_size);
 
@@ -257,7 +257,7 @@ int AtomicObjectProcessor::prepare(optional_yield y)
     return r;
   }
 
-  rgw_raw_obj stripe_obj = manifest_gen.get_cur_obj(store);
+  rgw_raw_obj stripe_obj = manifest_gen.get_cur_obj(store->getRados());
 
   r = writer.set_stripe_obj(stripe_obj);
   if (r < 0) {
@@ -295,7 +295,7 @@ int AtomicObjectProcessor::complete(size_t accounted_size,
 
   obj_ctx.set_atomic(head_obj);
 
-  RGWRados::Object op_target(store, bucket_info, obj_ctx, head_obj);
+  RGWRados::Object op_target(store->getRados(), bucket_info, obj_ctx, head_obj);
 
   /* some object types shouldn't be versioned, e.g., multipart parts */
   op_target.set_versioning_disabled(!bucket_info.versioning_enabled());
@@ -367,12 +367,12 @@ int MultipartObjectProcessor::prepare_head()
   uint64_t stripe_size;
   uint64_t alignment;
 
-  int r = store->get_max_chunk_size(tail_placement_rule, target_obj, &chunk_size, &alignment);
+  int r = store->getRados()->get_max_chunk_size(tail_placement_rule, target_obj, &chunk_size, &alignment);
   if (r < 0) {
     ldpp_dout(dpp, 0) << "ERROR: unexpected: get_max_chunk_size(): placement_rule=" << tail_placement_rule.to_str() << " obj=" << target_obj << " returned r=" << r << dendl;
     return r;
   }
-  store->get_max_aligned_size(default_stripe_size, alignment, &stripe_size);
+  store->getRados()->get_max_aligned_size(default_stripe_size, alignment, &stripe_size);
 
   manifest.set_multipart_part_rule(stripe_size, part_num);
 
@@ -384,7 +384,7 @@ int MultipartObjectProcessor::prepare_head()
     return r;
   }
 
-  rgw_raw_obj stripe_obj = manifest_gen.get_cur_obj(store);
+  rgw_raw_obj stripe_obj = manifest_gen.get_cur_obj(store->getRados());
   RGWSI_Tier_RADOS::raw_obj_to_obj(head_obj.bucket, stripe_obj, &head_obj);
   head_obj.index_hash_source = target_obj.key.name;
 
@@ -431,7 +431,7 @@ int MultipartObjectProcessor::complete(size_t accounted_size,
     return r;
   }
 
-  RGWRados::Object op_target(store, bucket_info, obj_ctx, head_obj);
+  RGWRados::Object op_target(store->getRados(), bucket_info, obj_ctx, head_obj);
   op_target.set_versioning_disabled(true);
   RGWRados::Object::Write obj_op(&op_target);
 
@@ -480,9 +480,9 @@ int MultipartObjectProcessor::complete(size_t accounted_size,
 
   rgw_raw_obj raw_meta_obj;
 
-  store->obj_to_raw(bucket_info.placement_rule, meta_obj, &raw_meta_obj);
+  store->getRados()->obj_to_raw(bucket_info.placement_rule, meta_obj, &raw_meta_obj);
 
-  auto obj_ctx = store->svc.sysobj->init_obj_ctx();
+  auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
   auto sysobj = obj_ctx.get_obj(raw_meta_obj);
 
   r = sysobj.omap()
@@ -515,7 +515,7 @@ int AppendObjectProcessor::process_first_chunk(bufferlist &&data, rgw::putobj::D
 int AppendObjectProcessor::prepare(optional_yield y)
 {
   RGWObjState *astate;
-  int r = store->get_obj_state(&obj_ctx, bucket_info, head_obj, &astate, y);
+  int r = store->getRados()->get_obj_state(&obj_ctx, bucket_info, head_obj, &astate, y);
   if (r < 0) {
     return r;
   }
@@ -570,10 +570,10 @@ int AppendObjectProcessor::prepare(optional_yield y)
   if (r < 0) {
     return r;
   }
-  rgw_raw_obj stripe_obj = manifest_gen.get_cur_obj(store);
+  rgw_raw_obj stripe_obj = manifest_gen.get_cur_obj(store->getRados());
 
   uint64_t chunk_size = 0;
-  r = store->get_max_chunk_size(stripe_obj.pool, &chunk_size);
+  r = store->getRados()->get_max_chunk_size(stripe_obj.pool, &chunk_size);
   if (r < 0) {
     return r;
   }
@@ -609,12 +609,12 @@ int AppendObjectProcessor::complete(size_t accounted_size, const string &etag, c
     return r;
   }
   obj_ctx.set_atomic(head_obj);
-  RGWRados::Object op_target(store, bucket_info, obj_ctx, head_obj);
+  RGWRados::Object op_target(store->getRados(), bucket_info, obj_ctx, head_obj);
   //For Append obj, disable versioning
   op_target.set_versioning_disabled(true);
   RGWRados::Object::Write obj_op(&op_target);
   if (cur_manifest) {
-    cur_manifest->append(manifest, store->svc.zone);
+    cur_manifest->append(manifest, store->svc()->zone);
     obj_op.meta.manifest = cur_manifest;
   } else {
     obj_op.meta.manifest = &manifest;
index 78efd33cd1f275658111f9fc54099d78eb9dc08d..3fa40626b5adf504567a5005b0552147e18633be 100644 (file)
@@ -79,7 +79,7 @@ using RawObjSet = std::set<rgw_raw_obj>;
 // a data sink that writes to rados objects and deletes them on cancelation
 class RadosWriter : public DataProcessor {
   Aio *const aio;
-  RGWRados *const store;
+  rgw::sal::RGWRadosStore *const store;
   const RGWBucketInfo& bucket_info;
   RGWObjectCtx& obj_ctx;
   const rgw_obj head_obj;
@@ -89,7 +89,8 @@ class RadosWriter : public DataProcessor {
   optional_yield y;
 
  public:
-  RadosWriter(Aio *aio, RGWRados *store, const RGWBucketInfo& bucket_info,
+  RadosWriter(Aio *aio, rgw::sal::RGWRadosStore *store,
+             const RGWBucketInfo& bucket_info,
               RGWObjectCtx& obj_ctx, const rgw_obj& head_obj,
               const DoutPrefixProvider *dpp, optional_yield y)
     : aio(aio), store(store), bucket_info(bucket_info),
@@ -118,7 +119,7 @@ class RadosWriter : public DataProcessor {
 class ManifestObjectProcessor : public HeadObjectProcessor,
                                 public StripeGenerator {
  protected:
-  RGWRados *const store;
+  rgw::sal::RGWRadosStore *const store;
   const RGWBucketInfo& bucket_info;
   rgw_placement_rule tail_placement_rule;
   const rgw_user& owner;
@@ -136,7 +137,7 @@ class ManifestObjectProcessor : public HeadObjectProcessor,
   int next(uint64_t offset, uint64_t *stripe_size) override;
 
  public:
-  ManifestObjectProcessor(Aio *aio, RGWRados *store,
+  ManifestObjectProcessor(Aio *aio, rgw::sal::RGWRadosStore *store,
                           const RGWBucketInfo& bucket_info,
                           const rgw_placement_rule *ptail_placement_rule,
                           const rgw_user& owner, RGWObjectCtx& obj_ctx,
@@ -169,7 +170,7 @@ class AtomicObjectProcessor : public ManifestObjectProcessor {
 
   int process_first_chunk(bufferlist&& data, DataProcessor **processor) override;
  public:
-  AtomicObjectProcessor(Aio *aio, RGWRados *store,
+  AtomicObjectProcessor(Aio *aio, rgw::sal::RGWRadosStore *store,
                         const RGWBucketInfo& bucket_info,
                         const rgw_placement_rule *ptail_placement_rule,
                         const rgw_user& owner,
@@ -213,7 +214,7 @@ class MultipartObjectProcessor : public ManifestObjectProcessor {
   // prepare the head stripe and manifest
   int prepare_head();
  public:
-  MultipartObjectProcessor(Aio *aio, RGWRados *store,
+  MultipartObjectProcessor(Aio *aio, rgw::sal::RGWRadosStore *store,
                            const RGWBucketInfo& bucket_info,
                            const rgw_placement_rule *ptail_placement_rule,
                            const rgw_user& owner, RGWObjectCtx& obj_ctx,
@@ -256,7 +257,7 @@ class MultipartObjectProcessor : public ManifestObjectProcessor {
     int process_first_chunk(bufferlist&& data, DataProcessor **processor) override;
 
   public:
-    AppendObjectProcessor(Aio *aio, RGWRados *store, const RGWBucketInfo& bucket_info,
+    AppendObjectProcessor(Aio *aio, rgw::sal::RGWRadosStore *store, const RGWBucketInfo& bucket_info,
                           const rgw_placement_rule *ptail_placement_rule,
                           const rgw_user& owner, RGWObjectCtx& obj_ctx,const rgw_obj& head_obj,
                           const std::string& unique_tag, uint64_t position,
index c6f0774d1e6c5363781ab6c97d16ce06b0a3cb65..2acdf645470a14c2bb56dac21954dc69087c5cee 100644 (file)
@@ -21,7 +21,7 @@
 #include "common/ceph_mutex.h"
 
 #include "rgw_common.h"
-#include "rgw_rados.h"
+#include "rgw_sal.h"
 #include "rgw_quota.h"
 #include "rgw_bucket.h"
 #include "rgw_user.h"
@@ -44,7 +44,7 @@ struct RGWQuotaCacheStats {
 template<class T>
 class RGWQuotaCache {
 protected:
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   lru_map<T, RGWQuotaCacheStats> stats_map;
   RefCountedWaitObject *async_refcount;
 
@@ -73,7 +73,7 @@ protected:
 
   virtual void data_modified(const rgw_user& user, rgw_bucket& bucket) {}
 public:
-  RGWQuotaCache(RGWRados *_store, int size) : store(_store), stats_map(size) {
+  RGWQuotaCache(rgw::sal::RGWRadosStore *_store, int size) : store(_store), stats_map(size) {
     async_refcount = new RefCountedWaitObject;
   }
   virtual ~RGWQuotaCache() {
@@ -92,10 +92,10 @@ public:
 
   class AsyncRefreshHandler {
   protected:
-    RGWRados *store;
+    rgw::sal::RGWRadosStore *store;
     RGWQuotaCache<T> *cache;
   public:
-    AsyncRefreshHandler(RGWRados *_store, RGWQuotaCache<T> *_cache) : store(_store), cache(_cache) {}
+    AsyncRefreshHandler(rgw::sal::RGWRadosStore *_store, RGWQuotaCache<T> *_cache) : store(_store), cache(_cache) {}
     virtual ~AsyncRefreshHandler() {}
 
     virtual int init_fetch() = 0;
@@ -280,7 +280,7 @@ class BucketAsyncRefreshHandler : public RGWQuotaCache<rgw_bucket>::AsyncRefresh
                                   public RGWGetBucketStats_CB {
   rgw_user user;
 public:
-  BucketAsyncRefreshHandler(RGWRados *_store, RGWQuotaCache<rgw_bucket> *_cache,
+  BucketAsyncRefreshHandler(rgw::sal::RGWRadosStore *_store, RGWQuotaCache<rgw_bucket> *_cache,
                             const rgw_user& _user, const rgw_bucket& _bucket) :
                                       RGWQuotaCache<rgw_bucket>::AsyncRefreshHandler(_store, _cache),
                                       RGWGetBucketStats_CB(_bucket), user(_user) {}
@@ -294,9 +294,9 @@ int BucketAsyncRefreshHandler::init_fetch()
 {
   RGWBucketInfo bucket_info;
 
-  auto obj_ctx = store->svc.sysobj->init_obj_ctx();
+  auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
 
-  int r = store->get_bucket_instance_info(obj_ctx, bucket, bucket_info, NULL, NULL, null_yield);
+  int r = store->getRados()->get_bucket_instance_info(obj_ctx, bucket, bucket_info, NULL, NULL, null_yield);
   if (r < 0) {
     ldout(store->ctx(), 0) << "could not get bucket info for bucket=" << bucket << " r=" << r << dendl;
     return r;
@@ -304,7 +304,7 @@ int BucketAsyncRefreshHandler::init_fetch()
 
   ldout(store->ctx(), 20) << "initiating async quota refresh for bucket=" << bucket << dendl;
 
-  r = store->get_bucket_stats_async(bucket_info, RGW_NO_SHARD, this);
+  r = store->getRados()->get_bucket_stats_async(bucket_info, RGW_NO_SHARD, this);
   if (r < 0) {
     ldout(store->ctx(), 0) << "could not get bucket info for bucket=" << bucket.name << dendl;
 
@@ -353,7 +353,7 @@ protected:
   int fetch_stats_from_storage(const rgw_user& user, const rgw_bucket& bucket, RGWStorageStats& stats) override;
 
 public:
-  explicit RGWBucketStatsCache(RGWRados *_store) : RGWQuotaCache<rgw_bucket>(_store, _store->ctx()->_conf->rgw_bucket_quota_cache_size) {
+  explicit RGWBucketStatsCache(rgw::sal::RGWRadosStore *_store) : RGWQuotaCache<rgw_bucket>(_store, _store->ctx()->_conf->rgw_bucket_quota_cache_size) {
   }
 
   AsyncRefreshHandler *allocate_refresh_handler(const rgw_user& user, const rgw_bucket& bucket) override {
@@ -365,9 +365,9 @@ int RGWBucketStatsCache::fetch_stats_from_storage(const rgw_user& user, const rg
 {
   RGWBucketInfo bucket_info;
 
-  RGWSysObjectCtx obj_ctx = store->svc.sysobj->init_obj_ctx();
+  RGWSysObjectCtx obj_ctx = store->svc()->sysobj->init_obj_ctx();
 
-  int r = store->get_bucket_instance_info(obj_ctx, bucket, bucket_info, NULL, NULL, null_yield);
+  int r = store->getRados()->get_bucket_instance_info(obj_ctx, bucket, bucket_info, NULL, NULL, null_yield);
   if (r < 0) {
     ldout(store->ctx(), 0) << "could not get bucket info for bucket=" << bucket << " r=" << r << dendl;
     return r;
@@ -377,7 +377,7 @@ int RGWBucketStatsCache::fetch_stats_from_storage(const rgw_user& user, const rg
   string master_ver;
 
   map<RGWObjCategory, RGWStorageStats> bucket_stats;
-  r = store->get_bucket_stats(bucket_info, RGW_NO_SHARD, &bucket_ver,
+  r = store->getRados()->get_bucket_stats(bucket_info, RGW_NO_SHARD, &bucket_ver,
                                   &master_ver, bucket_stats, nullptr);
   if (r < 0) {
     ldout(store->ctx(), 0) << "could not get bucket stats for bucket="
@@ -402,7 +402,7 @@ class UserAsyncRefreshHandler : public RGWQuotaCache<rgw_user>::AsyncRefreshHand
                                 public RGWGetUserStats_CB {
   rgw_bucket bucket;
 public:
-  UserAsyncRefreshHandler(RGWRados *_store, RGWQuotaCache<rgw_user> *_cache,
+  UserAsyncRefreshHandler(rgw::sal::RGWRadosStore *_store, RGWQuotaCache<rgw_user> *_cache,
                           const rgw_user& _user, const rgw_bucket& _bucket) :
                           RGWQuotaCache<rgw_user>::AsyncRefreshHandler(_store, _cache),
                           RGWGetUserStats_CB(_user),
@@ -416,7 +416,7 @@ public:
 int UserAsyncRefreshHandler::init_fetch()
 {
   ldout(store->ctx(), 20) << "initiating async quota refresh for user=" << user << dendl;
-  int r = store->ctl.user->read_stats_async(user, this);
+  int r = store->ctl()->user->read_stats_async(user, this);
   if (r < 0) {
     ldout(store->ctx(), 0) << "could not get bucket info for user=" << user << dendl;
 
@@ -573,7 +573,7 @@ protected:
   }
 
 public:
-  RGWUserStatsCache(RGWRados *_store, bool quota_threads) : RGWQuotaCache<rgw_user>(_store, _store->ctx()->_conf->rgw_bucket_quota_cache_size),
+  RGWUserStatsCache(rgw::sal::RGWRadosStore *_store, bool quota_threads) : RGWQuotaCache<rgw_user>(_store, _store->ctx()->_conf->rgw_bucket_quota_cache_size),
                                         rwlock("RGWUserStatsCache::rwlock") {
     if (quota_threads) {
       buckets_sync_thread = new BucketsSyncThread(store->ctx(), this);
@@ -615,7 +615,7 @@ public:
 
 int RGWUserStatsCache::fetch_stats_from_storage(const rgw_user& user, const rgw_bucket& bucket, RGWStorageStats& stats)
 {
-  int r = store->ctl.user->read_stats(user, &stats);
+  int r = store->ctl()->user->read_stats(user, &stats);
   if (r < 0) {
     ldout(store->ctx(), 0) << "could not get user stats for user=" << user << dendl;
     return r;
@@ -628,13 +628,13 @@ int RGWUserStatsCache::sync_bucket(const rgw_user& user, rgw_bucket& bucket)
 {
   RGWBucketInfo bucket_info;
 
-  int r = store->ctl.bucket->read_bucket_instance_info(bucket, &bucket_info, null_yield);
+  int r = store->ctl()->bucket->read_bucket_instance_info(bucket, &bucket_info, null_yield);
   if (r < 0) {
     ldout(store->ctx(), 0) << "could not get bucket info for bucket=" << bucket << " r=" << r << dendl;
     return r;
   }
 
-  r = store->ctl.bucket->sync_user_stats(user, bucket_info);
+  r = store->ctl()->bucket->sync_user_stats(user, bucket_info);
   if (r < 0) {
     ldout(store->ctx(), 0) << "ERROR: sync_user_stats() for user=" << user << ", bucket=" << bucket << " returned " << r << dendl;
     return r;
@@ -650,7 +650,7 @@ int RGWUserStatsCache::sync_user(const rgw_user& user)
   ceph::real_time last_stats_sync;
   ceph::real_time last_stats_update;
 
-  int ret = store->ctl.user->read_stats(user_str, &stats, &last_stats_sync, &last_stats_update);
+  int ret = store->ctl()->user->read_stats(user_str, &stats, &last_stats_sync, &last_stats_update);
   if (ret < 0) {
     ldout(store->ctx(), 5) << "ERROR: can't read user header: ret=" << ret << dendl;
     return ret;
@@ -682,7 +682,7 @@ int RGWUserStatsCache::sync_all_users()
   string key = "user";
   void *handle;
 
-  int ret = store->ctl.meta.mgr->list_keys_init(key, &handle);
+  int ret = store->ctl()->meta.mgr->list_keys_init(key, &handle);
   if (ret < 0) {
     ldout(store->ctx(), 10) << "ERROR: can't get key: ret=" << ret << dendl;
     return ret;
@@ -693,7 +693,7 @@ int RGWUserStatsCache::sync_all_users()
 
   do {
     list<string> keys;
-    ret = store->ctl.meta.mgr->list_keys_next(handle, max, keys, &truncated);
+    ret = store->ctl()->meta.mgr->list_keys_next(handle, max, keys, &truncated);
     if (ret < 0) {
       ldout(store->ctx(), 0) << "ERROR: lists_keys_next(): ret=" << ret << dendl;
       goto done;
@@ -715,7 +715,7 @@ int RGWUserStatsCache::sync_all_users()
 
   ret = 0;
 done:
-  store->ctl.meta.mgr->list_keys_complete(handle);
+  store->ctl()->meta.mgr->list_keys_complete(handle);
   return ret;
 }
 
@@ -883,7 +883,7 @@ const RGWQuotaInfoApplier& RGWQuotaInfoApplier::get_instance(
 
 
 class RGWQuotaHandlerImpl : public RGWQuotaHandler {
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   RGWBucketStatsCache bucket_stats_cache;
   RGWUserStatsCache user_stats_cache;
 
@@ -917,7 +917,7 @@ class RGWQuotaHandlerImpl : public RGWQuotaHandler {
     return 0;
   }
 public:
-  RGWQuotaHandlerImpl(RGWRados *_store, bool quota_threads) : store(_store),
+  RGWQuotaHandlerImpl(rgw::sal::RGWRadosStore *_store, bool quota_threads) : store(_store),
                                     bucket_stats_cache(_store),
                                     user_stats_cache(_store, quota_threads) {}
 
@@ -999,7 +999,7 @@ public:
 };
 
 
-RGWQuotaHandler *RGWQuotaHandler::generate_handler(RGWRados *store, bool quota_threads)
+RGWQuotaHandler *RGWQuotaHandler::generate_handler(rgw::sal::RGWRadosStore *store, bool quota_threads)
 {
   return new RGWQuotaHandlerImpl(store, quota_threads);
 }
index a048aa7d809038779951f384a8ac64d502e529de..21bb1d33f67ead9649a9454bea510e654d1c3a08 100644 (file)
@@ -27,8 +27,10 @@ static inline int64_t rgw_rounded_kb(int64_t bytes)
   return (bytes + 1023) / 1024;
 }
 
-class RGWRados;
 class JSONObj;
+namespace rgw { namespace sal {
+  class RGWRadosStore;
+} }
 
 struct RGWQuotaInfo {
   template<class T> friend class RGWQuotaCache;
@@ -112,7 +114,7 @@ public:
 
   virtual void update_stats(const rgw_user& bucket_owner, rgw_bucket& bucket, int obj_delta, uint64_t added_bytes, uint64_t removed_bytes) = 0;
 
-  static RGWQuotaHandler *generate_handler(RGWRados *store, bool quota_threads);
+  static RGWQuotaHandler *generate_handler(rgw::sal::RGWRadosStore *store, bool quota_threads);
   static void free_handler(RGWQuotaHandler *handler);
 };
 
index 12e74eb7c582545e3816dff76b2b07b06f52739f..f0426e0252cd3c8a173781c94418962ac3ea9deb 100644 (file)
@@ -19,7 +19,7 @@
 #include "common/Formatter.h"
 #include "common/Throttle.h"
 
-#include "rgw_rados.h"
+#include "rgw_sal.h"
 #include "rgw_zone.h"
 #include "rgw_cache.h"
 #include "rgw_acl.h"
@@ -483,8 +483,8 @@ class RGWMetaSyncProcessorThread : public RGWSyncProcessorThread
     sync.stop();
   }
 public:
-  RGWMetaSyncProcessorThread(RGWRados *_store, RGWAsyncRadosProcessor *async_rados)
-    : RGWSyncProcessorThread(_store, "meta-sync"), sync(_store, async_rados) {}
+  RGWMetaSyncProcessorThread(rgw::sal::RGWRadosStore *_store, RGWAsyncRadosProcessor *async_rados)
+    : RGWSyncProcessorThread(_store->getRados(), "meta-sync"), sync(_store, async_rados) {}
 
   void wakeup_sync_shards(set<int>& shard_ids) {
     for (set<int>::iterator iter = shard_ids.begin(); iter != shard_ids.end(); ++iter) {
@@ -526,9 +526,9 @@ class RGWDataSyncProcessorThread : public RGWSyncProcessorThread
     sync.stop();
   }
 public:
-  RGWDataSyncProcessorThread(RGWRados *_store, RGWAsyncRadosProcessor *async_rados,
+  RGWDataSyncProcessorThread(rgw::sal::RGWRadosStore *_store, RGWAsyncRadosProcessor *async_rados,
                              const RGWZone* source_zone)
-    : RGWSyncProcessorThread(_store, "data-sync"),
+    : RGWSyncProcessorThread(_store->getRados(), "data-sync"),
       counters(sync_counters::build(store->ctx(), std::string("data-sync-from-") + source_zone->name)),
       sync(_store, async_rados, source_zone->id, counters.get()),
       initialized(false) {}
@@ -565,7 +565,7 @@ public:
 class RGWSyncLogTrimThread : public RGWSyncProcessorThread, DoutPrefixProvider
 {
   RGWCoroutinesManager crs;
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   rgw::BucketTrimManager *bucket_trim;
   RGWHTTPManager http;
   const utime_t trim_interval;
@@ -573,10 +573,10 @@ class RGWSyncLogTrimThread : public RGWSyncProcessorThread, DoutPrefixProvider
   uint64_t interval_msec() override { return 0; }
   void stop_process() override { crs.stop(); }
 public:
-  RGWSyncLogTrimThread(RGWRados *store, rgw::BucketTrimManager *bucket_trim,
+  RGWSyncLogTrimThread(rgw::sal::RGWRadosStore *store, rgw::BucketTrimManager *bucket_trim,
                        int interval)
-    : RGWSyncProcessorThread(store, "sync-log-trim"),
-      crs(store->ctx(), store->get_cr_registry()), store(store),
+    : RGWSyncProcessorThread(store->getRados(), "sync-log-trim"),
+      crs(store->ctx(), store->getRados()->get_cr_registry()), store(store),
       bucket_trim(bucket_trim),
       http(store->ctx(), crs.get_completion_mgr()),
       trim_interval(interval, 0)
@@ -1172,7 +1172,7 @@ int RGWRados::init_complete()
   gc = new RGWGC();
   gc->initialize(cct, this);
 
-  obj_expirer = new RGWObjectExpirer(this);
+  obj_expirer = new RGWObjectExpirer(this->store);
 
   if (use_gc_thread) {
     gc->start_processor();
@@ -1214,7 +1214,7 @@ int RGWRados::init_complete()
     }
     auto async_processor = svc.rados->get_async_processor();
     std::lock_guard l{meta_sync_thread_lock};
-    meta_sync_processor_thread = new RGWMetaSyncProcessorThread(this, async_processor);
+    meta_sync_processor_thread = new RGWMetaSyncProcessorThread(this->store, async_processor);
     ret = meta_sync_processor_thread->init();
     if (ret < 0) {
       ldout(cct, 0) << "ERROR: failed to initialize meta sync thread" << dendl;
@@ -1226,7 +1226,7 @@ int RGWRados::init_complete()
     rgw::BucketTrimConfig config;
     rgw::configure_bucket_trim(cct, config);
 
-    bucket_trim.emplace(this, config);
+    bucket_trim.emplace(this->store, config);
     ret = bucket_trim->init();
     if (ret < 0) {
       ldout(cct, 0) << "ERROR: failed to start bucket trim manager" << dendl;
@@ -1237,7 +1237,7 @@ int RGWRados::init_complete()
     std::lock_guard dl{data_sync_thread_lock};
     for (auto source_zone : svc.zone->get_data_sync_source_zones()) {
       ldout(cct, 5) << "starting data sync thread for zone " << source_zone->name << dendl;
-      auto *thread = new RGWDataSyncProcessorThread(this, svc.rados->get_async_processor(), source_zone);
+      auto *thread = new RGWDataSyncProcessorThread(this->store, svc.rados->get_async_processor(), source_zone);
       ret = thread->init();
       if (ret < 0) {
         ldout(cct, 0) << "ERROR: failed to initialize data sync thread" << dendl;
@@ -1248,7 +1248,7 @@ int RGWRados::init_complete()
     }
     auto interval = cct->_conf->rgw_sync_log_trim_interval;
     if (interval > 0) {
-      sync_log_trimmer = new RGWSyncLogTrimThread(this, &*bucket_trim, interval);
+      sync_log_trimmer = new RGWSyncLogTrimThread(this->store, &*bucket_trim, interval);
       ret = sync_log_trimmer->init();
       if (ret < 0) {
         ldout(cct, 0) << "ERROR: failed to initialize sync log trim thread" << dendl;
@@ -1261,12 +1261,12 @@ int RGWRados::init_complete()
   data_notifier->start();
 
   lc = new RGWLC();
-  lc->initialize(cct, this);
+  lc->initialize(cct, this->store);
 
   if (use_lc_thread)
     lc->start_processor();
 
-  quota_handler = RGWQuotaHandler::generate_handler(this, quota_threads);
+  quota_handler = RGWQuotaHandler::generate_handler(this->store, quota_threads);
 
   bucket_index_max_shards = (cct->_conf->rgw_override_bucket_index_max_shards ? cct->_conf->rgw_override_bucket_index_max_shards :
                              zone.bucket_index_max_shards);
@@ -1288,7 +1288,7 @@ int RGWRados::init_complete()
 
   reshard_wait = std::make_shared<RGWReshardWait>();
 
-  reshard = new RGWReshard(this);
+  reshard = new RGWReshard(this->store);
 
   /* only the master zone in the zonegroup reshards buckets */
   run_reshard_thread = run_reshard_thread && (zonegroup.master_zone == zone.id);
@@ -2430,7 +2430,7 @@ int RGWRados::fix_tail_obj_locator(const RGWBucketInfo& bucket_info, rgw_obj_key
   }
 
   RGWObjState *astate = NULL;
-  RGWObjectCtx rctx(this);
+  RGWObjectCtx rctx(this->store);
   r = get_obj_state(&rctx, bucket_info, obj, &astate, false, y);
   if (r < 0)
     return r;
@@ -3325,7 +3325,7 @@ int RGWRados::rewrite_obj(RGWBucketInfo& dest_bucket_info, const rgw_obj& obj, c
 
   real_time mtime;
   uint64_t obj_size;
-  RGWObjectCtx rctx(this);
+  RGWObjectCtx rctx(this->store);
 
   RGWRados::Object op_target(this, dest_bucket_info, rctx, obj);
   RGWRados::Object::Read read_op(&op_target);
@@ -3597,7 +3597,7 @@ int RGWRados::fetch_remote_obj(RGWObjectCtx& obj_ctx,
   rgw::BlockingAioThrottle aio(cct->_conf->rgw_put_obj_min_window_size);
   using namespace rgw::putobj;
   const rgw_placement_rule *ptail_rule = (dest_placement_rule ? &(*dest_placement_rule) : nullptr);
-  AtomicObjectProcessor processor(&aio, this, dest_bucket_info, ptail_rule, user_id,
+  AtomicObjectProcessor processor(&aio, this->store, dest_bucket_info, ptail_rule, user_id,
                                   obj_ctx, dest_obj, olh_epoch, tag, dpp, null_yield);
   RGWRESTConn *conn;
   auto& zone_conn_map = svc.zone->get_zone_conn_map();
@@ -4188,7 +4188,7 @@ int RGWRados::copy_obj_data(RGWObjectCtx& obj_ctx,
   using namespace rgw::putobj;
   // do not change the null_yield in the initialization of this AtomicObjectProcessor
   // it causes crashes in the ragweed tests
-  AtomicObjectProcessor processor(&aio, this, dest_bucket_info, &dest_placement,
+  AtomicObjectProcessor processor(&aio, this->store, dest_bucket_info, &dest_placement,
                                   dest_bucket_info.owner, obj_ctx,
                                   dest_obj, olh_epoch, tag, dpp, null_yield);
   int ret = processor.prepare(y);
@@ -5485,7 +5485,7 @@ int RGWRados::set_attrs(void *ctx, const RGWBucketInfo& bucket_info, rgw_obj& sr
   if (!op.size())
     return 0;
 
-  RGWObjectCtx obj_ctx(this);
+  RGWObjectCtx obj_ctx(this->store);
 
   bufferlist bl;
   RGWRados::Bucket bop(this, bucket_info);
@@ -6415,10 +6415,10 @@ int RGWRados::block_while_resharding(RGWRados::BucketShard *bs,
       // since we expect to do this rarely, we'll do our work in a
       // block and erase our work after each try
 
-      RGWObjectCtx obj_ctx(this);
+      RGWObjectCtx obj_ctx(this->store);
       const rgw_bucket& b = bs->bucket;
       std::string bucket_id = b.get_key();
-      RGWBucketReshardLock reshard_lock(this, bucket_info, true);
+      RGWBucketReshardLock reshard_lock(this->store, bucket_info, true);
       ret = reshard_lock.lock();
       if (ret < 0) {
        ldout(cct, 20) << __func__ <<
@@ -6428,7 +6428,7 @@ int RGWRados::block_while_resharding(RGWRados::BucketShard *bs,
        ldout(cct, 10) << __func__ <<
          " INFO: was able to take reshard lock for bucket " <<
          bucket_id << dendl;
-       ret = RGWBucketReshard::clear_resharding(this, bucket_info);
+       ret = RGWBucketReshard::clear_resharding(this->store, bucket_info);
        if (ret < 0) {
          reshard_lock.unlock();
          ldout(cct, 0) << __func__ <<
@@ -8296,7 +8296,7 @@ int RGWRados::check_disk_state(librados::IoCtx io_ctx,
   io_ctx.locator_set_key(list_state.locator);
 
   RGWObjState *astate = NULL;
-  RGWObjectCtx rctx(this);
+  RGWObjectCtx rctx(this->store);
   int r = get_obj_state(&rctx, bucket_info, obj, &astate, false, y);
   if (r < 0)
     return r;
@@ -8463,7 +8463,7 @@ int RGWRados::check_bucket_shards(const RGWBucketInfo& bucket_info, const rgw_bu
 
 int RGWRados::add_bucket_to_reshard(const RGWBucketInfo& bucket_info, uint32_t new_num_shards)
 {
-  RGWReshard reshard(this);
+  RGWReshard reshard(this->store);
 
   uint32_t num_source_shards = (bucket_info.num_shards > 0 ? bucket_info.num_shards : 1);
 
@@ -8528,56 +8528,6 @@ uint64_t RGWRados::next_bucket_id()
   return ++max_bucket_id;
 }
 
-RGWRados *RGWStoreManager::init_storage_provider(CephContext *cct, bool use_gc_thread, bool use_lc_thread,
-                                                bool quota_threads, bool run_sync_thread, bool run_reshard_thread, bool use_cache)
-{
-  RGWRados *store = new RGWRados;
-
-  if ((*store).set_use_cache(use_cache)
-              .set_run_gc_thread(use_gc_thread)
-              .set_run_lc_thread(use_lc_thread)
-              .set_run_quota_threads(quota_threads)
-              .set_run_sync_thread(run_sync_thread)
-              .set_run_reshard_thread(run_reshard_thread)
-              .initialize(cct) < 0) {
-    delete store;
-    return NULL;
-  }
-
-  return store;
-}
-
-RGWRados *RGWStoreManager::init_raw_storage_provider(CephContext *cct)
-{
-  RGWRados *store = NULL;
-  store = new RGWRados;
-
-  store->set_context(cct);
-
-  int ret = store->init_svc(true);
-  if (ret < 0) {
-    ldout(cct, 0) << "ERROR: failed to init services (ret=" << cpp_strerror(-ret) << ")" << dendl;
-    return nullptr;
-  }
-
-  if (store->init_rados() < 0) {
-    delete store;
-    return nullptr;
-  }
-
-  return store;
-}
-
-void RGWStoreManager::close_storage(RGWRados *store)
-{
-  if (!store)
-    return;
-
-  store->finalize();
-
-  delete store;
-}
-
 librados::Rados* RGWRados::get_rados_handle()
 {
   return &rados;
index d5963429dc435787c717c8c3200398a1e2088406..60fec069e80eab1ce08d99f880c5c5af8459a8d6 100644 (file)
@@ -203,20 +203,20 @@ struct RGWObjState {
 };
 
 class RGWObjectCtx {
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   ceph::shared_mutex lock = ceph::make_shared_mutex("RGWObjectCtx");
   void *s{nullptr};
 
   std::map<rgw_obj, RGWObjState> objs_state;
 public:
-  explicit RGWObjectCtx(RGWRados *_store) : store(_store) {}
-  explicit RGWObjectCtx(RGWRados *_store, void *_s) : store(_store), s(_s) {}
+  explicit RGWObjectCtx(rgw::sal::RGWRadosStore *_store) : store(_store) {}
+  explicit RGWObjectCtx(rgw::sal::RGWRadosStore *_store, void *_s) : store(_store), s(_s) {}
 
   void *get_private() {
     return s;
   }
 
-  RGWRados *get_store() {
+  rgw::sal::RGWRadosStore *get_store() {
     return store;
   }
 
@@ -348,6 +348,7 @@ public:
 
 class RGWGetDirHeader_CB;
 class RGWGetUserHeader_CB;
+namespace rgw { namespace sal { class RGWRadosStore; } }
 
 class RGWAsyncRadosProcessor;
 
@@ -397,6 +398,7 @@ class RGWRados
   ceph::mutex lock = ceph::make_mutex("rados_timer_lock");
   SafeTimer *timer;
 
+  rgw::sal::RGWRadosStore *store;
   RGWGC *gc;
   RGWLC *lc;
   RGWObjectExpirer *obj_expirer;
@@ -534,6 +536,9 @@ public:
   void set_context(CephContext *_cct) {
     cct = _cct;
   }
+  void set_store(rgw::sal::RGWRadosStore *_store) {
+    store = _store;
+  }
 
   RGWServices svc;
   RGWCtl ctl;
@@ -1489,23 +1494,4 @@ public:
   uint64_t next_bucket_id();
 };
 
-class RGWStoreManager {
-public:
-  RGWStoreManager() {}
-  static RGWRados *get_storage(CephContext *cct, bool use_gc_thread, bool use_lc_thread, bool quota_threads,
-                              bool run_sync_thread, bool run_reshard_thread, bool use_cache = true) {
-    RGWRados *store = init_storage_provider(cct, use_gc_thread, use_lc_thread, quota_threads, run_sync_thread,
-                                           run_reshard_thread, use_cache);
-    return store;
-  }
-  static RGWRados *get_raw_storage(CephContext *cct) {
-    RGWRados *store = init_raw_storage_provider(cct);
-    return store;
-  }
-  static RGWRados *init_storage_provider(CephContext *cct, bool use_gc_thread, bool use_lc_thread, bool quota_threads, bool run_sync_thread, bool run_reshard_thread, bool use_metadata_cache);
-  static RGWRados *init_raw_storage_provider(CephContext *cct);
-  static void close_storage(RGWRados *store);
-
-};
-
 #endif
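
To illustrate the wrapping pattern these header changes introduce, here is a minimal sketch (not part of the patch) of how a caller might wire an RGWRados instance to its rgw::sal::RGWRadosStore wrapper and build an RGWObjectCtx from it; only set_store(), the RGWObjectCtx constructor, and get_store() come from the hunks above, and the function name is hypothetical.

#include "rgw_sal.h"
#include "rgw_rados.h"

// Sketch only: wiring RGWRados to its SAL wrapper and creating an object
// context from the wrapper, per the rgw_rados.h changes above.
void example_wire_up(rgw::sal::RGWRadosStore* store, RGWRados* rados)
{
  rados->set_store(store);      // back-pointer from RGWRados to its wrapper
  RGWObjectCtx obj_ctx(store);  // object contexts now take the SAL store
  (void)obj_ctx.get_store();    // callers get the wrapper back, not RGWRados
}
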
index 75d96c8798734bc4c59cbf0d308d8d88e944ef36..4f5cb5b2cbbe28188b29661818d473661be62f04 100644 (file)
@@ -8,6 +8,7 @@
 #include "rgw_log.h"
 #include "rgw_rest.h"
 #include "rgw_user.h"
+#include "rgw_sal.h"
 
 #include "services/svc_zone.h"
 
@@ -25,7 +26,7 @@
 static constexpr bool USE_SAFE_TIMER_CALLBACKS = false;
 
 
-RGWRealmReloader::RGWRealmReloader(RGWRados*& store, std::map<std::string, std::string>& service_map_meta,
+RGWRealmReloader::RGWRealmReloader(rgw::sal::RGWRadosStore*& store, std::map<std::string, std::string>& service_map_meta,
                                    Pauser* frontends)
   : store(store),
     service_map_meta(service_map_meta),
@@ -113,7 +114,7 @@ void RGWRealmReloader::reload()
 
     ldout(cct, 1) << "Creating new store" << dendl;
 
-    RGWRados* store_cleanup = nullptr;
+    rgw::sal::RGWRadosStore* store_cleanup = nullptr;
     {
       std::unique_lock lock{mutex};
 
@@ -150,7 +151,7 @@ void RGWRealmReloader::reload()
     }
   }
 
-  int r = store->register_to_service_map("rgw", service_map_meta);
+  int r = store->getRados()->register_to_service_map("rgw", service_map_meta);
   if (r < 0) {
     lderr(cct) << "ERROR: failed to register to service map: " << cpp_strerror(-r) << dendl;
 
@@ -160,9 +161,9 @@ void RGWRealmReloader::reload()
   ldout(cct, 1) << "Finishing initialization of new store" << dendl;
   // finish initializing the new store
   ldout(cct, 1) << " - REST subsystem init" << dendl;
-  rgw_rest_init(cct, store, store->svc.zone->get_zonegroup());
+  rgw_rest_init(cct, store->svc()->zone->get_zonegroup());
   ldout(cct, 1) << " - usage subsystem init" << dendl;
-  rgw_log_usage_init(cct, store);
+  rgw_log_usage_init(cct, store->getRados());
 
   ldout(cct, 1) << "Resuming frontends with new realm configuration." << dendl;
 
index 24d10ae6217ac1472163f755df2d0f830a58ffa2..6852a27a5b352e013c53a6cbee6df806e9eb5a17 100644 (file)
@@ -7,7 +7,11 @@
 #include "rgw_realm_watcher.h"
 #include "common/Cond.h"
 
-class RGWRados;
+namespace rgw {
+namespace sal {
+class RGWRadosStore;
+}
+}
 
 /**
  * RGWRealmReloader responds to new period notifications by recreating RGWRados
@@ -29,10 +33,10 @@ class RGWRealmReloader : public RGWRealmWatcher::Watcher {
     /// pause all frontends while realm reconfiguration is in progress
     virtual void pause() = 0;
     /// resume all frontends with the given RGWRados instance
-    virtual void resume(RGWRados* store) = 0;
+    virtual void resume(rgw::sal::RGWRadosStore* store) = 0;
   };
 
-  RGWRealmReloader(RGWRados*& store, std::map<std::string, std::string>& service_map_meta,
+  RGWRealmReloader(rgw::sal::RGWRadosStore*& store, std::map<std::string, std::string>& service_map_meta,
                    Pauser* frontends);
   ~RGWRealmReloader() override;
 
@@ -45,8 +49,8 @@ class RGWRealmReloader : public RGWRealmWatcher::Watcher {
 
   class C_Reload; //< Context that calls reload()
 
-  /// main()'s RGWRados pointer as a reference, modified by reload()
-  RGWRados*& store;
+  /// main()'s RGWRadosStore pointer as a reference, modified by reload()
+  rgw::sal::RGWRadosStore*& store;
   std::map<std::string, std::string>& service_map_meta;
   Pauser *const frontends;
 
index 6f6d81960c78e0eaa3f05bb4f21e2d04033bd486..5ff1e6309aeeff6d2d7b87481ec0e570658499c0 100644 (file)
@@ -8,6 +8,7 @@
 #include "rgw_zone.h"
 #include "rgw_bucket.h"
 #include "rgw_reshard.h"
+#include "rgw_sal.h"
 #include "cls/rgw/cls_rgw_client.h"
 #include "cls/lock/cls_lock_client.h"
 #include "common/errno.h"
@@ -27,7 +28,7 @@ const string bucket_instance_lock_name = "bucket_instance_lock";
 
 
 class BucketReshardShard {
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   const RGWBucketInfo& bucket_info;
   int num_shard;
   RGWRados::BucketShard bs;
@@ -69,10 +70,10 @@ class BucketReshardShard {
   }
 
 public:
-  BucketReshardShard(RGWRados *_store, const RGWBucketInfo& _bucket_info,
+  BucketReshardShard(rgw::sal::RGWRadosStore *_store, const RGWBucketInfo& _bucket_info,
                      int _num_shard,
                      deque<librados::AioCompletion *>& _completions) :
-    store(_store), bucket_info(_bucket_info), bs(store),
+    store(_store), bucket_info(_bucket_info), bs(store->getRados()),
     aio_completions(_completions)
   {
     num_shard = (bucket_info.num_shards > 0 ? _num_shard : -1);
@@ -115,7 +116,7 @@ public:
 
     librados::ObjectWriteOperation op;
     for (auto& entry : entries) {
-      store->bi_put(op, bs, entry);
+      store->getRados()->bi_put(op, bs, entry);
     }
     cls_rgw_bucket_update_stats(op, false, stats);
 
@@ -148,14 +149,14 @@ public:
 
 
 class BucketReshardManager {
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   const RGWBucketInfo& target_bucket_info;
   deque<librados::AioCompletion *> completions;
   int num_target_shards;
   vector<BucketReshardShard *> target_shards;
 
 public:
-  BucketReshardManager(RGWRados *_store,
+  BucketReshardManager(rgw::sal::RGWRadosStore *_store,
                       const RGWBucketInfo& _target_bucket_info,
                       int _num_target_shards) :
     store(_store), target_bucket_info(_target_bucket_info),
@@ -213,7 +214,7 @@ public:
   }
 }; // class BucketReshardManager
 
-RGWBucketReshard::RGWBucketReshard(RGWRados *_store,
+RGWBucketReshard::RGWBucketReshard(rgw::sal::RGWRadosStore *_store,
                                   const RGWBucketInfo& _bucket_info,
                                   const map<string, bufferlist>& _bucket_attrs,
                                   RGWBucketReshardLock* _outer_reshard_lock) :
@@ -222,7 +223,7 @@ RGWBucketReshard::RGWBucketReshard(RGWRados *_store,
   outer_reshard_lock(_outer_reshard_lock)
 { }
 
-int RGWBucketReshard::set_resharding_status(RGWRados* store,
+int RGWBucketReshard::set_resharding_status(rgw::sal::RGWRadosStore* store,
                                            const RGWBucketInfo& bucket_info,
                                            const string& new_instance_id,
                                            int32_t num_shards,
@@ -236,7 +237,7 @@ int RGWBucketReshard::set_resharding_status(RGWRados* store,
   cls_rgw_bucket_instance_entry instance_entry;
   instance_entry.set_status(new_instance_id, num_shards, status);
 
-  int ret = store->bucket_set_reshard(bucket_info, instance_entry);
+  int ret = store->getRados()->bucket_set_reshard(bucket_info, instance_entry);
   if (ret < 0) {
     ldout(store->ctx(), 0) << "RGWReshard::" << __func__ << " ERROR: error setting bucket resharding flag on bucket index: "
                  << cpp_strerror(-ret) << dendl;
@@ -246,7 +247,7 @@ int RGWBucketReshard::set_resharding_status(RGWRados* store,
 }
 
 // reshard lock assumes lock is held
-int RGWBucketReshard::clear_resharding(RGWRados* store,
+int RGWBucketReshard::clear_resharding(rgw::sal::RGWRadosStore* store,
                                       const RGWBucketInfo& bucket_info)
 {
   int ret = clear_index_shard_reshard_status(store, bucket_info);
@@ -258,7 +259,7 @@ int RGWBucketReshard::clear_resharding(RGWRados* store,
   }
 
   cls_rgw_bucket_instance_entry instance_entry;
-  ret = store->bucket_set_reshard(bucket_info, instance_entry);
+  ret = store->getRados()->bucket_set_reshard(bucket_info, instance_entry);
   if (ret < 0) {
     ldout(store->ctx(), 0) << "RGWReshard::" << __func__ <<
       " ERROR: error setting bucket resharding flag on bucket index: " <<
@@ -269,7 +270,7 @@ int RGWBucketReshard::clear_resharding(RGWRados* store,
   return 0;
 }
 
-int RGWBucketReshard::clear_index_shard_reshard_status(RGWRados* store,
+int RGWBucketReshard::clear_index_shard_reshard_status(rgw::sal::RGWRadosStore* store,
                                                       const RGWBucketInfo& bucket_info)
 {
   uint32_t num_shards = bucket_info.num_shards;
@@ -290,7 +291,7 @@ int RGWBucketReshard::clear_index_shard_reshard_status(RGWRados* store,
   return 0;
 }
 
-static int create_new_bucket_instance(RGWRados *store,
+static int create_new_bucket_instance(rgw::sal::RGWRadosStore *store,
                                      int new_num_shards,
                                      const RGWBucketInfo& bucket_info,
                                      map<string, bufferlist>& attrs,
@@ -298,7 +299,7 @@ static int create_new_bucket_instance(RGWRados *store,
 {
   new_bucket_info = bucket_info;
 
-  store->create_bucket_id(&new_bucket_info.bucket.bucket_id);
+  store->getRados()->create_bucket_id(&new_bucket_info.bucket.bucket_id);
 
   new_bucket_info.num_shards = new_num_shards;
   new_bucket_info.objv_tracker.clear();
@@ -306,13 +307,13 @@ static int create_new_bucket_instance(RGWRados *store,
   new_bucket_info.new_bucket_instance_id.clear();
   new_bucket_info.reshard_status = 0;
 
-  int ret = store->svc.bi->init_index(new_bucket_info);
+  int ret = store->svc()->bi->init_index(new_bucket_info);
   if (ret < 0) {
     cerr << "ERROR: failed to init new bucket indexes: " << cpp_strerror(-ret) << std::endl;
     return ret;
   }
 
-  ret = store->put_bucket_instance_info(new_bucket_info, true, real_time(), &attrs);
+  ret = store->getRados()->put_bucket_instance_info(new_bucket_info, true, real_time(), &attrs);
   if (ret < 0) {
     cerr << "ERROR: failed to store new bucket instance info: " << cpp_strerror(-ret) << std::endl;
     return ret;
@@ -343,7 +344,7 @@ int RGWBucketReshard::cancel()
 
 class BucketInfoReshardUpdate
 {
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   RGWBucketInfo bucket_info;
   std::map<string, bufferlist> bucket_attrs;
 
@@ -351,7 +352,7 @@ class BucketInfoReshardUpdate
 
   int set_status(cls_rgw_reshard_status s) {
     bucket_info.reshard_status = s;
-    int ret = store->put_bucket_instance_info(bucket_info, false, real_time(), &bucket_attrs);
+    int ret = store->getRados()->put_bucket_instance_info(bucket_info, false, real_time(), &bucket_attrs);
     if (ret < 0) {
       ldout(store->ctx(), 0) << "ERROR: failed to write bucket info, ret=" << ret << dendl;
       return ret;
@@ -360,7 +361,7 @@ class BucketInfoReshardUpdate
   }
 
 public:
-  BucketInfoReshardUpdate(RGWRados *_store,
+  BucketInfoReshardUpdate(rgw::sal::RGWRadosStore *_store,
                          RGWBucketInfo& _bucket_info,
                           map<string, bufferlist>& _bucket_attrs,
                          const string& new_bucket_id) :
@@ -405,7 +406,7 @@ public:
 };
 
 
-RGWBucketReshardLock::RGWBucketReshardLock(RGWRados* _store,
+RGWBucketReshardLock::RGWBucketReshardLock(rgw::sal::RGWRadosStore* _store,
                                           const std::string& reshard_lock_oid,
                                           bool _ephemeral) :
   store(_store),
@@ -430,10 +431,10 @@ int RGWBucketReshardLock::lock() {
   internal_lock.set_must_renew(false);
   int ret;
   if (ephemeral) {
-    ret = internal_lock.lock_exclusive_ephemeral(&store->reshard_pool_ctx,
+    ret = internal_lock.lock_exclusive_ephemeral(&store->getRados()->reshard_pool_ctx,
                                                 lock_oid);
   } else {
-    ret = internal_lock.lock_exclusive(&store->reshard_pool_ctx, lock_oid);
+    ret = internal_lock.lock_exclusive(&store->getRados()->reshard_pool_ctx, lock_oid);
   }
   if (ret < 0) {
     ldout(store->ctx(), 0) << "RGWReshardLock::" << __func__ <<
@@ -446,7 +447,7 @@ int RGWBucketReshardLock::lock() {
 }
 
 void RGWBucketReshardLock::unlock() {
-  int ret = internal_lock.unlock(&store->reshard_pool_ctx, lock_oid);
+  int ret = internal_lock.unlock(&store->getRados()->reshard_pool_ctx, lock_oid);
   if (ret < 0) {
     ldout(store->ctx(), 0) << "WARNING: RGWBucketReshardLock::" << __func__ <<
       " failed to drop lock on " << lock_oid << " ret=" << ret << dendl;
@@ -457,10 +458,10 @@ int RGWBucketReshardLock::renew(const Clock::time_point& now) {
   internal_lock.set_must_renew(true);
   int ret;
   if (ephemeral) {
-    ret = internal_lock.lock_exclusive_ephemeral(&store->reshard_pool_ctx,
+    ret = internal_lock.lock_exclusive_ephemeral(&store->getRados()->reshard_pool_ctx,
                                                 lock_oid);
   } else {
-    ret = internal_lock.lock_exclusive(&store->reshard_pool_ctx, lock_oid);
+    ret = internal_lock.lock_exclusive(&store->getRados()->reshard_pool_ctx, lock_oid);
   }
   if (ret < 0) { /* expired or already locked by another processor */
     std::stringstream error_s;
@@ -546,7 +547,7 @@ int RGWBucketReshard::do_reshard(int num_shards,
     marker.clear();
     while (is_truncated) {
       entries.clear();
-      ret = store->bi_list(bucket, i, string(), marker, max_entries, &entries, &is_truncated);
+      ret = store->getRados()->bi_list(bucket, i, string(), marker, max_entries, &entries, &is_truncated);
       if (ret < 0 && ret != -ENOENT) {
        derr << "ERROR: bi_list(): " << cpp_strerror(-ret) << dendl;
        return ret;
@@ -572,7 +573,7 @@ int RGWBucketReshard::do_reshard(int num_shards,
        bool account = entry.get_info(&cls_key, &category, &stats);
        rgw_obj_key key(cls_key);
        rgw_obj obj(new_bucket_info.bucket, key);
-       int ret = store->get_target_shard_id(new_bucket_info, obj.get_hash_object(), &target_shard_id);
+       int ret = store->getRados()->get_target_shard_id(new_bucket_info, obj.get_hash_object(), &target_shard_id);
        if (ret < 0) {
          lderr(store->ctx()) << "ERROR: get_target_shard_id() returned ret=" << ret << dendl;
          return ret;
@@ -630,7 +631,7 @@ int RGWBucketReshard::do_reshard(int num_shards,
     return -EIO;
   }
 
-  ret = store->ctl.bucket->link_bucket(new_bucket_info.owner, new_bucket_info.bucket, bucket_info.creation_time, null_yield);
+  ret = store->ctl()->bucket->link_bucket(new_bucket_info.owner, new_bucket_info.bucket, bucket_info.creation_time, null_yield);
   if (ret < 0) {
     lderr(store->ctx()) << "failed to link new bucket instance (bucket_id=" << new_bucket_info.bucket.bucket_id << ": " << cpp_strerror(-ret) << ")" << dendl;
     return ret;
@@ -648,7 +649,7 @@ int RGWBucketReshard::do_reshard(int num_shards,
 
 int RGWBucketReshard::get_status(list<cls_rgw_bucket_instance_entry> *status)
 {
-  return store->svc.bi_rados->get_reshard_status(bucket_info, status);
+  return store->svc()->bi_rados->get_reshard_status(bucket_info, status);
 }
 
 
@@ -701,14 +702,14 @@ int RGWBucketReshard::execute(int num_shards, int max_op_entries,
   // best effort and don't report out an error; the lock isn't needed
   // at this point since all we're doing is a best-effort removal of old
   // shard objects
-  ret = store->svc.bi->clean_index(bucket_info);
+  ret = store->svc()->bi->clean_index(bucket_info);
   if (ret < 0) {
     lderr(store->ctx()) << "Error: " << __func__ <<
       " failed to clean up old shards; " <<
       "RGWRados::clean_bucket_index returned " << ret << dendl;
   }
 
-  ret = store->ctl.bucket->remove_bucket_instance_info(bucket_info.bucket,
+  ret = store->ctl()->bucket->remove_bucket_instance_info(bucket_info.bucket,
                                                        bucket_info, null_yield);
   if (ret < 0) {
     lderr(store->ctx()) << "Error: " << __func__ <<
@@ -731,14 +732,14 @@ error_out:
   // since the real problem is the issue that led to this error code
   // path, we won't touch ret and instead use another variable to
   // temporarily hold error codes
-  int ret2 = store->svc.bi->clean_index(new_bucket_info);
+  int ret2 = store->svc()->bi->clean_index(new_bucket_info);
   if (ret2 < 0) {
     lderr(store->ctx()) << "Error: " << __func__ <<
       " failed to clean up shards from failed incomplete resharding; " <<
       "RGWRados::clean_bucket_index returned " << ret2 << dendl;
   }
 
-  ret2 = store->ctl.bucket->remove_bucket_instance_info(new_bucket_info.bucket,
+  ret2 = store->ctl()->bucket->remove_bucket_instance_info(new_bucket_info.bucket,
                                                         new_bucket_info,
                                                        null_yield);
   if (ret2 < 0) {
@@ -752,7 +753,7 @@ error_out:
 } // execute
 
 
-RGWReshard::RGWReshard(RGWRados* _store, bool _verbose, ostream *_out,
+RGWReshard::RGWReshard(rgw::sal::RGWRadosStore* _store, bool _verbose, ostream *_out,
                        Formatter *_formatter) :
   store(_store), instance_lock(bucket_instance_lock_name),
   verbose(_verbose), out(_out), formatter(_formatter)
@@ -781,7 +782,7 @@ void RGWReshard::get_bucket_logshard_oid(const string& tenant, const string& buc
 
 int RGWReshard::add(cls_rgw_reshard_entry& entry)
 {
-  if (!store->svc.zone->can_reshard()) {
+  if (!store->svc()->zone->can_reshard()) {
     ldout(store->ctx(), 20) << __func__ << " Resharding is disabled"  << dendl;
     return 0;
   }
@@ -793,7 +794,7 @@ int RGWReshard::add(cls_rgw_reshard_entry& entry)
   librados::ObjectWriteOperation op;
   cls_rgw_reshard_add(op, entry);
 
-  int ret = store->reshard_pool_ctx.operate(logshard_oid, &op);
+  int ret = store->getRados()->reshard_pool_ctx.operate(logshard_oid, &op);
   if (ret < 0) {
     lderr(store->ctx()) << "ERROR: failed to add entry to reshard log, oid=" << logshard_oid << " tenant=" << entry.tenant << " bucket=" << entry.bucket_name << dendl;
     return ret;
@@ -831,7 +832,7 @@ int RGWReshard::list(int logshard_num, string& marker, uint32_t max, std::list<c
 
   get_logshard_oid(logshard_num, &logshard_oid);
 
-  int ret = cls_rgw_reshard_list(store->reshard_pool_ctx, logshard_oid, marker, max, entries, is_truncated);
+  int ret = cls_rgw_reshard_list(store->getRados()->reshard_pool_ctx, logshard_oid, marker, max, entries, is_truncated);
 
   if (ret < 0) {
     if (ret == -ENOENT) {
@@ -840,7 +841,7 @@ int RGWReshard::list(int logshard_num, string& marker, uint32_t max, std::list<c
     }
     lderr(store->ctx()) << "ERROR: failed to list reshard log entries, oid=" << logshard_oid << dendl;
     if (ret == -EACCES) {
-      lderr(store->ctx()) << "access denied to pool " << store->svc.zone->get_zone_params().reshard_pool
+      lderr(store->ctx()) << "access denied to pool " << store->svc()->zone->get_zone_params().reshard_pool
                           << ". Fix the pool access permissions of your client" << dendl;
     }
   }
@@ -854,7 +855,7 @@ int RGWReshard::get(cls_rgw_reshard_entry& entry)
 
   get_bucket_logshard_oid(entry.tenant, entry.bucket_name, &logshard_oid);
 
-  int ret = cls_rgw_reshard_get(store->reshard_pool_ctx, logshard_oid, entry);
+  int ret = cls_rgw_reshard_get(store->getRados()->reshard_pool_ctx, logshard_oid, entry);
   if (ret < 0) {
     if (ret != -ENOENT) {
       lderr(store->ctx()) << "ERROR: failed to get entry from reshard log, oid=" << logshard_oid << " tenant=" << entry.tenant <<
@@ -875,7 +876,7 @@ int RGWReshard::remove(cls_rgw_reshard_entry& entry)
   librados::ObjectWriteOperation op;
   cls_rgw_reshard_remove(op, entry);
 
-  int ret = store->reshard_pool_ctx.operate(logshard_oid, &op);
+  int ret = store->getRados()->reshard_pool_ctx.operate(logshard_oid, &op);
   if (ret < 0) {
     lderr(store->ctx()) << "ERROR: failed to remove entry from reshard log, oid=" << logshard_oid << " tenant=" << entry.tenant << " bucket=" << entry.bucket_name << dendl;
     return ret;
@@ -886,7 +887,7 @@ int RGWReshard::remove(cls_rgw_reshard_entry& entry)
 
 int RGWReshard::clear_bucket_resharding(const string& bucket_instance_oid, cls_rgw_reshard_entry& entry)
 {
-  int ret = cls_rgw_clear_bucket_resharding(store->reshard_pool_ctx, bucket_instance_oid);
+  int ret = cls_rgw_clear_bucket_resharding(store->getRados()->reshard_pool_ctx, bucket_instance_oid);
   if (ret < 0) {
     lderr(store->ctx()) << "ERROR: failed to clear bucket resharding, bucket_instance_oid=" << bucket_instance_oid << dendl;
     return ret;
@@ -978,12 +979,12 @@ int RGWReshard::process_single_logshard(int logshard_num)
        ldout(store->ctx(), 20) << __func__ << " resharding " <<
          entry.bucket_name  << dendl;
 
-        auto obj_ctx = store->svc.sysobj->init_obj_ctx();
+        auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
        rgw_bucket bucket;
        RGWBucketInfo bucket_info;
        map<string, bufferlist> attrs;
 
-       ret = store->get_bucket_info(obj_ctx, entry.tenant, entry.bucket_name,
+       ret = store->getRados()->get_bucket_info(obj_ctx, entry.tenant, entry.bucket_name,
                                     bucket_info, nullptr, null_yield, &attrs);
        if (ret < 0) {
          ldout(cct, 0) <<  __func__ << ": Error in get_bucket_info: " <<
@@ -1044,7 +1045,7 @@ void  RGWReshard::get_logshard_oid(int shard_num, string *logshard)
 
 int RGWReshard::process_all_logshards()
 {
-  if (!store->svc.zone->can_reshard()) {
+  if (!store->svc()->zone->can_reshard()) {
     ldout(store->ctx(), 20) << __func__ << " Resharding is disabled"  << dendl;
     return 0;
   }
index 8f76616512c583913bd4473a1c114b2ee0d2ae7d..328442dba247d61d7a1bce1445d4f2e8d81a8ff9 100644 (file)
 
 
 class CephContext;
-class RGWRados;
 class RGWReshard;
+namespace rgw { namespace sal {
+  class RGWRadosStore;
+} }
 
 class RGWBucketReshardLock {
   using Clock = ceph::coarse_mono_clock;
 
-  RGWRados* store;
+  rgw::sal::RGWRadosStore* store;
   const std::string lock_oid;
   const bool ephemeral;
   rados::cls::lock::Lock internal_lock;
@@ -41,10 +43,10 @@ class RGWBucketReshardLock {
   }
 
 public:
-  RGWBucketReshardLock(RGWRados* _store,
+  RGWBucketReshardLock(rgw::sal::RGWRadosStore* _store,
                       const std::string& reshard_lock_oid,
                       bool _ephemeral);
-  RGWBucketReshardLock(RGWRados* _store,
+  RGWBucketReshardLock(rgw::sal::RGWRadosStore* _store,
                       const RGWBucketInfo& bucket_info,
                       bool _ephemeral) :
     RGWBucketReshardLock(_store, bucket_info.bucket.get_key(':'), _ephemeral)
@@ -68,7 +70,7 @@ public:
 
 private:
 
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   RGWBucketInfo bucket_info;
   std::map<string, bufferlist> bucket_attrs;
 
@@ -87,7 +89,7 @@ public:
 
   // pass nullptr for the final parameter if no outer reshard lock to
   // manage
-  RGWBucketReshard(RGWRados *_store, const RGWBucketInfo& _bucket_info,
+  RGWBucketReshard(rgw::sal::RGWRadosStore *_store, const RGWBucketInfo& _bucket_info,
                    const std::map<string, bufferlist>& _bucket_attrs,
                   RGWBucketReshardLock* _outer_reshard_lock);
   int execute(int num_shards, int max_op_entries,
@@ -96,17 +98,17 @@ public:
              RGWReshard *reshard_log = nullptr);
   int get_status(std::list<cls_rgw_bucket_instance_entry> *status);
   int cancel();
-  static int clear_resharding(RGWRados* store,
+  static int clear_resharding(rgw::sal::RGWRadosStore* store,
                              const RGWBucketInfo& bucket_info);
   int clear_resharding() {
     return clear_resharding(store, bucket_info);
   }
-  static int clear_index_shard_reshard_status(RGWRados* store,
+  static int clear_index_shard_reshard_status(rgw::sal::RGWRadosStore* store,
                                              const RGWBucketInfo& bucket_info);
   int clear_index_shard_reshard_status() {
     return clear_index_shard_reshard_status(store, bucket_info);
   }
-  static int set_resharding_status(RGWRados* store,
+  static int set_resharding_status(rgw::sal::RGWRadosStore* store,
                                   const RGWBucketInfo& bucket_info,
                                   const string& new_instance_id,
                                   int32_t num_shards,
@@ -124,7 +126,7 @@ public:
     using Clock = ceph::coarse_mono_clock;
 
 private:
-    RGWRados *store;
+    rgw::sal::RGWRadosStore *store;
     string lock_name;
     rados::cls::lock::Lock instance_lock;
     int num_logshards;
@@ -159,7 +161,7 @@ protected:
   void get_bucket_logshard_oid(const string& tenant, const string& bucket_name, string *oid);
 
 public:
-  RGWReshard(RGWRados* _store, bool _verbose = false, ostream *_out = nullptr, Formatter *_formatter = nullptr);
+  RGWReshard(rgw::sal::RGWRadosStore* _store, bool _verbose = false, ostream *_out = nullptr, Formatter *_formatter = nullptr);
   int add(cls_rgw_reshard_entry& entry);
   int update(const RGWBucketInfo& bucket_info, const RGWBucketInfo& new_bucket_info);
   int get(cls_rgw_reshard_entry& entry);
index 37ec4ac984e877806915a4fa779011087ec38205..b5cd5536d907b6ce07045edc8e08086e58b78cb2 100644 (file)
@@ -176,7 +176,7 @@ string uppercase_underscore_http_attr(const string& orig)
 static set<string> hostnames_set;
 static set<string> hostnames_s3website_set;
 
-void rgw_rest_init(CephContext *cct, RGWRados *store, const RGWZoneGroup& zone_group)
+void rgw_rest_init(CephContext *cct, const RGWZoneGroup& zone_group)
 {
   for (const auto& rgw2http : base_rgw_to_http_attrs)  {
     rgw_to_http_attrs[rgw2http.rgw_attr] = rgw2http.http_attr;
@@ -1638,7 +1638,7 @@ int RGWRESTOp::verify_permission()
   return check_caps(s->user->caps);
 }
 
-RGWOp* RGWHandler_REST::get_op(RGWRados* store)
+RGWOp* RGWHandler_REST::get_op(rgw::sal::RGWRadosStore* store)
 {
   RGWOp *op;
   switch (s->op) {
@@ -1838,7 +1838,7 @@ int RGWHandler_REST::init_permissions(RGWOp* op)
     if (! s->user->user_id.empty() && s->auth.identity->get_identity_type() != TYPE_ROLE) {
       try {
         map<string, bufferlist> uattrs;
-        if (auto ret = store->ctl.user->get_attrs_by_uid(s->user->user_id, &uattrs, null_yield); ! ret) {
+        if (auto ret = store->ctl()->user->get_attrs_by_uid(s->user->user_id, &uattrs, null_yield); ! ret) {
           if (s->iam_user_policies.empty()) {
             s->iam_user_policies = get_iam_user_policy_from_attr(s->cct, store, uattrs, s->user->user_id.tenant);
           } else {
@@ -2253,7 +2253,7 @@ int RGWREST::preprocess(struct req_state *s, rgw::io::BasicClient* cio)
 }
 
 RGWHandler_REST* RGWREST::get_handler(
-  RGWRados * const store,
+  rgw::sal::RGWRadosStore * const store,
   struct req_state* const s,
   const rgw::auth::StrategyRegistry& auth_registry,
   const std::string& frontend_prefix,
index 09916ff8f32986fb0c04187ef5acd7584678352f..8486c878d712a6b40413f0e946984d5fa955e310 100644 (file)
@@ -17,7 +17,7 @@
 
 extern std::map<std::string, std::string> rgw_to_http_attrs;
 
-extern void rgw_rest_init(CephContext *cct, RGWRados *store, const RGWZoneGroup& zone_group);
+extern void rgw_rest_init(CephContext *cct, const RGWZoneGroup& zone_group);
 
 extern void rgw_flush_formatter_and_reset(struct req_state *s,
                                         ceph::Formatter *formatter);
@@ -163,7 +163,7 @@ protected:
 public:
   RGWGetObj_ObjStore() : sent_header(false) {}
 
-  void init(RGWRados *store, struct req_state *s, RGWHandler *h) override {
+  void init(rgw::sal::RGWRadosStore *store, struct req_state *s, RGWHandler *h) override {
     RGWGetObj::init(store, s, h);
     sent_header = false;
   }
@@ -529,7 +529,7 @@ protected:
   RGWRESTFlusher flusher;
 public:
   RGWRESTOp() : http_ret(0) {}
-  void init(RGWRados *store, struct req_state *s,
+  void init(rgw::sal::RGWRadosStore *store, struct req_state *s,
             RGWHandler *dialect_handler) override {
     RGWOp::init(store, s, dialect_handler);
     flusher.init(s, this);
@@ -570,7 +570,7 @@ public:
   int init_permissions(RGWOp* op) override;
   int read_permissions(RGWOp* op) override;
 
-  virtual RGWOp* get_op(RGWRados* store);
+  virtual RGWOp* get_op(rgw::sal::RGWRadosStore* store);
   virtual void put_op(RGWOp* op);
 };
 
@@ -655,7 +655,7 @@ class RGWREST {
   static int preprocess(struct req_state *s, rgw::io::BasicClient* rio);
 public:
   RGWREST() {}
-  RGWHandler_REST *get_handler(RGWRados *store,
+  RGWHandler_REST *get_handler(rgw::sal::RGWRadosStore *store,
                                struct req_state *s,
                                const rgw::auth::StrategyRegistry& auth_registry,
                                const std::string& frontend_prefix,
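
With the store argument dropped from rgw_rest_init(), callers pass only the zonegroup, fetched through the store's svc() accessor; a short sketch follows (the call matches the rgw_realm_reloader.cc hunk above, the wrapper function name is hypothetical).

#include "rgw_sal.h"
#include "rgw_rest.h"
#include "services/svc_zone.h"

// Sketch only: REST init no longer needs the store itself.
void example_rest_init(CephContext* cct, rgw::sal::RGWRadosStore* store)
{
  rgw_rest_init(cct, store->svc()->zone->get_zonegroup());
}
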
index 165f70126d7643d171d5425faf0004ab7ba04219..295820db600258b7f9e72518bed281d1133c0941 100644 (file)
@@ -267,8 +267,8 @@ void RGWOp_Set_Bucket_Quota::execute()
   if (use_http_params) {
     RGWBucketInfo bucket_info;
     map<string, bufferlist> attrs;
-    auto obj_ctx = store->svc.sysobj->init_obj_ctx();
-    http_ret = store->get_bucket_info(obj_ctx, uid.tenant, bucket, bucket_info, NULL, s->yield, &attrs);
+    auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
+    http_ret = store->getRados()->get_bucket_info(obj_ctx, uid.tenant, bucket, bucket_info, NULL, s->yield, &attrs);
     if (http_ret < 0) {
       return;
     }
index e5b863d034b94e5bab0cd5af26da67e203ce6fad..9782fbc2363c9f2e20fc65a327bc8b96d7b50380 100644 (file)
@@ -30,7 +30,7 @@
 #define dout_subsys ceph_subsys_rgw
 
 void RGWOp_ZoneGroupMap_Get::execute() {
-  http_ret = zonegroup_map.read(g_ceph_context, store->svc.sysobj);
+  http_ret = zonegroup_map.read(g_ceph_context, store->svc()->sysobj);
   if (http_ret < 0) {
     dout(5) << "failed to read zone_group map" << dendl;
   }
@@ -58,7 +58,7 @@ void RGWOp_ZoneGroupMap_Get::send_response() {
 }
 
 void RGWOp_ZoneConfig_Get::send_response() {
-  const RGWZoneParams& zone_params = store->svc.zone->get_zone_params();
+  const RGWZoneParams& zone_params = store->svc()->zone->get_zone_params();
 
   set_req_state_err(s, http_ret);
   dump_errno(s);
index ef0e958df35212a2f971c85027da7248d0f0dd26..5436ef0e7ca4a44807ba21a66976409a9ee83554 100644 (file)
@@ -77,7 +77,7 @@ RGWOp *RGWHandler_REST_IAM::op_post()
   return nullptr;
 }
 
-int RGWHandler_REST_IAM::init(RGWRados *store,
+int RGWHandler_REST_IAM::init(rgw::sal::RGWRadosStore *store,
                               struct req_state *s,
                               rgw::io::BasicClient *cio)
 {
index e9dbfcd0f853111e52c8743997ca4285b9b5a8ed..f2f34b78826ba7633d12e8004681d5a820e23b8b 100644 (file)
@@ -22,7 +22,7 @@ public:
       post_body(post_body) {}
   ~RGWHandler_REST_IAM() override = default;
 
-  int init(RGWRados *store,
+  int init(rgw::sal::RGWRadosStore *store,
            struct req_state *s,
            rgw::io::BasicClient *cio) override;
   int authorize(const DoutPrefixProvider* dpp) override;
index 0fcb3f1d36ec797ec2b9412c42d68c6d4ed44034..edf64a098201a893090ef3b36d6f3bb89d55e04d 100644 (file)
@@ -96,7 +96,7 @@ void RGWOp_MDLog_List::execute() {
 
   if (period.empty()) {
     ldout(s->cct, 5) << "Missing period id trying to use current" << dendl;
-    period = store->svc.zone->get_current_period_id();
+    period = store->svc()->zone->get_current_period_id();
     if (period.empty()) {
       ldout(s->cct, 5) << "Missing period id" << dendl;
       http_ret = -EINVAL;
@@ -104,7 +104,7 @@ void RGWOp_MDLog_List::execute() {
     }
   }
 
-  RGWMetadataLog meta_log{s->cct, store->svc.zone, store->svc.cls, period};
+  RGWMetadataLog meta_log{s->cct, store->svc()->zone, store->svc()->cls, period};
 
   meta_log.init_list_entries(shard_id, ut_st, ut_et, marker, &handle);
 
@@ -130,7 +130,7 @@ void RGWOp_MDLog_List::send_response() {
     for (list<cls_log_entry>::iterator iter = entries.begin();
         iter != entries.end(); ++iter) {
       cls_log_entry& entry = *iter;
-      store->ctl.meta.mgr->dump_log_entry(entry, s->formatter);
+      store->ctl()->meta.mgr->dump_log_entry(entry, s->formatter);
       flusher.flush();
     }
     s->formatter->close_section();
@@ -141,7 +141,7 @@ void RGWOp_MDLog_List::send_response() {
 
 void RGWOp_MDLog_Info::execute() {
   num_objects = s->cct->_conf->rgw_md_log_max_shards;
-  period = store->svc.mdlog->read_oldest_log_period();
+  period = store->svc()->mdlog->read_oldest_log_period();
   http_ret = period.get_error();
 }
 
@@ -174,7 +174,7 @@ void RGWOp_MDLog_ShardInfo::execute() {
 
   if (period.empty()) {
     ldout(s->cct, 5) << "Missing period id trying to use current" << dendl;
-    period = store->svc.zone->get_current_period_id();
+    period = store->svc()->zone->get_current_period_id();
 
     if (period.empty()) {
       ldout(s->cct, 5) << "Missing period id" << dendl;
@@ -182,7 +182,7 @@ void RGWOp_MDLog_ShardInfo::execute() {
       return;
     }
   }
-  RGWMetadataLog meta_log{s->cct, store->svc.zone, store->svc.cls, period};
+  RGWMetadataLog meta_log{s->cct, store->svc()->zone, store->svc()->cls, period};
 
   http_ret = meta_log.get_info(shard_id, &info);
 }
@@ -233,7 +233,7 @@ void RGWOp_MDLog_Delete::execute() {
 
   if (period.empty()) {
     ldout(s->cct, 5) << "Missing period id trying to use current" << dendl;
-    period = store->svc.zone->get_current_period_id();
+    period = store->svc()->zone->get_current_period_id();
 
     if (period.empty()) {
       ldout(s->cct, 5) << "Missing period id" << dendl;
@@ -241,7 +241,7 @@ void RGWOp_MDLog_Delete::execute() {
       return;
     }
   }
-  RGWMetadataLog meta_log{s->cct, store->svc.zone, store->svc.cls, period};
+  RGWMetadataLog meta_log{s->cct, store->svc()->zone, store->svc()->cls, period};
 
   http_ret = meta_log.trim(shard_id, ut_st, ut_et, start_marker, end_marker);
 }
@@ -260,7 +260,7 @@ void RGWOp_MDLog_Lock::execute() {
 
   if (period.empty()) {
     ldout(s->cct, 5) << "Missing period id trying to use current" << dendl;
-    period = store->svc.zone->get_current_period_id();
+    period = store->svc()->zone->get_current_period_id();
   }
 
   if (period.empty() ||
@@ -281,7 +281,7 @@ void RGWOp_MDLog_Lock::execute() {
     return;
   }
 
-  RGWMetadataLog meta_log{s->cct, store->svc.zone, store->svc.cls, period};
+  RGWMetadataLog meta_log{s->cct, store->svc()->zone, store->svc()->cls, period};
   unsigned dur;
   dur = (unsigned)strict_strtol(duration_str.c_str(), 10, &err);
   if (!err.empty() || dur <= 0) {
@@ -308,7 +308,7 @@ void RGWOp_MDLog_Unlock::execute() {
 
   if (period.empty()) {
     ldout(s->cct, 5) << "Missing period id trying to use current" << dendl;
-    period = store->svc.zone->get_current_period_id();
+    period = store->svc()->zone->get_current_period_id();
   }
 
   if (period.empty() ||
@@ -328,7 +328,7 @@ void RGWOp_MDLog_Unlock::execute() {
     return;
   }
 
-  RGWMetadataLog meta_log{s->cct, store->svc.zone, store->svc.cls, period};
+  RGWMetadataLog meta_log{s->cct, store->svc()->zone, store->svc()->cls, period};
   http_ret = meta_log.unlock(shard_id, zone_id, locker_id);
 }
 
@@ -369,7 +369,7 @@ void RGWOp_MDLog_Notify::execute() {
     }
   }
 
-  store->wakeup_meta_sync_shards(updated_shards);
+  store->getRados()->wakeup_meta_sync_shards(updated_shards);
 
   http_ret = 0;
 }
@@ -398,13 +398,13 @@ void RGWOp_BILog_List::execute() {
 
   if (!bucket_instance.empty()) {
     rgw_bucket b(rgw_bucket_key(tenant_name, bn, bucket_instance));
-    http_ret = store->get_bucket_instance_info(*s->sysobj_ctx, b, bucket_info, NULL, NULL, s->yield);
+    http_ret = store->getRados()->get_bucket_instance_info(*s->sysobj_ctx, b, bucket_info, NULL, NULL, s->yield);
     if (http_ret < 0) {
       ldpp_dout(s, 5) << "could not get bucket instance info for bucket instance id=" << bucket_instance << dendl;
       return;
     }
   } else { /* !bucket_name.empty() */
-    http_ret = store->get_bucket_info(*s->sysobj_ctx, tenant_name, bucket_name, bucket_info, NULL, s->yield, NULL);
+    http_ret = store->getRados()->get_bucket_info(*s->sysobj_ctx, tenant_name, bucket_name, bucket_info, NULL, s->yield, NULL);
     if (http_ret < 0) {
       ldpp_dout(s, 5) << "could not get bucket info for bucket=" << bucket_name << dendl;
       return;
@@ -422,7 +422,7 @@ void RGWOp_BILog_List::execute() {
   send_response();
   do {
     list<rgw_bi_log_entry> entries;
-    int ret = store->svc.bilog_rados->log_list(bucket_info, shard_id,
+    int ret = store->svc()->bilog_rados->log_list(bucket_info, shard_id,
                                                marker, max_entries - count, 
                                                entries, &truncated);
     if (ret < 0) {
@@ -491,20 +491,20 @@ void RGWOp_BILog_Info::execute() {
 
   if (!bucket_instance.empty()) {
     rgw_bucket b(rgw_bucket_key(tenant_name, bn, bucket_instance));
-    http_ret = store->get_bucket_instance_info(*s->sysobj_ctx, b, bucket_info, NULL, NULL, s->yield);
+    http_ret = store->getRados()->get_bucket_instance_info(*s->sysobj_ctx, b, bucket_info, NULL, NULL, s->yield);
     if (http_ret < 0) {
       ldpp_dout(s, 5) << "could not get bucket instance info for bucket instance id=" << bucket_instance << dendl;
       return;
     }
   } else { /* !bucket_name.empty() */
-    http_ret = store->get_bucket_info(*s->sysobj_ctx, tenant_name, bucket_name, bucket_info, NULL, s->yield, NULL);
+    http_ret = store->getRados()->get_bucket_info(*s->sysobj_ctx, tenant_name, bucket_name, bucket_info, NULL, s->yield, NULL);
     if (http_ret < 0) {
       ldpp_dout(s, 5) << "could not get bucket info for bucket=" << bucket_name << dendl;
       return;
     }
   }
   map<RGWObjCategory, RGWStorageStats> stats;
-  int ret =  store->get_bucket_stats(bucket_info, shard_id, &bucket_ver, &master_ver, stats, &max_marker, &syncstopped);
+  int ret =  store->getRados()->get_bucket_stats(bucket_info, shard_id, &bucket_ver, &master_ver, stats, &max_marker, &syncstopped);
   if (ret < 0 && ret != -ENOENT) {
     http_ret = ret;
     return;
@@ -555,19 +555,19 @@ void RGWOp_BILog_Delete::execute() {
 
   if (!bucket_instance.empty()) {
     rgw_bucket b(rgw_bucket_key(tenant_name, bn, bucket_instance));
-    http_ret = store->get_bucket_instance_info(*s->sysobj_ctx, b, bucket_info, NULL, NULL, s->yield);
+    http_ret = store->getRados()->get_bucket_instance_info(*s->sysobj_ctx, b, bucket_info, NULL, NULL, s->yield);
     if (http_ret < 0) {
       ldpp_dout(s, 5) << "could not get bucket instance info for bucket instance id=" << bucket_instance << dendl;
       return;
     }
   } else { /* !bucket_name.empty() */
-    http_ret = store->get_bucket_info(*s->sysobj_ctx, tenant_name, bucket_name, bucket_info, NULL, s->yield, NULL);
+    http_ret = store->getRados()->get_bucket_info(*s->sysobj_ctx, tenant_name, bucket_name, bucket_info, NULL, s->yield, NULL);
     if (http_ret < 0) {
       ldpp_dout(s, 5) << "could not get bucket info for bucket=" << bucket_name << dendl;
       return;
     }
   }
-  http_ret = store->svc.bilog_rados->log_trim(bucket_info, shard_id, start_marker, end_marker);
+  http_ret = store->svc()->bilog_rados->log_trim(bucket_info, shard_id, start_marker, end_marker);
   if (http_ret < 0) {
     ldpp_dout(s, 5) << "ERROR: trim_bi_log_entries() " << dendl;
   }
@@ -619,7 +619,7 @@ void RGWOp_DATALog_List::execute() {
 
   // Note that last_marker is updated to be the marker of the last
   // entry listed
-  http_ret = store->svc.datalog_rados->list_entries(shard_id, ut_st, ut_et,
+  http_ret = store->svc()->datalog_rados->list_entries(shard_id, ut_st, ut_et,
                                                     max_entries, entries, marker,
                                                     &last_marker, &truncated);
 }
@@ -681,7 +681,7 @@ void RGWOp_DATALog_ShardInfo::execute() {
     return;
   }
 
-  http_ret = store->svc.datalog_rados->get_info(shard_id, &info);
+  http_ret = store->svc()->datalog_rados->get_info(shard_id, &info);
 }
 
 void RGWOp_DATALog_ShardInfo::send_response() {
@@ -735,7 +735,7 @@ void RGWOp_DATALog_Notify::execute() {
     }
   }
 
-  store->wakeup_data_sync_shards(source_zone, updated_shards);
+  store->getRados()->wakeup_data_sync_shards(source_zone, updated_shards);
 
   http_ret = 0;
 }
@@ -774,7 +774,7 @@ void RGWOp_DATALog_Delete::execute() {
     return;
   }
 
-  http_ret = store->svc.datalog_rados->trim_entries(shard_id, ut_st, ut_et, start_marker, end_marker);
+  http_ret = store->svc()->datalog_rados->trim_entries(shard_id, ut_st, ut_et, start_marker, end_marker);
 }
 
 // not in header to avoid pulling in rgw_sync.h
@@ -794,7 +794,7 @@ public:
 
 void RGWOp_MDLog_Status::execute()
 {
-  auto sync = store->get_meta_sync_manager();
+  auto sync = store->getRados()->get_meta_sync_manager();
   if (sync == nullptr) {
     ldout(s->cct, 1) << "no sync manager" << dendl;
     http_ret = -ENOENT;
@@ -850,9 +850,9 @@ void RGWOp_BILog_Status::execute()
   }
 
   // read the bucket instance info for num_shards
-  auto ctx = store->svc.sysobj->init_obj_ctx();
+  auto ctx = store->svc()->sysobj->init_obj_ctx();
   RGWBucketInfo info;
-  http_ret = store->get_bucket_instance_info(ctx, bucket, info, nullptr, nullptr, s->yield);
+  http_ret = store->getRados()->get_bucket_instance_info(ctx, bucket, info, nullptr, nullptr, s->yield);
   if (http_ret < 0) {
     ldpp_dout(s, 4) << "failed to read bucket info: " << cpp_strerror(http_ret) << dendl;
     return;
@@ -890,7 +890,7 @@ public:
 void RGWOp_DATALog_Status::execute()
 {
   const auto source_zone = s->info.args.get("source-zone");
-  auto sync = store->get_data_sync_manager(source_zone);
+  auto sync = store->getRados()->get_data_sync_manager(source_zone);
   if (sync == nullptr) {
     ldout(s->cct, 1) << "no sync manager for source-zone " << source_zone << dendl;
     http_ret = -ENOENT;
index 5cc0660fee0085d1222f73d944cb25d9a14d0fee..28b76c04bbe9831fb2c28206f3ca8ceaccda4f60 100644 (file)
@@ -53,7 +53,7 @@ void RGWOp_Metadata_Get::execute() {
 
   frame_metadata_key(s, metadata_key);
 
-  auto meta_mgr = store->ctl.meta.mgr;
+  auto meta_mgr = store->ctl()->meta.mgr;
 
   /* Get keys */
   http_ret = meta_mgr->get(metadata_key, s->formatter, s->yield);
@@ -122,7 +122,7 @@ void RGWOp_Metadata_List::execute() {
      marker = "3:bf885d8f:root::sorry_janefonda_665:head";
   */
 
-  http_ret = store->ctl.meta.mgr->list_keys_init(metadata_key, marker, &handle);
+  http_ret = store->ctl()->meta.mgr->list_keys_init(metadata_key, marker, &handle);
   if (http_ret < 0) {
     dout(5) << "ERROR: can't get key: " << cpp_strerror(http_ret) << dendl;
     return;
@@ -137,7 +137,7 @@ void RGWOp_Metadata_List::execute() {
 
   s->formatter->open_array_section("keys");
 
-  auto meta_mgr = store->ctl.meta.mgr;
+  auto meta_mgr = store->ctl()->meta.mgr;
 
   uint64_t left;
   do {
@@ -262,7 +262,7 @@ void RGWOp_Metadata_Put::execute() {
     }
   }
 
-  http_ret = store->ctl.meta.mgr->put(metadata_key, bl, s->yield, sync_type,
+  http_ret = store->ctl()->meta.mgr->put(metadata_key, bl, s->yield, sync_type,
                                  &ondisk_version);
   if (http_ret < 0) {
     dout(5) << "ERROR: can't put key: " << cpp_strerror(http_ret) << dendl;
@@ -293,7 +293,7 @@ void RGWOp_Metadata_Delete::execute() {
   string metadata_key;
 
   frame_metadata_key(s, metadata_key);
-  http_ret = store->ctl.meta.mgr->remove(metadata_key, s->yield);
+  http_ret = store->ctl()->meta.mgr->remove(metadata_key, s->yield);
   if (http_ret < 0) {
     dout(5) << "ERROR: can't remove key: " << cpp_strerror(http_ret) << dendl;
     return;
index c1135128c82d29118ecf2498396e9cb2b82ed6e5..a0ca56383fe56257d23ac3077681728cffc060df 100644 (file)
@@ -72,7 +72,7 @@ void RGWOp_Period_Get::execute()
   period.set_id(period_id);
   period.set_epoch(epoch);
 
-  http_ret = period.init(store->ctx(), store->svc.sysobj, realm_id, realm_name);
+  http_ret = period.init(store->ctx(), store->svc()->sysobj, realm_id, realm_name);
   if (http_ret < 0)
     ldout(store->ctx(), 5) << "failed to read period" << dendl;
 }
@@ -95,7 +95,7 @@ void RGWOp_Period_Post::execute()
   auto cct = store->ctx();
 
   // initialize the period without reading from rados
-  period.init(cct, store->svc.sysobj, false);
+  period.init(cct, store->svc()->sysobj, false);
 
   // decode the period from input
   const auto max_size = cct->_conf->rgw_max_put_param_size;
@@ -107,9 +107,9 @@ void RGWOp_Period_Post::execute()
   }
 
   // require period.realm_id to match our realm
-  if (period.get_realm() != store->svc.zone->get_realm().get_id()) {
+  if (period.get_realm() != store->svc()->zone->get_realm().get_id()) {
     error_stream << "period with realm id " << period.get_realm()
-        << " doesn't match current realm " << store->svc.zone->get_realm().get_id() << std::endl;
+        << " doesn't match current realm " << store->svc()->zone->get_realm().get_id() << std::endl;
     http_ret = -EINVAL;
     return;
   }
@@ -118,7 +118,7 @@ void RGWOp_Period_Post::execute()
   // period that we haven't restarted with yet. we also don't want to modify
   // the objects in use by RGWRados
   RGWRealm realm(period.get_realm());
-  http_ret = realm.init(cct, store->svc.sysobj);
+  http_ret = realm.init(cct, store->svc()->sysobj);
   if (http_ret < 0) {
     lderr(cct) << "failed to read current realm: "
         << cpp_strerror(-http_ret) << dendl;
@@ -126,7 +126,7 @@ void RGWOp_Period_Post::execute()
   }
 
   RGWPeriod current_period;
-  http_ret = current_period.init(cct, store->svc.sysobj, realm.get_id());
+  http_ret = current_period.init(cct, store->svc()->sysobj, realm.get_id());
   if (http_ret < 0) {
     lderr(cct) << "failed to read current period: "
         << cpp_strerror(-http_ret) << dendl;
@@ -143,7 +143,7 @@ void RGWOp_Period_Post::execute()
   }
 
   // if it's not period commit, nobody is allowed to push to the master zone
-  if (period.get_master_zone() == store->svc.zone->get_zone_params().get_id()) {
+  if (period.get_master_zone() == store->svc()->zone->get_zone_params().get_id()) {
     ldout(cct, 10) << "master zone rejecting period id="
         << period.get_id() << " epoch=" << period.get_epoch() << dendl;
     http_ret = -EINVAL; // XXX: error code
@@ -170,7 +170,7 @@ void RGWOp_Period_Post::execute()
     return;
   }
 
-  auto period_history = store->svc.mdlog->get_period_history();
+  auto period_history = store->svc()->mdlog->get_period_history();
 
   // decide whether we can set_current_period() or set_latest_epoch()
   if (period.get_id() != current_period.get_id()) {
@@ -278,7 +278,7 @@ void RGWOp_Realm_Get::execute()
 
   // read realm
   realm.reset(new RGWRealm(id, name));
-  http_ret = realm->init(g_ceph_context, store->svc.sysobj);
+  http_ret = realm->init(g_ceph_context, store->svc()->sysobj);
   if (http_ret < 0)
     lderr(store->ctx()) << "failed to read realm id=" << id
         << " name=" << name << dendl;
@@ -319,10 +319,10 @@ void RGWOp_Realm_List::execute()
 {
   {
     // read default realm
-    RGWRealm realm(store->ctx(), store->svc.sysobj);
+    RGWRealm realm(store->ctx(), store->svc()->sysobj);
     [[maybe_unused]] int ret = realm.read_default_id(default_id);
   }
-  http_ret = store->svc.zone->list_realms(realms);
+  http_ret = store->svc()->zone->list_realms(realms);
   if (http_ret < 0)
     lderr(store->ctx()) << "failed to list realms" << dendl;
 }
index 32522e7ab34c682b5eb54589826a99bab110883f..60d029e13569b42be525fb5c5d2f5cba9c5870a9 100644 (file)
@@ -25,7 +25,7 @@ int RGWRestRole::verify_permission()
   }
 
   string role_name = s->info.args.get("RoleName");
-  RGWRole role(s->cct, store->pctl, role_name, s->user->user_id.tenant);
+  RGWRole role(s->cct, store->getRados()->pctl, role_name, s->user->user_id.tenant);
   if (op_ret = role.get(); op_ret < 0) {
     if (op_ret == -ENOENT) {
       op_ret = -ERR_NO_ROLE_FOUND;
@@ -129,7 +129,7 @@ void RGWCreateRole::execute()
   if (op_ret < 0) {
     return;
   }
-  RGWRole role(s->cct, store->pctl, role_name, role_path, trust_policy,
+  RGWRole role(s->cct, store->getRados()->pctl, role_name, role_path, trust_policy,
                 s->user->user_id.tenant, max_session_duration);
   op_ret = role.create(true);
 
@@ -228,7 +228,7 @@ void RGWGetRole::execute()
   if (op_ret < 0) {
     return;
   }
-  RGWRole role(s->cct, store->pctl, role_name, s->user->user_id.tenant);
+  RGWRole role(s->cct, store->getRados()->pctl, role_name, s->user->user_id.tenant);
   op_ret = role.get();
 
   if (op_ret == -ENOENT) {
@@ -321,7 +321,7 @@ void RGWListRoles::execute()
     return;
   }
   vector<RGWRole> result;
-  op_ret = RGWRole::get_roles_by_path_prefix(store, s->cct, path_prefix, s->user->user_id.tenant, result);
+  op_ret = RGWRole::get_roles_by_path_prefix(store->getRados(), s->cct, path_prefix, s->user->user_id.tenant, result);
 
   if (op_ret == 0) {
     s->formatter->open_array_section("ListRolesResponse");
index d91d23cd7468b4e56944a04484f65b9a3c189d96..c0b2987b82a47b4307042d34b2c7dd7dfeb8bc4d 100644 (file)
@@ -574,7 +574,7 @@ int RGWPutBucketTags_ObjStore_S3::get_params()
   ldout(s->cct, 20) << "Read " << obj_tags.count() << "tags" << dendl;
 
   // forward bucket tags requests to meta master zone
-  if (!store->svc.zone->is_meta_master()) {
+  if (!store->svc()->zone->is_meta_master()) {
     /* only need to keep this data around if we're not meta master */
     in_data = std::move(data);
   }
@@ -1230,7 +1230,7 @@ void RGWGetBucketLocation_ObjStore_S3::send_response()
   RGWZoneGroup zonegroup;
   string api_name;
 
-  int ret = store->svc.zone->get_zonegroup(s->bucket_info.zonegroup, zonegroup);
+  int ret = store->svc()->zone->get_zonegroup(s->bucket_info.zonegroup, zonegroup);
   if (ret >= 0) {
     api_name = zonegroup.api_name;
   } else  {
@@ -1329,7 +1329,7 @@ int RGWSetBucketVersioning_ObjStore_S3::get_params()
     return -EINVAL;
   }
 
-  if (!store->svc.zone->is_meta_master()) {
+  if (!store->svc()->zone->is_meta_master()) {
     /* only need to keep this data around if we're not meta master */
     in_data.append(data);
   }
@@ -1482,7 +1482,7 @@ void RGWStatBucket_ObjStore_S3::send_response()
   dump_start(s);
 }
 
-static int create_s3_policy(struct req_state *s, RGWRados *store,
+static int create_s3_policy(struct req_state *s, rgw::sal::RGWRadosStore *store,
                            RGWAccessControlPolicy_S3& s3policy,
                            ACLOwner& owner)
 {
@@ -1490,7 +1490,7 @@ static int create_s3_policy(struct req_state *s, RGWRados *store,
     if (!s->canned_acl.empty())
       return -ERR_INVALID_REQUEST;
 
-    return s3policy.create_from_headers(store->ctl.user, s->info.env, owner);
+    return s3policy.create_from_headers(store->ctl()->user, s->info.env, owner);
   }
 
   return s3policy.create_canned(owner, s->bucket_owner, s->canned_acl);
@@ -1731,7 +1731,7 @@ int RGWPutObj_ObjStore_S3::get_params()
          return ret;
        }
     }
-    ret = store->get_bucket_info(*s->sysobj_ctx,
+    ret = store->getRados()->get_bucket_info(*s->sysobj_ctx,
                                  copy_source_tenant_name,
                                  copy_source_bucket_name,
                                  copy_source_bucket_info,
@@ -1922,9 +1922,9 @@ void RGWPutObj_ObjStore_S3::send_response()
   end_header(s, this);
 }
 
-static inline int get_obj_attrs(RGWRados *store, struct req_state *s, rgw_obj& obj, map<string, bufferlist>& attrs)
+static inline int get_obj_attrs(rgw::sal::RGWRadosStore *store, struct req_state *s, rgw_obj& obj, map<string, bufferlist>& attrs)
 {
-  RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
+  RGWRados::Object op_target(store->getRados(), s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
   RGWRados::Object::Read read_op(&op_target);
 
   read_op.params.attrs = &attrs;
@@ -2704,7 +2704,7 @@ int RGWPutACLs_ObjStore_S3::get_params()
   return ret;
 }
 
-int RGWPutACLs_ObjStore_S3::get_policy_from_state(RGWRados *store,
+int RGWPutACLs_ObjStore_S3::get_policy_from_state(rgw::sal::RGWRadosStore *store,
                                                  struct req_state *s,
                                                  stringstream& ss)
 {
@@ -2870,7 +2870,7 @@ int RGWPutCORS_ObjStore_S3::get_params()
   }
 
   // forward bucket cors requests to meta master zone
-  if (!store->svc.zone->is_meta_master()) {
+  if (!store->svc()->zone->is_meta_master()) {
     /* only need to keep this data around if we're not meta master */
     in_data.append(data);
   }
@@ -3321,7 +3321,7 @@ void RGWGetObjLayout_ObjStore_S3::send_response()
   f.open_array_section("data_location");
   for (auto miter = manifest->obj_begin(); miter != manifest->obj_end(); ++miter) {
     f.open_object_section("obj");
-    rgw_raw_obj raw_loc = miter.get_location().get_raw_obj(store);
+    rgw_raw_obj raw_loc = miter.get_location().get_raw_obj(store->getRados());
     uint64_t ofs = miter.get_ofs();
     uint64_t left = manifest->get_obj_size() - ofs;
     ::encode_json("ofs", miter.get_ofs(), &f);
@@ -3875,7 +3875,7 @@ int RGWHandler_REST_S3::init_from_header(struct req_state* s,
   return 0;
 }
 
-static int verify_mfa(RGWRados *store, RGWUserInfo *user, const string& mfa_str, bool *verified, const DoutPrefixProvider *dpp)
+static int verify_mfa(rgw::sal::RGWRadosStore *store, RGWUserInfo *user, const string& mfa_str, bool *verified, const DoutPrefixProvider *dpp)
 {
   vector<string> params;
   get_str_vec(mfa_str, " ", params);
@@ -3894,7 +3894,7 @@ static int verify_mfa(RGWRados *store, RGWUserInfo *user, const string& mfa_str,
     return -EACCES;
   }
 
-  int ret = store->svc.cls->mfa.check_mfa(user->user_id, serial, pin, null_yield);
+  int ret = store->svc()->cls->mfa.check_mfa(user->user_id, serial, pin, null_yield);
   if (ret < 0) {
     ldpp_dout(dpp, 20) << "NOTICE: failed to check MFA, serial=" << serial << dendl;
     return -EACCES;
@@ -3948,7 +3948,7 @@ int RGWHandler_REST_S3::postauth_init()
   return 0;
 }
 
-int RGWHandler_REST_S3::init(RGWRados *store, struct req_state *s,
+int RGWHandler_REST_S3::init(rgw::sal::RGWRadosStore *store, struct req_state *s,
                              rgw::io::BasicClient *cio)
 {
   int ret;
@@ -4059,7 +4059,7 @@ discover_aws_flavour(const req_info& info)
  * it tries AWS v4 before AWS v2
  */
 int RGW_Auth_S3::authorize(const DoutPrefixProvider *dpp,
-                           RGWRados* const store,
+                           rgw::sal::RGWRadosStore* const store,
                            const rgw::auth::StrategyRegistry& auth_registry,
                            struct req_state* const s)
 {
@@ -4081,7 +4081,7 @@ int RGW_Auth_S3::authorize(const DoutPrefixProvider *dpp,
   return ret;
 }
 
-int RGWHandler_Auth_S3::init(RGWRados *store, struct req_state *state,
+int RGWHandler_Auth_S3::init(rgw::sal::RGWRadosStore *store, struct req_state *state,
                              rgw::io::BasicClient *cio)
 {
   int ret = RGWHandler_REST_S3::init_from_header(state, RGW_FORMAT_JSON,
@@ -4145,7 +4145,7 @@ bool RGWHandler_REST_S3Website::web_dir() const {
   obj_ctx.set_prefetch_data(obj);
 
   RGWObjState* state = nullptr;
-  if (store->get_obj_state(&obj_ctx, s->bucket_info, obj, &state, false, s->yield) < 0) {
+  if (store->getRados()->get_obj_state(&obj_ctx, s->bucket_info, obj, &state, false, s->yield) < 0) {
     return false;
   }
   if (! state->exists) {
@@ -4154,7 +4154,7 @@ bool RGWHandler_REST_S3Website::web_dir() const {
   return state->exists;
 }
 
-int RGWHandler_REST_S3Website::init(RGWRados *store, req_state *s,
+int RGWHandler_REST_S3Website::init(rgw::sal::RGWRadosStore *store, req_state *s,
                                     rgw::io::BasicClient* cio)
 {
   // save the original object name before retarget() replaces it with the
@@ -4172,7 +4172,7 @@ int RGWHandler_REST_S3Website::retarget(RGWOp* op, RGWOp** new_op) {
   if (!(s->prot_flags & RGW_REST_WEBSITE))
     return 0;
 
-  int ret = store->get_bucket_info(*s->sysobj_ctx, s->bucket_tenant,
+  int ret = store->getRados()->get_bucket_info(*s->sysobj_ctx, s->bucket_tenant,
                                  s->bucket_name, s->bucket_info, NULL,
                                  s->yield, &s->bucket_attrs);
   if (ret < 0) {
index fc6894e0316ba001968b41e9f25358522bd6e778..788b286ff9feefa5774c72138c7588910a940c82 100644 (file)
@@ -28,6 +28,7 @@
 #include "rgw_auth.h"
 #include "rgw_auth_filters.h"
 #include "rgw_sts.h"
+#include "rgw_sal.h"
 
 struct rgw_http_error {
   int http_ret;
@@ -326,7 +327,7 @@ public:
   RGWPutACLs_ObjStore_S3() {}
   ~RGWPutACLs_ObjStore_S3() override {}
 
-  int get_policy_from_state(RGWRados *store, struct req_state *s, stringstream& ss) override;
+  int get_policy_from_state(rgw::sal::RGWRadosStore *store, struct req_state *s, stringstream& ss) override;
   void send_response() override;
   int get_params() override;
 };
@@ -547,7 +548,7 @@ public:
 class RGW_Auth_S3 {
 public:
   static int authorize(const DoutPrefixProvider *dpp,
-                       RGWRados *store,
+                       rgw::sal::RGWRadosStore *store,
                        const rgw::auth::StrategyRegistry& auth_registry,
                        struct req_state *s);
 };
@@ -567,7 +568,7 @@ public:
   static int validate_bucket_name(const string& bucket);
   static int validate_object_name(const string& bucket);
 
-  int init(RGWRados *store,
+  int init(rgw::sal::RGWRadosStore *store,
            struct req_state *s,
            rgw::io::BasicClient *cio) override;
   int authorize(const DoutPrefixProvider *dpp) override {
@@ -589,7 +590,7 @@ public:
     }
   ~RGWHandler_REST_S3() override = default;
 
-  int init(RGWRados *store,
+  int init(rgw::sal::RGWRadosStore *store,
            struct req_state *s,
            rgw::io::BasicClient *cio) override;
   int authorize(const DoutPrefixProvider *dpp) override;
index 209ef964615a33d0d4e4a1199e94283823641576..236cd3c0e24b916c5741a30e8d03db1709d0ead1 100644 (file)
@@ -41,7 +41,7 @@ public:
   using RGWHandler_REST_S3::RGWHandler_REST_S3;
   ~RGWHandler_REST_S3Website() override = default;
 
-  int init(RGWRados *store, req_state *s, rgw::io::BasicClient* cio) override;
+  int init(rgw::sal::RGWRadosStore *store, req_state *s, rgw::io::BasicClient* cio) override;
   int error_handler(int err_no, string *error_content) override;
 };
 
index f508c70dbe4c53e80ce7dcb2b7e6520bc297a9b7..18e4ce62b147b31d1e6b48750993088b6a2c8f92 100644 (file)
@@ -335,7 +335,7 @@ void RGWSTSAssumeRole::execute()
 }
 
 int RGW_Auth_STS::authorize(const DoutPrefixProvider *dpp,
-                            RGWRados *store,
+                            rgw::sal::RGWRadosStore *store,
                             const rgw::auth::StrategyRegistry& auth_registry,
                             struct req_state *s)
 {
@@ -385,7 +385,7 @@ RGWOp *RGWHandler_REST_STS::op_post()
   return nullptr;
 }
 
-int RGWHandler_REST_STS::init(RGWRados *store,
+int RGWHandler_REST_STS::init(rgw::sal::RGWRadosStore *store,
                               struct req_state *s,
                               rgw::io::BasicClient *cio)
 {
index 042ec8f1a9fbe0eb86d2858df65a1b75158b79c2..73a3c7cd94737e4912451fb677aa35bf9403bdd3 100644 (file)
@@ -158,7 +158,7 @@ public:
 class RGW_Auth_STS {
 public:
   static int authorize(const DoutPrefixProvider *dpp,
-                       RGWRados *store,
+                       rgw::sal::RGWRadosStore *store,
                        const rgw::auth::StrategyRegistry& auth_registry,
                        struct req_state *s);
 };
@@ -178,7 +178,7 @@ public:
       post_body(post_body) {}
   ~RGWHandler_REST_STS() override = default;
 
-  int init(RGWRados *store,
+  int init(rgw::sal::RGWRadosStore *store,
            struct req_state *s,
            rgw::io::BasicClient *cio) override;
   int authorize(const DoutPrefixProvider* dpp) override;
index c1f6ad7f30ca67b512034947e0932705f815224a..ff8d036646e6fd5fd9ae1b3705ca25214e5eca79 100644 (file)
@@ -545,7 +545,7 @@ static void dump_container_metadata(struct req_state *s,
 void RGWStatAccount_ObjStore_SWIFT::execute()
 {
   RGWStatAccount_ObjStore::execute();
-  op_ret = store->ctl.user->get_attrs_by_uid(s->user->user_id, &attrs, s->yield);
+  op_ret = store->ctl()->user->get_attrs_by_uid(s->user->user_id, &attrs, s->yield);
 }
 
 void RGWStatAccount_ObjStore_SWIFT::send_response()
@@ -584,7 +584,7 @@ void RGWStatBucket_ObjStore_SWIFT::send_response()
 }
 
 static int get_swift_container_settings(req_state * const s,
-                                        RGWRados * const store,
+                                        rgw::sal::RGWRadosStore * const store,
                                         RGWAccessControlPolicy * const policy,
                                         bool * const has_policy,
                                         uint32_t * rw_mask,
@@ -598,7 +598,7 @@ static int get_swift_container_settings(req_state * const s,
 
   if (read_list || write_list) {
     RGWAccessControlPolicy_SWIFT swift_policy(s->cct);
-    const auto r = swift_policy.create(store->ctl.user,
+    const auto r = swift_policy.create(store->ctl()->user,
                                        s->user->user_id,
                                        s->user->display_name,
                                        read_list,
@@ -711,7 +711,7 @@ int RGWCreateBucket_ObjStore_SWIFT::get_params()
     policy.create_default(s->user->user_id, s->user->display_name);
   }
 
-  location_constraint = store->svc.zone->get_zonegroup().api_name;
+  location_constraint = store->svc()->zone->get_zonegroup().api_name;
   get_rmattrs_from_headers(s, CONT_PUT_ATTR_PREFIX,
                            CONT_REMOVE_ATTR_PREFIX, rmattr_names);
   placement_rule.init(s->info.env->get("HTTP_X_STORAGE_POLICY", ""), s->info.storage_class);
@@ -849,8 +849,8 @@ int RGWPutObj_ObjStore_SWIFT::update_slo_segment_size(rgw_slo_entry& entry) {
   if (bucket_name.compare(s->bucket.name) != 0) {
     RGWBucketInfo bucket_info;
     map<string, bufferlist> bucket_attrs;
-    auto obj_ctx = store->svc.sysobj->init_obj_ctx();
-    r = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
+    auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
+    r = store->getRados()->get_bucket_info(obj_ctx, s->user->user_id.tenant,
                               bucket_name, bucket_info, nullptr,
                               s->yield, &bucket_attrs);
     if (r < 0) {
@@ -871,7 +871,7 @@ int RGWPutObj_ObjStore_SWIFT::update_slo_segment_size(rgw_slo_entry& entry) {
   RGWObjectCtx obj_ctx(store);
   obj_ctx.set_atomic(slo_seg);
 
-  RGWRados::Object op_target(store, s->bucket_info, obj_ctx, slo_seg);
+  RGWRados::Object op_target(store->getRados(), s->bucket_info, obj_ctx, slo_seg);
   RGWRados::Object::Read read_op(&op_target);
 
   bool compressed;
@@ -1052,7 +1052,7 @@ void RGWPutObj_ObjStore_SWIFT::send_response()
 }
 
 static int get_swift_account_settings(req_state * const s,
-                                      RGWRados * const store,
+                                      rgw::sal::RGWRadosStore * const store,
                                       RGWAccessControlPolicy_SWIFTAcct * const policy,
                                       bool * const has_policy)
 {
@@ -1061,7 +1061,7 @@ static int get_swift_account_settings(req_state * const s,
   const char * const acl_attr = s->info.env->get("HTTP_X_ACCOUNT_ACCESS_CONTROL");
   if (acl_attr) {
     RGWAccessControlPolicy_SWIFTAcct swift_acct_policy(s->cct);
-    const bool r = swift_acct_policy.create(store->ctl.user,
+    const bool r = swift_acct_policy.create(store->ctl()->user,
                                      s->user->user_id,
                                      s->user->display_name,
                                      string(acl_attr));
@@ -1859,7 +1859,7 @@ void RGWInfo_ObjStore_SWIFT::execute()
       s->formatter->close_section();
     }
     else {
-      pair.second.list_data(*(s->formatter), s->cct->_conf, *store);
+      pair.second.list_data(*(s->formatter), s->cct->_conf, *store->getRados());
     }
   }
 
@@ -1977,7 +1977,7 @@ bool RGWInfo_ObjStore_SWIFT::is_expired(const std::string& expires, const DoutPr
 }
 
 
-void RGWFormPost::init(RGWRados* const store,
+void RGWFormPost::init(rgw::sal::RGWRadosStore* const store,
                        req_state* const s,
                        RGWHandler* const dialect_handler)
 {
@@ -2078,7 +2078,7 @@ void RGWFormPost::get_owner_info(const req_state* const s,
    * now. It will be initialized in RGWHandler_REST_SWIFT::postauth_init(). */
   const string& bucket_name = s->init_state.url_bucket;
 
-  auto user_ctl = store->ctl.user;
+  auto user_ctl = store->ctl()->user;
 
   /* TempURL in Formpost only requires that bucket name is specified. */
   if (bucket_name.empty()) {
@@ -2110,7 +2110,7 @@ void RGWFormPost::get_owner_info(const req_state* const s,
 
   /* Need to get user info of bucket owner. */
   RGWBucketInfo bucket_info;
-  int ret = store->get_bucket_info(*s->sysobj_ctx,
+  int ret = store->getRados()->get_bucket_info(*s->sysobj_ctx,
                                    bucket_tenant, bucket_name,
                                    bucket_info, nullptr, s->yield);
   if (ret < 0) {
@@ -2353,7 +2353,7 @@ int RGWSwiftWebsiteHandler::serve_errordoc(const int http_ret,
 
   class RGWGetErrorPage : public RGWGetObj_ObjStore_SWIFT {
   public:
-    RGWGetErrorPage(RGWRados* const store,
+    RGWGetErrorPage(rgw::sal::RGWRadosStore* const store,
                     RGWHandler_REST* const handler,
                     req_state* const s,
                     const int http_ret) {
@@ -2560,7 +2560,7 @@ bool RGWSwiftWebsiteHandler::is_web_dir() const
   obj_ctx.set_prefetch_data(obj);
 
   RGWObjState* state = nullptr;
-  if (store->get_obj_state(&obj_ctx, s->bucket_info, obj, &state, false, s->yield) < 0) {
+  if (store->getRados()->get_obj_state(&obj_ctx, s->bucket_info, obj, &state, false, s->yield) < 0) {
     return false;
   }
 
@@ -2590,7 +2590,7 @@ bool RGWSwiftWebsiteHandler::is_index_present(const std::string& index)
   obj_ctx.set_prefetch_data(obj);
 
   RGWObjState* state = nullptr;
-  if (store->get_obj_state(&obj_ctx, s->bucket_info, obj, &state, false, s->yield) < 0) {
+  if (store->getRados()->get_obj_state(&obj_ctx, s->bucket_info, obj, &state, false, s->yield) < 0) {
     return false;
   }
 
@@ -3017,7 +3017,7 @@ int RGWHandler_REST_SWIFT::init_from_header(struct req_state* const s,
   return 0;
 }
 
-int RGWHandler_REST_SWIFT::init(RGWRados* store, struct req_state* s,
+int RGWHandler_REST_SWIFT::init(rgw::sal::RGWRadosStore* store, struct req_state* s,
                                rgw::io::BasicClient *cio)
 {
   struct req_init_state *t = &s->init_state;
index 79d5bd003a3d7106d82483cd73afb7026fbccbe8..ce7592fb730924bf44bd60a3a4a3db14cc53eefc 100644 (file)
@@ -273,7 +273,7 @@ public:
   RGWFormPost() = default;
   ~RGWFormPost() = default;
 
-  void init(RGWRados* store,
+  void init(rgw::sal::RGWRadosStore* store,
             req_state* s,
             RGWHandler* dialect_handler) override;
 
@@ -344,7 +344,7 @@ public:
 
 
 class RGWSwiftWebsiteHandler {
-  RGWRados* const store;
+  rgw::sal::RGWRadosStore* const store;
   req_state* const s;
   RGWHandler_REST* const handler;
 
@@ -359,7 +359,7 @@ class RGWSwiftWebsiteHandler {
   RGWOp* get_ws_index_op();
   RGWOp* get_ws_listing_op();
 public:
-  RGWSwiftWebsiteHandler(RGWRados* const store,
+  RGWSwiftWebsiteHandler(rgw::sal::RGWRadosStore* const store,
                          req_state* const s,
                          RGWHandler_REST* const handler)
     : store(store),
@@ -394,7 +394,7 @@ public:
 
   int validate_bucket_name(const string& bucket);
 
-  int init(RGWRados *store, struct req_state *s, rgw::io::BasicClient *cio) override;
+  int init(rgw::sal::RGWRadosStore *store, struct req_state *s, rgw::io::BasicClient *cio) override;
   int authorize(const DoutPrefixProvider *dpp) override;
   int postauth_init() override;
 
@@ -442,7 +442,7 @@ public:
     return website_handler->retarget_bucket(op, new_op);
   }
 
-  int init(RGWRados* const store,
+  int init(rgw::sal::RGWRadosStore* const store,
            struct req_state* const s,
            rgw::io::BasicClient* const cio) override {
     website_handler = boost::in_place<RGWSwiftWebsiteHandler>(store, s, this);
@@ -480,7 +480,7 @@ public:
     return website_handler->retarget_object(op, new_op);
   }
 
-  int init(RGWRados* const store,
+  int init(rgw::sal::RGWRadosStore* const store,
            struct req_state* const s,
            rgw::io::BasicClient* const cio) override {
     website_handler = boost::in_place<RGWSwiftWebsiteHandler>(store, s, this);
@@ -533,7 +533,7 @@ public:
     return new RGWGetCrossDomainPolicy_ObjStore_SWIFT();
   }
 
-  int init(RGWRados* const store,
+  int init(rgw::sal::RGWRadosStore* const store,
            struct req_state* const state,
            rgw::io::BasicClient* const cio) override {
     state->dialect = "swift";
@@ -589,7 +589,7 @@ public:
     return new RGWGetHealthCheck_ObjStore_SWIFT();
   }
 
-  int init(RGWRados* const store,
+  int init(rgw::sal::RGWRadosStore* const store,
            struct req_state* const state,
            rgw::io::BasicClient* const cio) override {
     state->dialect = "swift";
@@ -645,7 +645,7 @@ public:
     return new RGWInfo_ObjStore_SWIFT();
   }
 
-  int init(RGWRados* const store,
+  int init(rgw::sal::RGWRadosStore* const store,
            struct req_state* const state,
            rgw::io::BasicClient* const cio) override {
     state->dialect = "swift";
index 23b7a971068e149e473396d4342276d298e12b61..44274328d78f2329508cd721ea162c3e2f18049b 100644 (file)
@@ -52,7 +52,7 @@ void RGWOp_Usage_Get::execute() {
     }
   }
 
-  http_ret = RGWUsage::show(store, uid, bucket_name, start, end, show_entries, show_summary, &categories, flusher);
+  http_ret = RGWUsage::show(store->getRados(), uid, bucket_name, start, end, show_entries, show_summary, &categories, flusher);
 }
 
 class RGWOp_Usage_Delete : public RGWRESTOp {
@@ -92,7 +92,7 @@ void RGWOp_Usage_Delete::execute() {
     }
   }
 
-  http_ret = RGWUsage::trim(store, uid, bucket_name, start, end);
+  http_ret = RGWUsage::trim(store->getRados(), uid, bucket_name, start, end);
 }
 
 RGWOp *RGWHandler_Usage::op_get()
index 5d553454033201b0942ac805cc62cf680e4e5ed7..f8b4264ca497e7b154f4cb64fae0aa9cf3c70e61 100644 (file)
@@ -118,14 +118,14 @@ void RGWPutUserPolicy::execute()
 
   RGWUserInfo info;
   rgw_user user_id(user_name);
-  op_ret = store->ctl.user->get_info_by_uid(user_id, &info, s->yield);
+  op_ret = store->ctl()->user->get_info_by_uid(user_id, &info, s->yield);
   if (op_ret < 0) {
     op_ret = -ERR_NO_SUCH_ENTITY;
     return;
   }
 
   map<string, bufferlist> uattrs;
-  op_ret = store->ctl.user->get_attrs_by_uid(user_id, &uattrs, s->yield);
+  op_ret = store->ctl()->user->get_attrs_by_uid(user_id, &uattrs, s->yield);
   if (op_ret == -ENOENT) {
     op_ret = -ERR_NO_SUCH_ENTITY;
     return;
@@ -144,7 +144,7 @@ void RGWPutUserPolicy::execute()
     uattrs[RGW_ATTR_USER_POLICY] = in_bl;
 
     RGWObjVersionTracker objv_tracker;
-    op_ret = store->ctl.user->store_info(info, s->yield,
+    op_ret = store->ctl()->user->store_info(info, s->yield,
                                          RGWUserCtl::PutParams()
                                          .set_objv_tracker(&objv_tracker)
                                          .set_attrs(&uattrs));
@@ -193,7 +193,7 @@ void RGWGetUserPolicy::execute()
 
   rgw_user user_id(user_name);
   map<string, bufferlist> uattrs;
-  op_ret = store->ctl.user->get_attrs_by_uid(user_id, &uattrs, s->yield);
+  op_ret = store->ctl()->user->get_attrs_by_uid(user_id, &uattrs, s->yield);
   if (op_ret == -ENOENT) {
     ldout(s->cct, 0) << "ERROR: attrs not found for user" << user_name << dendl;
     op_ret = -ERR_NO_SUCH_ENTITY;
@@ -257,7 +257,7 @@ void RGWListUserPolicies::execute()
 
   rgw_user user_id(user_name);
   map<string, bufferlist> uattrs;
-  op_ret = store->ctl.user->get_attrs_by_uid(user_id, &uattrs, s->yield);
+  op_ret = store->ctl()->user->get_attrs_by_uid(user_id, &uattrs, s->yield);
   if (op_ret == -ENOENT) {
     ldout(s->cct, 0) << "ERROR: attrs not found for user" << user_name << dendl;
     op_ret = -ERR_NO_SUCH_ENTITY;
@@ -320,7 +320,7 @@ void RGWDeleteUserPolicy::execute()
   RGWUserInfo info;
   map<string, bufferlist> uattrs;
   rgw_user user_id(user_name);
-  op_ret = store->ctl.user->get_info_by_uid(user_id, &info, s->yield,
+  op_ret = store->ctl()->user->get_info_by_uid(user_id, &info, s->yield,
                                             RGWUserCtl::GetParams()
                                             .set_attrs(&uattrs));
   if (op_ret < 0) {
@@ -340,7 +340,7 @@ void RGWDeleteUserPolicy::execute()
       uattrs[RGW_ATTR_USER_POLICY] = in_bl;
 
       RGWObjVersionTracker objv_tracker;
-      op_ret = store->ctl.user->store_info(info, s->yield,
+      op_ret = store->ctl()->user->store_info(info, s->yield,
                                            RGWUserCtl::PutParams()
                                            .set_old_info(&info)
                                            .set_objv_tracker(&objv_tracker)
diff --git a/src/rgw/rgw_sal.cc b/src/rgw/rgw_sal.cc
new file mode 100644 (file)
index 0000000..e6bf1b4
--- /dev/null
@@ -0,0 +1,119 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright (C) 2019 Red Hat, Inc.
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#include <errno.h>
+#include <stdlib.h>
+#include <system_error>
+#include <unistd.h>
+#include <sstream>
+
+#include "common/errno.h"
+
+#include "rgw_sal.h"
+
+#define dout_subsys ceph_subsys_rgw
+
+namespace rgw::sal {
+
+RGWObject *RGWRadosBucket::create_object(const rgw_obj_key &key)
+{
+  if (!object) {
+    object = new RGWRadosObject(store, key);
+  }
+
+  return object;
+}
+
+RGWUser *RGWRadosStore::get_user(const rgw_user &u)
+{
+  if (!user) {
+    user = new RGWRadosUser(this, u);
+  }
+
+  return user;
+}
+
+RGWSalBucket *RGWRadosStore::create_bucket(RGWUser &u, const cls_user_bucket &b)
+{
+  if (!bucket) {
+    bucket = new RGWRadosBucket(this, u, b);
+  }
+
+  return bucket;
+}
+
+void RGWRadosStore::finalize(void) {
+  if (rados)
+    rados->finalize();
+}
+
+} // namespace rgw::sal
+
+rgw::sal::RGWRadosStore *RGWStoreManager::init_storage_provider(CephContext *cct, bool use_gc_thread, bool use_lc_thread, bool quota_threads, bool run_sync_thread, bool run_reshard_thread, bool use_cache)
+{
+  RGWRados *rados = new RGWRados;
+  rgw::sal::RGWRadosStore *store = new rgw::sal::RGWRadosStore();
+
+  store->setRados(rados);
+  rados->set_store(store);
+
+  if ((*rados).set_use_cache(use_cache)
+              .set_run_gc_thread(use_gc_thread)
+              .set_run_lc_thread(use_lc_thread)
+              .set_run_quota_threads(quota_threads)
+              .set_run_sync_thread(run_sync_thread)
+              .set_run_reshard_thread(run_reshard_thread)
+              .initialize(cct) < 0) {
+    delete store;
+    return NULL;
+  }
+
+  return store;
+}
+
+rgw::sal::RGWRadosStore *RGWStoreManager::init_raw_storage_provider(CephContext *cct)
+{
+  RGWRados *rados = new RGWRados;
+  rgw::sal::RGWRadosStore *store = new rgw::sal::RGWRadosStore();
+
+  store->setRados(rados);
+  rados->set_store(store);
+
+  rados->set_context(cct);
+
+  int ret = rados->init_svc(true);
+  if (ret < 0) {
+    ldout(cct, 0) << "ERROR: failed to init services (ret=" << cpp_strerror(-ret) << ")" << dendl;
+    delete store;
+    return nullptr;
+  }
+
+  if (rados->init_rados() < 0) {
+    delete store;
+    return nullptr;
+  }
+
+  return store;
+}
+
+void RGWStoreManager::close_storage(rgw::sal::RGWRadosStore *store)
+{
+  if (!store)
+    return;
+
+  store->finalize();
+
+  delete store;
+}
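
rgw_sal.cc above owns store construction and teardown. As a usage sketch, a frontend obtains and releases the wrapped store through RGWStoreManager; the flag values below are placeholders, and cct is assumed to be an already-initialized CephContext*:

// Sketch only: flag values are placeholders, not taken from this commit.
rgw::sal::RGWRadosStore *store =
    RGWStoreManager::get_storage(cct,
                                 true,   /* use_gc_thread */
                                 true,   /* use_lc_thread */
                                 true,   /* quota_threads */
                                 true,   /* run_sync_thread */
                                 true);  /* run_reshard_thread */
if (!store) {
  // init_storage_provider() returns NULL when RGWRados::initialize() fails
  return -EIO;
}

// ... hand the store to the REST handlers' init() and serve requests ...

RGWStoreManager::close_storage(store);  // finalize()s the store and deletes it
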
diff --git a/src/rgw/rgw_sal.h b/src/rgw/rgw_sal.h
new file mode 100644 (file)
index 0000000..c472ca0
--- /dev/null
@@ -0,0 +1,230 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab ft=cpp
+
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright (C) 2019 Red Hat, Inc.
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#pragma once
+
+#include "rgw_rados.h"
+#include "rgw_user.h"
+
+namespace rgw { namespace sal {
+
+#define RGW_SAL_VERSION 1
+
+class RGWUser;
+class RGWSalBucket;
+class RGWObject;
+
+typedef std::vector<RGWSalBucket> RGWBucketList;
+typedef std::map<string, string> RGWAttrs;
+
+class RGWStore {
+  public:
+    RGWStore() {}
+    virtual ~RGWStore() = default;
+
+    virtual RGWUser* get_user(const rgw_user &u) = 0;
+    virtual RGWSalBucket* get_bucket(RGWUser &u, const cls_user_bucket &b) = 0;
+    virtual RGWSalBucket* create_bucket(RGWUser &u, const cls_user_bucket &b) = 0;
+    virtual RGWBucketList* list_buckets(void) = 0;
+
+    virtual void finalize(void)=0;
+
+    virtual CephContext *ctx(void)=0;
+};
+
+class RGWUser {
+  protected:
+    rgw_user user;
+
+  public:
+    RGWUser() : user() {}
+    RGWUser(const rgw_user &_u) : user(_u) {}
+    virtual ~RGWUser() = default;
+
+    virtual RGWBucketList* list_buckets(void) = 0;
+};
+
+class RGWSalBucket {
+  protected:
+    cls_user_bucket ub;
+
+  public:
+    RGWSalBucket() : ub() {}
+    RGWSalBucket(const cls_user_bucket &_b) : ub(_b) {}
+    virtual ~RGWSalBucket() = default;
+
+    virtual RGWObject* get_object(const rgw_obj_key &key) = 0;
+    virtual RGWBucketList* list(void) = 0;
+    virtual RGWObject* create_object(const rgw_obj_key &key /* Attributes */) = 0;
+    virtual RGWAttrs& get_attrs(void) = 0;
+    virtual int set_attrs(RGWAttrs &attrs) = 0;
+    virtual int delete_bucket(void) = 0;
+    virtual RGWAccessControlPolicy& get_acl(void) = 0;
+    virtual int set_acl(const RGWAccessControlPolicy &acl) = 0;
+};
+
+class RGWObject {
+  protected:
+    rgw_obj_key key;
+
+  public:
+    RGWObject() : key() {}
+    RGWObject(const rgw_obj_key &_k) : key(_k) {}
+    virtual ~RGWObject() = default;
+
+    virtual int read(off_t offset, off_t length, std::iostream &stream) = 0;
+    virtual int write(off_t offset, off_t length, std::iostream &stream) = 0;
+    virtual RGWAttrs& get_attrs(void) = 0;
+    virtual int set_attrs(RGWAttrs &attrs) = 0;
+    virtual int delete_object(void) = 0;
+    virtual RGWAccessControlPolicy& get_acl(void) = 0;
+    virtual int set_acl(const RGWAccessControlPolicy &acl) = 0;
+};
+
+
+class RGWRadosStore;
+
+class RGWRadosUser : public RGWUser {
+  private:
+    RGWRadosStore *store;
+
+  public:
+    RGWRadosUser(RGWRadosStore *_st, const rgw_user &_u) : RGWUser(_u), store(_st) { }
+    RGWRadosUser() {}
+
+    RGWBucketList* list_buckets(void) { return new RGWBucketList(); }
+};
+
+class RGWRadosObject : public RGWObject {
+  private:
+    RGWRadosStore *store;
+    RGWAttrs attrs;
+    RGWAccessControlPolicy acls;
+
+  public:
+    RGWRadosObject()
+      : attrs(),
+        acls() {
+    }
+
+    RGWRadosObject(RGWRadosStore *_st, const rgw_obj_key &_k)
+      : RGWObject(_k),
+       store(_st),
+       attrs(),
+        acls() {
+    }
+
+    int read(off_t offset, off_t length, std::iostream &stream) { return length; }
+    int write(off_t offset, off_t length, std::iostream &stream) { return length; }
+    RGWAttrs& get_attrs(void) { return attrs; }
+    int set_attrs(RGWAttrs &a) { attrs = a; return 0; }
+    int delete_object(void) { return 0; }
+    RGWAccessControlPolicy& get_acl(void) { return acls; }
+    int set_acl(const RGWAccessControlPolicy &acl) { acls = acl; return 0; }
+};
+
+class RGWRadosBucket : public RGWSalBucket {
+  private:
+    RGWRadosStore *store;
+    RGWRadosObject *object;
+    RGWAttrs attrs;
+    RGWAccessControlPolicy acls;
+    RGWRadosUser user;
+
+  public:
+    RGWRadosBucket()
+      : object(nullptr),
+        attrs(),
+        acls(),
+       user() {
+    }
+
+    RGWRadosBucket(RGWRadosStore *_st, RGWUser &_u, const cls_user_bucket &_b)
+      : RGWSalBucket(_b),
+       store(_st),
+       object(nullptr),
+        attrs(),
+        acls(),
+       user(dynamic_cast<RGWRadosUser&>(_u)) {
+    }
+
+    RGWObject* get_object(const rgw_obj_key &key) { return object; }
+    RGWBucketList* list(void) { return new RGWBucketList(); }
+    RGWObject* create_object(const rgw_obj_key &key /* Attributes */) override;
+    RGWAttrs& get_attrs(void) { return attrs; }
+    int set_attrs(RGWAttrs &a) { attrs = a; return 0; }
+    int delete_bucket(void) { return 0; }
+    RGWAccessControlPolicy& get_acl(void) { return acls; }
+    int set_acl(const RGWAccessControlPolicy &acl) { acls = acl; return 0; }
+};
+
+class RGWRadosStore : public RGWStore {
+  private:
+    RGWRados *rados;
+    RGWRadosUser *user;
+    RGWRadosBucket *bucket;
+
+  public:
+    RGWRadosStore()
+      : rados(nullptr),
+        user(nullptr),
+        bucket(nullptr) {
+      }
+    ~RGWRadosStore() {
+       if (bucket)
+           delete bucket;
+       if (user)
+           delete user;
+       if (rados)
+           delete rados;
+    }
+
+    virtual RGWUser* get_user(const rgw_user &u);
+    virtual RGWSalBucket* get_bucket(RGWUser &u, const cls_user_bucket &b) { return bucket; }
+    virtual RGWSalBucket* create_bucket(RGWUser &u, const cls_user_bucket &b);
+    virtual RGWBucketList* list_buckets(void) { return new RGWBucketList(); }
+
+    void setRados(RGWRados * st) { rados = st; }
+    RGWRados *getRados(void) { return rados; }
+
+    RGWServices *svc() { return &rados->svc; }
+    RGWCtl *ctl() { return &rados->ctl; }
+
+    void finalize(void) override;
+
+    virtual CephContext *ctx(void) { return rados->ctx(); }
+};
+
+} } // namespace rgw::sal
+
+
+class RGWStoreManager {
+public:
+  RGWStoreManager() {}
+  static rgw::sal::RGWRadosStore *get_storage(CephContext *cct, bool use_gc_thread, bool use_lc_thread, bool quota_threads,
+                              bool run_sync_thread, bool run_reshard_thread, bool use_cache = true) {
+    rgw::sal::RGWRadosStore *store = init_storage_provider(cct, use_gc_thread, use_lc_thread,
+       quota_threads, run_sync_thread, run_reshard_thread, use_cache);
+    return store;
+  }
+  static rgw::sal::RGWRadosStore *get_raw_storage(CephContext *cct) {
+    rgw::sal::RGWRadosStore *rados = init_raw_storage_provider(cct);
+    return rados;
+  }
+  static rgw::sal::RGWRadosStore *init_storage_provider(CephContext *cct, bool use_gc_thread, bool use_lc_thread, bool quota_threads, bool run_sync_thread, bool run_reshard_thread, bool use_metadata_cache);
+  static rgw::sal::RGWRadosStore *init_raw_storage_provider(CephContext *cct);
+  static void close_storage(rgw::sal::RGWRadosStore *store);
+
+};
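
rgw_sal.h declares the abstract RGWStore, RGWUser, RGWSalBucket and RGWObject interfaces, with the RGWRados* classes as their first implementation. As a sketch of how another backend could plug in behind the same surface (the RGWDemoStore class and its trivial bodies are hypothetical and not part of this change):

// Hypothetical non-RADOS backend: implements only the RGWStore surface
// declared above; every body here is a stub for illustration.
class RGWDemoStore : public rgw::sal::RGWStore {
  CephContext *cct;
public:
  explicit RGWDemoStore(CephContext *_cct) : cct(_cct) {}

  rgw::sal::RGWUser *get_user(const rgw_user &u) override { return nullptr; }
  rgw::sal::RGWSalBucket *get_bucket(rgw::sal::RGWUser &u,
                                     const cls_user_bucket &b) override {
    return nullptr;
  }
  rgw::sal::RGWSalBucket *create_bucket(rgw::sal::RGWUser &u,
                                        const cls_user_bucket &b) override {
    return nullptr;
  }
  rgw::sal::RGWBucketList *list_buckets(void) override {
    return new rgw::sal::RGWBucketList();
  }

  void finalize(void) override {}
  CephContext *ctx(void) override { return cct; }
};

Callers that depend only on the RGWStore interface would then work unchanged against such a backend.
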
index 3624eacb3b775189d99b838a42469cdef5414ba8..fe8bcca1720d0474a3a7b0beaa14beaee1e8d94a 100644 (file)
@@ -26,6 +26,7 @@
 #include "rgw_user.h"
 #include "rgw_iam_policy.h"
 #include "rgw_sts.h"
+#include "rgw_sal.h"
 
 #define dout_subsys ceph_subsys_rgw
 
@@ -140,7 +141,7 @@ void AssumedRoleUser::dump(Formatter *f) const
 }
 
 int AssumedRoleUser::generateAssumedRoleUser(CephContext* cct,
-                                              RGWRados *store,
+                                              rgw::sal::RGWRadosStore *store,
                                               const string& roleId,
                                               const rgw::ARN& roleArn,
                                               const string& roleSessionName)
@@ -251,7 +252,7 @@ std::tuple<int, RGWRole> STSService::getRoleInfo(const string& arn)
   if (auto r_arn = rgw::ARN::parse(arn); r_arn) {
     auto pos = r_arn->resource.find_last_of('/');
     string roleName = r_arn->resource.substr(pos + 1);
-    RGWRole role(cct, store->pctl, roleName, r_arn->account);
+    RGWRole role(cct, store->getRados()->pctl, roleName, r_arn->account);
     if (int ret = role.get(); ret < 0) {
       if (ret == -ENOENT) {
         ret = -ERR_NO_ROLE_FOUND;
@@ -270,14 +271,14 @@ int STSService::storeARN(string& arn)
 {
   int ret = 0;
   RGWUserInfo info;
-  if (ret = rgw_get_user_info_by_uid(store->ctl.user, user_id, info); ret < 0) {
+  if (ret = rgw_get_user_info_by_uid(store->ctl()->user, user_id, info); ret < 0) {
     return -ERR_NO_SUCH_ENTITY;
   }
 
   info.assumed_role_arn = arn;
 
   RGWObjVersionTracker objv_tracker;
-  if (ret = rgw_store_user_info(store->ctl.user, info, &info, &objv_tracker, real_time(),
+  if (ret = rgw_store_user_info(store->ctl()->user, info, &info, &objv_tracker, real_time(),
           false); ret < 0) {
     return -ERR_INTERNAL_ERROR;
   }
index 68187ba19960b505f9725ddb258c476f29d3ab82..fe517110a5c87efb99c4b2d3cdda0fdcb9ed8ba7 100644 (file)
@@ -107,7 +107,7 @@ class AssumedRoleUser {
   string assumeRoleId;
 public:
   int generateAssumedRoleUser( CephContext* cct,
-                                RGWRados *store,
+                                rgw::sal::RGWRadosStore *store,
                                 const string& roleId,
                                 const rgw::ARN& roleArn,
                                 const string& roleSessionName);
@@ -203,14 +203,14 @@ using AssumeRoleWithWebIdentityResponse = struct AssumeRoleWithWebIdentityRespon
 
 class STSService {
   CephContext* cct;
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   rgw_user user_id;
   RGWRole role;
   rgw::auth::Identity* identity;
   int storeARN(string& arn);
 public:
   STSService() = default;
-  STSService(CephContext* cct, RGWRados *store, rgw_user user_id, rgw::auth::Identity* identity) : cct(cct), store(store), user_id(user_id), identity(identity) {}
+  STSService(CephContext* cct, rgw::sal::RGWRadosStore *store, rgw_user user_id, rgw::auth::Identity* identity) : cct(cct), store(store), user_id(user_id), identity(identity) {}
   std::tuple<int, RGWRole> getRoleInfo(const string& arn);
   AssumeRoleResponse assumeRole(AssumeRoleRequest& req);
   GetSessionTokenResponse getSessionToken(GetSessionTokenRequest& req);
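
With these hunks the STS service is constructed from the wrapped store rather than from RGWRados. A minimal construction sketch, where store, s, identity and role_arn stand in for values the calling op already has in scope:

// Sketch: wiring STSService to the SAL store; all variables here are
// placeholders for what an RGWOp would already hold.
STSService sts(s->cct, store, s->user->user_id, identity);

auto [ret, role] = sts.getRoleInfo(role_arn);
if (ret < 0) {
  // -ERR_NO_ROLE_FOUND is returned when the ARN does not resolve to a role
}
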
index 02d1a513026634f91d8eacbc2c485d3c9adfb37e..f07fd6516f096df308c3430ac90579c44d47a844 100644 (file)
@@ -684,7 +684,7 @@ void RGW_SWIFT_Auth_Get::execute()
 
   user_str = user;
 
-  if ((ret = store->ctl.user->get_info_by_swift(user_str, &info, s->yield)) < 0)
+  if ((ret = store->ctl()->user->get_info_by_swift(user_str, &info, s->yield)) < 0)
   {
     ret = -EACCES;
     goto done;
@@ -738,7 +738,7 @@ done:
   end_header(s);
 }
 
-int RGWHandler_SWIFT_Auth::init(RGWRados *store, struct req_state *state,
+int RGWHandler_SWIFT_Auth::init(rgw::sal::RGWRadosStore *store, struct req_state *state,
                                rgw::io::BasicClient *cio)
 {
   state->dialect = "swift-auth";
index b22ea932fe188a13d8e838e7567935daca1fd362..fbf53f6217f218199a22496abee52cad902b3dc1 100644 (file)
@@ -9,6 +9,7 @@
 #include "rgw_auth.h"
 #include "rgw_auth_keystone.h"
 #include "rgw_auth_filters.h"
+#include "rgw_sal.h"
 
 #define RGW_SWIFT_TOKEN_EXPIRATION (15 * 60)
 
@@ -298,7 +299,7 @@ public:
   ~RGWHandler_SWIFT_Auth() override {}
   RGWOp *op_get() override;
 
-  int init(RGWRados *store, struct req_state *state, rgw::io::BasicClient *cio) override;
+  int init(rgw::sal::RGWRadosStore *store, struct req_state *state, rgw::io::BasicClient *cio) override;
   int authorize(const DoutPrefixProvider *dpp) override;
   int postauth_init() override { return 0; }
   int read_permissions(RGWOp *op) override { return 0; }
index 523e51e07605e141690863d36f7ea4d9b94ec0fa..8544e5c00a91a2d5bedbadef31568e5ea23873da 100644 (file)
@@ -42,7 +42,7 @@ static string mdlog_sync_status_oid = "mdlog.sync-status";
 static string mdlog_sync_status_shard_prefix = "mdlog.sync-status.shard";
 static string mdlog_sync_full_sync_index_prefix = "meta.full-sync.index";
 
-RGWSyncErrorLogger::RGWSyncErrorLogger(RGWRados *_store, const string &oid_prefix, int _num_shards) : store(_store), num_shards(_num_shards) {
+RGWSyncErrorLogger::RGWSyncErrorLogger(rgw::sal::RGWRadosStore *_store, const string &oid_prefix, int _num_shards) : store(_store), num_shards(_num_shards) {
   for (int i = 0; i < num_shards; i++) {
     oids.push_back(get_shard_oid(oid_prefix, i));
   }
@@ -59,7 +59,7 @@ RGWCoroutine *RGWSyncErrorLogger::log_error_cr(const string& source_zone, const
   rgw_sync_error_info info(source_zone, error_code, message);
   bufferlist bl;
   encode(info, bl);
-  store->svc.cls->timelog.prepare_entry(entry, real_clock::now(), section, name, bl);
+  store->svc()->cls->timelog.prepare_entry(entry, real_clock::now(), section, name, bl);
 
   uint32_t shard_id = ++counter % num_shards;
 
@@ -258,7 +258,7 @@ int RGWRemoteMetaLog::read_log_info(rgw_mdlog_info *log_info)
 
 int RGWRemoteMetaLog::read_master_log_shards_info(const string &master_period, map<int, RGWMetadataLogInfo> *shards_info)
 {
-  if (store->svc.zone->is_meta_master()) {
+  if (store->svc()->zone->is_meta_master()) {
     return 0;
   }
 
@@ -273,7 +273,7 @@ int RGWRemoteMetaLog::read_master_log_shards_info(const string &master_period, m
 
 int RGWRemoteMetaLog::read_master_log_shards_next(const string& period, map<int, string> shard_markers, map<int, rgw_mdlog_shard_data> *result)
 {
-  if (store->svc.zone->is_meta_master()) {
+  if (store->svc()->zone->is_meta_master()) {
     return 0;
   }
 
@@ -282,7 +282,7 @@ int RGWRemoteMetaLog::read_master_log_shards_next(const string& period, map<int,
 
 int RGWRemoteMetaLog::init()
 {
-  conn = store->svc.zone->get_master_conn();
+  conn = store->svc()->zone->get_master_conn();
 
   int ret = http_manager.start();
   if (ret < 0) {
@@ -309,18 +309,18 @@ void RGWRemoteMetaLog::finish()
 
 int RGWMetaSyncStatusManager::init()
 {
-  if (store->svc.zone->is_meta_master()) {
+  if (store->svc()->zone->is_meta_master()) {
     return 0;
   }
 
-  if (!store->svc.zone->get_master_conn()) {
+  if (!store->svc()->zone->get_master_conn()) {
     lderr(store->ctx()) << "no REST connection to master zone" << dendl;
     return -EIO;
   }
 
-  int r = rgw_init_ioctx(store->get_rados_handle(), store->svc.zone->get_zone_params().log_pool, ioctx, true);
+  int r = rgw_init_ioctx(store->getRados()->get_rados_handle(), store->svc()->zone->get_zone_params().log_pool, ioctx, true);
   if (r < 0) {
-    lderr(store->ctx()) << "ERROR: failed to open log pool (" << store->svc.zone->get_zone_params().log_pool << " ret=" << r << dendl;
+    lderr(store->ctx()) << "ERROR: failed to open log pool (" << store->svc()->zone->get_zone_params().log_pool << " ret=" << r << dendl;
     return r;
   }
 
@@ -342,7 +342,7 @@ int RGWMetaSyncStatusManager::init()
   int num_shards = sync_status.sync_info.num_shards;
 
   for (int i = 0; i < num_shards; i++) {
-    shard_objs[i] = rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, sync_env.shard_obj_name(i));
+    shard_objs[i] = rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env.shard_obj_name(i));
   }
 
   std::unique_lock wl{ts_to_shard_lock};
@@ -366,7 +366,7 @@ std::ostream&  RGWMetaSyncStatusManager::gen_prefix(std::ostream& out) const
   return out << "meta sync: ";
 }
 
-void RGWMetaSyncEnv::init(const DoutPrefixProvider *_dpp, CephContext *_cct, RGWRados *_store, RGWRESTConn *_conn,
+void RGWMetaSyncEnv::init(const DoutPrefixProvider *_dpp, CephContext *_cct, rgw::sal::RGWRadosStore *_store, RGWRESTConn *_conn,
                           RGWAsyncRadosProcessor *_async_rados, RGWHTTPManager *_http_manager,
                           RGWSyncErrorLogger *_error_logger, RGWSyncTraceManager *_sync_tracer) {
   dpp = _dpp;
@@ -393,7 +393,7 @@ string RGWMetaSyncEnv::shard_obj_name(int shard_id)
 }
 
 class RGWAsyncReadMDLogEntries : public RGWAsyncRadosRequest {
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   RGWMetadataLog *mdlog;
   int shard_id;
   string *marker;
@@ -417,7 +417,7 @@ protected:
     return ret;
   }
 public:
-  RGWAsyncReadMDLogEntries(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWRados *_store,
+  RGWAsyncReadMDLogEntries(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *_store,
                            RGWMetadataLog* mdlog, int _shard_id,
                            string* _marker, int _max_entries,
                            list<cls_log_entry> *_entries, bool *_truncated)
@@ -487,7 +487,7 @@ public:
 
   int operate() override {
     auto store = env->store;
-    RGWRESTConn *conn = store->svc.zone->get_master_conn();
+    RGWRESTConn *conn = store->svc()->zone->get_master_conn();
     reenter(this) {
       yield {
        char buf[16];
@@ -655,9 +655,9 @@ public:
         set_status("acquiring sync lock");
        uint32_t lock_duration = cct->_conf->rgw_sync_lease_period;
         string lock_name = "sync_lock";
-        RGWRados *store = sync_env->store;
+       rgw::sal::RGWRadosStore *store = sync_env->store;
         lease_cr.reset(new RGWContinuousLeaseCR(sync_env->async_rados, store,
-                                                rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, sync_env->status_oid()),
+                                                rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env->status_oid()),
                                                 lock_name, lock_duration, this));
         lease_stack.reset(spawn(lease_cr.get(), false));
       }
@@ -672,9 +672,9 @@ public:
       }
       yield {
         set_status("writing sync status");
-        RGWRados *store = sync_env->store;
-        call(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(sync_env->async_rados, store->svc.sysobj,
-                                                           rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, sync_env->status_oid()),
+       rgw::sal::RGWRadosStore *store = sync_env->store;
+        call(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(sync_env->async_rados, store->svc()->sysobj,
+                                                           rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env->status_oid()),
                                                            status));
       }
 
@@ -702,19 +702,19 @@ public:
           RGWMetadataLogInfo& info = shards_info[i];
          marker.next_step_marker = info.marker;
          marker.timestamp = info.last_update;
-          RGWRados *store = sync_env->store;
+         rgw::sal::RGWRadosStore *store = sync_env->store;
           spawn(new RGWSimpleRadosWriteCR<rgw_meta_sync_marker>(sync_env->async_rados,
-                                                                store->svc.sysobj,
-                                                                rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, sync_env->shard_obj_name(i)),
+                                                                store->svc()->sysobj,
+                                                                rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env->shard_obj_name(i)),
                                                                 marker), true);
         }
       }
       yield {
         set_status("changing sync state: build full sync maps");
        status.state = rgw_meta_sync_info::StateBuildingFullSyncMaps;
-        RGWRados *store = sync_env->store;
-        call(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(sync_env->async_rados, store->svc.sysobj,
-                                                           rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, sync_env->status_oid()),
+       rgw::sal::RGWRadosStore *store = sync_env->store;
+        call(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(sync_env->async_rados, store->svc()->sysobj,
+                                                           rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env->status_oid()),
                                                            status));
       }
       set_status("drop lock lease");
@@ -755,9 +755,9 @@ bool RGWReadSyncStatusMarkersCR::spawn_next()
     return false;
   }
   using CR = RGWSimpleRadosReadCR<rgw_meta_sync_marker>;
-  rgw_raw_obj obj{env->store->svc.zone->get_zone_params().log_pool,
+  rgw_raw_obj obj{env->store->svc()->zone->get_zone_params().log_pool,
                   env->shard_obj_name(shard_id)};
-  spawn(new CR(env->async_rados, env->store->svc.sysobj, obj, &markers[shard_id]), false);
+  spawn(new CR(env->async_rados, env->store->svc()->sysobj, obj, &markers[shard_id]), false);
   shard_id++;
   return true;
 }
@@ -781,9 +781,9 @@ int RGWReadSyncStatusCoroutine::operate()
     using ReadInfoCR = RGWSimpleRadosReadCR<rgw_meta_sync_info>;
     yield {
       bool empty_on_enoent = false; // fail on ENOENT
-      rgw_raw_obj obj{sync_env->store->svc.zone->get_zone_params().log_pool,
+      rgw_raw_obj obj{sync_env->store->svc()->zone->get_zone_params().log_pool,
                       sync_env->status_oid()};
-      call(new ReadInfoCR(sync_env->async_rados, sync_env->store->svc.sysobj, obj,
+      call(new ReadInfoCR(sync_env->async_rados, sync_env->store->svc()->sysobj, obj,
                           &sync_status->sync_info, empty_on_enoent));
     }
     if (retcode < 0) {
@@ -892,7 +892,7 @@ public:
         string lock_name = "sync_lock";
         lease_cr.reset(new RGWContinuousLeaseCR(sync_env->async_rados,
                                                 sync_env->store,
-                                                rgw_raw_obj(sync_env->store->svc.zone->get_zone_params().log_pool, sync_env->status_oid()),
+                                                rgw_raw_obj(sync_env->store->svc()->zone->get_zone_params().log_pool, sync_env->status_oid()),
                                                 lock_name, lock_duration, this));
         lease_stack.reset(spawn(lease_cr.get(), false));
       }
@@ -906,7 +906,7 @@ public:
         yield;
       }
       entries_index.reset(new RGWShardedOmapCRManager(sync_env->async_rados, sync_env->store, this, num_shards,
-                                                      sync_env->store->svc.zone->get_zone_params().log_pool,
+                                                      sync_env->store->svc()->zone->get_zone_params().log_pool,
                                                       mdlog_sync_full_sync_index_prefix));
       yield {
        call(new RGWReadRESTResourceCR<list<string> >(cct, conn, sync_env->http_manager,
@@ -956,8 +956,8 @@ public:
             tn->log(20, SSTR("list metadata: section=" << *sections_iter << " key=" << *iter));
             string s = *sections_iter + ":" + *iter;
             int shard_id;
-            RGWRados *store = sync_env->store;
-            int ret = store->ctl.meta.mgr->get_shard_id(*sections_iter, *iter, &shard_id);
+           rgw::sal::RGWRadosStore *store = sync_env->store;
+            int ret = store->ctl()->meta.mgr->get_shard_id(*sections_iter, *iter, &shard_id);
             if (ret < 0) {
               tn->log(0, SSTR("ERROR: could not determine shard id for " << *sections_iter << ":" << *iter));
               ret_status = ret;
@@ -979,8 +979,8 @@ public:
           int shard_id = (int)iter->first;
           rgw_meta_sync_marker& marker = iter->second;
           marker.total_entries = entries_index->get_total_entries(shard_id);
-          spawn(new RGWSimpleRadosWriteCR<rgw_meta_sync_marker>(sync_env->async_rados, sync_env->store->svc.sysobj,
-                                                                rgw_raw_obj(sync_env->store->svc.zone->get_zone_params().log_pool, sync_env->shard_obj_name(shard_id)),
+          spawn(new RGWSimpleRadosWriteCR<rgw_meta_sync_marker>(sync_env->async_rados, sync_env->store->svc()->sysobj,
+                                                                rgw_raw_obj(sync_env->store->svc()->zone->get_zone_params().log_pool, sync_env->shard_obj_name(shard_id)),
                                                                 marker), true);
         }
       }
@@ -1082,12 +1082,12 @@ public:
 };
 
 class RGWAsyncMetaStoreEntry : public RGWAsyncRadosRequest {
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   string raw_key;
   bufferlist bl;
 protected:
   int _send_request() override {
-    int ret = store->ctl.meta.mgr->put(raw_key, bl, null_yield, RGWMDLogSyncType::APPLY_ALWAYS);
+    int ret = store->ctl()->meta.mgr->put(raw_key, bl, null_yield, RGWMDLogSyncType::APPLY_ALWAYS);
     if (ret < 0) {
       ldout(store->ctx(), 0) << "ERROR: can't store key: " << raw_key << " ret=" << ret << dendl;
       return ret;
@@ -1095,7 +1095,7 @@ protected:
     return 0;
   }
 public:
-  RGWAsyncMetaStoreEntry(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWRados *_store,
+  RGWAsyncMetaStoreEntry(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *_store,
                        const string& _raw_key,
                        bufferlist& _bl) : RGWAsyncRadosRequest(caller, cn), store(_store),
                                           raw_key(_raw_key), bl(_bl) {}
@@ -1135,11 +1135,11 @@ public:
 };
 
 class RGWAsyncMetaRemoveEntry : public RGWAsyncRadosRequest {
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   string raw_key;
 protected:
   int _send_request() override {
-    int ret = store->ctl.meta.mgr->remove(raw_key, null_yield);
+    int ret = store->ctl()->meta.mgr->remove(raw_key, null_yield);
     if (ret < 0) {
       ldout(store->ctx(), 0) << "ERROR: can't remove key: " << raw_key << " ret=" << ret << dendl;
       return ret;
@@ -1147,7 +1147,7 @@ protected:
     return 0;
   }
 public:
-  RGWAsyncMetaRemoveEntry(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWRados *_store,
+  RGWAsyncMetaRemoveEntry(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *_store,
                        const string& _raw_key) : RGWAsyncRadosRequest(caller, cn), store(_store),
                                           raw_key(_raw_key) {}
 };
@@ -1234,10 +1234,10 @@ public:
 
     ldpp_dout(sync_env->dpp, 20) << __func__ << "(): updating marker marker_oid=" << marker_oid << " marker=" << new_marker << " realm_epoch=" << sync_marker.realm_epoch << dendl;
     tn->log(20, SSTR("new marker=" << new_marker));
-    RGWRados *store = sync_env->store;
+    rgw::sal::RGWRadosStore *store = sync_env->store;
     return new RGWSimpleRadosWriteCR<rgw_meta_sync_marker>(sync_env->async_rados,
-                                                           store->svc.sysobj,
-                                                           rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, marker_oid),
+                                                           store->svc()->sysobj,
+                                                           rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, marker_oid),
                                                            sync_marker);
   }
 
@@ -1555,7 +1555,7 @@ public:
       yield {
        uint32_t lock_duration = cct->_conf->rgw_sync_lease_period;
         string lock_name = "sync_lock";
-        RGWRados *store = sync_env->store;
+       rgw::sal::RGWRadosStore *store = sync_env->store;
         lease_cr.reset(new RGWContinuousLeaseCR(sync_env->async_rados, store,
                                                 rgw_raw_obj(pool, sync_env->shard_obj_name(shard_id)),
                                                 lock_name, lock_duration, this));
@@ -1647,7 +1647,7 @@ public:
          ldpp_dout(sync_env->dpp, 4) << *this << ": saving marker pos=" << temp_marker->marker << " realm_epoch=" << realm_epoch << dendl;
 
          using WriteMarkerCR = RGWSimpleRadosWriteCR<rgw_meta_sync_marker>;
-         yield call(new WriteMarkerCR(sync_env->async_rados, sync_env->store->svc.sysobj,
+         yield call(new WriteMarkerCR(sync_env->async_rados, sync_env->store->svc()->sysobj,
                                       rgw_raw_obj(pool, sync_env->shard_obj_name(shard_id)),
                                       *temp_marker));
         }
@@ -1701,7 +1701,7 @@ public:
         yield {
           uint32_t lock_duration = cct->_conf->rgw_sync_lease_period;
           string lock_name = "sync_lock";
-          RGWRados *store = sync_env->store;
+         rgw::sal::RGWRadosStore *store = sync_env->store;
           lease_cr.reset( new RGWContinuousLeaseCR(sync_env->async_rados, store,
                                                    rgw_raw_obj(pool, sync_env->shard_obj_name(shard_id)),
                                                    lock_name, lock_duration, this));
@@ -1890,8 +1890,8 @@ public:
   }
 
   RGWCoroutine *alloc_finisher_cr() override {
-    RGWRados *store = sync_env->store;
-    return new RGWSimpleRadosReadCR<rgw_meta_sync_marker>(sync_env->async_rados, store->svc.sysobj,
+    rgw::sal::RGWRadosStore *store = sync_env->store;
+    return new RGWSimpleRadosReadCR<rgw_meta_sync_marker>(sync_env->async_rados, store->svc()->sysobj,
                                                           rgw_raw_obj(pool, sync_env->shard_obj_name(shard_id)),
                                                           &sync_marker);
   }
@@ -1920,7 +1920,7 @@ public:
   RGWMetaSyncCR(RGWMetaSyncEnv *_sync_env, const RGWPeriodHistory::Cursor &cursor,
                 const rgw_meta_sync_status& _sync_status, RGWSyncTraceNodeRef& _tn)
     : RGWCoroutine(_sync_env->cct), sync_env(_sync_env),
-      pool(sync_env->store->svc.zone->get_zone_params().log_pool),
+      pool(sync_env->store->svc()->zone->get_zone_params().log_pool),
       cursor(cursor), sync_status(_sync_status), tn(_tn) {}
 
   ~RGWMetaSyncCR() {
@@ -1931,7 +1931,7 @@ public:
       // loop through one period at a time
       tn->log(1, "start");
       for (;;) {
-        if (cursor == sync_env->store->svc.mdlog->get_period_history()->get_current()) {
+        if (cursor == sync_env->store->svc()->mdlog->get_period_history()->get_current()) {
           next = RGWPeriodHistory::Cursor{};
           if (cursor) {
             ldpp_dout(sync_env->dpp, 10) << "RGWMetaSyncCR on current period="
@@ -1951,7 +1951,7 @@ public:
           // get the mdlog for the current period (may be empty)
           auto& period_id = sync_status.sync_info.period;
           auto realm_epoch = sync_status.sync_info.realm_epoch;
-          auto mdlog = sync_env->store->svc.mdlog->get_log(period_id);
+          auto mdlog = sync_env->store->svc()->mdlog->get_log(period_id);
 
           tn->log(1, SSTR("realm epoch=" << realm_epoch << " period id=" << period_id));
 
@@ -2005,7 +2005,7 @@ public:
         sync_status.sync_info.period = cursor.get_period().get_id();
         sync_status.sync_info.realm_epoch = cursor.get_epoch();
         yield call(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(sync_env->async_rados,
-                                                                 sync_env->store->svc.sysobj,
+                                                                 sync_env->store->svc()->sysobj,
                                                                  rgw_raw_obj(pool, sync_env->status_oid()),
                                                                  sync_status.sync_info));
       }
@@ -2031,16 +2031,16 @@ void RGWRemoteMetaLog::init_sync_env(RGWMetaSyncEnv *env) {
   env->async_rados = async_rados;
   env->http_manager = &http_manager;
   env->error_logger = error_logger;
-  env->sync_tracer = store->get_sync_tracer();
+  env->sync_tracer = store->getRados()->get_sync_tracer();
 }
 
 int RGWRemoteMetaLog::read_sync_status(rgw_meta_sync_status *sync_status)
 {
-  if (store->svc.zone->is_meta_master()) {
+  if (store->svc()->zone->is_meta_master()) {
     return 0;
   }
   // cannot run concurrently with run_sync(), so run in a separate manager
-  RGWCoroutinesManager crs(store->ctx(), store->get_cr_registry());
+  RGWCoroutinesManager crs(store->ctx(), store->getRados()->get_cr_registry());
   RGWHTTPManager http_manager(store->ctx(), crs.get_completion_mgr());
   int ret = http_manager.start();
   if (ret < 0) {
@@ -2057,7 +2057,7 @@ int RGWRemoteMetaLog::read_sync_status(rgw_meta_sync_status *sync_status)
 
 int RGWRemoteMetaLog::init_sync_status()
 {
-  if (store->svc.zone->is_meta_master()) {
+  if (store->svc()->zone->is_meta_master()) {
     return 0;
   }
 
@@ -2070,7 +2070,7 @@ int RGWRemoteMetaLog::init_sync_status()
 
   rgw_meta_sync_info sync_info;
   sync_info.num_shards = mdlog_info.num_shards;
-  auto cursor = store->svc.mdlog->get_period_history()->get_current();
+  auto cursor = store->svc()->mdlog->get_period_history()->get_current();
   if (cursor) {
     sync_info.period = cursor.get_period().get_id();
     sync_info.realm_epoch = cursor.get_epoch();
@@ -2082,13 +2082,13 @@ int RGWRemoteMetaLog::init_sync_status()
 int RGWRemoteMetaLog::store_sync_info(const rgw_meta_sync_info& sync_info)
 {
   tn->log(20, "store sync info");
-  return run(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(async_rados, store->svc.sysobj,
-                                                           rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, sync_env.status_oid()),
+  return run(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(async_rados, store->svc()->sysobj,
+                                                           rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env.status_oid()),
                                                            sync_info));
 }
 
 // return a cursor to the period at our sync position
-static RGWPeriodHistory::Cursor get_period_at(RGWRados* store,
+static RGWPeriodHistory::Cursor get_period_at(rgw::sal::RGWRadosStore* store,
                                               const rgw_meta_sync_info& info)
 {
   if (info.period.empty()) {
@@ -2097,7 +2097,7 @@ static RGWPeriodHistory::Cursor get_period_at(RGWRados* store,
   }
 
   // look for an existing period in our history
-  auto cursor = store->svc.mdlog->get_period_history()->lookup(info.realm_epoch);
+  auto cursor = store->svc()->mdlog->get_period_history()->lookup(info.realm_epoch);
   if (cursor) {
     // verify that the period ids match
     auto& existing = cursor.get_period().get_id();
@@ -2112,14 +2112,14 @@ static RGWPeriodHistory::Cursor get_period_at(RGWRados* store,
 
   // read the period from rados or pull it from the master
   RGWPeriod period;
-  int r = store->svc.mdlog->pull_period(info.period, period);
+  int r = store->svc()->mdlog->pull_period(info.period, period);
   if (r < 0) {
     lderr(store->ctx()) << "ERROR: failed to read period id "
         << info.period << ": " << cpp_strerror(r) << dendl;
     return RGWPeriodHistory::Cursor{r};
   }
   // attach the period to our history
-  cursor = store->svc.mdlog->get_period_history()->attach(std::move(period));
+  cursor = store->svc()->mdlog->get_period_history()->attach(std::move(period));
   if (!cursor) {
     r = cursor.get_error();
     lderr(store->ctx()) << "ERROR: failed to read period history back to "
@@ -2130,7 +2130,7 @@ static RGWPeriodHistory::Cursor get_period_at(RGWRados* store,
 
 int RGWRemoteMetaLog::run_sync()
 {
-  if (store->svc.zone->is_meta_master()) {
+  if (store->svc()->zone->is_meta_master()) {
     return 0;
   }
 
@@ -2193,7 +2193,7 @@ int RGWRemoteMetaLog::run_sync()
     if (sync_status.sync_info.state == rgw_meta_sync_info::StateInit) {
       ldpp_dout(dpp, 20) << __func__ << "(): init" << dendl;
       sync_status.sync_info.num_shards = mdlog_info.num_shards;
-      auto cursor = store->svc.mdlog->get_period_history()->get_current();
+      auto cursor = store->svc()->mdlog->get_period_history()->get_current();
       if (cursor) {
         // run full sync, then start incremental from the current period/epoch
         sync_status.sync_info.period = cursor.get_period().get_id();
index 742717ae4cf8102da1c4b5d8cdef3bd66c0f933d..8cbcdcf27b7dfa97af89484f2cffa8f67d3497ed 100644 (file)
 #include "rgw_http_client.h"
 #include "rgw_metadata.h"
 #include "rgw_meta_sync_status.h"
-#include "rgw_rados.h"
+#include "rgw_sal.h"
 #include "rgw_sync_trace.h"
 #include "rgw_mdlog.h"
 
+namespace rgw { namespace sal {
+  class RGWRadosStore;
+} }
 
 #define ERROR_LOGGER_SHARDS 32
 #define RGW_SYNC_ERROR_LOG_SHARD_PREFIX "sync.error-log"
@@ -71,14 +74,14 @@ class RGWRESTConn;
 class RGWSyncTraceManager;
 
 class RGWSyncErrorLogger {
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
 
   vector<string> oids;
   int num_shards;
 
   std::atomic<int64_t> counter = { 0 };
 public:
-  RGWSyncErrorLogger(RGWRados *_store, const string &oid_prefix, int _num_shards);
+  RGWSyncErrorLogger(rgw::sal::RGWRadosStore *_store, const string &oid_prefix, int _num_shards);
   RGWCoroutine *log_error_cr(const string& source_zone, const string& section, const string& name, uint32_t error_code, const string& message);
 
   static string get_shard_oid(const string& oid_prefix, int shard_id);
@@ -176,7 +179,7 @@ public:
 struct RGWMetaSyncEnv {
   const DoutPrefixProvider *dpp;
   CephContext *cct{nullptr};
-  RGWRados *store{nullptr};
+  rgw::sal::RGWRadosStore *store{nullptr};
   RGWRESTConn *conn{nullptr};
   RGWAsyncRadosProcessor *async_rados{nullptr};
   RGWHTTPManager *http_manager{nullptr};
@@ -185,7 +188,7 @@ struct RGWMetaSyncEnv {
 
   RGWMetaSyncEnv() {}
 
-  void init(const DoutPrefixProvider *_dpp, CephContext *_cct, RGWRados *_store, RGWRESTConn *_conn,
+  void init(const DoutPrefixProvider *_dpp, CephContext *_cct, rgw::sal::RGWRadosStore *_store, RGWRESTConn *_conn,
             RGWAsyncRadosProcessor *_async_rados, RGWHTTPManager *_http_manager,
             RGWSyncErrorLogger *_error_logger, RGWSyncTraceManager *_sync_tracer);
 
@@ -195,7 +198,7 @@ struct RGWMetaSyncEnv {
 
 class RGWRemoteMetaLog : public RGWCoroutinesManager {
   const DoutPrefixProvider *dpp;
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   RGWRESTConn *conn;
   RGWAsyncRadosProcessor *async_rados;
 
@@ -218,10 +221,10 @@ class RGWRemoteMetaLog : public RGWCoroutinesManager {
   RGWSyncTraceNodeRef tn;
 
 public:
-  RGWRemoteMetaLog(const DoutPrefixProvider *dpp, RGWRados *_store,
+  RGWRemoteMetaLog(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *_store,
                    RGWAsyncRadosProcessor *async_rados,
                    RGWMetaSyncStatusManager *_sm)
-    : RGWCoroutinesManager(_store->ctx(), _store->get_cr_registry()),
+    : RGWCoroutinesManager(_store->ctx(), _store->getRados()->get_cr_registry()),
       dpp(dpp), store(_store), conn(NULL), async_rados(async_rados),
       http_manager(store->ctx(), completion_mgr),
       status_manager(_sm) {}
@@ -246,7 +249,7 @@ public:
 };
 
 class RGWMetaSyncStatusManager : public DoutPrefixProvider {
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   librados::IoCtx ioctx;
 
   RGWRemoteMetaLog master_log;
@@ -272,7 +275,7 @@ class RGWMetaSyncStatusManager : public DoutPrefixProvider {
   vector<string> clone_markers;
 
 public:
-  RGWMetaSyncStatusManager(RGWRados *_store, RGWAsyncRadosProcessor *async_rados)
+  RGWMetaSyncStatusManager(rgw::sal::RGWRadosStore *_store, RGWAsyncRadosProcessor *async_rados)
     : store(_store), master_log(this, store, async_rados, this)
   {}
   int init();
index e8074d8bf46dede0678dafe79ae7eda153310156..5de9545c6b29ac1c099ca72fe62bc2216961de13 100644 (file)
@@ -563,11 +563,11 @@ struct AWSSyncConfig {
   void expand_target(RGWDataSyncEnv *sync_env, const string& sid, const string& path, string *dest) {
       apply_meta_param(path, "sid", sid, dest);
 
-      const RGWZoneGroup& zg = sync_env->store->svc.zone->get_zonegroup();
+      const RGWZoneGroup& zg = sync_env->store->svc()->zone->get_zonegroup();
       apply_meta_param(path, "zonegroup", zg.get_name(), dest);
       apply_meta_param(path, "zonegroup_id", zg.get_id(), dest);
 
-      const RGWZone& zone = sync_env->store->svc.zone->get_zone();
+      const RGWZone& zone = sync_env->store->svc()->zone->get_zone();
       apply_meta_param(path, "zone", zone.name, dest);
       apply_meta_param(path, "zone_id", zone.id, dest);
   }
@@ -642,7 +642,7 @@ struct AWSSyncConfig {
     auto& root_conf = root_profile->conn_conf;
 
     root_profile->conn.reset(new S3RESTConn(sync_env->cct,
-                                           sync_env->store->svc.zone,
+                                           sync_env->store->svc()->zone,
                                            id,
                                            { root_conf->endpoint },
                                            root_conf->key,
@@ -652,7 +652,7 @@ struct AWSSyncConfig {
       auto& c = i.second;
 
       c->conn.reset(new S3RESTConn(sync_env->cct,
-                                   sync_env->store->svc.zone,
+                                   sync_env->store->svc()->zone,
                                    id,
                                    { c->conn_conf->endpoint },
                                    c->conn_conf->key,
@@ -1426,14 +1426,14 @@ public:
                                                    obj_size(_obj_size),
                                                    src_properties(_src_properties),
                                                    rest_obj(_rest_obj),
-                                                   status_obj(sync_env->store->svc.zone->get_zone_params().log_pool,
+                                                   status_obj(sync_env->store->svc()->zone->get_zone_params().log_pool,
                                                               RGWBucketSyncStatusManager::obj_status_oid(sync_env->source_zone, src_obj)) {
   }
 
 
   int operate() override {
     reenter(this) {
-      yield call(new RGWSimpleRadosReadCR<rgw_sync_aws_multipart_upload_info>(sync_env->async_rados, sync_env->store->svc.sysobj,
+      yield call(new RGWSimpleRadosReadCR<rgw_sync_aws_multipart_upload_info>(sync_env->async_rados, sync_env->store->svc()->sysobj,
                                                                  status_obj, &status, false));
 
       if (retcode < 0 && retcode != -ENOENT) {
@@ -1496,7 +1496,7 @@ public:
           return set_cr_error(ret_err);
         }
 
-        yield call(new RGWSimpleRadosWriteCR<rgw_sync_aws_multipart_upload_info>(sync_env->async_rados, sync_env->store->svc.sysobj, status_obj, status));
+        yield call(new RGWSimpleRadosWriteCR<rgw_sync_aws_multipart_upload_info>(sync_env->async_rados, sync_env->store->svc()->sysobj, status_obj, status));
         if (retcode < 0) {
           ldout(sync_env->cct, 0) << "ERROR: failed to store multipart upload state, retcode=" << retcode << dendl;
           /* continue with upload anyway */
@@ -1604,8 +1604,7 @@ public:
                               << " zone_short_id=" << src_zone_short_id << " pg_ver=" << src_pg_ver
                               << dendl;
 
-
-      source_conn = sync_env->store->svc.zone->get_zone_conn_by_id(sync_env->source_zone);
+      source_conn = sync_env->store->svc()->zone->get_zone_conn_by_id(sync_env->source_zone);
       if (!source_conn) {
         ldout(sync_env->cct, 0) << "ERROR: cannot find http connection to zone " << sync_env->source_zone << dendl;
         return set_cr_error(-EINVAL);
index f9b02a4027bbfa917ef413f3ca25600f1a90515d..efb357e8193a5a933dcdf7bf73c6dcb8f56c0078 100644 (file)
@@ -800,7 +800,7 @@ public:
   ~RGWElasticDataSyncModule() override {}
 
   void init(RGWDataSyncEnv *sync_env, uint64_t instance_id) override {
-    conf->init_instance(sync_env->store->svc.zone->get_realm(), instance_id);
+    conf->init_instance(sync_env->store->svc()->zone->get_realm(), instance_id);
   }
 
   RGWCoroutine *init_sync(RGWDataSyncEnv *sync_env) override {
index cfeda263862989d2a7d4f8582a4176974323eee9..ae224e2d5f8b40f142159dcce3c26d9ae7aef6f1 100644 (file)
@@ -379,7 +379,7 @@ class RGWHandler_REST_MDSearch_S3 : public RGWHandler_REST_S3 {
 protected:
   RGWOp *op_get() override {
     if (s->info.args.exists("query")) {
-      return new RGWMetadataSearch_ObjStore_S3(store->get_sync_module());
+      return new RGWMetadataSearch_ObjStore_S3(store->getRados()->get_sync_module());
     }
     if (!s->init_state.url_bucket.empty() &&
         s->info.args.exists("mdsearch")) {
index 5c4a1788272ee674104d189cdb9b954ab863d654..01474821f71359298da5ee5614089f58d783bbf0 100644 (file)
@@ -959,7 +959,7 @@ class PSManager
             rgw_raw_obj obj;
             ups.get_sub_meta_obj(sub_name, &obj);
             bool empty_on_enoent = false;
-            call(new ReadInfoCR(sync_env->async_rados, sync_env->store->svc.sysobj,
+            call(new ReadInfoCR(sync_env->async_rados, sync_env->store->svc()->sysobj,
                                 obj,
                                 &user_sub_conf, empty_on_enoent));
           }
@@ -1141,7 +1141,7 @@ public:
       using ReadInfoCR = RGWSimpleRadosReadCR<rgw_pubsub_bucket_topics>;
       yield {
         bool empty_on_enoent = true;
-        call(new ReadInfoCR(sync_env->async_rados, sync_env->store->svc.sysobj,
+        call(new ReadInfoCR(sync_env->async_rados, sync_env->store->svc()->sysobj,
                             bucket_obj,
                             &bucket_topics, empty_on_enoent));
       }
@@ -1155,7 +1155,7 @@ public:
        using ReadUserTopicsInfoCR = RGWSimpleRadosReadCR<rgw_pubsub_user_topics>;
        yield {
          bool empty_on_enoent = true;
-         call(new ReadUserTopicsInfoCR(sync_env->async_rados, sync_env->store->svc.sysobj,
+         call(new ReadUserTopicsInfoCR(sync_env->async_rados, sync_env->store->svc()->sysobj,
                                        user_obj,
                                        &user_topics, empty_on_enoent));
        }
@@ -1518,7 +1518,7 @@ public:
 
   void init(RGWDataSyncEnv *sync_env, uint64_t instance_id) override {
     PSManagerRef mgr = PSManager::get_shared(sync_env, env);
-    env->init_instance(sync_env->store->svc.zone->get_realm(), instance_id, mgr);
+    env->init_instance(sync_env->store->svc()->zone->get_realm(), instance_id, mgr);
   }
 
   RGWCoroutine *start_sync(RGWDataSyncEnv *sync_env) override {
index d9c5c34cd19058bfcb157a593800e60fcb46fa75..b0179b8564b8794c39f665a6e24652348c4f13fc 100644 (file)
@@ -70,7 +70,7 @@ public:
     dest.arn_topic = topic_name;
     // the topic ARN will be sent in the reply
     const rgw::ARN arn(rgw::Partition::aws, rgw::Service::sns, 
-        store->svc.zone->get_zonegroup().get_name(),
+        store->svc()->zone->get_zonegroup().get_name(),
         s->user->user_id.tenant, topic_name);
     topic_arn = arn.to_string();
     return 0;
@@ -348,7 +348,7 @@ public:
       return -EINVAL;
     }
 
-    const auto psmodule = static_cast<RGWPSSyncModuleInstance*>(store->get_sync_module().get());
+    const auto psmodule = static_cast<RGWPSSyncModuleInstance*>(store->getRados()->get_sync_module().get());
     const auto& conf = psmodule->get_effective_conf();
 
     dest.push_endpoint = s->info.args.get("push-endpoint");
@@ -701,7 +701,7 @@ public:
 
     const auto& id = s->owner.get_id();
 
-    ret = store->get_bucket_info(*s->sysobj_ctx, id.tenant, bucket_name,
+    ret = store->getRados()->get_bucket_info(*s->sysobj_ctx, id.tenant, bucket_name,
                                  bucket_info, nullptr, null_yield, nullptr);
     if (ret < 0) {
       ldout(s->cct, 1) << "failed to get bucket info, cannot verify ownership" << dendl;
@@ -872,7 +872,7 @@ void RGWPSCreateNotif_ObjStore_S3::execute() {
   ups = make_unique<RGWUserPubSub>(store, s->owner.get_id());
   auto b = ups->get_bucket(bucket_info.bucket);
   ceph_assert(b);
-  const auto psmodule = static_cast<RGWPSSyncModuleInstance*>(store->get_sync_module().get());
+  const auto psmodule = static_cast<RGWPSSyncModuleInstance*>(store->getRados()->get_sync_module().get());
   const auto& conf = psmodule->get_effective_conf();
 
   for (const auto& c : configurations.list) {
@@ -964,7 +964,7 @@ public:
       return ret;
     }
 
-    ret = store->get_bucket_info(*s->sysobj_ctx, s->owner.get_id().tenant, bucket_name,
+    ret = store->getRados()->get_bucket_info(*s->sysobj_ctx, s->owner.get_id().tenant, bucket_name,
                                  bucket_info, nullptr, null_yield, nullptr);
     if (ret < 0) {
       return ret;
@@ -1124,7 +1124,7 @@ public:
       return ret;
     }
 
-    ret = store->get_bucket_info(*s->sysobj_ctx, s->owner.get_id().tenant, bucket_name,
+    ret = store->getRados()->get_bucket_info(*s->sysobj_ctx, s->owner.get_id().tenant, bucket_name,
                                  bucket_info, nullptr, null_yield, nullptr);
     if (ret < 0) {
       return ret;
index 85502e556fb33523c916bf5e979c2ff46148a5f0..f1973d78ab58702d7a5ca8d22e824a80e9c38a70 100644 (file)
@@ -399,9 +399,9 @@ void rgw_filter_attrset(map<string, bufferlist>& unfiltered_attrset, const strin
   }
 }
 
-RGWDataAccess::RGWDataAccess(RGWRados *_store) : store(_store)
+RGWDataAccess::RGWDataAccess(rgw::sal::RGWRadosStore *_store) : store(_store)
 {
-  sysobj_ctx = std::make_unique<RGWSysObjectCtx>(store->svc.sysobj->init_obj_ctx());
+  sysobj_ctx = std::make_unique<RGWSysObjectCtx>(store->svc()->sysobj->init_obj_ctx());
 }
 
 
@@ -424,7 +424,7 @@ int RGWDataAccess::Bucket::finish_init()
 
 int RGWDataAccess::Bucket::init()
 {
-  int ret = sd->store->get_bucket_info(*sd->sysobj_ctx,
+  int ret = sd->store->getRados()->get_bucket_info(*sd->sysobj_ctx,
                                       tenant, name,
                                       bucket_info,
                                       &mtime,
@@ -457,7 +457,7 @@ int RGWDataAccess::Object::put(bufferlist& data,
                                const DoutPrefixProvider *dpp,
                                optional_yield y)
 {
-  RGWRados *store = sd->store;
+  rgw::sal::RGWRadosStore *store = sd->store;
   CephContext *cct = store->ctx();
 
   string tag;
@@ -472,7 +472,7 @@ int RGWDataAccess::Object::put(bufferlist& data,
 
   auto& owner = bucket->policy.get_owner();
 
-  string req_id = store->svc.zone_utils->unique_id(store->get_new_req_id());
+  string req_id = store->svc()->zone_utils->unique_id(store->getRados()->get_new_req_id());
 
   using namespace rgw::putobj;
   AtomicObjectProcessor processor(&aio, store, bucket_info, nullptr,
@@ -488,7 +488,7 @@ int RGWDataAccess::Object::put(bufferlist& data,
   CompressorRef plugin;
   boost::optional<RGWPutObj_Compress> compressor;
 
-  const auto& compression_type = store->svc.zone->get_zone_params().get_compression_type(bucket_info.placement_rule);
+  const auto& compression_type = store->svc()->zone->get_zone_params().get_compression_type(bucket_info.placement_rule);
   if (compression_type != "none") {
     plugin = Compressor::create(store->ctx(), compression_type);
     if (!plugin) {
index 0b3a9b30aaafd08d26a36ccc555c1698703c3b1f..a72d8fc9bfade471fbd4182971d6e37225e6c6e6 100644 (file)
@@ -19,6 +19,9 @@ class RGWRados;
 class RGWSysObjectCtx;
 struct RGWObjVersionTracker;
 class optional_yield;
+namespace rgw { namespace sal {
+  class RGWRadosStore;
+} }
 
 struct obj_version;
 
@@ -132,11 +135,11 @@ using RGWMD5Etag = RGWEtag<MD5, CEPH_CRYPTO_MD5_DIGESTSIZE>;
 
 class RGWDataAccess
 {
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   std::unique_ptr<RGWSysObjectCtx> sysobj_ctx;
 
 public:
-  RGWDataAccess(RGWRados *_store);
+  RGWDataAccess(rgw::sal::RGWRadosStore *_store);
 
   class Object;
   class Bucket;
index 7bc9469864c4cff80cefa411256744bcbdf8a093..f53ae0f7c815e4c593dbb7ed40fdefaea3299377 100644 (file)
@@ -7,6 +7,7 @@
 #include <sstream>
 
 #include "rgw_torrent.h"
+#include "rgw_sal.h"
 #include "include/str_list.h"
 #include "include/rados/librados.hpp"
 
@@ -35,7 +36,7 @@ seed::~seed()
   store = NULL;
 }
 
-void seed::init(struct req_state *p_req, RGWRados *p_store)
+void seed::init(struct req_state *p_req, rgw::sal::RGWRadosStore *p_store)
 {
   s = p_req;
   store = p_store;
@@ -245,9 +246,9 @@ int seed::save_torrent_file()
   rgw_obj obj(s->bucket, s->object.name);    
 
   rgw_raw_obj raw_obj;
-  store->obj_to_raw(s->bucket_info.placement_rule, obj, &raw_obj);
+  store->getRados()->obj_to_raw(s->bucket_info.placement_rule, obj, &raw_obj);
 
-  auto obj_ctx = store->svc.sysobj->init_obj_ctx();
+  auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
   auto sysobj = obj_ctx.get_obj(raw_obj);
 
   op_ret = sysobj.omap().set(key, bl, null_yield);
index c135323d076b17a1703888f760f9d0fecfbf493b..a20999cd221c57d4d90fa79cebd33094d660ba1d 100644 (file)
@@ -108,7 +108,7 @@ private:
   bufferlist bl;  // bufflist ready to send
 
   struct req_state *s{nullptr};
-  RGWRados *store{nullptr};
+  rgw::sal::RGWRadosStore *store{nullptr};
   SHA1 h;
 
   TorrentBencode dencode;
@@ -117,7 +117,7 @@ public:
   ~seed();
 
   int get_params();
-  void init(struct req_state *p_req, RGWRados *p_store);
+  void init(struct req_state *p_req, rgw::sal::RGWRadosStore *p_store);
   int get_torrent_file(RGWRados::Object::Read &read_op,
                        uint64_t &total_len,
                        ceph::bufferlist &bl_data,
index d19967b182f5e94a7649857f0a1d74f4fa8399a4..d7eb793a19a138abb2f44fe93a501bda63e0338f 100644 (file)
@@ -26,7 +26,7 @@
 #include "rgw_cr_rest.h"
 #include "rgw_data_sync.h"
 #include "rgw_metadata.h"
-#include "rgw_rados.h"
+#include "rgw_sal.h"
 #include "rgw_zone.h"
 #include "rgw_sync.h"
 #include "rgw_bucket.h"
@@ -237,7 +237,7 @@ void TrimComplete::Handler::handle(bufferlist::const_iterator& input,
 
 /// rados watcher for bucket trim notifications
 class BucketTrimWatcher : public librados::WatchCtx2 {
-  RGWRados *const store;
+  rgw::sal::RGWRadosStore *const store;
   const rgw_raw_obj& obj;
   rgw_rados_ref ref;
   uint64_t handle{0};
@@ -246,7 +246,7 @@ class BucketTrimWatcher : public librados::WatchCtx2 {
   boost::container::flat_map<TrimNotifyType, HandlerPtr> handlers;
 
  public:
-  BucketTrimWatcher(RGWRados *store, const rgw_raw_obj& obj,
+  BucketTrimWatcher(rgw::sal::RGWRadosStore *store, const rgw_raw_obj& obj,
                     TrimCounters::Server *counters)
     : store(store), obj(obj) {
     handlers.emplace(NotifyTrimCounters, new TrimCounters::Handler(counters));
@@ -258,7 +258,7 @@ class BucketTrimWatcher : public librados::WatchCtx2 {
   }
 
   int start() {
-    int r = store->get_raw_obj_ref(obj, &ref);
+    int r = store->getRados()->get_raw_obj_ref(obj, &ref);
     if (r < 0) {
       return r;
     }
@@ -380,12 +380,12 @@ int take_min_status(CephContext *cct, Iter first, Iter last,
 /// concurrent requests
 class BucketTrimShardCollectCR : public RGWShardCollectCR {
   static constexpr int MAX_CONCURRENT_SHARDS = 16;
-  RGWRados *const store;
+  rgw::sal::RGWRadosStore *const store;
   const RGWBucketInfo& bucket_info;
   const std::vector<std::string>& markers; //< shard markers to trim
   size_t i{0}; //< index of current shard marker
  public:
-  BucketTrimShardCollectCR(RGWRados *store, const RGWBucketInfo& bucket_info,
+  BucketTrimShardCollectCR(rgw::sal::RGWRadosStore *store, const RGWBucketInfo& bucket_info,
                            const std::vector<std::string>& markers)
     : RGWShardCollectCR(store->ctx(), MAX_CONCURRENT_SHARDS),
       store(store), bucket_info(bucket_info), markers(markers)
@@ -414,7 +414,7 @@ bool BucketTrimShardCollectCR::spawn_next()
 
 /// trim the bilog of all of the given bucket instance's shards
 class BucketTrimInstanceCR : public RGWCoroutine {
-  RGWRados *const store;
+  rgw::sal::RGWRadosStore *const store;
   RGWHTTPManager *const http;
   BucketTrimObserver *const observer;
   std::string bucket_instance;
@@ -428,14 +428,14 @@ class BucketTrimInstanceCR : public RGWCoroutine {
   std::vector<std::string> min_markers; //< min marker per shard
 
  public:
-  BucketTrimInstanceCR(RGWRados *store, RGWHTTPManager *http,
+  BucketTrimInstanceCR(rgw::sal::RGWRadosStore *store, RGWHTTPManager *http,
                        BucketTrimObserver *observer,
                        const std::string& bucket_instance)
     : RGWCoroutine(store->ctx()), store(store),
       http(http), observer(observer),
       bucket_instance(bucket_instance),
-      zone_id(store->svc.zone->get_zone().id),
-      peer_status(store->svc.zone->get_zone_data_notify_to_map().size()) {
+      zone_id(store->svc()->zone->get_zone().id),
+      peer_status(store->svc()->zone->get_zone_data_notify_to_map().size()) {
     rgw_bucket_parse_bucket_key(cct, bucket_instance, &bucket, nullptr);
   }
 
@@ -460,14 +460,14 @@ int BucketTrimInstanceCR::operate()
       };
 
       auto p = peer_status.begin();
-      for (auto& c : store->svc.zone->get_zone_data_notify_to_map()) {
+      for (auto& c : store->svc()->zone->get_zone_data_notify_to_map()) {
         using StatusCR = RGWReadRESTResourceCR<StatusShards>;
         spawn(new StatusCR(cct, c.second, http, "/admin/log/", params, &*p),
               false);
         ++p;
       }
       // in parallel, read the local bucket instance info
-      spawn(new RGWGetBucketInstanceInfoCR(store->svc.rados->get_async_processor(), store,
+      spawn(new RGWGetBucketInstanceInfoCR(store->svc()->rados->get_async_processor(), store,
                                            bucket, &bucket_info),
             false);
     }
@@ -517,13 +517,13 @@ int BucketTrimInstanceCR::operate()
 
 /// trim each bucket instance while limiting the number of concurrent operations
 class BucketTrimInstanceCollectCR : public RGWShardCollectCR {
-  RGWRados *const store;
+  rgw::sal::RGWRadosStore *const store;
   RGWHTTPManager *const http;
   BucketTrimObserver *const observer;
   std::vector<std::string>::const_iterator bucket;
   std::vector<std::string>::const_iterator end;
  public:
-  BucketTrimInstanceCollectCR(RGWRados *store, RGWHTTPManager *http,
+  BucketTrimInstanceCollectCR(rgw::sal::RGWRadosStore *store, RGWHTTPManager *http,
                               BucketTrimObserver *observer,
                               const std::vector<std::string>& buckets,
                               int max_concurrent)
@@ -718,7 +718,7 @@ class MetadataListCR : public RGWSimpleCoroutine {
 };
 
 class BucketTrimCR : public RGWCoroutine {
-  RGWRados *const store;
+  rgw::sal::RGWRadosStore *const store;
   RGWHTTPManager *const http;
   const BucketTrimConfig& config;
   BucketTrimObserver *const observer;
@@ -733,7 +733,7 @@ class BucketTrimCR : public RGWCoroutine {
 
   static const std::string section; //< metadata section for bucket instances
  public:
-  BucketTrimCR(RGWRados *store, RGWHTTPManager *http,
+  BucketTrimCR(rgw::sal::RGWRadosStore *store, RGWHTTPManager *http,
                const BucketTrimConfig& config, BucketTrimObserver *observer,
                const rgw_raw_obj& obj)
     : RGWCoroutine(store->ctx()), store(store), http(http), config(config),
@@ -789,7 +789,7 @@ int BucketTrimCR::operate()
       // read BucketTrimStatus for marker position
       set_status("reading trim status");
       using ReadStatus = RGWSimpleRadosReadCR<BucketTrimStatus>;
-      yield call(new ReadStatus(store->svc.rados->get_async_processor(), store->svc.sysobj, obj,
+      yield call(new ReadStatus(store->svc()->rados->get_async_processor(), store->svc()->sysobj, obj,
                                 &status, true, &objv));
       if (retcode < 0) {
         ldout(cct, 10) << "failed to read bilog trim status: "
@@ -824,8 +824,8 @@ int BucketTrimCR::operate()
           return buckets.size() < config.buckets_per_interval;
         };
 
-        call(new MetadataListCR(cct, store->svc.rados->get_async_processor(),
-                                store->ctl.meta.mgr,
+        call(new MetadataListCR(cct, store->svc()->rados->get_async_processor(),
+                                store->ctl()->meta.mgr,
                                 section, status.marker, cb));
       }
       if (retcode < 0) {
@@ -848,7 +848,7 @@ int BucketTrimCR::operate()
       status.marker = std::move(last_cold_marker);
       ldout(cct, 20) << "writing bucket trim marker=" << status.marker << dendl;
       using WriteStatus = RGWSimpleRadosWriteCR<BucketTrimStatus>;
-      yield call(new WriteStatus(store->svc.rados->get_async_processor(), store->svc.sysobj, obj,
+      yield call(new WriteStatus(store->svc()->rados->get_async_processor(), store->svc()->sysobj, obj,
                                  status, &objv));
       if (retcode < 0) {
         ldout(cct, 4) << "failed to write updated trim status: "
@@ -881,7 +881,7 @@ int BucketTrimCR::operate()
 }
 
 class BucketTrimPollCR : public RGWCoroutine {
-  RGWRados *const store;
+  rgw::sal::RGWRadosStore *const store;
   RGWHTTPManager *const http;
   const BucketTrimConfig& config;
   BucketTrimObserver *const observer;
@@ -890,7 +890,7 @@ class BucketTrimPollCR : public RGWCoroutine {
   const std::string cookie;
 
  public:
-  BucketTrimPollCR(RGWRados *store, RGWHTTPManager *http,
+  BucketTrimPollCR(rgw::sal::RGWRadosStore *store, RGWHTTPManager *http,
                    const BucketTrimConfig& config,
                    BucketTrimObserver *observer, const rgw_raw_obj& obj)
     : RGWCoroutine(store->ctx()), store(store), http(http),
@@ -910,7 +910,7 @@ int BucketTrimPollCR::operate()
 
       // prevent others from trimming for our entire wait interval
       set_status("acquiring trim lock");
-      yield call(new RGWSimpleRadosLockCR(store->svc.rados->get_async_processor(), store,
+      yield call(new RGWSimpleRadosLockCR(store->svc()->rados->get_async_processor(), store,
                                           obj, name, cookie,
                                           config.trim_interval_sec));
       if (retcode < 0) {
@@ -923,7 +923,7 @@ int BucketTrimPollCR::operate()
       if (retcode < 0) {
         // on errors, unlock so other gateways can try
         set_status("unlocking");
-        yield call(new RGWSimpleRadosUnlockCR(store->svc.rados->get_async_processor(), store,
+        yield call(new RGWSimpleRadosUnlockCR(store->svc()->rados->get_async_processor(), store,
                                               obj, name, cookie));
       }
     }
@@ -1004,7 +1004,7 @@ void configure_bucket_trim(CephContext *cct, BucketTrimConfig& config)
 class BucketTrimManager::Impl : public TrimCounters::Server,
                                 public BucketTrimObserver {
  public:
-  RGWRados *const store;
+  rgw::sal::RGWRadosStore *const store;
   const BucketTrimConfig config;
 
   const rgw_raw_obj status_obj;
@@ -1023,9 +1023,9 @@ class BucketTrimManager::Impl : public TrimCounters::Server,
   /// protect data shared between data sync, trim, and watch/notify threads
   std::mutex mutex;
 
-  Impl(RGWRados *store, const BucketTrimConfig& config)
+  Impl(rgw::sal::RGWRadosStore *store, const BucketTrimConfig& config)
     : store(store), config(config),
-      status_obj(store->svc.zone->get_zone_params().log_pool, BucketTrimStatus::oid),
+      status_obj(store->svc()->zone->get_zone_params().log_pool, BucketTrimStatus::oid),
       counter(config.counter_size),
       trimmed(config.recent_size, config.recent_duration),
       watcher(store, status_obj, this)
@@ -1061,7 +1061,7 @@ class BucketTrimManager::Impl : public TrimCounters::Server,
   }
 };
 
-BucketTrimManager::BucketTrimManager(RGWRados *store,
+BucketTrimManager::BucketTrimManager(rgw::sal::RGWRadosStore *store,
                                      const BucketTrimConfig& config)
   : impl(new Impl(store, config))
 {
index 13d1f63a1cf433e03b38001940658f1b098e8c76..485333d59aaf849a6b50540daeb2f138698df2b3 100644 (file)
 class CephContext;
 class RGWCoroutine;
 class RGWHTTPManager;
-class RGWRados;
 
 namespace rgw {
 
+namespace sal {
+  class RGWRadosStore;
+}
+
 /// Interface to inform the trim process about which buckets are most active
 struct BucketChangeObserver {
   virtual ~BucketChangeObserver() = default;
@@ -69,7 +72,7 @@ class BucketTrimManager : public BucketChangeObserver {
   class Impl;
   std::unique_ptr<Impl> impl;
  public:
-  BucketTrimManager(RGWRados *store, const BucketTrimConfig& config);
+  BucketTrimManager(sal::RGWRadosStore *store, const BucketTrimConfig& config);
   ~BucketTrimManager();
 
   int init();
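
The header hunk above illustrates how this diff keeps compile-time coupling low: a forward declaration of rgw::sal::RGWRadosStore is enough wherever the type only appears behind a pointer or reference, and only the corresponding .cc files (see the include swap from rgw_rados.h to rgw_sal.h earlier in the diff) pull in the full definition before dereferencing the accessors. A hedged sketch of that pairing -- the file names and the trim factory below are invented for illustration:

    // example_trim.h (hypothetical): forward-declare the store, no rgw_sal.h include.
    namespace rgw { namespace sal {
      class RGWRadosStore;
    } }

    class RGWCoroutine;
    RGWCoroutine* create_example_trim_cr(rgw::sal::RGWRadosStore *store, int num_shards);

    // example_trim.cc (hypothetical): needs the full definition, so it includes
    // rgw_sal.h and then uses the accessors, e.g.
    //   store->svc()->zone->get_zone_params().log_pool
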
index e2ccb075af9717ae6a095454373ee3fc32e479a2..78b764ab46505630f83c0288a03ebacf877dc077 100644 (file)
@@ -53,7 +53,7 @@ void take_min_markers(IterIn first, IterIn last, IterOut dest)
 
 class DataLogTrimCR : public RGWCoroutine {
   using TrimCR = RGWSyncLogTrimCR;
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   RGWHTTPManager *http;
   const int num_shards;
   const std::string& zone_id; //< my zone id
@@ -63,12 +63,12 @@ class DataLogTrimCR : public RGWCoroutine {
   int ret{0};
 
  public:
-  DataLogTrimCR(RGWRados *store, RGWHTTPManager *http,
+  DataLogTrimCR(rgw::sal::RGWRadosStore *store, RGWHTTPManager *http,
                    int num_shards, std::vector<std::string>& last_trim)
     : RGWCoroutine(store->ctx()), store(store), http(http),
       num_shards(num_shards),
-      zone_id(store->svc.zone->get_zone().id),
-      peer_status(store->svc.zone->get_zone_data_notify_to_map().size()),
+      zone_id(store->svc()->zone->get_zone().id),
+      peer_status(store->svc()->zone->get_zone_data_notify_to_map().size()),
       min_shard_markers(num_shards, TrimCR::max_marker),
       last_trim(last_trim)
   {}
@@ -91,7 +91,7 @@ int DataLogTrimCR::operate()
       };
 
       auto p = peer_status.begin();
-      for (auto& c : store->svc.zone->get_zone_data_notify_to_map()) {
+      for (auto& c : store->svc()->zone->get_zone_data_notify_to_map()) {
         ldout(cct, 20) << "query sync status from " << c.first << dendl;
         using StatusCR = RGWReadRESTResourceCR<rgw_data_sync_status>;
         spawn(new StatusCR(cct, c.second, http, "/admin/log/", params, &*p),
@@ -128,7 +128,7 @@ int DataLogTrimCR::operate()
         ldout(cct, 10) << "trimming log shard " << i
             << " at marker=" << m
             << " last_trim=" << last_trim[i] << dendl;
-        spawn(new TrimCR(store, store->svc.datalog_rados->get_oid(i),
+        spawn(new TrimCR(store, store->svc()->datalog_rados->get_oid(i),
                          m, &last_trim[i]),
               true);
       }
@@ -138,7 +138,7 @@ int DataLogTrimCR::operate()
   return 0;
 }
 
-RGWCoroutine* create_admin_data_log_trim_cr(RGWRados *store,
+RGWCoroutine* create_admin_data_log_trim_cr(rgw::sal::RGWRadosStore *store,
                                             RGWHTTPManager *http,
                                             int num_shards,
                                             std::vector<std::string>& markers)
@@ -147,7 +147,7 @@ RGWCoroutine* create_admin_data_log_trim_cr(RGWRados *store,
 }
 
 class DataLogTrimPollCR : public RGWCoroutine {
-  RGWRados *store;
+  rgw::sal::RGWRadosStore *store;
   RGWHTTPManager *http;
   const int num_shards;
   const utime_t interval; //< polling interval
@@ -156,11 +156,11 @@ class DataLogTrimPollCR : public RGWCoroutine {
   std::vector<std::string> last_trim; //< last trimmed marker per shard
 
  public:
-  DataLogTrimPollCR(RGWRados *store, RGWHTTPManager *http,
+  DataLogTrimPollCR(rgw::sal::RGWRadosStore *store, RGWHTTPManager *http,
                     int num_shards, utime_t interval)
     : RGWCoroutine(store->ctx()), store(store), http(http),
       num_shards(num_shards), interval(interval),
-      lock_oid(store->svc.datalog_rados->get_oid(0)),
+      lock_oid(store->svc()->datalog_rados->get_oid(0)),
       lock_cookie(RGWSimpleRadosLockCR::gen_random_cookie(cct)),
       last_trim(num_shards)
   {}
@@ -178,8 +178,8 @@ int DataLogTrimPollCR::operate()
       // request a 'data_trim' lock that covers the entire wait interval to
       // prevent other gateways from attempting to trim for the duration
       set_status("acquiring trim lock");
-      yield call(new RGWSimpleRadosLockCR(store->svc.rados->get_async_processor(), store,
-                                          rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, lock_oid),
+      yield call(new RGWSimpleRadosLockCR(store->svc()->rados->get_async_processor(), store,
+                                          rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, lock_oid),
                                           "data_trim", lock_cookie,
                                           interval.sec()));
       if (retcode < 0) {
@@ -199,7 +199,7 @@ int DataLogTrimPollCR::operate()
   return 0;
 }
 
-RGWCoroutine* create_data_log_trim_cr(RGWRados *store,
+RGWCoroutine* create_data_log_trim_cr(rgw::sal::RGWRadosStore *store,
                                       RGWHTTPManager *http,
                                       int num_shards, utime_t interval)
 {
index 6b640dafe01f0aa1b69a9618189c796918cec57d..b76bf0babf6e3124af42e30d2ce83d8bb69dadf3 100644 (file)
@@ -7,14 +7,17 @@ class RGWCoroutine;
 class RGWRados;
 class RGWHTTPManager;
 class utime_t;
+namespace rgw { namespace sal {
+  class RGWRadosStore;
+} }
 
 // DataLogTrimCR factory function
-extern RGWCoroutine* create_data_log_trim_cr(RGWRados *store,
+extern RGWCoroutine* create_data_log_trim_cr(rgw::sal::RGWRadosStore *store,
                                              RGWHTTPManager *http,
                                              int num_shards, utime_t interval);
 
 // factory function for datalog trim via radosgw-admin
-RGWCoroutine* create_admin_data_log_trim_cr(RGWRados *store,
+RGWCoroutine* create_admin_data_log_trim_cr(rgw::sal::RGWRadosStore *store,
                                             RGWHTTPManager *http,
                                             int num_shards,
                                             std::vector<std::string>& markers);
index 5771b0770c97adde8939c33ef7b2b0cd7408e605..342c17fa91a6481fe8d9d4e47ec4f710317af0a3 100644 (file)
@@ -23,7 +23,7 @@
 
 /// purge all log shards for the given mdlog
 class PurgeLogShardsCR : public RGWShardCollectCR {
-  RGWRados *const store;
+  rgw::sal::RGWRadosStore *const store;
   const RGWMetadataLog* mdlog;
   const int num_shards;
   rgw_raw_obj obj;
@@ -32,7 +32,7 @@ class PurgeLogShardsCR : public RGWShardCollectCR {
   static constexpr int max_concurrent = 16;
 
  public:
-  PurgeLogShardsCR(RGWRados *store, const RGWMetadataLog* mdlog,
+  PurgeLogShardsCR(rgw::sal::RGWRadosStore *store, const RGWMetadataLog* mdlog,
                    const rgw_pool& pool, int num_shards)
     : RGWShardCollectCR(store->ctx(), max_concurrent),
       store(store), mdlog(mdlog), num_shards(num_shards), obj(pool, "")
@@ -56,7 +56,7 @@ class PurgePeriodLogsCR : public RGWCoroutine {
     RGWSI_Zone *zone;
     RGWSI_MDLog *mdlog;
   } svc;
-  RGWRados *const store;
+  rgw::sal::RGWRadosStore *const store;
   RGWMetadataManager *const metadata;
   RGWObjVersionTracker objv;
   Cursor cursor;
@@ -64,11 +64,11 @@ class PurgePeriodLogsCR : public RGWCoroutine {
   epoch_t *last_trim_epoch; //< update last trim on success
 
  public:
-  PurgePeriodLogsCR(RGWRados *store, epoch_t realm_epoch, epoch_t *last_trim)
-    : RGWCoroutine(store->ctx()), store(store), metadata(store->ctl.meta.mgr),
+  PurgePeriodLogsCR(rgw::sal::RGWRadosStore *store, epoch_t realm_epoch, epoch_t *last_trim)
+    : RGWCoroutine(store->ctx()), store(store), metadata(store->ctl()->meta.mgr),
       realm_epoch(realm_epoch), last_trim_epoch(last_trim) {
-    svc.zone = store->svc.zone;
-    svc.mdlog = store->svc.mdlog;
+    svc.zone = store->svc()->zone;
+    svc.mdlog = store->svc()->mdlog;
   }
 
   int operate() override;
@@ -137,14 +137,14 @@ using connection_map = std::map<std::string, std::unique_ptr<RGWRESTConn>>;
 
 /// construct a RGWRESTConn for each zone in the realm
 template <typename Zonegroups>
-connection_map make_peer_connections(RGWRados *store,
+connection_map make_peer_connections(rgw::sal::RGWRadosStore *store,
                                      const Zonegroups& zonegroups)
 {
   connection_map connections;
   for (auto& g : zonegroups) {
     for (auto& z : g.second.zones) {
       std::unique_ptr<RGWRESTConn> conn{
-        new RGWRESTConn(store->ctx(), store->svc.zone, z.first, z.second.endpoints)};
+        new RGWRESTConn(store->ctx(), store->svc()->zone, z.first, z.second.endpoints)};
       connections.emplace(z.first, std::move(conn));
     }
   }
@@ -203,17 +203,17 @@ int take_min_status(CephContext *cct, Iter first, Iter last,
 
 struct TrimEnv {
   const DoutPrefixProvider *dpp;
-  RGWRados *const store;
+  rgw::sal::RGWRadosStore *const store;
   RGWHTTPManager *const http;
   int num_shards;
   const std::string& zone;
   Cursor current; //< cursor to current period
   epoch_t last_trim_epoch{0}; //< epoch of last mdlog that was purged
 
-  TrimEnv(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http, int num_shards)
+  TrimEnv(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, RGWHTTPManager *http, int num_shards)
     : dpp(dpp), store(store), http(http), num_shards(num_shards),
-      zone(store->svc.zone->get_zone_params().get_id()),
-      current(store->svc.mdlog->get_period_history()->get_current())
+      zone(store->svc()->zone->get_zone_params().get_id()),
+      current(store->svc()->mdlog->get_period_history()->get_current())
   {}
 };
 
@@ -223,7 +223,7 @@ struct MasterTrimEnv : public TrimEnv {
   /// last trim marker for each shard, only applies to current period's mdlog
   std::vector<std::string> last_trim_markers;
 
-  MasterTrimEnv(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http, int num_shards)
+  MasterTrimEnv(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, RGWHTTPManager *http, int num_shards)
     : TrimEnv(dpp, store, http, num_shards),
       last_trim_markers(num_shards)
   {
@@ -238,7 +238,7 @@ struct PeerTrimEnv : public TrimEnv {
   /// last trim timestamp for each shard, only applies to current period's mdlog
   std::vector<ceph::real_time> last_trim_timestamps;
 
-  PeerTrimEnv(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http, int num_shards)
+  PeerTrimEnv(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, RGWHTTPManager *http, int num_shards)
     : TrimEnv(dpp, store, http, num_shards),
       last_trim_timestamps(num_shards)
   {}
@@ -396,7 +396,7 @@ int MetaMasterTrimCR::operate()
 
       // if realm_epoch == current, trim mdlog based on markers
       if (epoch == env.current.get_epoch()) {
-        auto mdlog = store->svc.mdlog->get_log(env.current.get_period().get_id());
+        auto mdlog = store->svc()->mdlog->get_log(env.current.get_period().get_id());
         spawn(new MetaMasterTrimShardCollectCR(env, mdlog, min_status), true);
       }
     }
@@ -526,9 +526,9 @@ class MetaPeerTrimShardCollectCR : public RGWShardCollectCR {
     : RGWShardCollectCR(env.store->ctx(), MAX_CONCURRENT_SHARDS),
       env(env), mdlog(mdlog), period_id(env.current.get_period().get_id())
   {
-    meta_env.init(env.dpp, cct, env.store, env.store->svc.zone->get_master_conn(),
-                  env.store->svc.rados->get_async_processor(), env.http, nullptr,
-                  env.store->get_sync_tracer());
+    meta_env.init(env.dpp, cct, env.store, env.store->svc()->zone->get_master_conn(),
+                  env.store->svc()->rados->get_async_processor(), env.http, nullptr,
+                  env.store->getRados()->get_sync_tracer());
   }
 
   bool spawn_next() override;
@@ -568,7 +568,7 @@ int MetaPeerTrimCR::operate()
       };
 
       using LogInfoCR = RGWReadRESTResourceCR<rgw_mdlog_info>;
-      call(new LogInfoCR(cct, env.store->svc.zone->get_master_conn(), env.http,
+      call(new LogInfoCR(cct, env.store->svc()->zone->get_master_conn(), env.http,
                          "/admin/log/", params, &mdlog_info));
     }
     if (retcode < 0) {
@@ -590,7 +590,7 @@ int MetaPeerTrimCR::operate()
     // if realm_epoch == current, trim mdlog based on master's markers
     if (mdlog_info.realm_epoch == env.current.get_epoch()) {
       yield {
-        auto mdlog = env.store->svc.mdlog->get_log(env.current.get_period().get_id());
+        auto mdlog = env.store->svc()->mdlog->get_log(env.current.get_period().get_id());
         call(new MetaPeerTrimShardCollectCR(env, mdlog));
         // ignore any errors during purge/trim because we want to hold the lock open
       }
@@ -601,7 +601,7 @@ int MetaPeerTrimCR::operate()
 }
 
 class MetaTrimPollCR : public RGWCoroutine {
-  RGWRados *const store;
+  rgw::sal::RGWRadosStore *const store;
   const utime_t interval; //< polling interval
   const rgw_raw_obj obj;
   const std::string name{"meta_trim"}; //< lock name
@@ -612,9 +612,9 @@ class MetaTrimPollCR : public RGWCoroutine {
   virtual RGWCoroutine* alloc_cr() = 0;
 
  public:
-  MetaTrimPollCR(RGWRados *store, utime_t interval)
+  MetaTrimPollCR(rgw::sal::RGWRadosStore *store, utime_t interval)
     : RGWCoroutine(store->ctx()), store(store), interval(interval),
-      obj(store->svc.zone->get_zone_params().log_pool, RGWMetadataLogHistory::oid),
+      obj(store->svc()->zone->get_zone_params().log_pool, RGWMetadataLogHistory::oid),
       cookie(RGWSimpleRadosLockCR::gen_random_cookie(cct))
   {}
 
@@ -630,7 +630,7 @@ int MetaTrimPollCR::operate()
 
       // prevent others from trimming for our entire wait interval
       set_status("acquiring trim lock");
-      yield call(new RGWSimpleRadosLockCR(store->svc.rados->get_async_processor(), store,
+      yield call(new RGWSimpleRadosLockCR(store->svc()->rados->get_async_processor(), store,
                                           obj, name, cookie, interval.sec()));
       if (retcode < 0) {
         ldout(cct, 4) << "failed to lock: " << cpp_strerror(retcode) << dendl;
@@ -643,7 +643,7 @@ int MetaTrimPollCR::operate()
       if (retcode < 0) {
         // on errors, unlock so other gateways can try
         set_status("unlocking");
-        yield call(new RGWSimpleRadosUnlockCR(store->svc.rados->get_async_processor(), store,
+        yield call(new RGWSimpleRadosUnlockCR(store->svc()->rados->get_async_processor(), store,
                                               obj, name, cookie));
       }
     }
@@ -657,7 +657,7 @@ class MetaMasterTrimPollCR : public MetaTrimPollCR  {
     return new MetaMasterTrimCR(env);
   }
  public:
-  MetaMasterTrimPollCR(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http,
+  MetaMasterTrimPollCR(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, RGWHTTPManager *http,
                        int num_shards, utime_t interval)
     : MetaTrimPollCR(store, interval),
       env(dpp, store, http, num_shards)
@@ -670,17 +670,17 @@ class MetaPeerTrimPollCR : public MetaTrimPollCR {
     return new MetaPeerTrimCR(env);
   }
  public:
-  MetaPeerTrimPollCR(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http,
+  MetaPeerTrimPollCR(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, RGWHTTPManager *http,
                      int num_shards, utime_t interval)
     : MetaTrimPollCR(store, interval),
       env(dpp, store, http, num_shards)
   {}
 };
 
-RGWCoroutine* create_meta_log_trim_cr(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http,
+RGWCoroutine* create_meta_log_trim_cr(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, RGWHTTPManager *http,
                                       int num_shards, utime_t interval)
 {
-  if (store->svc.zone->is_meta_master()) {
+  if (store->svc()->zone->is_meta_master()) {
     return new MetaMasterTrimPollCR(dpp, store, http, num_shards, interval);
   }
   return new MetaPeerTrimPollCR(dpp, store, http, num_shards, interval);
@@ -688,24 +688,24 @@ RGWCoroutine* create_meta_log_trim_cr(const DoutPrefixProvider *dpp, RGWRados *s
 
 
 struct MetaMasterAdminTrimCR : private MasterTrimEnv, public MetaMasterTrimCR {
-  MetaMasterAdminTrimCR(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http, int num_shards)
+  MetaMasterAdminTrimCR(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, RGWHTTPManager *http, int num_shards)
     : MasterTrimEnv(dpp, store, http, num_shards),
       MetaMasterTrimCR(*static_cast<MasterTrimEnv*>(this))
   {}
 };
 
 struct MetaPeerAdminTrimCR : private PeerTrimEnv, public MetaPeerTrimCR {
-  MetaPeerAdminTrimCR(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http, int num_shards)
+  MetaPeerAdminTrimCR(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, RGWHTTPManager *http, int num_shards)
     : PeerTrimEnv(dpp, store, http, num_shards),
       MetaPeerTrimCR(*static_cast<PeerTrimEnv*>(this))
   {}
 };
 
-RGWCoroutine* create_admin_meta_log_trim_cr(const DoutPrefixProvider *dpp, RGWRados *store,
+RGWCoroutine* create_admin_meta_log_trim_cr(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store,
                                             RGWHTTPManager *http,
                                             int num_shards)
 {
-  if (store->svc.zone->is_meta_master()) {
+  if (store->svc()->zone->is_meta_master()) {
     return new MetaMasterAdminTrimCR(dpp, store, http, num_shards);
   }
   return new MetaPeerAdminTrimCR(dpp, store, http, num_shards);
index 79465668f87edf765643ef5790b9ffc708115982..8e5c3de70235e126ab5f57d388fe966422ddfb43 100644 (file)
@@ -8,15 +8,18 @@ class DoutPrefixProvider;
 class RGWRados;
 class RGWHTTPManager;
 class utime_t;
+namespace rgw { namespace sal {
+  class RGWRadosStore;
+} }
 
 // MetaLogTrimCR factory function
 RGWCoroutine* create_meta_log_trim_cr(const DoutPrefixProvider *dpp,
-                                      RGWRados *store,
+                                      rgw::sal::RGWRadosStore *store,
                                       RGWHTTPManager *http,
                                       int num_shards, utime_t interval);
 
 // factory function for mdlog trim via radosgw-admin
 RGWCoroutine* create_admin_meta_log_trim_cr(const DoutPrefixProvider *dpp,
-                                            RGWRados *store,
+                                            rgw::sal::RGWRadosStore *store,
                                             RGWHTTPManager *http,
                                             int num_shards);
index bf8990f4538a57372b0c5c03b1a7f404ba198feb..3308828ba9902dba2eb377d4a7b49d4197b9b9e4 100644 (file)
@@ -11,7 +11,7 @@
 #include "common/Formatter.h"
 #include "common/ceph_json.h"
 #include "common/RWLock.h"
-#include "rgw_rados.h"
+#include "rgw_sal.h"
 #include "rgw_zone.h"
 #include "rgw_acl.h"
 
@@ -47,18 +47,18 @@ void rgw_get_anon_user(RGWUserInfo& info)
   info.access_keys.clear();
 }
 
-int rgw_user_sync_all_stats(RGWRados *store, const rgw_user& user_id)
+int rgw_user_sync_all_stats(rgw::sal::RGWRadosStore *store, const rgw_user& user_id)
 {
   CephContext *cct = store->ctx();
   size_t max_entries = cct->_conf->rgw_list_buckets_max_chunk;
   bool is_truncated = false;
   string marker;
   int ret;
-  RGWSysObjectCtx obj_ctx = store->svc.sysobj->init_obj_ctx();
+  RGWSysObjectCtx obj_ctx = store->svc()->sysobj->init_obj_ctx();
 
   do {
     RGWUserBuckets user_buckets;
-    ret = store->ctl.user->list_buckets(user_id,
+    ret = store->ctl()->user->list_buckets(user_id,
                                         marker, string(),
                                         max_entries,
                                         false,
@@ -81,26 +81,26 @@ int rgw_user_sync_all_stats(RGWRados *store, const rgw_user& user_id)
       bucket.tenant = user_id.tenant;
       bucket.name = bucket_ent.bucket.name;
 
-      ret = store->get_bucket_info(obj_ctx, user_id.tenant, bucket_ent.bucket.name,
+      ret = store->getRados()->get_bucket_info(obj_ctx, user_id.tenant, bucket_ent.bucket.name,
                                    bucket_info, nullptr, null_yield, nullptr);
       if (ret < 0) {
         ldout(cct, 0) << "ERROR: could not read bucket info: bucket=" << bucket_ent.bucket << " ret=" << ret << dendl;
         continue;
       }
-      ret = store->ctl.bucket->sync_user_stats(user_id, bucket_info);
+      ret = store->ctl()->bucket->sync_user_stats(user_id, bucket_info);
       if (ret < 0) {
         ldout(cct, 0) << "ERROR: could not sync bucket stats: ret=" << ret << dendl;
         return ret;
       }
       RGWQuotaInfo bucket_quota;
-      ret = store->check_bucket_shards(bucket_info, bucket_info.bucket, bucket_quota);
+      ret = store->getRados()->check_bucket_shards(bucket_info, bucket_info.bucket, bucket_quota);
       if (ret < 0) {
        ldout(cct, 0) << "ERROR in check_bucket_shards: " << cpp_strerror(-ret)<< dendl;
       }
     }
   } while (is_truncated);
 
-  ret = store->ctl.user->complete_flush_stats(user_id);
+  ret = store->ctl()->user->complete_flush_stats(user_id);
   if (ret < 0) {
     cerr << "ERROR: failed to complete syncing user stats: ret=" << ret << std::endl;
     return ret;
@@ -109,7 +109,7 @@ int rgw_user_sync_all_stats(RGWRados *store, const rgw_user& user_id)
   return 0;
 }
 
-int rgw_user_get_all_buckets_stats(RGWRados *store, const rgw_user& user_id, map<string, cls_user_bucket_entry>& buckets_usage_map)
+int rgw_user_get_all_buckets_stats(rgw::sal::RGWRadosStore *store, const rgw_user& user_id, map<string, cls_user_bucket_entry>& buckets_usage_map)
 {
   CephContext *cct = store->ctx();
   size_t max_entries = cct->_conf->rgw_list_buckets_max_chunk;
@@ -132,7 +132,7 @@ int rgw_user_get_all_buckets_stats(RGWRados *store, const rgw_user& user_id, map
 
       const RGWBucketEnt& bucket_ent = i.second;
       RGWBucketEnt stats;
-      ret = store->ctl.bucket->read_bucket_stats(bucket_ent.bucket, &stats, null_yield);
+      ret = store->ctl()->bucket->read_bucket_stats(bucket_ent.bucket, &stats, null_yield);
       if (ret < 0) {
         ldout(cct, 0) << "ERROR: could not get bucket stats: ret=" << ret << dendl;
         return ret;
@@ -1386,7 +1386,7 @@ RGWUser::RGWUser() : caps(this), keys(this), subusers(this)
   init_default();
 }
 
-int RGWUser::init(RGWRados *storage, RGWUserAdminOpState& op_state)
+int RGWUser::init(rgw::sal::RGWRadosStore *storage, RGWUserAdminOpState& op_state)
 {
   init_default();
   int ret = init_storage(storage);
@@ -1409,14 +1409,14 @@ void RGWUser::init_default()
   clear_populated();
 }
 
-int RGWUser::init_storage(RGWRados *storage)
+int RGWUser::init_storage(rgw::sal::RGWRadosStore *storage)
 {
   if (!storage) {
     return -EINVAL;
   }
 
   store = storage;
-  user_ctl = store->ctl.user;
+  user_ctl = store->ctl()->user;
 
   clear_populated();
 
@@ -1655,7 +1655,7 @@ int RGWUser::execute_rename(RGWUserAdminOpState& op_state, std::string *err_msg)
   string obj_marker;
   CephContext *cct = store->ctx();
   size_t max_buckets = cct->_conf->rgw_list_buckets_max_chunk;
-  RGWBucketCtl* bucket_ctl = store->ctl.bucket;
+  RGWBucketCtl* bucket_ctl = store->ctl()->bucket;
 
   do {
     RGWUserBuckets buckets;
@@ -2086,7 +2086,7 @@ int RGWUser::execute_modify(RGWUserAdminOpState& op_state, std::string *err_msg)
         marker = iter->first;
       }
 
-      ret = store->set_buckets_enabled(bucket_names, !suspended);
+      ret = store->getRados()->set_buckets_enabled(bucket_names, !suspended);
       if (ret < 0) {
         set_err_msg(err_msg, "failed to modify bucket");
         return ret;
@@ -2170,7 +2170,7 @@ int RGWUser::list(RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher)
     op_state.max_entries = 1000;
   }
 
-  auto meta_mgr = store->ctl.meta.mgr;
+  auto meta_mgr = store->ctl()->meta.mgr;
 
   int ret = meta_mgr->list_keys_init(metadata_key, op_state.marker, &handle);
   if (ret < 0) {
@@ -2218,7 +2218,7 @@ int RGWUser::list(RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher)
   return 0;
 }
 
-int RGWUserAdminOp_User::list(RGWRados *store, RGWUserAdminOpState& op_state,
+int RGWUserAdminOp_User::list(rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state,
                   RGWFormatterFlusher& flusher)
 {
   RGWUser user;
@@ -2234,7 +2234,7 @@ int RGWUserAdminOp_User::list(RGWRados *store, RGWUserAdminOpState& op_state,
   return 0;
 }
 
-int RGWUserAdminOp_User::info(RGWRados *store, RGWUserAdminOpState& op_state,
+int RGWUserAdminOp_User::info(rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state,
                   RGWFormatterFlusher& flusher)
 {
   RGWUserInfo info;
@@ -2263,7 +2263,7 @@ int RGWUserAdminOp_User::info(RGWRados *store, RGWUserAdminOpState& op_state,
   RGWStorageStats stats;
   RGWStorageStats *arg_stats = NULL;
   if (op_state.fetch_stats) {
-    int ret = store->ctl.user->read_stats(info.user_id, &stats);
+    int ret = store->ctl()->user->read_stats(info.user_id, &stats);
     if (ret < 0 && ret != -ENOENT) {
       return ret;
     }
@@ -2281,7 +2281,7 @@ int RGWUserAdminOp_User::info(RGWRados *store, RGWUserAdminOpState& op_state,
   return 0;
 }
 
-int RGWUserAdminOp_User::create(RGWRados *store, RGWUserAdminOpState& op_state,
+int RGWUserAdminOp_User::create(rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state,
                   RGWFormatterFlusher& flusher)
 {
   RGWUserInfo info;
@@ -2313,7 +2313,7 @@ int RGWUserAdminOp_User::create(RGWRados *store, RGWUserAdminOpState& op_state,
   return 0;
 }
 
-int RGWUserAdminOp_User::modify(RGWRados *store, RGWUserAdminOpState& op_state,
+int RGWUserAdminOp_User::modify(rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state,
                   RGWFormatterFlusher& flusher)
 {
   RGWUserInfo info;
@@ -2344,7 +2344,7 @@ int RGWUserAdminOp_User::modify(RGWRados *store, RGWUserAdminOpState& op_state,
   return 0;
 }
 
-int RGWUserAdminOp_User::remove(RGWRados *store, RGWUserAdminOpState& op_state,
+int RGWUserAdminOp_User::remove(rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state,
                   RGWFormatterFlusher& flusher, optional_yield y)
 {
   RGWUserInfo info;
@@ -2361,7 +2361,7 @@ int RGWUserAdminOp_User::remove(RGWRados *store, RGWUserAdminOpState& op_state,
   return ret;
 }
 
-int RGWUserAdminOp_Subuser::create(RGWRados *store, RGWUserAdminOpState& op_state,
+int RGWUserAdminOp_Subuser::create(rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state,
                   RGWFormatterFlusher& flusher)
 {
   RGWUserInfo info;
@@ -2393,7 +2393,7 @@ int RGWUserAdminOp_Subuser::create(RGWRados *store, RGWUserAdminOpState& op_stat
   return 0;
 }
 
-int RGWUserAdminOp_Subuser::modify(RGWRados *store, RGWUserAdminOpState& op_state,
+int RGWUserAdminOp_Subuser::modify(rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state,
                   RGWFormatterFlusher& flusher)
 {
   RGWUserInfo info;
@@ -2425,7 +2425,7 @@ int RGWUserAdminOp_Subuser::modify(RGWRados *store, RGWUserAdminOpState& op_stat
   return 0;
 }
 
-int RGWUserAdminOp_Subuser::remove(RGWRados *store, RGWUserAdminOpState& op_state,
+int RGWUserAdminOp_Subuser::remove(rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state,
                   RGWFormatterFlusher& flusher)
 {
   RGWUserInfo info;
@@ -2445,7 +2445,7 @@ int RGWUserAdminOp_Subuser::remove(RGWRados *store, RGWUserAdminOpState& op_stat
   return 0;
 }
 
-int RGWUserAdminOp_Key::create(RGWRados *store, RGWUserAdminOpState& op_state,
+int RGWUserAdminOp_Key::create(rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state,
                   RGWFormatterFlusher& flusher)
 {
   RGWUserInfo info;
@@ -2484,7 +2484,7 @@ int RGWUserAdminOp_Key::create(RGWRados *store, RGWUserAdminOpState& op_state,
   return 0;
 }
 
-int RGWUserAdminOp_Key::remove(RGWRados *store, RGWUserAdminOpState& op_state,
+int RGWUserAdminOp_Key::remove(rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state,
                   RGWFormatterFlusher& flusher)
 {
   RGWUserInfo info;
@@ -2504,7 +2504,7 @@ int RGWUserAdminOp_Key::remove(RGWRados *store, RGWUserAdminOpState& op_state,
   return 0;
 }
 
-int RGWUserAdminOp_Caps::add(RGWRados *store, RGWUserAdminOpState& op_state,
+int RGWUserAdminOp_Caps::add(rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state,
                   RGWFormatterFlusher& flusher)
 {
   RGWUserInfo info;
@@ -2537,7 +2537,7 @@ int RGWUserAdminOp_Caps::add(RGWRados *store, RGWUserAdminOpState& op_state,
 }
 
 
-int RGWUserAdminOp_Caps::remove(RGWRados *store, RGWUserAdminOpState& op_state,
+int RGWUserAdminOp_Caps::remove(rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state,
                   RGWFormatterFlusher& flusher)
 {
   RGWUserInfo info;
index b043702d9d810e905835b392272ad40002006c26..8391a4b3128b778ad88307d34727364cc9df8008 100644 (file)
 
 #define XMLNS_AWS_S3 "http://s3.amazonaws.com/doc/2006-03-01/"
 
-class RGWRados;
 class RGWUserCtl;
 class RGWBucketCtl;
 class RGWUserBuckets;
 
 class RGWGetUserStats_CB;
+namespace rgw { namespace sal {
+class RGWRadosStore;
+} }
 
 /**
  * A string wrapper that includes encode/decode functions
@@ -55,8 +57,8 @@ struct RGWUID
 };
 WRITE_CLASS_ENCODER(RGWUID)
 
-extern int rgw_user_sync_all_stats(RGWRados *store, const rgw_user& user_id);
-extern int rgw_user_get_all_buckets_stats(RGWRados *store, const rgw_user& user_id, map<string, cls_user_bucket_entry>&buckets_usage_map);
+extern int rgw_user_sync_all_stats(rgw::sal::RGWRadosStore *store, const rgw_user& user_id);
+extern int rgw_user_get_all_buckets_stats(rgw::sal::RGWRadosStore *store, const rgw_user& user_id, map<string, cls_user_bucket_entry>&buckets_usage_map);
 
 /**
  * Get the anonymous (ie, unauthenticated) user info.
@@ -552,7 +554,7 @@ class RGWAccessKeyPool
 
   std::map<std::string, int, ltstr_nocase> key_type_map;
   rgw_user user_id;
-  RGWRados *store{nullptr};
+  rgw::sal::RGWRadosStore *store{nullptr};
   RGWUserCtl *user_ctl{nullptr};
 
   map<std::string, RGWAccessKey> *swift_keys{nullptr};
@@ -595,7 +597,7 @@ class RGWSubUserPool
   RGWUser *user{nullptr};
 
   rgw_user user_id;
-  RGWRados *store{nullptr};
+  rgw::sal::RGWRadosStore *store{nullptr};
   RGWUserCtl *user_ctl{nullptr};
   bool subusers_allowed{false};
 
@@ -653,7 +655,7 @@ class RGWUser
 
 private:
   RGWUserInfo old_info;
-  RGWRados *store{nullptr};
+  rgw::sal::RGWRadosStore *store{nullptr};
   RGWUserCtl *user_ctl{nullptr};
 
   rgw_user user_id;
@@ -679,13 +681,13 @@ private:
 public:
   RGWUser();
 
-  int init(RGWRados *storage, RGWUserAdminOpState& op_state);
+  int init(rgw::sal::RGWRadosStore *storage, RGWUserAdminOpState& op_state);
 
-  int init_storage(RGWRados *storage);
+  int init_storage(rgw::sal::RGWRadosStore *storage);
   int init(RGWUserAdminOpState& op_state);
   int init_members(RGWUserAdminOpState& op_state);
 
-  RGWRados *get_store() { return store; }
+  rgw::sal::RGWRadosStore *get_store() { return store; }
   RGWUserCtl *get_user_ctl() { return user_ctl; }
 
   /* API Contracted Members */
@@ -724,52 +726,52 @@ public:
 class RGWUserAdminOp_User
 {
 public:
-  static int list(RGWRados *store,
+  static int list(rgw::sal::RGWRadosStore *store,
                   RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher);
 
-  static int info(RGWRados *store,
+  static int info(rgw::sal::RGWRadosStore *store,
                   RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher);
 
-  static int create(RGWRados *store,
+  static int create(rgw::sal::RGWRadosStore *store,
                   RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher);
 
-  static int modify(RGWRados *store,
+  static int modify(rgw::sal::RGWRadosStore *store,
                   RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher);
 
-  static int remove(RGWRados *store,
+  static int remove(rgw::sal::RGWRadosStore *store,
                   RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y);
 };
 
 class RGWUserAdminOp_Subuser
 {
 public:
-  static int create(RGWRados *store,
+  static int create(rgw::sal::RGWRadosStore *store,
                   RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher);
 
-  static int modify(RGWRados *store,
+  static int modify(rgw::sal::RGWRadosStore *store,
                   RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher);
 
-  static int remove(RGWRados *store,
+  static int remove(rgw::sal::RGWRadosStore *store,
                   RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher);
 };
 
 class RGWUserAdminOp_Key
 {
 public:
-  static int create(RGWRados *store,
+  static int create(rgw::sal::RGWRadosStore *store,
                   RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher);
 
-  static int remove(RGWRados *store,
+  static int remove(rgw::sal::RGWRadosStore *store,
                   RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher);
 };
 
 class RGWUserAdminOp_Caps
 {
 public:
-  static int add(RGWRados *store,
+  static int add(rgw::sal::RGWRadosStore *store,
                  RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher);
 
-  static int remove(RGWRados *store,
+  static int remove(rgw::sal::RGWRadosStore *store,
                  RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher);
 };
 
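The header change above follows a simple rule: rgw_user.h only ever holds pointers to the store, so the old RGWRados forward declaration is replaced by a forward declaration of rgw::sal::RGWRadosStore and the full definition stays out of the header. A sketch of the same idiom for any other consumer header (file and class names below are hypothetical):

// zipper_consumer.h -- hypothetical header
namespace rgw { namespace sal {
class RGWRadosStore;   // incomplete type is enough for pointer members and signatures
} }

class ZipperConsumer {
  rgw::sal::RGWRadosStore *store{nullptr};
public:
  explicit ZipperConsumer(rgw::sal::RGWRadosStore *_store) : store(_store) {}
  int run();           // defined in the .cc below
};

// zipper_consumer.cc -- hypothetical source file
// #include "rgw_sal.h"   // full definition is needed only where the store is dereferenced
// int ZipperConsumer::run() { /* store->... */ return 0; }
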
index bfe3631e73ee80b626a3dcc03552ceba4ac3d0d8..812b037d98775840d1188423e4a78e2f157015ab 100644 (file)
--- a/src/rgw/rgw_zone.cc
+++ b/src/rgw/rgw_zone.cc
@@ -1340,10 +1340,10 @@ void RGWPeriod::fork()
   realm_epoch++;
 }
 
-static int read_sync_status(RGWRados *store, rgw_meta_sync_status *sync_status)
+static int read_sync_status(rgw::sal::RGWRadosStore *store, rgw_meta_sync_status *sync_status)
 {
   // initialize a sync status manager to read the status
-  RGWMetaSyncStatusManager mgr(store, store->svc.rados->get_async_processor());
+  RGWMetaSyncStatusManager mgr(store, store->svc()->rados->get_async_processor());
   int r = mgr.init();
   if (r < 0) {
     return r;
@@ -1353,7 +1353,7 @@ static int read_sync_status(RGWRados *store, rgw_meta_sync_status *sync_status)
   return r;
 }
 
-int RGWPeriod::update_sync_status(RGWRados *store, /* for now */
+int RGWPeriod::update_sync_status(rgw::sal::RGWRadosStore *store, /* for now */
                                  const RGWPeriod &current_period,
                                   std::ostream& error_stream,
                                   bool force_if_stale)
@@ -1403,7 +1403,7 @@ int RGWPeriod::update_sync_status(RGWRados *store, /* for now */
   return 0;
 }
 
-int RGWPeriod::commit(RGWRados *store,
+int RGWPeriod::commit(rgw::sal::RGWRadosStore *store,
                      RGWRealm& realm, const RGWPeriod& current_period,
                       std::ostream& error_stream, bool force_if_stale)
 {
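Beyond the signature swaps, the notable change in the RGWPeriod hunks above is at the read_sync_status call site: the services block that used to be reached as the public member store->svc is now reached through an accessor, store->svc()->rados. The sketch below only illustrates the kind of delegation that implies; it is not the contents of rgw_sal.h, and every name other than svc() (which appears in the hunk) is an assumption.

class RGWRados;       // legacy store being wrapped
struct RGWServices;   // service container, reached as store->svc before this change

namespace rgw { namespace sal {

class RGWRadosStore {
  RGWRados *rados{nullptr};
public:
  void setRados(RGWRados *r) { rados = r; }  // assumed initialization hook
  RGWRados *getRados() { return rados; }     // assumed escape hatch for not-yet-ported code
  RGWServices *svc();                        // plausibly forwards to the wrapped RGWRados' services
};

} } // namespace rgw::sal
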
index 23b4089e17ed51a94e86062740d7476a4c981f73..7303984d1ebf00fae4f0e2483547e4b04fddaf3a 100644 (file)
--- a/src/rgw/rgw_zone.h
+++ b/src/rgw/rgw_zone.h
@@ -1026,7 +1026,7 @@ class RGWPeriod
   const std::string get_period_oid_prefix() const;
 
   // gather the metadata sync status for each shard; only for use on master zone
-  int update_sync_status(RGWRados *store,
+  int update_sync_status(rgw::sal::RGWRadosStore *store,
                          const RGWPeriod &current_period,
                          std::ostream& error_stream, bool force_if_stale);
 
@@ -1120,7 +1120,7 @@ public:
   int update();
 
   // commit a staging period; only for use on master zone
-  int commit(RGWRados *store,
+  int commit(rgw::sal::RGWRadosStore *store,
              RGWRealm& realm, const RGWPeriod &current_period,
              std::ostream& error_stream, bool force_if_stale = false);
 
index 7f9ca6d39fd029f6aff53de02b8ff3bf9c64e7a3..ca8c3d921c6dc054de4029ed720c37b29f279691 100644 (file)
--- a/src/test/rgw/test_rgw_iam_policy.cc
+++ b/src/test/rgw/test_rgw_iam_policy.cc
@@ -820,11 +820,11 @@ TEST_F(IPPolicyTest, IPEnvironment) {
   // Unfortunately RGWCivetWeb is too tightly tied to civetweb to test RGWCivetWeb::init_env.
   RGWEnv rgw_env;
   RGWUserInfo user;
-  RGWRados rgw_rados;
+  rgw::sal::RGWRadosStore store;
   rgw_env.set("REMOTE_ADDR", "192.168.1.1");
   rgw_env.set("HTTP_HOST", "1.2.3.4");
   req_state rgw_req_state(cct.get(), &rgw_env, &user, 0);
-  rgw_build_iam_environment(&rgw_rados, &rgw_req_state);
+  rgw_build_iam_environment(&store, &rgw_req_state);
   auto ip = rgw_req_state.env.find("aws:SourceIp");
   ASSERT_NE(ip, rgw_req_state.env.end());
   EXPECT_EQ(ip->second, "192.168.1.1");
@@ -832,13 +832,13 @@ TEST_F(IPPolicyTest, IPEnvironment) {
   ASSERT_EQ(cct.get()->_conf.set_val("rgw_remote_addr_param", "SOME_VAR"), 0);
   EXPECT_EQ(cct.get()->_conf->rgw_remote_addr_param, "SOME_VAR");
   rgw_req_state.env.clear();
-  rgw_build_iam_environment(&rgw_rados, &rgw_req_state);
+  rgw_build_iam_environment(&store, &rgw_req_state);
   ip = rgw_req_state.env.find("aws:SourceIp");
   EXPECT_EQ(ip, rgw_req_state.env.end());
 
   rgw_env.set("SOME_VAR", "192.168.1.2");
   rgw_req_state.env.clear();
-  rgw_build_iam_environment(&rgw_rados, &rgw_req_state);
+  rgw_build_iam_environment(&store, &rgw_req_state);
   ip = rgw_req_state.env.find("aws:SourceIp");
   ASSERT_NE(ip, rgw_req_state.env.end());
   EXPECT_EQ(ip->second, "192.168.1.2");
@@ -846,14 +846,14 @@ TEST_F(IPPolicyTest, IPEnvironment) {
   ASSERT_EQ(cct.get()->_conf.set_val("rgw_remote_addr_param", "HTTP_X_FORWARDED_FOR"), 0);
   rgw_env.set("HTTP_X_FORWARDED_FOR", "192.168.1.3");
   rgw_req_state.env.clear();
-  rgw_build_iam_environment(&rgw_rados, &rgw_req_state);
+  rgw_build_iam_environment(&store, &rgw_req_state);
   ip = rgw_req_state.env.find("aws:SourceIp");
   ASSERT_NE(ip, rgw_req_state.env.end());
   EXPECT_EQ(ip->second, "192.168.1.3");
 
   rgw_env.set("HTTP_X_FORWARDED_FOR", "192.168.1.4, 4.3.2.1, 2001:db8:85a3:8d3:1319:8a2e:370:7348");
   rgw_req_state.env.clear();
-  rgw_build_iam_environment(&rgw_rados, &rgw_req_state);
+  rgw_build_iam_environment(&store, &rgw_req_state);
   ip = rgw_req_state.env.find("aws:SourceIp");
   ASSERT_NE(ip, rgw_req_state.env.end());
   EXPECT_EQ(ip->second, "192.168.1.4");
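
One thing the test change demonstrates: the store is default-constructed on the stack with no cluster behind it and the assertions are expected to keep passing, which suggests that for these source-IP paths rgw_build_iam_environment consults only the request environment and configuration; the SAL store is threaded through for signature compatibility rather than for any RADOS access.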