From 1fac0a75eefa224c691aa9468180448970845cdd Mon Sep 17 00:00:00 2001 From: Daniel Gryniewicz Date: Tue, 29 Nov 2022 12:31:01 -0500 Subject: [PATCH] RGW - Zipper - Rename rgw::sal::Store to rgw::sal::Driver All along, we've had an overload of the term "store". It was the base class of the entrypoint, and it was the lowest layer in a stack. This renames the base class to Driver. So, the primary elements of zipper that to work are now Drivers, and they come in two different flavors: Filters live in the middle of the stack, and do not need to implement every API, but pass through instead. And Stores live a the bottom of the stack, and must implement the entire API. Signed-off-by: Daniel Gryniewicz --- src/rgw/rgw_acl_s3.cc | 22 +- src/rgw/rgw_acl_s3.h | 4 +- src/rgw/rgw_acl_swift.cc | 28 +- src/rgw/rgw_acl_swift.h | 8 +- src/rgw/rgw_admin.cc | 492 +++++++++--------- src/rgw/rgw_appmain.cc | 64 +-- src/rgw/rgw_asio_frontend.cc | 26 +- src/rgw/rgw_asio_frontend.h | 2 +- src/rgw/rgw_auth.cc | 10 +- src/rgw/rgw_auth.h | 12 +- src/rgw/rgw_auth_filters.h | 26 +- src/rgw/rgw_auth_registry.h | 20 +- src/rgw/rgw_auth_s3.h | 42 +- src/rgw/rgw_file.cc | 26 +- src/rgw/rgw_file.h | 82 +-- src/rgw/rgw_frontend.h | 20 +- src/rgw/rgw_lc.cc | 78 +-- src/rgw/rgw_lc.h | 8 +- src/rgw/rgw_lib.cc | 16 +- src/rgw/rgw_lib.h | 26 +- src/rgw/rgw_loadgen_process.cc | 4 +- src/rgw/rgw_log.cc | 14 +- src/rgw/rgw_log.h | 8 +- src/rgw/rgw_lua.cc | 20 +- src/rgw/rgw_lua.h | 10 +- src/rgw/rgw_lua_background.cc | 8 +- src/rgw/rgw_lua_background.h | 4 +- src/rgw/rgw_lua_request.cc | 6 +- src/rgw/rgw_lua_request.h | 2 +- src/rgw/rgw_main.cc | 2 +- src/rgw/rgw_main.h | 14 +- src/rgw/rgw_object_expirer.cc | 22 +- src/rgw/rgw_op.cc | 236 ++++----- src/rgw/rgw_op.h | 84 +-- src/rgw/rgw_os_lib.cc | 6 +- src/rgw/rgw_period_pusher.cc | 30 +- src/rgw/rgw_period_pusher.h | 6 +- src/rgw/rgw_process.cc | 26 +- src/rgw/rgw_process.h | 20 +- src/rgw/rgw_quota.cc | 86 +-- src/rgw/rgw_quota.h | 2 +- 
src/rgw/rgw_rados.cc | 114 ++-- src/rgw/rgw_rados.h | 18 +- src/rgw/rgw_realm_reloader.cc | 54 +- src/rgw/rgw_realm_reloader.h | 8 +- src/rgw/rgw_rest.cc | 10 +- src/rgw/rgw_rest.h | 14 +- src/rgw/rgw_rest_bucket.cc | 28 +- src/rgw/rgw_rest_bucket.h | 2 +- src/rgw/rgw_rest_config.cc | 2 +- src/rgw/rgw_rest_config.h | 4 +- src/rgw/rgw_rest_conn.cc | 8 +- src/rgw/rgw_rest_conn.h | 6 +- src/rgw/rgw_rest_iam.cc | 8 +- src/rgw/rgw_rest_iam.h | 4 +- src/rgw/rgw_rest_info.cc | 4 +- src/rgw/rgw_rest_info.h | 2 +- src/rgw/rgw_rest_log.cc | 72 +-- src/rgw/rgw_rest_log.h | 2 +- src/rgw/rgw_rest_metadata.cc | 14 +- src/rgw/rgw_rest_metadata.h | 2 +- src/rgw/rgw_rest_oidc_provider.cc | 8 +- src/rgw/rgw_rest_ratelimit.cc | 22 +- src/rgw/rgw_rest_ratelimit.h | 4 +- src/rgw/rgw_rest_role.cc | 42 +- src/rgw/rgw_rest_s3.cc | 122 ++--- src/rgw/rgw_rest_s3.h | 32 +- src/rgw/rgw_rest_s3website.h | 2 +- src/rgw/rgw_rest_sts.cc | 20 +- src/rgw/rgw_rest_sts.h | 24 +- src/rgw/rgw_rest_swift.cc | 76 +-- src/rgw/rgw_rest_swift.h | 54 +- src/rgw/rgw_rest_usage.cc | 12 +- src/rgw/rgw_rest_usage.h | 2 +- src/rgw/rgw_rest_user_policy.cc | 12 +- src/rgw/rgw_role.cc | 16 +- src/rgw/rgw_role.h | 12 +- src/rgw/rgw_sal.cc | 138 ++--- src/rgw/rgw_sal.h | 94 ++-- src/rgw/rgw_sal_daos.h | 2 +- src/rgw/rgw_sal_dbstore.cc | 26 +- src/rgw/rgw_sal_dbstore.h | 2 +- src/rgw/rgw_sal_filter.cc | 126 ++--- src/rgw/rgw_sal_filter.h | 8 +- src/rgw/rgw_sal_fwd.h | 2 +- src/rgw/rgw_sal_motr.h | 2 +- src/rgw/rgw_sal_store.h | 6 +- src/rgw/rgw_sts.cc | 10 +- src/rgw/rgw_sts.h | 8 +- src/rgw/rgw_swift_auth.cc | 18 +- src/rgw/rgw_swift_auth.h | 44 +- src/rgw/rgw_torrent.cc | 8 +- src/rgw/rgw_torrent.h | 4 +- src/rgw/rgw_usage.cc | 12 +- src/rgw/rgw_usage.h | 6 +- src/rgw/rgw_user.cc | 8 +- src/rgw/rgw_zone.cc | 2 +- src/rgw/store/dbstore/common/dbstore.h | 6 +- src/rgw/store/rados/rgw_bucket.cc | 258 ++++----- src/rgw/store/rados/rgw_bucket.h | 56 +- src/rgw/store/rados/rgw_data_sync.cc | 184 +++---- 
src/rgw/store/rados/rgw_data_sync.h | 42 +- src/rgw/store/rados/rgw_lc_tier.cc | 30 +- src/rgw/store/rados/rgw_lc_tier.h | 6 +- .../store/rados/rgw_object_expirer_core.cc | 20 +- src/rgw/store/rados/rgw_object_expirer_core.h | 14 +- src/rgw/store/rados/rgw_period.cc | 12 +- src/rgw/store/rados/rgw_rest_pubsub.cc | 32 +- src/rgw/store/rados/rgw_rest_realm.cc | 30 +- src/rgw/store/rados/rgw_rest_realm.h | 2 +- src/rgw/store/rados/rgw_rest_user.cc | 80 +-- src/rgw/store/rados/rgw_rest_user.h | 2 +- src/rgw/store/rados/rgw_sal_rados.cc | 2 +- src/rgw/store/rados/rgw_sal_rados.h | 6 +- src/rgw/store/rados/rgw_service.cc | 12 +- src/rgw/store/rados/rgw_service.h | 4 +- src/rgw/store/rados/rgw_sync_module.cc | 6 +- src/rgw/store/rados/rgw_sync_module.h | 2 +- src/rgw/store/rados/rgw_sync_module_aws.cc | 12 +- src/rgw/store/rados/rgw_sync_module_es.cc | 2 +- .../store/rados/rgw_sync_module_es_rest.cc | 6 +- src/rgw/store/rados/rgw_sync_module_es_rest.h | 2 +- src/rgw/store/rados/rgw_tools.cc | 22 +- src/rgw/store/rados/rgw_tools.h | 4 +- src/rgw/store/rados/rgw_user.cc | 118 ++--- src/rgw/store/rados/rgw_user.h | 42 +- src/rgw/store/rados/rgw_zone.cc | 6 +- src/rgw/store/rados/rgw_zone.h | 6 +- src/test/librgw_file_nfsns.cc | 2 +- 129 files changed, 2006 insertions(+), 2006 deletions(-) diff --git a/src/rgw/rgw_acl_s3.cc b/src/rgw/rgw_acl_s3.cc index 0d83ff13c2f74..9f71e32815006 100644 --- a/src/rgw/rgw_acl_s3.cc +++ b/src/rgw/rgw_acl_s3.cc @@ -293,7 +293,7 @@ static const char *get_acl_header(const RGWEnv *env, return env->get(header, NULL); } -static int parse_grantee_str(const DoutPrefixProvider *dpp, rgw::sal::Store* store, string& grantee_str, +static int parse_grantee_str(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, string& grantee_str, const struct s3_acl_header *perm, ACLGrant& grant) { string id_type, id_val_quoted; @@ -308,13 +308,13 @@ static int parse_grantee_str(const DoutPrefixProvider *dpp, rgw::sal::Store* sto if (strcasecmp(id_type.c_str(), 
"emailAddress") == 0) { std::unique_ptr user; - ret = store->get_user_by_email(dpp, id_val, null_yield, &user); + ret = driver->get_user_by_email(dpp, id_val, null_yield, &user); if (ret < 0) return ret; grant.set_canon(user->get_id(), user->get_display_name(), rgw_perm); } else if (strcasecmp(id_type.c_str(), "id") == 0) { - std::unique_ptr user = store->get_user(rgw_user(id_val)); + std::unique_ptr user = driver->get_user(rgw_user(id_val)); ret = user->load_user(dpp, null_yield); if (ret < 0) return ret; @@ -333,7 +333,7 @@ static int parse_grantee_str(const DoutPrefixProvider *dpp, rgw::sal::Store* sto return 0; } -static int parse_acl_header(const DoutPrefixProvider *dpp, rgw::sal::Store* store, +static int parse_acl_header(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, const RGWEnv *env, const struct s3_acl_header *perm, std::list& _grants) { @@ -349,7 +349,7 @@ static int parse_acl_header(const DoutPrefixProvider *dpp, rgw::sal::Store* stor for (list::iterator it = grantees.begin(); it != grantees.end(); ++it) { ACLGrant grant; - int ret = parse_grantee_str(dpp, store, *it, perm, grant); + int ret = parse_grantee_str(dpp, driver, *it, perm, grant); if (ret < 0) return ret; @@ -455,14 +455,14 @@ static const s3_acl_header acl_header_perms[] = { }; int RGWAccessControlPolicy_S3::create_from_headers(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, const RGWEnv *env, ACLOwner& _owner) { std::list grants; int r = 0; for (const struct s3_acl_header *p = acl_header_perms; p->rgw_perm; p++) { - r = parse_acl_header(dpp, store, env, p, grants); + r = parse_acl_header(dpp, driver, env, p, grants); if (r < 0) { return r; } @@ -480,7 +480,7 @@ int RGWAccessControlPolicy_S3::create_from_headers(const DoutPrefixProvider *dpp can only be called on object that was parsed */ int RGWAccessControlPolicy_S3::rebuild(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, ACLOwner *owner, + rgw::sal::Driver* driver, ACLOwner *owner, 
RGWAccessControlPolicy& dest, std::string &err_msg) { if (!owner) @@ -493,7 +493,7 @@ int RGWAccessControlPolicy_S3::rebuild(const DoutPrefixProvider *dpp, return -EPERM; } - std::unique_ptr user = store->get_user(owner->get_id()); + std::unique_ptr user = driver->get_user(owner->get_id()); if (user->load_user(dpp, null_yield) < 0) { ldpp_dout(dpp, 10) << "owner info does not exist" << dendl; err_msg = "Invalid id"; @@ -528,7 +528,7 @@ int RGWAccessControlPolicy_S3::rebuild(const DoutPrefixProvider *dpp, } email = u.id; ldpp_dout(dpp, 10) << "grant user email=" << email << dendl; - if (store->get_user_by_email(dpp, email, null_yield, &user) < 0) { + if (driver->get_user_by_email(dpp, email, null_yield, &user) < 0) { ldpp_dout(dpp, 10) << "grant user email not found or other error" << dendl; err_msg = "The e-mail address you provided does not match any account on record."; return -ERR_UNRESOLVABLE_EMAIL; @@ -547,7 +547,7 @@ int RGWAccessControlPolicy_S3::rebuild(const DoutPrefixProvider *dpp, } if (grant_user.user_id.empty()) { - user = store->get_user(uid); + user = driver->get_user(uid); if (user->load_user(dpp, null_yield) < 0) { ldpp_dout(dpp, 10) << "grant user does not exist:" << uid << dendl; err_msg = "Invalid id"; diff --git a/src/rgw/rgw_acl_s3.h b/src/rgw/rgw_acl_s3.h index 09cc7c39a1d34..9521b9f47371c 100644 --- a/src/rgw/rgw_acl_s3.h +++ b/src/rgw/rgw_acl_s3.h @@ -84,7 +84,7 @@ public: bool xml_end(const char *el) override; void to_xml(std::ostream& out); - int rebuild(const DoutPrefixProvider *dpp, rgw::sal::Store* store, ACLOwner *owner, + int rebuild(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, ACLOwner *owner, RGWAccessControlPolicy& dest, std::string &err_msg); bool compare_group_name(std::string& id, ACLGroupTypeEnum group) override; @@ -98,7 +98,7 @@ public: int ret = _acl.create_canned(owner, bucket_owner, canned_acl); return ret; } - int create_from_headers(const DoutPrefixProvider *dpp, rgw::sal::Store* store, + int 
create_from_headers(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, const RGWEnv *env, ACLOwner& _owner); }; diff --git a/src/rgw/rgw_acl_swift.cc b/src/rgw/rgw_acl_swift.cc index f02eb2dbfa71d..f1ca68d637d75 100644 --- a/src/rgw/rgw_acl_swift.cc +++ b/src/rgw/rgw_acl_swift.cc @@ -116,7 +116,7 @@ static boost::optional referrer_to_grant(std::string url_spec, static ACLGrant user_to_grant(const DoutPrefixProvider *dpp, CephContext* const cct, - rgw::sal::Store* store, + rgw::sal::Driver* driver, const std::string& uid, const uint32_t perm) { @@ -124,7 +124,7 @@ static ACLGrant user_to_grant(const DoutPrefixProvider *dpp, ACLGrant grant; std::unique_ptr user; - user = store->get_user(rgw_user(uid)); + user = driver->get_user(rgw_user(uid)); if (user->load_user(dpp, null_yield) < 0) { ldpp_dout(dpp, 10) << "grant user does not exist: " << uid << dendl; /* skipping silently */ @@ -137,7 +137,7 @@ static ACLGrant user_to_grant(const DoutPrefixProvider *dpp, } int RGWAccessControlPolicy_SWIFT::add_grants(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, const std::vector& uids, const uint32_t perm) { @@ -150,7 +150,7 @@ int RGWAccessControlPolicy_SWIFT::add_grants(const DoutPrefixProvider *dpp, const size_t pos = uid.find(':'); if (std::string::npos == pos) { /* No, it don't have -- we've got just a regular user identifier. */ - grant = user_to_grant(dpp, cct, store, uid, perm); + grant = user_to_grant(dpp, cct, driver, uid, perm); } else { /* Yes, *potentially* an HTTP referral. */ auto designator = uid.substr(0, pos); @@ -161,7 +161,7 @@ int RGWAccessControlPolicy_SWIFT::add_grants(const DoutPrefixProvider *dpp, boost::algorithm::trim(designatee); if (! 
boost::algorithm::starts_with(designator, ".")) { - grant = user_to_grant(dpp, cct, store, uid, perm); + grant = user_to_grant(dpp, cct, driver, uid, perm); } else if ((perm & SWIFT_PERM_WRITE) == 0 && is_referrer(designator)) { /* HTTP referrer-based ACLs aren't acceptable for writes. */ grant = referrer_to_grant(designatee, perm); @@ -180,7 +180,7 @@ int RGWAccessControlPolicy_SWIFT::add_grants(const DoutPrefixProvider *dpp, int RGWAccessControlPolicy_SWIFT::create(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, const rgw_user& id, const std::string& name, const char* read_list, @@ -201,7 +201,7 @@ int RGWAccessControlPolicy_SWIFT::create(const DoutPrefixProvider *dpp, return r; } - r = add_grants(dpp, store, uids, SWIFT_PERM_READ); + r = add_grants(dpp, driver, uids, SWIFT_PERM_READ); if (r < 0) { ldpp_dout(dpp, 0) << "ERROR: add_grants for read returned r=" << r << dendl; @@ -218,7 +218,7 @@ int RGWAccessControlPolicy_SWIFT::create(const DoutPrefixProvider *dpp, return r; } - r = add_grants(dpp, store, uids, SWIFT_PERM_WRITE); + r = add_grants(dpp, driver, uids, SWIFT_PERM_WRITE); if (r < 0) { ldpp_dout(dpp, 0) << "ERROR: add_grants for write returned r=" << r << dendl; @@ -305,7 +305,7 @@ void RGWAccessControlPolicy_SWIFT::to_str(string& read, string& write) } void RGWAccessControlPolicy_SWIFTAcct::add_grants(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, const std::vector& uids, const uint32_t perm) { @@ -316,7 +316,7 @@ void RGWAccessControlPolicy_SWIFTAcct::add_grants(const DoutPrefixProvider *dpp, grant.set_group(ACL_GROUP_ALL_USERS, perm); acl.add_grant(&grant); } else { - std::unique_ptr user = store->get_user(rgw_user(uid)); + std::unique_ptr user = driver->get_user(rgw_user(uid)); if (user->load_user(dpp, null_yield) < 0) { ldpp_dout(dpp, 10) << "grant user does not exist:" << uid << dendl; @@ -332,7 +332,7 @@ void RGWAccessControlPolicy_SWIFTAcct::add_grants(const 
DoutPrefixProvider *dpp, } bool RGWAccessControlPolicy_SWIFTAcct::create(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, const rgw_user& id, const std::string& name, const std::string& acl_str) @@ -354,7 +354,7 @@ bool RGWAccessControlPolicy_SWIFTAcct::create(const DoutPrefixProvider *dpp, decode_json_obj(admin, *iter); ldpp_dout(dpp, 0) << "admins: " << admin << dendl; - add_grants(dpp, store, admin, SWIFT_PERM_ADMIN); + add_grants(dpp, driver, admin, SWIFT_PERM_ADMIN); } iter = parser.find_first("read-write"); @@ -363,7 +363,7 @@ bool RGWAccessControlPolicy_SWIFTAcct::create(const DoutPrefixProvider *dpp, decode_json_obj(readwrite, *iter); ldpp_dout(dpp, 0) << "read-write: " << readwrite << dendl; - add_grants(dpp, store, readwrite, SWIFT_PERM_RWRT); + add_grants(dpp, driver, readwrite, SWIFT_PERM_RWRT); } iter = parser.find_first("read-only"); @@ -372,7 +372,7 @@ bool RGWAccessControlPolicy_SWIFTAcct::create(const DoutPrefixProvider *dpp, decode_json_obj(readonly, *iter); ldpp_dout(dpp, 0) << "read-only: " << readonly << dendl; - add_grants(dpp, store, readonly, SWIFT_PERM_READ); + add_grants(dpp, driver, readonly, SWIFT_PERM_READ); } return true; diff --git a/src/rgw/rgw_acl_swift.h b/src/rgw/rgw_acl_swift.h index f4d5e14c59324..8d263e854d260 100644 --- a/src/rgw/rgw_acl_swift.h +++ b/src/rgw/rgw_acl_swift.h @@ -17,7 +17,7 @@ class RGWUserCtl; class RGWAccessControlPolicy_SWIFT : public RGWAccessControlPolicy { - int add_grants(const DoutPrefixProvider *dpp, rgw::sal::Store* store, + int add_grants(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, const std::vector& uids, uint32_t perm); @@ -28,7 +28,7 @@ public: ~RGWAccessControlPolicy_SWIFT() override = default; int create(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, const rgw_user& id, const std::string& name, const char* read_list, @@ -47,11 +47,11 @@ public: ~RGWAccessControlPolicy_SWIFTAcct() override {} void 
add_grants(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, const std::vector& uids, uint32_t perm); bool create(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, const rgw_user& id, const std::string& name, const std::string& acl_str); diff --git a/src/rgw/rgw_admin.cc b/src/rgw/rgw_admin.cc index 81cb33c4479b6..f18ce8cfbfb78 100644 --- a/src/rgw/rgw_admin.cc +++ b/src/rgw/rgw_admin.cc @@ -81,7 +81,7 @@ extern "C" { using namespace std; -static rgw::sal::Store* store = NULL; +static rgw::sal::Driver* driver = NULL; static constexpr auto dout_subsys = ceph_subsys_rgw; @@ -1177,11 +1177,11 @@ static void show_reshard_status( } class StoreDestructor { - rgw::sal::Store* store; + rgw::sal::Driver* driver; public: - explicit StoreDestructor(rgw::sal::Store* _s) : store(_s) {} + explicit StoreDestructor(rgw::sal::Driver* _s) : driver(_s) {} ~StoreDestructor() { - StoreManager::close_storage(store); + DriverManager::close_storage(driver); rgw_http_client_cleanup(); } }; @@ -1189,7 +1189,7 @@ public: static int init_bucket(rgw::sal::User* user, const rgw_bucket& b, std::unique_ptr* bucket) { - return store->get_bucket(dpp(), user, b, bucket, null_yield); + return driver->get_bucket(dpp(), user, b, bucket, null_yield); } static int init_bucket(rgw::sal::User* user, @@ -1398,13 +1398,13 @@ void set_quota_info(RGWQuotaInfo& quota, OPT opt_cmd, int64_t max_size, int64_t } } -int set_bucket_quota(rgw::sal::Store* store, OPT opt_cmd, +int set_bucket_quota(rgw::sal::Driver* driver, OPT opt_cmd, const string& tenant_name, const string& bucket_name, int64_t max_size, int64_t max_objects, bool have_max_size, bool have_max_objects) { std::unique_ptr bucket; - int r = store->get_bucket(dpp(), nullptr, tenant_name, bucket_name, &bucket, null_yield); + int r = driver->get_bucket(dpp(), nullptr, tenant_name, bucket_name, &bucket, null_yield); if (r < 0) { cerr << "could not get bucket info for bucket=" << bucket_name 
<< ": " << cpp_strerror(-r) << std::endl; return -r; @@ -1420,7 +1420,7 @@ int set_bucket_quota(rgw::sal::Store* store, OPT opt_cmd, return 0; } -int set_bucket_ratelimit(rgw::sal::Store* store, OPT opt_cmd, +int set_bucket_ratelimit(rgw::sal::Driver* driver, OPT opt_cmd, const string& tenant_name, const string& bucket_name, int64_t max_read_ops, int64_t max_write_ops, int64_t max_read_bytes, int64_t max_write_bytes, @@ -1428,7 +1428,7 @@ int set_bucket_ratelimit(rgw::sal::Store* store, OPT opt_cmd, bool have_max_read_bytes, bool have_max_write_bytes) { std::unique_ptr bucket; - int r = store->get_bucket(dpp(), nullptr, tenant_name, bucket_name, &bucket, null_yield); + int r = driver->get_bucket(dpp(), nullptr, tenant_name, bucket_name, &bucket, null_yield); if (r < 0) { cerr << "could not get bucket info for bucket=" << bucket_name << ": " << cpp_strerror(-r) << std::endl; return -r; @@ -1527,11 +1527,11 @@ int show_user_ratelimit(std::unique_ptr& user, Formatter *format return 0; } -int show_bucket_ratelimit(rgw::sal::Store* store, const string& tenant_name, +int show_bucket_ratelimit(rgw::sal::Driver* driver, const string& tenant_name, const string& bucket_name, Formatter *formatter) { std::unique_ptr bucket; - int r = store->get_bucket(dpp(), nullptr, tenant_name, bucket_name, &bucket, null_yield); + int r = driver->get_bucket(dpp(), nullptr, tenant_name, bucket_name, &bucket, null_yield); if (r < 0) { cerr << "could not get bucket info for bucket=" << bucket_name << ": " << cpp_strerror(-r) << std::endl; return -r; @@ -1591,7 +1591,7 @@ int set_user_quota(OPT opt_cmd, RGWUser& user, RGWUserAdminOpState& op_state, in return 0; } -int check_min_obj_stripe_size(rgw::sal::Store* store, rgw::sal::Object* obj, uint64_t min_stripe_size, bool *need_rewrite) +int check_min_obj_stripe_size(rgw::sal::Driver* driver, rgw::sal::Object* obj, uint64_t min_stripe_size, bool *need_rewrite) { int ret = obj->get_obj_attrs(null_yield, dpp()); if (ret < 0) { @@ -1659,7 +1659,7 @@ 
int check_obj_locator_underscore(rgw::sal::Object* obj, bool fix, bool remove_ba string status = (needs_fixing ? "needs_fixing" : "ok"); if ((needs_fixing || remove_bad) && fix) { - ret = static_cast(store)->getRados()->fix_head_obj_locator(dpp(), obj->get_bucket()->get_info(), needs_fixing, remove_bad, obj->get_key()); + ret = static_cast(driver)->getRados()->fix_head_obj_locator(dpp(), obj->get_bucket()->get_info(), needs_fixing, remove_bad, obj->get_key()); if (ret < 0) { cerr << "ERROR: fix_head_object_locator() returned ret=" << ret << std::endl; goto done; @@ -1686,7 +1686,7 @@ int check_obj_tail_locator_underscore(RGWBucketInfo& bucket_info, rgw_obj_key& k bool needs_fixing; string status; - int ret = static_cast(store)->getRados()->fix_tail_obj_locator(dpp(), bucket_info, key, fix, &needs_fixing, null_yield); + int ret = static_cast(driver)->getRados()->fix_tail_obj_locator(dpp(), bucket_info, key, fix, &needs_fixing, null_yield); if (ret < 0) { cerr << "ERROR: fix_tail_object_locator_underscore() returned ret=" << ret << std::endl; status = "failed"; @@ -1744,7 +1744,7 @@ int do_check_object_locator(const string& tenant_name, const string& bucket_name do { ret = bucket->list(dpp(), params, max_entries - count, results, null_yield); if (ret < 0) { - cerr << "ERROR: store->list_objects(): " << cpp_strerror(-ret) << std::endl; + cerr << "ERROR: driver->list_objects(): " << cpp_strerror(-ret) << std::endl; return -ret; } @@ -1776,18 +1776,18 @@ int do_check_object_locator(const string& tenant_name, const string& bucket_name } /// search for a matching zone/zonegroup id and return a connection if found -static boost::optional get_remote_conn(rgw::sal::RadosStore* store, +static boost::optional get_remote_conn(rgw::sal::RadosStore* driver, const RGWZoneGroup& zonegroup, const std::string& remote) { boost::optional conn; if (remote == zonegroup.get_id()) { - conn.emplace(store->ctx(), store, remote, zonegroup.endpoints, zonegroup.api_name); + 
conn.emplace(driver->ctx(), driver, remote, zonegroup.endpoints, zonegroup.api_name); } else { for (const auto& z : zonegroup.zones) { const auto& zone = z.second; if (remote == zone.id) { - conn.emplace(store->ctx(), store, remote, zone.endpoints, zonegroup.api_name); + conn.emplace(driver->ctx(), driver, remote, zone.endpoints, zonegroup.api_name); break; } } @@ -1796,13 +1796,13 @@ static boost::optional get_remote_conn(rgw::sal::RadosStore* store, } /// search each zonegroup for a connection -static boost::optional get_remote_conn(rgw::sal::RadosStore* store, +static boost::optional get_remote_conn(rgw::sal::RadosStore* driver, const RGWPeriodMap& period_map, const std::string& remote) { boost::optional conn; for (const auto& zg : period_map.zonegroups) { - conn = get_remote_conn(store, zg.second, remote); + conn = get_remote_conn(driver, zg.second, remote); if (conn) { break; } @@ -1885,7 +1885,7 @@ static int commit_period(rgw::sal::ConfigStore* cfgstore, return -EINVAL; } // are we the period's master zone? 
- if (store->get_zone()->get_id() == master_zone) { + if (driver->get_zone()->get_id() == master_zone) { // read the current period RGWPeriod current_period; int ret = cfgstore->read_period(dpp(), null_yield, realm.current_period, @@ -1895,7 +1895,7 @@ static int commit_period(rgw::sal::ConfigStore* cfgstore, return ret; } // the master zone can commit locally - ret = rgw::commit_period(dpp(), null_yield, cfgstore, store, + ret = rgw::commit_period(dpp(), null_yield, cfgstore, driver, realm, realm_writer, current_period, period, cerr, force); if (ret < 0) { @@ -1912,7 +1912,7 @@ static int commit_period(rgw::sal::ConfigStore* cfgstore, boost::optional conn; RGWRESTConn *remote_conn = nullptr; if (!remote.empty()) { - conn = get_remote_conn(static_cast(store), period.get_map(), remote); + conn = get_remote_conn(static_cast(driver), period.get_map(), remote); if (!conn) { cerr << "failed to find a zone or zonegroup for remote " << remote << std::endl; @@ -1948,7 +1948,7 @@ static int commit_period(rgw::sal::ConfigStore* cfgstore, return ret; } - // decode the response and store it back + // decode the response and driver it back try { decode_json_obj(period, &p); } catch (const JSONDecoder::err& e) { @@ -2016,7 +2016,7 @@ static int update_period(rgw::sal::ConfigStore* cfgstore, constexpr bool exclusive = false; ret = cfgstore->create_period(dpp(), null_yield, exclusive, period); if (ret < 0) { - cerr << "failed to store period: " << cpp_strerror(-ret) << std::endl; + cerr << "failed to driver period: " << cpp_strerror(-ret) << std::endl; return ret; } if (commit) { @@ -2110,7 +2110,7 @@ stringstream& push_ss(stringstream& ss, list& l, int tab = 0) static void get_md_sync_status(list& status) { - RGWMetaSyncStatusManager sync(static_cast(store), static_cast(store)->svc()->rados->get_async_processor()); + RGWMetaSyncStatusManager sync(static_cast(driver), static_cast(driver)->svc()->rados->get_async_processor()); int ret = sync.init(dpp()); if (ret < 0) { @@ -2176,7 
+2176,7 @@ static void get_md_sync_status(list& status) push_ss(ss, status) << "incremental sync: " << num_inc << "/" << total_shards << " shards"; map master_shards_info; - string master_period = static_cast(store)->svc()->zone->get_current_period_id(); + string master_period = static_cast(driver)->svc()->zone->get_current_period_id(); ret = sync.read_master_log_shards_info(dpp(), master_period, &master_shards_info); if (ret < 0) { @@ -2255,18 +2255,18 @@ static void get_data_sync_status(const rgw_zone_id& source_zone, list& s RGWZone *sz; - if (!(sz = static_cast(store)->svc()->zone->find_zone(source_zone))) { + if (!(sz = static_cast(driver)->svc()->zone->find_zone(source_zone))) { push_ss(ss, status, tab) << string("zone not found"); flush_ss(ss, status); return; } - if (!static_cast(store)->svc()->zone->zone_syncs_from(static_cast(store)->svc()->zone->get_zone(), *sz)) { + if (!static_cast(driver)->svc()->zone->zone_syncs_from(static_cast(driver)->svc()->zone->get_zone(), *sz)) { push_ss(ss, status, tab) << string("not syncing from zone"); flush_ss(ss, status); return; } - RGWDataSyncStatusManager sync(static_cast(store), static_cast(store)->svc()->rados->get_async_processor(), source_zone, nullptr); + RGWDataSyncStatusManager sync(static_cast(driver), static_cast(driver)->svc()->rados->get_async_processor(), source_zone, nullptr); int ret = sync.init(dpp()); if (ret < 0) { @@ -2432,8 +2432,8 @@ static auto get_disabled_features(const rgw::zone_features::set& enabled) { static void sync_status(Formatter *formatter) { - const rgw::sal::ZoneGroup& zonegroup = store->get_zone()->get_zonegroup(); - rgw::sal::Zone* zone = store->get_zone(); + const rgw::sal::ZoneGroup& zonegroup = driver->get_zone()->get_zonegroup(); + rgw::sal::Zone* zone = driver->get_zone(); int width = 15; @@ -2453,7 +2453,7 @@ static void sync_status(Formatter *formatter) list md_status; - if (store->is_meta_master()) { + if (driver->is_meta_master()) { md_status.push_back("no sync (zone is 
master)"); } else { get_md_sync_status(md_status); @@ -2463,14 +2463,14 @@ static void sync_status(Formatter *formatter) list data_status; - auto& zone_conn_map = static_cast(store)->svc()->zone->get_zone_conn_map(); + auto& zone_conn_map = static_cast(driver)->svc()->zone->get_zone_conn_map(); for (auto iter : zone_conn_map) { const rgw_zone_id& source_id = iter.first; string source_str = "source: "; string s = source_str + source_id.id; std::unique_ptr sz; - if (store->get_zone()->get_zonegroup().get_zone_by_id(source_id.id, &sz) == 0) { + if (driver->get_zone()->get_zonegroup().get_zone_by_id(source_id.id, &sz) == 0) { s += string(" (") + sz->get_name() + ")"; } data_status.push_back(s); @@ -2489,7 +2489,7 @@ std::ostream& operator<<(std::ostream& out, const indented& h) { return out << std::setw(h.w) << h.header << std::setw(1) << ' '; } -static int bucket_source_sync_status(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, const RGWZone& zone, +static int bucket_source_sync_status(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* driver, const RGWZone& zone, const RGWZone& source, RGWRESTConn *conn, const RGWBucketInfo& bucket_info, rgw_sync_bucket_pipe pipe, @@ -2525,7 +2525,7 @@ static int bucket_source_sync_status(const DoutPrefixProvider *dpp, rgw::sal::Ra // check for full sync status rgw_bucket_sync_status full_status; - r = rgw_read_bucket_full_sync_status(dpp, store, pipe, &full_status, null_yield); + r = rgw_read_bucket_full_sync_status(dpp, driver, pipe, &full_status, null_yield); if (r >= 0) { if (full_status.state == BucketSyncState::Init) { out << indented{width} << "init: bucket sync has not started\n"; @@ -2561,13 +2561,13 @@ static int bucket_source_sync_status(const DoutPrefixProvider *dpp, rgw::sal::Ra // use shard count from our log gen=0 shard_status.resize(rgw::num_shards(log.layout.in_index)); } else { - lderr(store->ctx()) << "failed to read bucket full sync status: " << cpp_strerror(r) << dendl; + lderr(driver->ctx()) << 
"failed to read bucket full sync status: " << cpp_strerror(r) << dendl; return r; } - r = rgw_read_bucket_inc_sync_status(dpp, store, pipe, gen, &shard_status); + r = rgw_read_bucket_inc_sync_status(dpp, driver, pipe, gen, &shard_status); if (r < 0) { - lderr(store->ctx()) << "failed to read bucket incremental sync status: " << cpp_strerror(r) << dendl; + lderr(driver->ctx()) << "failed to read bucket incremental sync status: " << cpp_strerror(r) << dendl; return r; } @@ -2652,9 +2652,9 @@ static void get_hint_entities(const std::set& zones, const std::set static rgw_zone_id resolve_zone_id(const string& s) { std::unique_ptr zone; - int ret = store->get_zone()->get_zonegroup().get_zone_by_id(s, &zone); + int ret = driver->get_zone()->get_zonegroup().get_zone_by_id(s, &zone); if (ret < 0) - ret = store->get_zone()->get_zonegroup().get_zone_by_name(s, &zone); + ret = driver->get_zone()->get_zonegroup().get_zone_by_name(s, &zone); if (ret < 0) return rgw_zone_id(s); @@ -2668,9 +2668,9 @@ rgw_zone_id validate_zone_id(const rgw_zone_id& zone_id) static int sync_info(std::optional opt_target_zone, std::optional opt_bucket, Formatter *formatter) { - rgw_zone_id zone_id = opt_target_zone.value_or(store->get_zone()->get_id()); + rgw_zone_id zone_id = opt_target_zone.value_or(driver->get_zone()->get_id()); - auto zone_policy_handler = store->get_zone()->get_sync_policy_handler(); + auto zone_policy_handler = driver->get_zone()->get_sync_policy_handler(); RGWBucketSyncPolicyHandlerRef bucket_handler; @@ -2741,7 +2741,7 @@ static int sync_info(std::optional opt_target_zone, std::optionalget_sync_policy_handler(dpp(), zid, hint_bucket, &hint_bucket_handler, null_yield); + int r = driver->get_sync_policy_handler(dpp(), zid, hint_bucket, &hint_bucket_handler, null_yield); if (r < 0) { ldpp_dout(dpp(), 20) << "could not get bucket sync policy handler for hint bucket=" << hint_bucket << " ... 
skipping" << dendl; continue; @@ -2779,11 +2779,11 @@ static int sync_info(std::optional opt_target_zone, std::optionalget_zone()->get_zonegroup(); - rgw::sal::Zone* zone = store->get_zone(); + const rgw::sal::ZoneGroup& zonegroup = driver->get_zone()->get_zonegroup(); + rgw::sal::Zone* zone = driver->get_zone(); constexpr int width = 15; out << indented{width, "realm"} << zone->get_realm_id() << " (" << zone->get_realm_name() << ")\n"; @@ -2791,14 +2791,14 @@ static int bucket_sync_info(rgw::sal::Store* store, const RGWBucketInfo& info, out << indented{width, "zone"} << zone->get_id() << " (" << zone->get_name() << ")\n"; out << indented{width, "bucket"} << info.bucket << "\n\n"; - if (!static_cast(store)->ctl()->bucket->bucket_imports_data(info.bucket, null_yield, dpp())) { + if (!static_cast(driver)->ctl()->bucket->bucket_imports_data(info.bucket, null_yield, dpp())) { out << "Sync is disabled for bucket " << info.bucket.name << '\n'; return 0; } RGWBucketSyncPolicyHandlerRef handler; - int r = store->get_sync_policy_handler(dpp(), std::nullopt, info.bucket, &handler, null_yield); + int r = driver->get_sync_policy_handler(dpp(), std::nullopt, info.bucket, &handler, null_yield); if (r < 0) { ldpp_dout(dpp(), -1) << "ERROR: failed to get policy handler for bucket (" << info.bucket << "): r=" << r << ": " << cpp_strerror(-r) << dendl; return r; @@ -2817,13 +2817,13 @@ static int bucket_sync_info(rgw::sal::Store* store, const RGWBucketInfo& info, return 0; } -static int bucket_sync_status(rgw::sal::Store* store, const RGWBucketInfo& info, +static int bucket_sync_status(rgw::sal::Driver* driver, const RGWBucketInfo& info, const rgw_zone_id& source_zone_id, std::optional& opt_source_bucket, std::ostream& out) { - const rgw::sal::ZoneGroup& zonegroup = store->get_zone()->get_zonegroup(); - rgw::sal::Zone* zone = store->get_zone(); + const rgw::sal::ZoneGroup& zonegroup = driver->get_zone()->get_zonegroup(); + rgw::sal::Zone* zone = driver->get_zone(); constexpr int 
width = 15; out << indented{width, "realm"} << zone->get_realm_id() << " (" << zone->get_realm_name() << ")\n"; @@ -2834,14 +2834,14 @@ static int bucket_sync_status(rgw::sal::Store* store, const RGWBucketInfo& info, << to_iso_8601(ceph::real_clock::now(), iso_8601_format::YMDhms) << "\n\n"; - if (!static_cast(store)->ctl()->bucket->bucket_imports_data(info.bucket, null_yield, dpp())) { + if (!static_cast(driver)->ctl()->bucket->bucket_imports_data(info.bucket, null_yield, dpp())) { out << "Sync is disabled for bucket " << info.bucket.name << " or bucket has no sync sources" << std::endl; return 0; } RGWBucketSyncPolicyHandlerRef handler; - int r = store->get_sync_policy_handler(dpp(), std::nullopt, info.bucket, &handler, null_yield); + int r = driver->get_sync_policy_handler(dpp(), std::nullopt, info.bucket, &handler, null_yield); if (r < 0) { ldpp_dout(dpp(), -1) << "ERROR: failed to get policy handler for bucket (" << info.bucket << "): r=" << r << ": " << cpp_strerror(-r) << dendl; return r; @@ -2849,12 +2849,12 @@ static int bucket_sync_status(rgw::sal::Store* store, const RGWBucketInfo& info, auto sources = handler->get_all_sources(); - auto& zone_conn_map = static_cast(store)->svc()->zone->get_zone_conn_map(); + auto& zone_conn_map = static_cast(driver)->svc()->zone->get_zone_conn_map(); set zone_ids; if (!source_zone_id.empty()) { std::unique_ptr zone; - int ret = store->get_zone()->get_zonegroup().get_zone_by_id(source_zone_id.id, &zone); + int ret = driver->get_zone()->get_zonegroup().get_zone_by_id(source_zone_id.id, &zone); if (ret < 0) { ldpp_dout(dpp(), -1) << "Source zone not found in zonegroup " << zonegroup.get_name() << dendl; @@ -2868,7 +2868,7 @@ static int bucket_sync_status(rgw::sal::Store* store, const RGWBucketInfo& info, zone_ids.insert(source_zone_id); } else { std::list ids; - int ret = store->get_zone()->get_zonegroup().list_zones(ids); + int ret = driver->get_zone()->get_zonegroup().list_zones(ids); if (ret == 0) { for (const auto& 
entry : ids) { zone_ids.insert(entry); @@ -2877,8 +2877,8 @@ static int bucket_sync_status(rgw::sal::Store* store, const RGWBucketInfo& info, } for (auto& zone_id : zone_ids) { - auto z = static_cast(store)->svc()->zone->get_zonegroup().zones.find(zone_id.id); - if (z == static_cast(store)->svc()->zone->get_zonegroup().zones.end()) { /* should't happen */ + auto z = static_cast(driver)->svc()->zone->get_zonegroup().zones.find(zone_id.id); + if (z == static_cast(driver)->svc()->zone->get_zonegroup().zones.end()) { /* should't happen */ continue; } auto c = zone_conn_map.find(zone_id.id); @@ -2893,7 +2893,7 @@ static int bucket_sync_status(rgw::sal::Store* store, const RGWBucketInfo& info, continue; } if (pipe.source.zone.value_or(rgw_zone_id()) == z->second.id) { - bucket_source_sync_status(dpp(), static_cast(store), static_cast(store)->svc()->zone->get_zone(), z->second, + bucket_source_sync_status(dpp(), static_cast(driver), static_cast(driver)->svc()->zone->get_zone(), z->second, c->second, info, pipe, width, out); @@ -2941,7 +2941,7 @@ static void parse_tier_config_param(const string& s, map(store)->getRados()->get_rados_handle()->ioctx_create(pool.to_str().c_str(), io_ctx); + int ret = static_cast(driver)->getRados()->get_rados_handle()->ioctx_create(pool.to_str().c_str(), io_ctx); if (ret < 0) { // the pool may not exist at this moment, we have no way to check if it supports omap. 
return 0; @@ -2956,7 +2956,7 @@ static int check_pool_support_omap(const rgw_pool& pool) return 0; } -int check_reshard_bucket_params(rgw::sal::Store* store, +int check_reshard_bucket_params(rgw::sal::Driver* driver, const string& bucket_name, const string& tenant, const string& bucket_id, @@ -2975,8 +2975,8 @@ int check_reshard_bucket_params(rgw::sal::Store* store, return -EINVAL; } - if (num_shards > (int)static_cast(store)->getRados()->get_max_bucket_shards()) { - cerr << "ERROR: num_shards too high, max value: " << static_cast(store)->getRados()->get_max_bucket_shards() << std::endl; + if (num_shards > (int)static_cast(driver)->getRados()->get_max_bucket_shards()) { + cerr << "ERROR: num_shards too high, max value: " << static_cast(driver)->getRados()->get_max_bucket_shards() << std::endl; return -EINVAL; } @@ -3052,7 +3052,7 @@ static int trim_sync_error_log(int shard_id, const string& marker, int delay_ms) shard_id); // call cls_log_trim() until it returns -ENODATA for (;;) { - int ret = static_cast(store)->svc()->cls->timelog.trim(dpp(), oid, {}, {}, {}, marker, nullptr, + int ret = static_cast(driver)->svc()->cls->timelog.trim(dpp(), oid, {}, {}, {}, marker, nullptr, null_yield); if (ret == -ENODATA) { return 0; } @@ -3186,7 +3186,7 @@ public: int ret = bucket->put_info(dpp(), false, real_time()); if (ret < 0) { - cerr << "failed to store bucket info: " << cpp_strerror(-ret) << std::endl; + cerr << "failed to store bucket info: " << cpp_strerror(-ret) << std::endl; return -ret; } @@ -3209,7 +3209,7 @@ void resolve_zone_id_opt(std::optional& zone_name, std::optional zone; - int ret = store->get_zone()->get_zonegroup().get_zone_by_name(*zone_name, &zone); + int ret = driver->get_zone()->get_zonegroup().get_zone_by_name(*zone_name, &zone); if (ret < 0) { cerr << "WARNING: cannot find source zone id for name=" << *zone_name << std::endl; zone_id = rgw_zone_id(*zone_name); @@ -3226,7 +3226,7 @@ void resolve_zone_ids_opt(std::optional >& names, std::optional zone;
- int ret = store->get_zone()->get_zonegroup().get_zone_by_name(name, &zone); + int ret = driver->get_zone()->get_zonegroup().get_zone_by_name(name, &zone); if (ret < 0) { cerr << "WARNING: cannot find source zone id for name=" << name << std::endl; zid = rgw_zone_id(name); @@ -3254,7 +3254,7 @@ class JSONFormatter_PrettyZone : public JSONFormatter { auto zone_id = *(static_cast(pval)); string zone_name; std::unique_ptr zone; - if (store->get_zone()->get_zonegroup().get_zone_by_id(zone_id.id, &zone) == 0) { + if (driver->get_zone()->get_zonegroup().get_zone_by_id(zone_id.id, &zone) == 0) { zone_name = zone->get_name(); } else { cerr << "WARNING: cannot find zone name for id=" << zone_id << std::endl; @@ -4343,21 +4343,21 @@ int main(int argc, const char **argv) bool need_cache = readonly_ops_list.find(opt_cmd) == readonly_ops_list.end(); bool need_gc = (gc_ops_list.find(opt_cmd) != gc_ops_list.end()) && !bypass_gc; - StoreManager::Config cfg = StoreManager::get_config(true, g_ceph_context); + DriverManager::Config cfg = DriverManager::get_config(true, g_ceph_context); auto config_store_type = g_conf().get_val("rgw_config_store"); - cfgstore = StoreManager::create_config_store(dpp(), config_store_type); + cfgstore = DriverManager::create_config_store(dpp(), config_store_type); if (!cfgstore) { cerr << "couldn't init config storage provider" << std::endl; return EIO; } if (raw_storage_op) { - store = StoreManager::get_raw_storage(dpp(), + driver = DriverManager::get_raw_storage(dpp(), g_ceph_context, cfg); } else { - store = StoreManager::get_storage(dpp(), + driver = DriverManager::get_storage(dpp(), g_ceph_context, cfg, false, @@ -4368,13 +4368,13 @@ int main(int argc, const char **argv) need_cache && g_conf()->rgw_cache_enabled, need_gc); } - if (!store) { + if (!driver) { cerr << "couldn't init storage provider" << std::endl; return EIO; } - /* Needs to be after the store is initialized. Note, user could be empty here. 
*/ - user = store->get_user(user_id_arg); + /* Needs to be after the driver is initialized. Note, user could be empty here. */ + user = driver->get_user(user_id_arg); init_optional_bucket(opt_bucket, opt_tenant, opt_bucket_name, opt_bucket_id); @@ -4463,14 +4463,14 @@ int main(int argc, const char **argv) RGWStreamFlusher stream_flusher(formatter.get(), cout); - RGWUserAdminOpState user_op(store); + RGWUserAdminOpState user_op(driver); if (!user_email.empty()) { user_op.user_email_specified=true; } if (!source_zone_name.empty()) { std::unique_ptr zone; - if (store->get_zone()->get_zonegroup().get_zone_by_name(source_zone_name, &zone) < 0) { + if (driver->get_zone()->get_zonegroup().get_zone_by_name(source_zone_name, &zone) < 0) { cerr << "WARNING: cannot find source zone id for name=" << source_zone_name << std::endl; source_zone = source_zone_name; } else { @@ -4491,7 +4491,7 @@ int main(int argc, const char **argv) oath_init(); - StoreDestructor store_destructor(store); + StoreDestructor store_destructor(driver); if (raw_storage_op) { try_to_resolve_local_entities(cfgstore.get(), realm_id, realm_name, @@ -4629,7 +4629,7 @@ int main(int argc, const char **argv) // use realm master zone as remote remote = current_period.get_master_zone().id; } - conn = get_remote_conn(static_cast(store), current_period.get_map(), remote); + conn = get_remote_conn(static_cast(driver), current_period.get_map(), remote); if (!conn) { cerr << "failed to find a zone or zonegroup for remote " << remote << std::endl; @@ -5044,7 +5044,7 @@ int main(int argc, const char **argv) } else { ret = writer->write(dpp(), null_yield, realm); if (ret < 0) { - cerr << "ERROR: couldn't store realm info: " << cpp_strerror(-ret) << std::endl; + cerr << "ERROR: couldn't store realm info: " << cpp_strerror(-ret) << std::endl; return 1; } } @@ -5211,7 +5211,7 @@ int main(int argc, const char **argv) // validate --tier-type if specified const string *ptier_type = (tier_type_specified ?
&tier_type : nullptr); if (ptier_type) { - auto sync_mgr = static_cast(store)->svc()->sync_modules->get_manager(); + auto sync_mgr = static_cast(driver)->svc()->sync_modules->get_manager(); if (!sync_mgr->get_module(*ptier_type, nullptr)) { ldpp_dout(dpp(), -1) << "ERROR: could not find sync module: " << *ptier_type << ", valid sync modules: " @@ -5883,7 +5883,7 @@ int main(int argc, const char **argv) // validate --tier-type if specified const string *ptier_type = (tier_type_specified ? &tier_type : nullptr); if (ptier_type) { - auto sync_mgr = static_cast(store)->svc()->sync_modules->get_manager(); + auto sync_mgr = static_cast(driver)->svc()->sync_modules->get_manager(); if (!sync_mgr->get_module(*ptier_type, nullptr)) { ldpp_dout(dpp(), -1) << "ERROR: could not find sync module: " << *ptier_type << ", valid sync modules: " @@ -6163,7 +6163,7 @@ int main(int argc, const char **argv) // validate --tier-type if specified const string *ptier_type = (tier_type_specified ? &tier_type : nullptr); if (ptier_type) { - auto sync_mgr = static_cast(store)->svc()->sync_modules->get_manager(); + auto sync_mgr = static_cast(driver)->svc()->sync_modules->get_manager(); if (!sync_mgr->get_module(*ptier_type, nullptr)) { ldpp_dout(dpp(), -1) << "ERROR: could not find sync module: " << *ptier_type << ", valid sync modules: " @@ -6421,7 +6421,7 @@ int main(int argc, const char **argv) resolve_zone_ids_opt(opt_source_zone_names, opt_source_zone_ids); resolve_zone_ids_opt(opt_dest_zone_names, opt_dest_zone_ids); - bool non_master_cmd = (!store->is_meta_master() && !yes_i_really_mean_it); + bool non_master_cmd = (!driver->is_meta_master() && !yes_i_really_mean_it); std::set non_master_ops_list = {OPT::USER_CREATE, OPT::USER_RM, OPT::USER_MODIFY, OPT::USER_ENABLE, OPT::USER_SUSPEND, OPT::SUBUSER_CREATE, @@ -6525,7 +6525,7 @@ int main(int argc, const char **argv) rgw_placement_rule target_rule; target_rule.name = placement_id; target_rule.storage_class = opt_storage_class.value_or(""); 
- if (!store->valid_placement(target_rule)) { + if (!driver->valid_placement(target_rule)) { cerr << "NOTICE: invalid dest placement: " << target_rule.to_str() << std::endl; return EINVAL; } @@ -6540,7 +6540,7 @@ int main(int argc, const char **argv) RGWUser ruser; int ret = 0; if (!(rgw::sal::User::empty(user) && access_key.empty()) || !subuser.empty()) { - ret = ruser.init(dpp(), store, user_op, null_yield); + ret = ruser.init(dpp(), driver, user_op, null_yield); if (ret < 0) { cerr << "user.init failed: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -6778,7 +6778,7 @@ int main(int argc, const char **argv) cerr << "failed to parse policy: " << e.what() << std::endl; return -EINVAL; } - std::unique_ptr role = store->get_role(role_name, tenant, path, assume_role_doc); + std::unique_ptr role = driver->get_role(role_name, tenant, path, assume_role_doc); ret = role->create(dpp(), true, "", null_yield); if (ret < 0) { return -ret; @@ -6792,7 +6792,7 @@ int main(int argc, const char **argv) cerr << "ERROR: empty role name" << std::endl; return -EINVAL; } - std::unique_ptr role = store->get_role(role_name, tenant); + std::unique_ptr role = driver->get_role(role_name, tenant); ret = role->delete_obj(dpp(), null_yield); if (ret < 0) { return -ret; @@ -6806,7 +6806,7 @@ int main(int argc, const char **argv) cerr << "ERROR: empty role name" << std::endl; return -EINVAL; } - std::unique_ptr role = store->get_role(role_name, tenant); + std::unique_ptr role = driver->get_role(role_name, tenant); ret = role->get(dpp(), null_yield); if (ret < 0) { return -ret; @@ -6834,7 +6834,7 @@ int main(int argc, const char **argv) return -EINVAL; } - std::unique_ptr role = store->get_role(role_name, tenant); + std::unique_ptr role = driver->get_role(role_name, tenant); ret = role->get(dpp(), null_yield); if (ret < 0) { return -ret; @@ -6850,7 +6850,7 @@ int main(int argc, const char **argv) case OPT::ROLE_LIST: { vector> result; - ret = store->get_roles(dpp(), null_yield, path_prefix, 
tenant, result); + ret = driver->get_roles(dpp(), null_yield, path_prefix, tenant, result); if (ret < 0) { return -ret; } @@ -6892,7 +6892,7 @@ int main(int argc, const char **argv) return -EINVAL; } - std::unique_ptr role = store->get_role(role_name, tenant); + std::unique_ptr role = driver->get_role(role_name, tenant); ret = role->get(dpp(), null_yield); if (ret < 0) { return -ret; @@ -6911,7 +6911,7 @@ int main(int argc, const char **argv) cerr << "ERROR: Role name is empty" << std::endl; return -EINVAL; } - std::unique_ptr role = store->get_role(role_name, tenant); + std::unique_ptr role = driver->get_role(role_name, tenant); ret = role->get(dpp(), null_yield); if (ret < 0) { return -ret; @@ -6931,7 +6931,7 @@ int main(int argc, const char **argv) cerr << "ERROR: policy name is empty" << std::endl; return -EINVAL; } - std::unique_ptr role = store->get_role(role_name, tenant); + std::unique_ptr role = driver->get_role(role_name, tenant); int ret = role->get(dpp(), null_yield); if (ret < 0) { return -ret; @@ -6955,7 +6955,7 @@ int main(int argc, const char **argv) cerr << "ERROR: policy name is empty" << std::endl; return -EINVAL; } - std::unique_ptr role = store->get_role(role_name, tenant); + std::unique_ptr role = driver->get_role(role_name, tenant); ret = role->get(dpp(), null_yield); if (ret < 0) { return -ret; @@ -6979,7 +6979,7 @@ int main(int argc, const char **argv) return -EINVAL; } - std::unique_ptr role = store->get_role(role_name, tenant); + std::unique_ptr role = driver->get_role(role_name, tenant); ret = role->get(dpp(), null_yield); if (ret < 0) { return -ret; @@ -7012,13 +7012,13 @@ int main(int argc, const char **argv) if (opt_cmd == OPT::POLICY) { if (format == "xml") { - int ret = RGWBucketAdminOp::dump_s3_policy(store, bucket_op, cout, dpp()); + int ret = RGWBucketAdminOp::dump_s3_policy(driver, bucket_op, cout, dpp()); if (ret < 0) { cerr << "ERROR: failed to get policy: " << cpp_strerror(-ret) << std::endl; return -ret; } } else { - int ret 
= RGWBucketAdminOp::get_policy(store, bucket_op, stream_flusher, dpp()); + int ret = RGWBucketAdminOp::get_policy(driver, bucket_op, stream_flusher, dpp()); if (ret < 0) { cerr << "ERROR: failed to get policy: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -7037,12 +7037,12 @@ int main(int argc, const char **argv) if (!rgw::sal::User::empty(user)) { user_ids.push_back(user->get_id().id); ret = - RGWBucketAdminOp::limit_check(store, bucket_op, user_ids, stream_flusher, + RGWBucketAdminOp::limit_check(driver, bucket_op, user_ids, stream_flusher, null_yield, dpp(), warnings_only); } else { /* list users in groups of max-keys, then perform user-bucket * limit-check on each group */ - ret = store->meta_list_keys_init(dpp(), metadata_key, string(), &handle); + ret = driver->meta_list_keys_init(dpp(), metadata_key, string(), &handle); if (ret < 0) { cerr << "ERROR: buckets limit check can't get user metadata_key: " << cpp_strerror(-ret) << std::endl; @@ -7050,7 +7050,7 @@ int main(int argc, const char **argv) } do { - ret = store->meta_list_keys_next(dpp(), handle, max, user_ids, + ret = driver->meta_list_keys_next(dpp(), handle, max, user_ids, &truncated); if (ret < 0 && ret != -ENOENT) { cerr << "ERROR: buckets limit check lists_keys_next(): " @@ -7059,14 +7059,14 @@ int main(int argc, const char **argv) } else { /* ok, do the limit checks for this group */ ret = - RGWBucketAdminOp::limit_check(store, bucket_op, user_ids, stream_flusher, + RGWBucketAdminOp::limit_check(driver, bucket_op, user_ids, stream_flusher, null_yield, dpp(), warnings_only); if (ret < 0) break; } user_ids.clear(); } while (truncated); - store->meta_list_keys_complete(handle); + driver->meta_list_keys_complete(handle); } return -ret; } /* OPT::BUCKET_LIMIT_CHECK */ @@ -7079,7 +7079,7 @@ int main(int argc, const char **argv) return -ENOENT; } } - RGWBucketAdminOp::info(store, bucket_op, stream_flusher, null_yield, dpp()); + RGWBucketAdminOp::info(driver, bucket_op, stream_flusher, null_yield, 
dpp()); } else { int ret = init_bucket(user.get(), tenant, bucket_name, bucket_id, &bucket); if (ret < 0) { @@ -7118,7 +7118,7 @@ int main(int argc, const char **argv) ret = bucket->list(dpp(), params, std::min(remaining, paginate_size), results, null_yield); if (ret < 0) { - cerr << "ERROR: store->list_objects(): " << cpp_strerror(-ret) << std::endl; + cerr << "ERROR: driver->list_objects(): " << cpp_strerror(-ret) << std::endl; return -ret; } @@ -7136,7 +7136,7 @@ int main(int argc, const char **argv) } /* OPT::BUCKETS_LIST */ if (opt_cmd == OPT::BUCKET_RADOS_LIST) { - RGWRadosList lister(static_cast(store), + RGWRadosList lister(static_cast(driver), max_concurrent_ios, orphan_stale_secs, tenant); if (rgw_obj_fs) { lister.set_field_separator(*rgw_obj_fs); @@ -7183,7 +7183,7 @@ int main(int argc, const char **argv) if (opt_cmd == OPT::BUCKET_STATS) { if (bucket_name.empty() && !bucket_id.empty()) { rgw_bucket bucket; - if (!rgw_find_bucket_by_id(dpp(), store->ctx(), store, marker, bucket_id, &bucket)) { + if (!rgw_find_bucket_by_id(dpp(), driver->ctx(), driver, marker, bucket_id, &bucket)) { cerr << "failure: no such bucket id" << std::endl; return -ENOENT; } @@ -7192,7 +7192,7 @@ int main(int argc, const char **argv) } bucket_op.set_fetch_stats(true); - int r = RGWBucketAdminOp::info(store, bucket_op, stream_flusher, null_yield, dpp()); + int r = RGWBucketAdminOp::info(driver, bucket_op, stream_flusher, null_yield, dpp()); if (r < 0) { cerr << "failure: " << cpp_strerror(-r) << ": " << err << std::endl; return posix_errortrans(-r); @@ -7203,7 +7203,7 @@ int main(int argc, const char **argv) bucket_op.set_bucket_id(bucket_id); bucket_op.set_new_bucket_name(new_bucket_name); string err; - int r = RGWBucketAdminOp::link(store, bucket_op, dpp(), &err); + int r = RGWBucketAdminOp::link(driver, bucket_op, dpp(), &err); if (r < 0) { cerr << "failure: " << cpp_strerror(-r) << ": " << err << std::endl; return -r; @@ -7211,7 +7211,7 @@ int main(int argc, const char **argv) 
} if (opt_cmd == OPT::BUCKET_UNLINK) { - int r = RGWBucketAdminOp::unlink(store, bucket_op, dpp()); + int r = RGWBucketAdminOp::unlink(driver, bucket_op, dpp()); if (r < 0) { cerr << "failure: " << cpp_strerror(-r) << std::endl; return -r; @@ -7286,7 +7286,7 @@ int main(int argc, const char **argv) bucket_op.set_new_bucket_name(new_bucket_name); string err; - int r = RGWBucketAdminOp::chown(store, bucket_op, marker, dpp(), &err); + int r = RGWBucketAdminOp::chown(driver, bucket_op, marker, dpp(), &err); if (r < 0) { cerr << "failure: " << cpp_strerror(-r) << ": " << err << std::endl; return -r; @@ -7303,7 +7303,7 @@ int main(int argc, const char **argv) formatter->reset(); formatter->open_array_section("logs"); RGWAccessHandle h; - int r = static_cast(store)->getRados()->log_list_init(dpp(), date, &h); + int r = static_cast(driver)->getRados()->log_list_init(dpp(), date, &h); if (r == -ENOENT) { // no logs. } else { @@ -7313,7 +7313,7 @@ int main(int argc, const char **argv) } while (true) { string name; - int r = static_cast(store)->getRados()->log_list_next(h, &name); + int r = static_cast(driver)->getRados()->log_list_next(h, &name); if (r == -ENOENT) break; if (r < 0) { @@ -7348,7 +7348,7 @@ int main(int argc, const char **argv) if (opt_cmd == OPT::LOG_SHOW) { RGWAccessHandle h; - int r = static_cast(store)->getRados()->log_show_init(dpp(), oid, &h); + int r = static_cast(driver)->getRados()->log_show_init(dpp(), oid, &h); if (r < 0) { cerr << "error opening log " << oid << ": " << cpp_strerror(-r) << std::endl; return -r; @@ -7360,7 +7360,7 @@ int main(int argc, const char **argv) struct rgw_log_entry entry; // peek at first entry to get bucket metadata - r = static_cast(store)->getRados()->log_show_next(dpp(), h, &entry); + r = static_cast(driver)->getRados()->log_show_next(dpp(), h, &entry); if (r < 0) { cerr << "error reading log " << oid << ": " << cpp_strerror(-r) << std::endl; return -r; @@ -7396,7 +7396,7 @@ int main(int argc, const char **argv) 
formatter->flush(cout); } next: - r = static_cast(store)->getRados()->log_show_next(dpp(), h, &entry); + r = static_cast(driver)->getRados()->log_show_next(dpp(), h, &entry); } while (r > 0); if (r < 0) { @@ -7419,7 +7419,7 @@ next: cout << std::endl; } if (opt_cmd == OPT::LOG_RM) { - int r = static_cast(store)->getRados()->log_remove(dpp(), oid); + int r = static_cast(driver)->getRados()->log_remove(dpp(), oid); if (r < 0) { cerr << "error removing log " << oid << ": " << cpp_strerror(-r) << std::endl; return -r; @@ -7433,7 +7433,7 @@ next: exit(1); } - int ret = static_cast(store)->svc()->zone->add_bucket_placement(dpp(), pool, null_yield); + int ret = static_cast(driver)->svc()->zone->add_bucket_placement(dpp(), pool, null_yield); if (ret < 0) cerr << "failed to add bucket placement: " << cpp_strerror(-ret) << std::endl; } @@ -7444,14 +7444,14 @@ next: exit(1); } - int ret = static_cast(store)->svc()->zone->remove_bucket_placement(dpp(), pool, null_yield); + int ret = static_cast(driver)->svc()->zone->remove_bucket_placement(dpp(), pool, null_yield); if (ret < 0) cerr << "failed to remove bucket placement: " << cpp_strerror(-ret) << std::endl; } if (opt_cmd == OPT::POOLS_LIST) { set pools; - int ret = static_cast(store)->svc()->zone->list_placement_set(dpp(), pools, null_yield); + int ret = static_cast(driver)->svc()->zone->list_placement_set(dpp(), pools, null_yield); if (ret < 0) { cerr << "could not list placement set: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -7497,7 +7497,7 @@ next: return -ret; } } - ret = RGWUsage::show(dpp(), store, user.get(), bucket.get(), start_epoch, + ret = RGWUsage::show(dpp(), driver, user.get(), bucket.get(), start_epoch, end_epoch, show_log_entries, show_log_sum, &categories, stream_flusher); if (ret < 0) { @@ -7541,7 +7541,7 @@ next: return -ret; } } - ret = RGWUsage::trim(dpp(), store, user.get(), bucket.get(), start_epoch, end_epoch); + ret = RGWUsage::trim(dpp(), driver, user.get(), bucket.get(), start_epoch, 
end_epoch); if (ret < 0) { cerr << "ERROR: read_usage() returned ret=" << ret << std::endl; return 1; @@ -7555,7 +7555,7 @@ next: return 1; } - ret = RGWUsage::clear(dpp(), store); + ret = RGWUsage::clear(dpp(), driver); if (ret < 0) { return ret; } @@ -7581,7 +7581,7 @@ next: } RGWOLHInfo olh; rgw_obj obj(bucket->get_key(), object); - ret = static_cast(store)->getRados()->get_olh(dpp(), bucket->get_info(), obj, &olh); + ret = static_cast(driver)->getRados()->get_olh(dpp(), bucket->get_info(), obj, &olh); if (ret < 0) { cerr << "ERROR: failed reading olh: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -7608,7 +7608,7 @@ next: return -ret; } - ret = static_cast(store)->getRados()->bucket_index_read_olh_log(dpp(), bucket->get_info(), *state, obj->get_obj(), 0, &log, &is_truncated); + ret = static_cast(driver)->getRados()->bucket_index_read_olh_log(dpp(), bucket->get_info(), *state, obj->get_obj(), 0, &log, &is_truncated); if (ret < 0) { cerr << "ERROR: failed reading olh: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -7641,7 +7641,7 @@ next: rgw_cls_bi_entry entry; - ret = static_cast(store)->getRados()->bi_get(dpp(), bucket->get_info(), obj, bi_index_type, &entry); + ret = static_cast(driver)->getRados()->bi_get(dpp(), bucket->get_info(), obj, bi_index_type, &entry); if (ret < 0) { cerr << "ERROR: bi_get(): " << cpp_strerror(-ret) << std::endl; return -ret; @@ -7671,7 +7671,7 @@ next: rgw_obj obj(bucket->get_key(), key); - ret = static_cast(store)->getRados()->bi_put(dpp(), bucket->get_key(), obj, entry); + ret = static_cast(driver)->getRados()->bi_put(dpp(), bucket->get_key(), obj, entry); if (ret < 0) { cerr << "ERROR: bi_put(): " << cpp_strerror(-ret) << std::endl; return -ret; @@ -7702,7 +7702,7 @@ next: int i = (specified_shard_id ? 
shard_id : 0); for (; i < max_shards; i++) { - RGWRados::BucketShard bs(static_cast(store)->getRados()); + RGWRados::BucketShard bs(static_cast(driver)->getRados()); int ret = bs.init(dpp(), bucket->get_info(), index, i); marker.clear(); @@ -7714,7 +7714,7 @@ next: do { entries.clear(); // if object is specified, we use that as a filter to only retrieve some some entries - ret = static_cast(store)->getRados()->bi_list(bs, object, marker, max_entries, &entries, &is_truncated); + ret = static_cast(driver)->getRados()->bi_list(bs, object, marker, max_entries, &entries, &is_truncated); if (ret < 0) { cerr << "ERROR: bi_list(): " << cpp_strerror(-ret) << std::endl; return -ret; @@ -7770,14 +7770,14 @@ next: const int max_shards = rgw::num_shards(index); for (int i = 0; i < max_shards; i++) { - RGWRados::BucketShard bs(static_cast(store)->getRados()); + RGWRados::BucketShard bs(static_cast(driver)->getRados()); int ret = bs.init(dpp(), bucket->get_info(), index, i); if (ret < 0) { cerr << "ERROR: bs.init(bucket=" << bucket << ", shard=" << i << "): " << cpp_strerror(-ret) << std::endl; return -ret; } - ret = static_cast(store)->getRados()->bi_remove(dpp(), bs); + ret = static_cast(driver)->getRados()->bi_remove(dpp(), bs); if (ret < 0) { cerr << "ERROR: failed to remove bucket index object: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -7795,7 +7795,7 @@ next: return EINVAL; } - RGWDataAccess data_access(store); + RGWDataAccess data_access(driver); rgw_obj_key key(object, object_version); RGWDataAccess::BucketRef b; @@ -7833,7 +7833,7 @@ next: return -ret; } rgw_obj_key key(object, object_version); - ret = rgw_remove_object(dpp(), store, bucket.get(), key); + ret = rgw_remove_object(dpp(), driver, bucket.get(), key); if (ret < 0) { cerr << "ERROR: object remove returned: " << cpp_strerror(-ret) << std::endl; @@ -7861,13 +7861,13 @@ next: obj->set_instance(object_version); bool need_rewrite = true; if (min_rewrite_stripe_size > 0) { - ret = 
check_min_obj_stripe_size(store, obj.get(), min_rewrite_stripe_size, &need_rewrite); + ret = check_min_obj_stripe_size(driver, obj.get(), min_rewrite_stripe_size, &need_rewrite); if (ret < 0) { ldpp_dout(dpp(), 0) << "WARNING: check_min_obj_stripe_size failed, r=" << ret << dendl; } } if (need_rewrite) { - ret = static_cast(store)->getRados()->rewrite_obj(obj.get(), dpp(), null_yield); + ret = static_cast(driver)->getRados()->rewrite_obj(obj.get(), dpp(), null_yield); if (ret < 0) { cerr << "ERROR: object rewrite returned: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -7878,14 +7878,14 @@ next: } if (opt_cmd == OPT::OBJECTS_EXPIRE) { - if (!static_cast(store)->getRados()->process_expire_objects(dpp())) { + if (!static_cast(driver)->getRados()->process_expire_objects(dpp())) { cerr << "ERROR: process_expire_objects() processing returned error." << std::endl; return 1; } } if (opt_cmd == OPT::OBJECTS_EXPIRE_STALE_LIST) { - ret = RGWBucketAdminOp::fix_obj_expiry(store, bucket_op, stream_flusher, dpp(), true); + ret = RGWBucketAdminOp::fix_obj_expiry(driver, bucket_op, stream_flusher, dpp(), true); if (ret < 0) { cerr << "ERROR: listing returned " << cpp_strerror(-ret) << std::endl; return -ret; @@ -7893,7 +7893,7 @@ next: } if (opt_cmd == OPT::OBJECTS_EXPIRE_STALE_RM) { - ret = RGWBucketAdminOp::fix_obj_expiry(store, bucket_op, stream_flusher, dpp(), false); + ret = RGWBucketAdminOp::fix_obj_expiry(driver, bucket_op, stream_flusher, dpp(), false); if (ret < 0) { cerr << "ERROR: removing returned " << cpp_strerror(-ret) << std::endl; return -ret; @@ -7948,7 +7948,7 @@ next: result.reserve(NUM_ENTRIES); const auto& current_index = bucket->get_info().layout.current_index; - int r = static_cast(store)->getRados()->cls_bucket_list_ordered( + int r = static_cast(driver)->getRados()->cls_bucket_list_ordered( dpp(), bucket->get_info(), current_index, RGW_NO_SHARD, marker, empty_prefix, empty_delimiter, NUM_ENTRIES, true, expansion_factor, @@ -7989,7 +7989,7 @@ next: 
bool need_rewrite = true; if (min_rewrite_stripe_size > 0) { - r = check_min_obj_stripe_size(store, obj.get(), min_rewrite_stripe_size, &need_rewrite); + r = check_min_obj_stripe_size(driver, obj.get(), min_rewrite_stripe_size, &need_rewrite); if (r < 0) { ldpp_dout(dpp(), 0) << "WARNING: check_min_obj_stripe_size failed, r=" << r << dendl; } @@ -7997,7 +7997,7 @@ next: if (!need_rewrite) { formatter->dump_string("status", "Skipped"); } else { - r = static_cast(store)->getRados()->rewrite_obj(obj.get(), dpp(), null_yield); + r = static_cast(driver)->getRados()->rewrite_obj(obj.get(), dpp(), null_yield); if (r == 0) { formatter->dump_string("status", "Success"); } else { @@ -8017,7 +8017,7 @@ next: } if (opt_cmd == OPT::BUCKET_RESHARD) { - int ret = check_reshard_bucket_params(store, + int ret = check_reshard_bucket_params(driver, bucket_name, tenant, bucket_id, @@ -8029,7 +8029,7 @@ next: return ret; } - auto zone_svc = static_cast(store)->svc()->zone; + auto zone_svc = static_cast(driver)->svc()->zone; if (!zone_svc->can_reshard()) { const auto& zonegroup = zone_svc->get_zonegroup(); std::cerr << "The zonegroup '" << zonegroup.get_name() << "' does not " @@ -8046,7 +8046,7 @@ next: return EINVAL; } - RGWBucketReshard br(static_cast(store), + RGWBucketReshard br(static_cast(driver), bucket->get_info(), bucket->get_attrs(), nullptr /* no callback */); @@ -8068,7 +8068,7 @@ next: } if (opt_cmd == OPT::RESHARD_ADD) { - int ret = check_reshard_bucket_params(store, + int ret = check_reshard_bucket_params(driver, bucket_name, tenant, bucket_id, @@ -8082,7 +8082,7 @@ next: int num_source_shards = rgw::current_num_shards(bucket->get_info().layout); - RGWReshard reshard(static_cast(store), dpp()); + RGWReshard reshard(static_cast(driver), dpp()); cls_rgw_reshard_entry entry; entry.time = real_clock::now(); entry.tenant = tenant; @@ -8102,9 +8102,9 @@ next: } int num_logshards = - store->ctx()->_conf.get_val("rgw_reshard_num_logs"); + 
driver->ctx()->_conf.get_val("rgw_reshard_num_logs"); - RGWReshard reshard(static_cast(store), dpp()); + RGWReshard reshard(static_cast(driver), dpp()); formatter->open_array_section("reshard"); for (int i = 0; i < num_logshards; i++) { @@ -8150,7 +8150,7 @@ next: return -ret; } - RGWBucketReshard br(static_cast(store), + RGWBucketReshard br(static_cast(driver), bucket->get_info(), bucket->get_attrs(), nullptr /* no callback */); list status; @@ -8165,7 +8165,7 @@ next: } if (opt_cmd == OPT::RESHARD_PROCESS) { - RGWReshard reshard(static_cast(store), true, &cout); + RGWReshard reshard(static_cast(driver), true, &cout); int ret = reshard.process_all_logshards(dpp()); if (ret < 0) { @@ -8197,14 +8197,14 @@ next: if (bucket_initable) { // we did not encounter an error, so let's work with the bucket - RGWBucketReshard br(static_cast(store), + RGWBucketReshard br(static_cast(driver), bucket->get_info(), bucket->get_attrs(), nullptr /* no callback */); int ret = br.cancel(dpp()); if (ret < 0) { if (ret == -EBUSY) { cerr << "There is ongoing resharding, please retry after " << - store->ctx()->_conf.get_val("rgw_reshard_bucket_lock_duration") << + driver->ctx()->_conf.get_val("rgw_reshard_bucket_lock_duration") << " seconds." 
<< std::endl; return -ret; } else if (ret == -EINVAL) { @@ -8218,7 +8218,7 @@ next: } } - RGWReshard reshard(static_cast(store), dpp()); + RGWReshard reshard(static_cast(driver), dpp()); cls_rgw_reshard_entry entry; entry.tenant = tenant; @@ -8320,20 +8320,20 @@ next: } do_check_object_locator(tenant, bucket_name, fix, remove_bad, formatter.get()); } else { - RGWBucketAdminOp::check_index(store, bucket_op, stream_flusher, null_yield, dpp()); + RGWBucketAdminOp::check_index(driver, bucket_op, stream_flusher, null_yield, dpp()); } } if (opt_cmd == OPT::BUCKET_RM) { if (!inconsistent_index) { - RGWBucketAdminOp::remove_bucket(store, bucket_op, null_yield, dpp(), bypass_gc, true); + RGWBucketAdminOp::remove_bucket(driver, bucket_op, null_yield, dpp(), bypass_gc, true); } else { if (!yes_i_really_mean_it) { cerr << "using --inconsistent_index can corrupt the bucket index " << std::endl << "do you really mean it? (requires --yes-i-really-mean-it)" << std::endl; return 1; } - RGWBucketAdminOp::remove_bucket(store, bucket_op, null_yield, dpp(), bypass_gc, false); + RGWBucketAdminOp::remove_bucket(driver, bucket_op, null_yield, dpp(), bypass_gc, false); } } @@ -8345,7 +8345,7 @@ next: do { list result; - int ret = static_cast(store)->getRados()->list_gc_objs(&index, marker, 1000, !include_all, result, &truncated, processing_queue); + int ret = static_cast(driver)->getRados()->list_gc_objs(&index, marker, 1000, !include_all, result, &truncated, processing_queue); if (ret < 0) { cerr << "ERROR: failed to list objs: " << cpp_strerror(-ret) << std::endl; return 1; @@ -8375,7 +8375,7 @@ next: } if (opt_cmd == OPT::GC_PROCESS) { - int ret = static_cast(store)->getRados()->process_gc(!include_all); + int ret = static_cast(driver)->getRados()->process_gc(!include_all); if (ret < 0) { cerr << "ERROR: gc processing returned error: " << cpp_strerror(-ret) << std::endl; return 1; @@ -8392,7 +8392,7 @@ next: max_entries = MAX_LC_LIST_ENTRIES; } do { - int ret = 
static_cast(store)->getRados()->list_lc_progress(marker, max_entries, + int ret = static_cast(driver)->getRados()->list_lc_progress(marker, max_entries, bucket_lc_map, index); if (ret < 0) { cerr << "ERROR: failed to list objs: " << cpp_strerror(-ret) @@ -8464,7 +8464,7 @@ next: } int ret = - static_cast(store)->getRados()->process_lc(bucket); + static_cast(driver)->getRados()->process_lc(bucket); if (ret < 0) { cerr << "ERROR: lc processing returned error: " << cpp_strerror(-ret) << std::endl; return 1; @@ -8472,7 +8472,7 @@ next: } if (opt_cmd == OPT::LC_RESHARD_FIX) { - ret = RGWBucketAdminOp::fix_lc_shards(store, bucket_op, stream_flusher, dpp()); + ret = RGWBucketAdminOp::fix_lc_shards(driver, bucket_op, stream_flusher, dpp()); if (ret < 0) { cerr << "ERROR: fixing lc shards: " << cpp_strerror(-ret) << std::endl; } @@ -8491,7 +8491,7 @@ next: << std::endl; } - RGWOrphanSearch search(static_cast(store), max_concurrent_ios, orphan_stale_secs); + RGWOrphanSearch search(static_cast(driver), max_concurrent_ios, orphan_stale_secs); if (job_id.empty()) { cerr << "ERROR: --job-id not specified" << std::endl; @@ -8531,7 +8531,7 @@ next: << std::endl; } - RGWOrphanSearch search(static_cast(store), max_concurrent_ios, orphan_stale_secs); + RGWOrphanSearch search(static_cast(driver), max_concurrent_ios, orphan_stale_secs); if (job_id.empty()) { cerr << "ERROR: --job-id not specified" << std::endl; @@ -8561,7 +8561,7 @@ next: << std::endl; } - RGWOrphanStore orphan_store(static_cast(store)); + RGWOrphanStore orphan_store(static_cast(driver)); int ret = orphan_store.init(dpp()); if (ret < 0){ cerr << "connection to cluster failed!" 
<< std::endl; @@ -8587,7 +8587,7 @@ next: } if (opt_cmd == OPT::USER_CHECK) { - check_bad_user_bucket_mapping(store, user.get(), fix, null_yield, dpp()); + check_bad_user_bucket_mapping(driver, user.get(), fix, null_yield, dpp()); } if (opt_cmd == OPT::USER_STATS) { @@ -8606,7 +8606,7 @@ next: "so at most one of the two should be specified" << std::endl; return EINVAL; } - ret = static_cast(store)->svc()->user->reset_bucket_stats(dpp(), user->get_id(), null_yield); + ret = static_cast(driver)->svc()->user->reset_bucket_stats(dpp(), user->get_id(), null_yield); if (ret < 0) { cerr << "ERROR: could not reset user stats: " << cpp_strerror(-ret) << std::endl; @@ -8628,7 +8628,7 @@ next: return -ret; } } else { - int ret = rgw_user_sync_all_stats(dpp(), store, user.get(), null_yield); + int ret = rgw_user_sync_all_stats(dpp(), driver, user.get(), null_yield); if (ret < 0) { cerr << "ERROR: could not sync user stats: " << cpp_strerror(-ret) << std::endl; @@ -8664,7 +8664,7 @@ next: } if (opt_cmd == OPT::METADATA_GET) { - int ret = static_cast(store)->ctl()->meta.mgr->get(metadata_key, formatter.get(), null_yield, dpp()); + int ret = static_cast(driver)->ctl()->meta.mgr->get(metadata_key, formatter.get(), null_yield, dpp()); if (ret < 0) { cerr << "ERROR: can't get key: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -8680,7 +8680,7 @@ next: cerr << "ERROR: failed to read input: " << cpp_strerror(-ret) << std::endl; return -ret; } - ret = static_cast(store)->ctl()->meta.mgr->put(metadata_key, bl, null_yield, dpp(), RGWMDLogSyncType::APPLY_ALWAYS, false); + ret = static_cast(driver)->ctl()->meta.mgr->put(metadata_key, bl, null_yield, dpp(), RGWMDLogSyncType::APPLY_ALWAYS, false); if (ret < 0) { cerr << "ERROR: can't put key: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -8688,7 +8688,7 @@ next: } if (opt_cmd == OPT::METADATA_RM) { - int ret = static_cast(store)->ctl()->meta.mgr->remove(metadata_key, null_yield, dpp()); + int ret = 
static_cast(driver)->ctl()->meta.mgr->remove(metadata_key, null_yield, dpp()); if (ret < 0) { cerr << "ERROR: can't remove key: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -8701,7 +8701,7 @@ next: } void *handle; int max = 1000; - int ret = store->meta_list_keys_init(dpp(), metadata_key, marker, &handle); + int ret = driver->meta_list_keys_init(dpp(), metadata_key, marker, &handle); if (ret < 0) { cerr << "ERROR: can't get key: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -8719,7 +8719,7 @@ next: do { list keys; left = (max_entries_specified ? max_entries - count : max); - ret = store->meta_list_keys_next(dpp(), handle, left, keys, &truncated); + ret = driver->meta_list_keys_next(dpp(), handle, left, keys, &truncated); if (ret < 0 && ret != -ENOENT) { cerr << "ERROR: lists_keys_next(): " << cpp_strerror(-ret) << std::endl; return -ret; @@ -8738,13 +8738,13 @@ next: encode_json("truncated", truncated, formatter.get()); encode_json("count", count, formatter.get()); if (truncated) { - encode_json("marker", store->meta_get_marker(handle), formatter.get()); + encode_json("marker", driver->meta_get_marker(handle), formatter.get()); } formatter->close_section(); } formatter->flush(cout); - store->meta_list_keys_complete(handle); + driver->meta_list_keys_complete(handle); } if (opt_cmd == OPT::MDLOG_LIST) { @@ -8784,7 +8784,7 @@ next: std::cerr << "No --period given, using current period=" << period_id << std::endl; } - RGWMetadataLog *meta_log = static_cast(store)->svc()->mdlog->get_log(period_id); + RGWMetadataLog *meta_log = static_cast(driver)->svc()->mdlog->get_log(period_id); formatter->open_array_section("entries"); for (; i < g_ceph_context->_conf->rgw_md_log_max_shards; i++) { @@ -8802,7 +8802,7 @@ next: for (list::iterator iter = entries.begin(); iter != entries.end(); ++iter) { cls_log_entry& entry = *iter; - static_cast(store)->ctl()->meta.mgr->dump_log_entry(entry, formatter.get()); + 
static_cast(driver)->ctl()->meta.mgr->dump_log_entry(entry, formatter.get()); } formatter->flush(cout); } while (truncated); @@ -8834,7 +8834,7 @@ next: std::cerr << "No --period given, using current period=" << period_id << std::endl; } - RGWMetadataLog *meta_log = static_cast(store)->svc()->mdlog->get_log(period_id); + RGWMetadataLog *meta_log = static_cast(driver)->svc()->mdlog->get_log(period_id); formatter->open_array_section("entries"); @@ -8855,10 +8855,10 @@ next: if (opt_cmd == OPT::MDLOG_AUTOTRIM) { // need a full history for purging old mdlog periods - static_cast(store)->svc()->mdlog->init_oldest_log_period(null_yield, dpp()); + static_cast(driver)->svc()->mdlog->init_oldest_log_period(null_yield, dpp()); - RGWCoroutinesManager crs(store->ctx(), store->get_cr_registry()); - RGWHTTPManager http(store->ctx(), crs.get_completion_mgr()); + RGWCoroutinesManager crs(driver->ctx(), driver->get_cr_registry()); + RGWHTTPManager http(driver->ctx(), crs.get_completion_mgr()); int ret = http.start(); if (ret < 0) { cerr << "failed to initialize http client with " << cpp_strerror(ret) << std::endl; @@ -8867,7 +8867,7 @@ next: auto num_shards = g_conf()->rgw_md_log_max_shards; auto mltcr = create_admin_meta_log_trim_cr( - dpp(), static_cast(store), &http, num_shards); + dpp(), static_cast(driver), &http, num_shards); if (!mltcr) { cerr << "Cluster misconfigured! Unable to trim." 
<< std::endl; return -EIO; @@ -8915,7 +8915,7 @@ next: std::cerr << "missing --period argument" << std::endl; return EINVAL; } - RGWMetadataLog *meta_log = static_cast(store)->svc()->mdlog->get_log(period_id); + RGWMetadataLog *meta_log = static_cast(driver)->svc()->mdlog->get_log(period_id); // trim until -ENODATA do { @@ -8936,7 +8936,7 @@ next: } if (opt_cmd == OPT::METADATA_SYNC_STATUS) { - RGWMetaSyncStatusManager sync(static_cast(store), static_cast(store)->svc()->rados->get_async_processor()); + RGWMetaSyncStatusManager sync(static_cast(driver), static_cast(driver)->svc()->rados->get_async_processor()); int ret = sync.init(dpp()); if (ret < 0) { @@ -8980,7 +8980,7 @@ next: } if (opt_cmd == OPT::METADATA_SYNC_INIT) { - RGWMetaSyncStatusManager sync(static_cast(store), static_cast(store)->svc()->rados->get_async_processor()); + RGWMetaSyncStatusManager sync(static_cast(driver), static_cast(driver)->svc()->rados->get_async_processor()); int ret = sync.init(dpp()); if (ret < 0) { @@ -8996,7 +8996,7 @@ next: if (opt_cmd == OPT::METADATA_SYNC_RUN) { - RGWMetaSyncStatusManager sync(static_cast(store), static_cast(store)->svc()->rados->get_async_processor()); + RGWMetaSyncStatusManager sync(static_cast(driver), static_cast(driver)->svc()->rados->get_async_processor()); int ret = sync.init(dpp()); if (ret < 0) { @@ -9016,7 +9016,7 @@ next: cerr << "ERROR: source zone not specified" << std::endl; return EINVAL; } - RGWDataSyncStatusManager sync(static_cast(store), static_cast(store)->svc()->rados->get_async_processor(), source_zone, nullptr); + RGWDataSyncStatusManager sync(static_cast(driver), static_cast(driver)->svc()->rados->get_async_processor(), source_zone, nullptr); int ret = sync.init(dpp()); if (ret < 0) { @@ -9086,7 +9086,7 @@ next: return EINVAL; } - RGWDataSyncStatusManager sync(static_cast(store), static_cast(store)->svc()->rados->get_async_processor(), source_zone, nullptr); + RGWDataSyncStatusManager sync(static_cast(driver), 
static_cast(driver)->svc()->rados->get_async_processor(), source_zone, nullptr); int ret = sync.init(dpp()); if (ret < 0) { @@ -9108,14 +9108,14 @@ next: } RGWSyncModuleInstanceRef sync_module; - int ret = static_cast(store)->svc()->sync_modules->get_manager()->create_instance(dpp(), g_ceph_context, static_cast(store)->svc()->zone->get_zone().tier_type, - static_cast(store)->svc()->zone->get_zone_params().tier_config, &sync_module); + int ret = static_cast(driver)->svc()->sync_modules->get_manager()->create_instance(dpp(), g_ceph_context, static_cast(driver)->svc()->zone->get_zone().tier_type, + static_cast(driver)->svc()->zone->get_zone_params().tier_config, &sync_module); if (ret < 0) { ldpp_dout(dpp(), -1) << "ERROR: failed to init sync module instance, ret=" << ret << dendl; return ret; } - RGWDataSyncStatusManager sync(static_cast(store), static_cast(store)->svc()->rados->get_async_processor(), source_zone, nullptr, sync_module); + RGWDataSyncStatusManager sync(static_cast(driver), static_cast(driver)->svc()->rados->get_async_processor(), source_zone, nullptr, sync_module); ret = sync.init(dpp()); if (ret < 0) { @@ -9155,7 +9155,7 @@ next: } auto sync = RGWBucketPipeSyncStatusManager::construct( - dpp(), static_cast(store), source_zone, opt_sb, + dpp(), static_cast(driver), source_zone, opt_sb, bucket->get_key(), extra_info ? 
&std::cout : nullptr); if (!sync) { @@ -9183,13 +9183,13 @@ next: return -ret; } - if (!static_cast(store)->ctl()->bucket->bucket_imports_data(bucket->get_key(), null_yield, dpp())) { + if (!static_cast(driver)->ctl()->bucket->bucket_imports_data(bucket->get_key(), null_yield, dpp())) { std::cout << "Sync is disabled for bucket " << bucket_name << std::endl; return 0; } RGWBucketSyncPolicyHandlerRef handler; - ret = store->get_sync_policy_handler(dpp(), std::nullopt, bucket->get_key(), &handler, null_yield); + ret = driver->get_sync_policy_handler(dpp(), std::nullopt, bucket->get_key(), &handler, null_yield); if (ret < 0) { std::cerr << "ERROR: failed to get policy handler for bucket (" << bucket << "): r=" << ret << ": " << cpp_strerror(-ret) << std::endl; @@ -9197,7 +9197,7 @@ next: } auto timeout_at = ceph::coarse_mono_clock::now() + opt_timeout_sec; - ret = rgw_bucket_sync_checkpoint(dpp(), static_cast(store), *handler, bucket->get_info(), + ret = rgw_bucket_sync_checkpoint(dpp(), static_cast(driver), *handler, bucket->get_info(), opt_source_zone, opt_source_bucket, opt_retry_delay_ms, timeout_at); if (ret < 0) { @@ -9218,7 +9218,7 @@ next: } bucket_op.set_tenant(tenant); string err_msg; - ret = RGWBucketAdminOp::sync_bucket(store, bucket_op, dpp(), &err_msg); + ret = RGWBucketAdminOp::sync_bucket(driver, bucket_op, dpp(), &err_msg); if (ret < 0) { cerr << err_msg << std::endl; return -ret; @@ -9234,7 +9234,7 @@ next: if (ret < 0) { return -ret; } - bucket_sync_info(store, bucket->get_info(), std::cout); + bucket_sync_info(driver, bucket->get_info(), std::cout); } if (opt_cmd == OPT::BUCKET_SYNC_STATUS) { @@ -9246,7 +9246,7 @@ next: if (ret < 0) { return -ret; } - bucket_sync_status(store, bucket->get_info(), source_zone, opt_source_bucket, std::cout); + bucket_sync_status(driver, bucket->get_info(), source_zone, opt_source_bucket, std::cout); } if (opt_cmd == OPT::BUCKET_SYNC_MARKERS) { @@ -9263,7 +9263,7 @@ next: return -ret; } auto sync = 
RGWBucketPipeSyncStatusManager::construct( - dpp(), static_cast(store), source_zone, + dpp(), static_cast(driver), source_zone, opt_source_bucket, bucket->get_key(), nullptr); if (!sync) { @@ -9296,7 +9296,7 @@ next: return -ret; } auto sync = RGWBucketPipeSyncStatusManager::construct( - dpp(), static_cast(store), source_zone, + dpp(), static_cast(driver), source_zone, opt_source_bucket, bucket->get_key(), extra_info ? &std::cout : nullptr); if (!sync) { @@ -9340,7 +9340,7 @@ next: do { list entries; - ret = static_cast(store)->svc()->bilog_rados->log_list(dpp(), bucket->get_info(), log_layout, shard_id, marker, max_entries - count, entries, &truncated); + ret = static_cast(driver)->svc()->bilog_rados->log_list(dpp(), bucket->get_info(), log_layout, shard_id, marker, max_entries - count, entries, &truncated); if (ret < 0) { cerr << "ERROR: list_bi_log_entries(): " << cpp_strerror(-ret) << std::endl; return -ret; @@ -9404,7 +9404,7 @@ next: do { list entries; - ret = static_cast(store)->svc()->cls->timelog.list(dpp(), oid, {}, {}, max_entries - count, entries, marker, &marker, &truncated, + ret = static_cast(driver)->svc()->cls->timelog.list(dpp(), oid, {}, {}, max_entries - count, entries, marker, &marker, &truncated, null_yield); if (ret == -ENOENT) { break; @@ -9832,7 +9832,7 @@ next: if (!gen) { gen = 0; } - ret = bilog_trim(dpp(), static_cast(store), + ret = bilog_trim(dpp(), static_cast(driver), bucket->get_info(), *gen, shard_id, start_marker, end_marker); if (ret < 0) { @@ -9863,7 +9863,7 @@ next: log_layout = *i; } - ret = static_cast(store)->svc()->bilog_rados->get_log_status(dpp(), bucket->get_info(), log_layout, shard_id, + ret = static_cast(driver)->svc()->bilog_rados->get_log_status(dpp(), bucket->get_info(), log_layout, shard_id, &markers, null_yield); if (ret < 0) { cerr << "ERROR: get_bi_log_status(): " << cpp_strerror(-ret) << std::endl; @@ -9879,8 +9879,8 @@ next: } if (opt_cmd == OPT::BILOG_AUTOTRIM) { - RGWCoroutinesManager crs(store->ctx(), 
store->get_cr_registry()); - RGWHTTPManager http(store->ctx(), crs.get_completion_mgr()); + RGWCoroutinesManager crs(driver->ctx(), driver->get_cr_registry()); + RGWHTTPManager http(driver->ctx(), crs.get_completion_mgr()); int ret = http.start(); if (ret < 0) { cerr << "failed to initialize http client with " << cpp_strerror(ret) << std::endl; @@ -9888,9 +9888,9 @@ next: } rgw::BucketTrimConfig config; - configure_bucket_trim(store->ctx(), config); + configure_bucket_trim(driver->ctx(), config); - rgw::BucketTrimManager trim(static_cast(store), config); + rgw::BucketTrimManager trim(static_cast(driver), config); ret = trim.init(); if (ret < 0) { cerr << "trim manager init failed with " << cpp_strerror(ret) << std::endl; @@ -9930,7 +9930,7 @@ next: } } - auto datalog_svc = static_cast(store)->svc()->datalog_rados; + auto datalog_svc = static_cast(driver)->svc()->datalog_rados; RGWDataChangesLog::LogMarker log_marker; do { @@ -9972,7 +9972,7 @@ next: list entries; RGWDataChangesLogInfo info; - static_cast(store)->svc()->datalog_rados->get_info(dpp(), i, &info); + static_cast(driver)->svc()->datalog_rados->get_info(dpp(), i, &info); ::encode_json("info", info, formatter.get()); @@ -9985,8 +9985,8 @@ next: } if (opt_cmd == OPT::DATALOG_AUTOTRIM) { - RGWCoroutinesManager crs(store->ctx(), store->get_cr_registry()); - RGWHTTPManager http(store->ctx(), crs.get_completion_mgr()); + RGWCoroutinesManager crs(driver->ctx(), driver->get_cr_registry()); + RGWHTTPManager http(driver->ctx(), crs.get_completion_mgr()); int ret = http.start(); if (ret < 0) { cerr << "failed to initialize http client with " << cpp_strerror(ret) << std::endl; @@ -9995,7 +9995,7 @@ next: auto num_shards = g_conf()->rgw_data_log_num_shards; std::vector markers(num_shards); - ret = crs.run(dpp(), create_admin_data_log_trim_cr(dpp(), static_cast(store), &http, num_shards, markers)); + ret = crs.run(dpp(), create_admin_data_log_trim_cr(dpp(), static_cast(driver), &http, num_shards, markers)); if (ret < 
0) { cerr << "automated datalog trim failed with " << cpp_strerror(ret) << std::endl; return -ret; @@ -10034,7 +10034,7 @@ next: return EINVAL; } - auto datalog = static_cast(store)->svc()->datalog_rados; + auto datalog = static_cast(driver)->svc()->datalog_rados; ret = datalog->trim_entries(dpp(), shard_id, marker); if (ret < 0 && ret != -ENODATA) { @@ -10048,7 +10048,7 @@ next: std::cerr << "log-type not specified." << std::endl; return -EINVAL; } - auto datalog = static_cast(store)->svc()->datalog_rados; + auto datalog = static_cast(driver)->svc()->datalog_rados; ret = datalog->change_format(dpp(), *opt_log_type, null_yield); if (ret < 0) { cerr << "ERROR: change_format(): " << cpp_strerror(-ret) << std::endl; @@ -10057,7 +10057,7 @@ next: } if (opt_cmd == OPT::DATALOG_PRUNE) { - auto datalog = static_cast(store)->svc()->datalog_rados; + auto datalog = static_cast(driver)->svc()->datalog_rados; std::optional through; ret = datalog->trim_generations(dpp(), through); @@ -10086,7 +10086,7 @@ next: cerr << "ERROR: invalid quota scope specification." << std::endl; return EINVAL; } - set_bucket_quota(store, opt_cmd, tenant, bucket_name, + set_bucket_quota(driver, opt_cmd, tenant, bucket_name, max_size, max_objects, have_max_size, have_max_objects); } else if (!rgw::sal::User::empty(user)) { if (quota_scope == "bucket") { @@ -10113,7 +10113,7 @@ next: cerr << "ERROR: invalid ratelimit scope specification. (bucket scope is not bucket but bucket has been specified)" << std::endl; return EINVAL; } - return set_bucket_ratelimit(store, opt_cmd, tenant, bucket_name, + return set_bucket_ratelimit(driver, opt_cmd, tenant, bucket_name, max_read_ops, max_write_ops, max_read_bytes, max_write_bytes, have_max_read_ops, have_max_write_ops, @@ -10141,7 +10141,7 @@ next: cerr << "ERROR: invalid ratelimit scope specification. 
(bucket scope is not bucket but bucket has been specified)" << std::endl; return EINVAL; } - return show_bucket_ratelimit(store, tenant, bucket_name, formatter.get()); + return show_bucket_ratelimit(driver, tenant, bucket_name, formatter.get()); } else if (!rgw::sal::User::empty(user)) { } if (ratelimit_scope == "user") { return show_user_ratelimit(user, formatter.get()); @@ -10193,14 +10193,14 @@ next: } real_time mtime = real_clock::now(); - string oid = static_cast(store)->svc()->cls->mfa.get_mfa_oid(user->get_id()); + string oid = static_cast(driver)->svc()->cls->mfa.get_mfa_oid(user->get_id()); - int ret = static_cast(store)->ctl()->meta.mgr->mutate(RGWSI_MetaBackend_OTP::get_meta_key(user->get_id()), + int ret = static_cast(driver)->ctl()->meta.mgr->mutate(RGWSI_MetaBackend_OTP::get_meta_key(user->get_id()), mtime, &objv_tracker, null_yield, dpp(), MDLOG_STATUS_WRITE, [&] { - return static_cast(store)->svc()->cls->mfa.create_mfa(dpp(), user->get_id(), config, &objv_tracker, mtime, null_yield); + return static_cast(driver)->svc()->cls->mfa.create_mfa(dpp(), user->get_id(), config, &objv_tracker, mtime, null_yield); }); if (ret < 0) { cerr << "MFA creation failed, error: " << cpp_strerror(-ret) << std::endl; @@ -10231,12 +10231,12 @@ next: real_time mtime = real_clock::now(); - int ret = static_cast(store)->ctl()->meta.mgr->mutate(RGWSI_MetaBackend_OTP::get_meta_key(user->get_id()), + int ret = static_cast(driver)->ctl()->meta.mgr->mutate(RGWSI_MetaBackend_OTP::get_meta_key(user->get_id()), mtime, &objv_tracker, null_yield, dpp(), MDLOG_STATUS_WRITE, [&] { - return static_cast(store)->svc()->cls->mfa.remove_mfa(dpp(), user->get_id(), totp_serial, &objv_tracker, mtime, null_yield); + return static_cast(driver)->svc()->cls->mfa.remove_mfa(dpp(), user->get_id(), totp_serial, &objv_tracker, mtime, null_yield); }); if (ret < 0) { cerr << "MFA removal failed, error: " << cpp_strerror(-ret) << std::endl; @@ -10266,7 +10266,7 @@ next: } rados::cls::otp::otp_info_t 
result; - int ret = static_cast(store)->svc()->cls->mfa.get_mfa(dpp(), user->get_id(), totp_serial, &result, null_yield); + int ret = static_cast(driver)->svc()->cls->mfa.get_mfa(dpp(), user->get_id(), totp_serial, &result, null_yield); if (ret < 0) { if (ret == -ENOENT || ret == -ENODATA) { cerr << "MFA serial id not found" << std::endl; @@ -10288,7 +10288,7 @@ next: } list result; - int ret = static_cast(store)->svc()->cls->mfa.list_mfa(dpp(), user->get_id(), &result, null_yield); + int ret = static_cast(driver)->svc()->cls->mfa.list_mfa(dpp(), user->get_id(), &result, null_yield); if (ret < 0) { cerr << "MFA listing failed, error: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -10316,7 +10316,7 @@ next: } list result; - int ret = static_cast(store)->svc()->cls->mfa.check_mfa(dpp(), user->get_id(), totp_serial, totp_pin.front(), null_yield); + int ret = static_cast(driver)->svc()->cls->mfa.check_mfa(dpp(), user->get_id(), totp_serial, totp_pin.front(), null_yield); if (ret < 0) { cerr << "MFA check failed, error: " << cpp_strerror(-ret) << std::endl; return -ret; @@ -10342,7 +10342,7 @@ next: } rados::cls::otp::otp_info_t config; - int ret = static_cast(store)->svc()->cls->mfa.get_mfa(dpp(), user->get_id(), totp_serial, &config, null_yield); + int ret = static_cast(driver)->svc()->cls->mfa.get_mfa(dpp(), user->get_id(), totp_serial, &config, null_yield); if (ret < 0) { if (ret == -ENOENT || ret == -ENODATA) { cerr << "MFA serial id not found" << std::endl; @@ -10354,14 +10354,14 @@ next: ceph::real_time now; - ret = static_cast(store)->svc()->cls->mfa.otp_get_current_time(dpp(), user->get_id(), &now, null_yield); + ret = static_cast(driver)->svc()->cls->mfa.otp_get_current_time(dpp(), user->get_id(), &now, null_yield); if (ret < 0) { cerr << "ERROR: failed to fetch current time from osd: " << cpp_strerror(-ret) << std::endl; return -ret; } time_t time_ofs; - ret = scan_totp(store->ctx(), now, config, totp_pin, &time_ofs); + ret = scan_totp(driver->ctx(), 
now, config, totp_pin, &time_ofs); if (ret < 0) { if (ret == -ENOENT) { cerr << "failed to resync, TOTP values not found in range" << std::endl; @@ -10376,12 +10376,12 @@ next: /* now update the backend */ real_time mtime = real_clock::now(); - ret = static_cast(store)->ctl()->meta.mgr->mutate(RGWSI_MetaBackend_OTP::get_meta_key(user->get_id()), + ret = static_cast(driver)->ctl()->meta.mgr->mutate(RGWSI_MetaBackend_OTP::get_meta_key(user->get_id()), mtime, &objv_tracker, null_yield, dpp(), MDLOG_STATUS_WRITE, [&] { - return static_cast(store)->svc()->cls->mfa.create_mfa(dpp(), user->get_id(), config, &objv_tracker, mtime, null_yield); + return static_cast(driver)->svc()->cls->mfa.create_mfa(dpp(), user->get_id(), config, &objv_tracker, mtime, null_yield); }); if (ret < 0) { cerr << "MFA update failed, error: " << cpp_strerror(-ret) << std::endl; @@ -10391,26 +10391,26 @@ next: } if (opt_cmd == OPT::RESHARD_STALE_INSTANCES_LIST) { - if (!static_cast(store)->svc()->zone->can_reshard() && !yes_i_really_mean_it) { + if (!static_cast(driver)->svc()->zone->can_reshard() && !yes_i_really_mean_it) { cerr << "Resharding disabled in a multisite env, stale instances unlikely from resharding" << std::endl; cerr << "These instances may not be safe to delete." << std::endl; cerr << "Use --yes-i-really-mean-it to force displaying these instances." << std::endl; return EINVAL; } - ret = RGWBucketAdminOp::list_stale_instances(store, bucket_op, stream_flusher, dpp()); + ret = RGWBucketAdminOp::list_stale_instances(driver, bucket_op, stream_flusher, dpp()); if (ret < 0) { cerr << "ERROR: listing stale instances" << cpp_strerror(-ret) << std::endl; } } if (opt_cmd == OPT::RESHARD_STALE_INSTANCES_DELETE) { - if (!static_cast(store)->svc()->zone->can_reshard()) { + if (!static_cast(driver)->svc()->zone->can_reshard()) { cerr << "Resharding disabled in a multisite env. Stale instances are not safe to be deleted." 
<< std::endl; return EINVAL; } - ret = RGWBucketAdminOp::clear_stale_instances(store, bucket_op, stream_flusher, dpp()); + ret = RGWBucketAdminOp::clear_stale_instances(driver, bucket_op, stream_flusher, dpp()); if (ret < 0) { cerr << "ERROR: deleting stale instances" << cpp_strerror(-ret) << std::endl; } @@ -10418,7 +10418,7 @@ next: if (opt_cmd == OPT::PUBSUB_TOPICS_LIST) { - RGWPubSub ps(static_cast(store), tenant); + RGWPubSub ps(static_cast(driver), tenant); if (!bucket_name.empty()) { rgw_pubsub_bucket_topics result; @@ -10453,7 +10453,7 @@ next: return EINVAL; } - RGWPubSub ps(static_cast(store), tenant); + RGWPubSub ps(static_cast(driver), tenant); rgw_pubsub_topic_subs topic; ret = ps.get_topic(topic_name, &topic); @@ -10471,7 +10471,7 @@ next: return EINVAL; } - RGWPubSub ps(static_cast(store), tenant); + RGWPubSub ps(static_cast(driver), tenant); ret = ps.remove_topic(dpp(), topic_name, null_yield); if (ret < 0) { @@ -10510,7 +10510,7 @@ next: cerr << "ERROR: cannot specify tenant in background context" << std::endl; return EINVAL; } - auto lua_manager = store->get_lua_manager(); + auto lua_manager = driver->get_lua_manager(); rc = rgw::lua::write_script(dpp(), lua_manager.get(), tenant, null_yield, script_ctx, script); if (rc < 0) { cerr << "ERROR: failed to put script. error: " << rc << std::endl; @@ -10528,7 +10528,7 @@ next: cerr << "ERROR: invalid script context: " << *str_script_ctx << ". must be one of: " << LUA_CONTEXT_LIST << std::endl; return EINVAL; } - auto lua_manager = store->get_lua_manager(); + auto lua_manager = driver->get_lua_manager(); std::string script; const auto rc = rgw::lua::read_script(dpp(), lua_manager.get(), tenant, null_yield, script_ctx, script); if (rc == -ENOENT) { @@ -10552,7 +10552,7 @@ next: cerr << "ERROR: invalid script context: " << *str_script_ctx << ". 
must be one of: " << LUA_CONTEXT_LIST << std::endl; return EINVAL; } - auto lua_manager = store->get_lua_manager(); + auto lua_manager = driver->get_lua_manager(); const auto rc = rgw::lua::delete_script(dpp(), lua_manager.get(), tenant, null_yield, script_ctx); if (rc < 0) { cerr << "ERROR: failed to remove script. error: " << rc << std::endl; @@ -10566,7 +10566,7 @@ next: cerr << "ERROR: lua package name was not provided (via --package)" << std::endl; return EINVAL; } - const auto rc = rgw::lua::add_package(dpp(), store, null_yield, *script_package, bool(allow_compilation)); + const auto rc = rgw::lua::add_package(dpp(), driver, null_yield, *script_package, bool(allow_compilation)); if (rc < 0) { cerr << "ERROR: failed to add lua package: " << script_package << " .error: " << rc << std::endl; return -rc; @@ -10583,7 +10583,7 @@ next: cerr << "ERROR: lua package name was not provided (via --package)" << std::endl; return EINVAL; } - const auto rc = rgw::lua::remove_package(dpp(), store, null_yield, *script_package); + const auto rc = rgw::lua::remove_package(dpp(), driver, null_yield, *script_package); if (rc == -ENOENT) { cerr << "WARNING: package " << script_package << " did not exists or already removed" << std::endl; return 0; @@ -10601,7 +10601,7 @@ next: if (opt_cmd == OPT::SCRIPT_PACKAGE_LIST) { #ifdef WITH_RADOSGW_LUA_PACKAGES rgw::lua::packages_t packages; - const auto rc = rgw::lua::list_packages(dpp(), store, null_yield, packages); + const auto rc = rgw::lua::list_packages(dpp(), driver, null_yield, packages); if (rc == -ENOENT) { std::cout << "no lua packages in allowlist" << std::endl; } else if (rc < 0) { diff --git a/src/rgw/rgw_appmain.cc b/src/rgw/rgw_appmain.cc index 5a9c1641da808..cea408c174519 100644 --- a/src/rgw/rgw_appmain.cc +++ b/src/rgw/rgw_appmain.cc @@ -211,8 +211,8 @@ void rgw::AppMain::init_storage() (g_conf()->rgw_run_sync_thread && ((!nfs) || (nfs && g_conf()->rgw_nfs_run_sync_thread))); - StoreManager::Config cfg = 
StoreManager::get_config(false, g_ceph_context); - store = StoreManager::get_storage(dpp, dpp->get_cct(), + DriverManager::Config cfg = DriverManager::get_config(false, g_ceph_context); + driver = DriverManager::get_storage(dpp, dpp->get_cct(), cfg, run_gc, run_lc, @@ -238,7 +238,7 @@ void rgw::AppMain::init_http_clients() void rgw::AppMain::cond_init_apis() { - rgw_rest_init(g_ceph_context, store->get_zone()->get_zonegroup()); + rgw_rest_init(g_ceph_context, driver->get_zone()->get_zonegroup()); if (have_http_frontend) { std::vector apis; @@ -272,7 +272,7 @@ void rgw::AppMain::cond_init_apis() if (apis_map.count("s3") > 0 || s3website_enabled) { if (!swift_at_root) { rest.register_default_mgr(set_logging( - rest_filter(store, RGW_REST_S3, + rest_filter(driver, RGW_REST_S3, new RGWRESTMgr_S3(s3website_enabled, sts_enabled, iam_enabled, pubsub_enabled)))); } else { @@ -297,10 +297,10 @@ void rgw::AppMain::cond_init_apis() if (! swift_at_root) { rest.register_resource(g_conf()->rgw_swift_url_prefix, - set_logging(rest_filter(store, RGW_REST_SWIFT, + set_logging(rest_filter(driver, RGW_REST_SWIFT, swift_resource))); } else { - if (store->get_zone()->get_zonegroup().get_zone_count() > 1) { + if (driver->get_zone()->get_zonegroup().get_zone_count() > 1) { derr << "Placing Swift API in the root of URL hierarchy while running" << " multi-site configuration requires another instance of RadosGW" << " with S3 API enabled!" 
<< dendl; @@ -319,8 +319,8 @@ void rgw::AppMain::cond_init_apis() RGWRESTMgr_Admin *admin_resource = new RGWRESTMgr_Admin; admin_resource->register_resource("info", new RGWRESTMgr_Info); admin_resource->register_resource("usage", new RGWRESTMgr_Usage); - /* Register store-specific admin APIs */ - store->register_admin_apis(admin_resource); + /* Register driver-specific admin APIs */ + driver->register_admin_apis(admin_resource); rest.register_resource(g_conf()->rgw_admin_entry, admin_resource); } } /* have_http_frontend */ @@ -328,12 +328,12 @@ void rgw::AppMain::cond_init_apis() void rgw::AppMain::init_ldap() { - const string &ldap_uri = store->ctx()->_conf->rgw_ldap_uri; - const string &ldap_binddn = store->ctx()->_conf->rgw_ldap_binddn; - const string &ldap_searchdn = store->ctx()->_conf->rgw_ldap_searchdn; - const string &ldap_searchfilter = store->ctx()->_conf->rgw_ldap_searchfilter; - const string &ldap_dnattr = store->ctx()->_conf->rgw_ldap_dnattr; - std::string ldap_bindpw = parse_rgw_ldap_bindpw(store->ctx()); + const string &ldap_uri = driver->ctx()->_conf->rgw_ldap_uri; + const string &ldap_binddn = driver->ctx()->_conf->rgw_ldap_binddn; + const string &ldap_searchdn = driver->ctx()->_conf->rgw_ldap_searchdn; + const string &ldap_searchfilter = driver->ctx()->_conf->rgw_ldap_searchfilter; + const string &ldap_dnattr = driver->ctx()->_conf->rgw_ldap_dnattr; + std::string ldap_bindpw = parse_rgw_ldap_bindpw(driver->ctx()); ldh.reset(new rgw::LDAPHelper(ldap_uri, ldap_binddn, ldap_bindpw.c_str(), ldap_searchdn, ldap_searchfilter, ldap_dnattr)); @@ -343,7 +343,7 @@ void rgw::AppMain::init_ldap() void rgw::AppMain::init_opslog() { - rgw_log_usage_init(dpp->get_cct(), store); + rgw_log_usage_init(dpp->get_cct(), driver); OpsLogManifold *olog_manifold = new OpsLogManifold(); if (!g_conf()->rgw_ops_log_socket_path.empty()) { @@ -359,7 +359,7 @@ void rgw::AppMain::init_opslog() ops_log_file->start(); olog_manifold->add_sink(ops_log_file); } - 
olog_manifold->add_sink(new OpsLogRados(store)); + olog_manifold->add_sink(new OpsLogRados(driver)); olog = olog_manifold; } /* init_opslog */ @@ -390,7 +390,7 @@ int rgw::AppMain::init_frontends2(RGWLib* rgwlib) implicit_tenant_context.reset(new rgw::auth::ImplicitTenants{g_conf()}); g_conf().add_observer(implicit_tenant_context.get()); auto auth_registry = - rgw::auth::StrategyRegistry::create(dpp->get_cct(), *(implicit_tenant_context.get()), store); + rgw::auth::StrategyRegistry::create(dpp->get_cct(), *(implicit_tenant_context.get()), driver); /* allocate a mime table (you'd never guess that from the name) */ rgw_tools_init(dpp, dpp->get_cct()); @@ -421,7 +421,7 @@ int rgw::AppMain::init_frontends2(RGWLib* rgwlib) std::string uri_prefix; config->get_val("prefix", "", &uri_prefix); - RGWProcessEnv env = {store, &rest, olog, port, uri_prefix, + RGWProcessEnv env = {driver, &rest, olog, port, uri_prefix, auth_registry, ratelimiter.get(), lua_background.get()}; fe = new RGWLoadGenFrontend(env, config); @@ -431,13 +431,13 @@ int rgw::AppMain::init_frontends2(RGWLib* rgwlib) config->get_val("port", 80, &port); std::string uri_prefix; config->get_val("prefix", "", &uri_prefix); - RGWProcessEnv env{store, &rest, olog, port, uri_prefix, + RGWProcessEnv env{driver, &rest, olog, port, uri_prefix, auth_registry, ratelimiter.get(), lua_background.get()}; fe = new RGWAsioFrontend(env, config, *(sched_ctx.get())); } else if (framework == "rgw-nfs") { int port = 80; - RGWProcessEnv env = { store, &rest, olog, port }; + RGWProcessEnv env = { driver, &rest, olog, port }; fe = new RGWLibFrontend(env, config); if (rgwlib) { rgwlib->set_fe(static_cast(fe)); @@ -468,24 +468,24 @@ int rgw::AppMain::init_frontends2(RGWLib* rgwlib) } std::string daemon_type = (nfs) ? 
"rgw-nfs" : "rgw"; - r = store->register_to_service_map(dpp, daemon_type, service_map_meta); + r = driver->register_to_service_map(dpp, daemon_type, service_map_meta); if (r < 0) { derr << "ERROR: failed to register to service map: " << cpp_strerror(-r) << dendl; /* ignore error */ } - if (store->get_name() == "rados") { + if (driver->get_name() == "rados") { // add a watcher to respond to realm configuration changes - pusher = std::make_unique(dpp, store, null_yield); + pusher = std::make_unique(dpp, driver, null_yield); fe_pauser = std::make_unique(fes, *(implicit_tenant_context.get()), pusher.get()); rgw_pauser = std::make_unique(); rgw_pauser->add_pauser(fe_pauser.get()); if (lua_background) { rgw_pauser->add_pauser(lua_background.get()); } - reloader = std::make_unique(store, service_map_meta, rgw_pauser.get()); + reloader = std::make_unique(driver, service_map_meta, rgw_pauser.get()); realm_watcher = std::make_unique(dpp, g_ceph_context, - static_cast(store)->svc()->zone->get_realm()); + static_cast(driver)->svc()->zone->get_realm()); realm_watcher->add_watcher(RGWRealmNotify::Reload, *reloader); realm_watcher->add_watcher(RGWRealmNotify::ZonesNeedPeriod, *pusher.get()); } @@ -520,14 +520,14 @@ void rgw::AppMain::init_lua() const auto &luarocks_path = g_conf().get_val("rgw_luarocks_location"); if (luarocks_path.empty()) { - store->set_luarocks_path(""); + driver->set_luarocks_path(""); } else { - store->set_luarocks_path(luarocks_path + "/" + g_conf()->name.to_str()); + driver->set_luarocks_path(luarocks_path + "/" + g_conf()->name.to_str()); } #ifdef WITH_RADOSGW_LUA_PACKAGES rgw::lua::packages_t failed_packages; std::string output; - r = rgw::lua::install_packages(dpp, store, null_yield, failed_packages, + r = rgw::lua::install_packages(dpp, driver, null_yield, failed_packages, output); if (r < 0) { dout(1) << "WARNING: failed to install lua packages from allowlist" @@ -542,16 +542,16 @@ void rgw::AppMain::init_lua() } #endif - if (store->get_name() == 
"rados") { /* Supported for only RadosStore */ + if (driver->get_name() == "rados") { /* Supported for only RadosStore */ lua_background = std::make_unique< - rgw::lua::Background>(store, dpp->get_cct(), store->get_luarocks_path()); + rgw::lua::Background>(driver, dpp->get_cct(), driver->get_luarocks_path()); lua_background->start(); } } /* init_lua */ void rgw::AppMain::shutdown(std::function finalize_async_signals) { - if (store->get_name() == "rados") { + if (driver->get_name() == "rados") { reloader.reset(); // stop the realm reloader } @@ -578,7 +578,7 @@ void rgw::AppMain::shutdown(std::function finalize_async_signals) lua_background->shutdown(); } - StoreManager::close_storage(store); + DriverManager::close_storage(driver); rgw_tools_cleanup(); rgw_shutdown_resolver(); diff --git a/src/rgw/rgw_asio_frontend.cc b/src/rgw/rgw_asio_frontend.cc index 4b4a8175192fb..2bfc0c1cac94d 100644 --- a/src/rgw/rgw_asio_frontend.cc +++ b/src/rgw/rgw_asio_frontend.cc @@ -190,7 +190,7 @@ void handle_connection(boost::asio::io_context& context, // don't impose a limit on the body, since we read it in pieces static constexpr size_t body_limit = std::numeric_limits::max(); - auto cct = env.store->ctx(); + auto cct = env.driver->ctx(); // read messages from the stream until eof for (;;) { @@ -239,7 +239,7 @@ void handle_connection(boost::asio::io_context& context, } // process the request - RGWRequest req{env.store->get_new_req_id()}; + RGWRequest req{env.driver->get_new_req_id()}; auto& socket = stream.lowest_layer(); const auto& remote_endpoint = socket.remote_endpoint(ec); @@ -266,7 +266,7 @@ void handle_connection(boost::asio::io_context& context, string user = "-"; const auto started = ceph::coarse_real_clock::now(); ceph::coarse_real_clock::duration latency{}; - process_request(env.store, env.rest, &req, env.uri_prefix, + process_request(env.driver, env.rest, &req, env.uri_prefix, *env.auth_registry, &client, env.olog, y, scheduler, &user, &latency, 
env.ratelimiting->get_active(), @@ -409,7 +409,7 @@ class AsioFrontend { std::vector threads; std::atomic going_down{false}; - CephContext* ctx() const { return env.store->ctx(); } + CephContext* ctx() const { return env.driver->ctx(); } std::optional client_counters; std::unique_ptr client_config; void accept(Listener& listener, boost::system::error_code ec); @@ -418,7 +418,7 @@ class AsioFrontend { AsioFrontend(const RGWProcessEnv& env, RGWFrontendConfig* conf, dmc::SchedulerCtx& sched_ctx) : env(env), conf(conf), pause_mutex(context.get_executor()), - lua_manager(env.store->get_lua_manager()) + lua_manager(env.driver->get_lua_manager()) { auto sched_t = dmc::get_scheduler_t(ctx()); switch(sched_t){ @@ -444,7 +444,7 @@ class AsioFrontend { void stop(); void join(); void pause(); - void unpause(rgw::sal::Store* store, rgw_auth_registry_ptr_t); + void unpause(rgw::sal::Driver* driver, rgw_auth_registry_ptr_t); }; unsigned short parse_port(const char *input, boost::system::error_code& ec) @@ -755,7 +755,7 @@ int AsioFrontend::get_config_key_val(string name, return -EINVAL; } - int r = env.store->get_config_key_val(name, pbl); + int r = env.driver->get_config_key_val(name, pbl); if (r < 0) { lderr(ctx()) << type << " was not found: " << name << dendl; return r; @@ -911,7 +911,7 @@ int AsioFrontend::init_ssl() key_is_cert = true; } - ExpandMetaVar emv(env.store->get_zone()); + ExpandMetaVar emv(env.driver->get_zone()); cert = emv.process_str(*cert); key = emv.process_str(*key); @@ -1110,12 +1110,12 @@ void AsioFrontend::pause() } } -void AsioFrontend::unpause(rgw::sal::Store* const store, +void AsioFrontend::unpause(rgw::sal::Driver* const driver, rgw_auth_registry_ptr_t auth_registry) { - env.store = store; + env.driver = driver; env.auth_registry = std::move(auth_registry); - lua_manager = store->get_lua_manager(); + lua_manager = driver->get_lua_manager(); // unpause to unblock connections pause_mutex.unlock(); @@ -1175,8 +1175,8 @@ void 
RGWAsioFrontend::pause_for_new_config() } void RGWAsioFrontend::unpause_with_new_config( - rgw::sal::Store* const store, + rgw::sal::Driver* const driver, rgw_auth_registry_ptr_t auth_registry ) { - impl->unpause(store, std::move(auth_registry)); + impl->unpause(driver, std::move(auth_registry)); } diff --git a/src/rgw/rgw_asio_frontend.h b/src/rgw/rgw_asio_frontend.h index ce48844188854..570941ec6533b 100644 --- a/src/rgw/rgw_asio_frontend.h +++ b/src/rgw/rgw_asio_frontend.h @@ -22,7 +22,7 @@ public: void join() override; void pause_for_new_config() override; - void unpause_with_new_config(rgw::sal::Store* store, + void unpause_with_new_config(rgw::sal::Driver* driver, rgw_auth_registry_ptr_t auth_registry) override; }; diff --git a/src/rgw/rgw_auth.cc b/src/rgw/rgw_auth.cc index e303e2181ff37..92813fdd36ec6 100644 --- a/src/rgw/rgw_auth.cc +++ b/src/rgw/rgw_auth.cc @@ -369,7 +369,7 @@ void rgw::auth::WebIdentityApplier::create_account(const DoutPrefixProvider* dpp const string& display_name, RGWUserInfo& user_info) const /* out */ { - std::unique_ptr user = store->get_user(acct_user); + std::unique_ptr user = driver->get_user(acct_user); user->get_info().display_name = display_name; user->get_info().type = TYPE_WEB; user->get_info().max_buckets = @@ -392,7 +392,7 @@ void rgw::auth::WebIdentityApplier::load_acct_info(const DoutPrefixProvider* dpp federated_user.tenant = role_tenant; federated_user.ns = "oidc"; - std::unique_ptr user = store->get_user(federated_user); + std::unique_ptr user = driver->get_user(federated_user); //Check in oidc namespace if (user->load_user(dpp, null_yield) >= 0) { @@ -646,7 +646,7 @@ void rgw::auth::RemoteApplier::create_account(const DoutPrefixProvider* dpp, new_acct_user.tenant = new_acct_user.id; } - std::unique_ptr user = store->get_user(new_acct_user); + std::unique_ptr user = driver->get_user(new_acct_user); user->get_info().display_name = info.acct_name; if (info.acct_type) { //ldap/keystone for s3 users @@ -705,7 +705,7 @@ 
void rgw::auth::RemoteApplier::load_acct_info(const DoutPrefixProvider* dpp, RGW ; /* suppress lookup for id used by "other" protocol */ else if (acct_user.tenant.empty()) { const rgw_user tenanted_uid(acct_user.id, acct_user.id); - user = store->get_user(tenanted_uid); + user = driver->get_user(tenanted_uid); if (user->load_user(dpp, null_yield) >= 0) { /* Succeeded. */ @@ -714,7 +714,7 @@ void rgw::auth::RemoteApplier::load_acct_info(const DoutPrefixProvider* dpp, RGW } } - user = store->get_user(acct_user); + user = driver->get_user(acct_user); if (split_mode && implicit_tenant) ; /* suppress lookup for id used by "other" protocol */ diff --git a/src/rgw/rgw_auth.h b/src/rgw/rgw_auth.h index f14066592afee..23dbc46aa5726 100644 --- a/src/rgw/rgw_auth.h +++ b/src/rgw/rgw_auth.h @@ -377,7 +377,7 @@ class WebIdentityApplier : public IdentityApplier { std::string user_name; protected: CephContext* const cct; - rgw::sal::Store* store; + rgw::sal::Driver* driver; std::string role_session; std::string role_tenant; std::unordered_multimap token_claims; @@ -392,14 +392,14 @@ protected: RGWUserInfo& user_info) const; /* out */ public: WebIdentityApplier( CephContext* const cct, - rgw::sal::Store* store, + rgw::sal::Driver* driver, const std::string& role_session, const std::string& role_tenant, const std::unordered_multimap& token_claims, boost::optional> role_tags, boost::optional>> principal_tags) : cct(cct), - store(store), + driver(driver), role_session(role_session), role_tenant(role_tenant), token_claims(token_claims), @@ -583,7 +583,7 @@ protected: CephContext* const cct; /* Read-write is intensional here due to RGWUserInfo creation process. */ - rgw::sal::Store* store; + rgw::sal::Driver* driver; /* Supplemental strategy for extracting permissions from ACLs. 
Its results * will be combined (ORed) with a default strategy that is responsible for @@ -601,13 +601,13 @@ protected: public: RemoteApplier(CephContext* const cct, - rgw::sal::Store* store, + rgw::sal::Driver* driver, acl_strategy_t&& extra_acl_strategy, const AuthInfo& info, rgw::auth::ImplicitTenants& implicit_tenant_context, rgw::auth::ImplicitTenants::implicit_tenant_flag_bits implicit_tenant_bit) : cct(cct), - store(store), + driver(driver), extra_acl_strategy(std::move(extra_acl_strategy)), info(info), implicit_tenant_context(implicit_tenant_context), diff --git a/src/rgw/rgw_auth_filters.h b/src/rgw/rgw_auth_filters.h index 2434ca2781e50..08f6d659c90d0 100644 --- a/src/rgw/rgw_auth_filters.h +++ b/src/rgw/rgw_auth_filters.h @@ -127,7 +127,7 @@ public: template class ThirdPartyAccountApplier : public DecoratedApplier { - rgw::sal::Store* store; + rgw::sal::Driver* driver; const rgw_user acct_user_override; public: @@ -137,11 +137,11 @@ public: static const rgw_user UNKNOWN_ACCT; template - ThirdPartyAccountApplier(rgw::sal::Store* store, + ThirdPartyAccountApplier(rgw::sal::Driver* driver, const rgw_user &acct_user_override, U&& decoratee) : DecoratedApplier(std::move(decoratee)), - store(store), + driver(driver), acct_user_override(acct_user_override) { } @@ -187,7 +187,7 @@ void ThirdPartyAccountApplier::load_acct_info(const DoutPrefixProvider* dpp, if (acct_user_override.tenant.empty()) { const rgw_user tenanted_uid(acct_user_override.id, acct_user_override.id); - user = store->get_user(tenanted_uid); + user = driver->get_user(tenanted_uid); if (user->load_user(dpp, null_yield) >= 0) { user_info = user->get_info(); @@ -196,7 +196,7 @@ void ThirdPartyAccountApplier::load_acct_info(const DoutPrefixProvider* dpp, } } - user = store->get_user(acct_user_override); + user = driver->get_user(acct_user_override); const int ret = user->load_user(dpp, null_yield); if (ret < 0) { /* We aren't trying to recover from ENOENT here. 
It's supposed that creating @@ -212,10 +212,10 @@ void ThirdPartyAccountApplier::load_acct_info(const DoutPrefixProvider* dpp, } template static inline -ThirdPartyAccountApplier add_3rdparty(rgw::sal::Store* store, +ThirdPartyAccountApplier add_3rdparty(rgw::sal::Driver* driver, const rgw_user &acct_user_override, T&& t) { - return ThirdPartyAccountApplier(store, acct_user_override, + return ThirdPartyAccountApplier(driver, acct_user_override, std::forward(t)); } @@ -223,19 +223,19 @@ ThirdPartyAccountApplier add_3rdparty(rgw::sal::Store* store, template class SysReqApplier : public DecoratedApplier { CephContext* const cct; - rgw::sal::Store* store; + rgw::sal::Driver* driver; const RGWHTTPArgs& args; mutable boost::tribool is_system; public: template SysReqApplier(CephContext* const cct, - rgw::sal::Store* store, + rgw::sal::Driver* driver, const req_state* const s, U&& decoratee) : DecoratedApplier(std::forward(decoratee)), cct(cct), - store(store), + driver(driver), args(s->info.args), is_system(boost::logic::indeterminate) { } @@ -266,7 +266,7 @@ void SysReqApplier::load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo /* We aren't writing directly to user_info for consistency and security * reasons. rgw_get_user_info_by_uid doesn't trigger the operator=() but * calls ::decode instead. */ - std::unique_ptr user = store->get_user(effective_uid); + std::unique_ptr user = driver->get_user(effective_uid); if (user->load_user(dpp, null_yield) < 0) { //ldpp_dout(dpp, 0) << "User lookup failed!" 
<< dendl; throw -EACCES; @@ -293,10 +293,10 @@ void SysReqApplier::modify_request_state(const DoutPrefixProvider* dpp, req_s template static inline SysReqApplier add_sysreq(CephContext* const cct, - rgw::sal::Store* store, + rgw::sal::Driver* driver, const req_state* const s, T&& t) { - return SysReqApplier(cct, store, s, std::forward(t)); + return SysReqApplier(cct, driver, s, std::forward(t)); } } /* namespace auth */ diff --git a/src/rgw/rgw_auth_registry.h b/src/rgw/rgw_auth_registry.h index 35071c54cd334..01ba057acec10 100644 --- a/src/rgw/rgw_auth_registry.h +++ b/src/rgw/rgw_auth_registry.h @@ -38,9 +38,9 @@ class StrategyRegistry { s3_main_strategy_t(CephContext* const cct, ImplicitTenants& implicit_tenant_context, - rgw::sal::Store* store) - : s3_main_strategy_plain(cct, implicit_tenant_context, store), - s3_main_strategy_boto2(cct, implicit_tenant_context, store) { + rgw::sal::Driver* driver) + : s3_main_strategy_plain(cct, implicit_tenant_context, driver), + s3_main_strategy_boto2(cct, implicit_tenant_context, driver) { add_engine(Strategy::Control::SUFFICIENT, s3_main_strategy_plain); add_engine(Strategy::Control::FALLBACK, s3_main_strategy_boto2); } @@ -61,11 +61,11 @@ class StrategyRegistry { public: StrategyRegistry(CephContext* const cct, ImplicitTenants& implicit_tenant_context, - rgw::sal::Store* store) - : s3_main_strategy(cct, implicit_tenant_context, store), - s3_post_strategy(cct, implicit_tenant_context, store), - swift_strategy(cct, implicit_tenant_context, store), - sts_strategy(cct, implicit_tenant_context, store) { + rgw::sal::Driver* driver) + : s3_main_strategy(cct, implicit_tenant_context, driver), + s3_post_strategy(cct, implicit_tenant_context, driver), + swift_strategy(cct, implicit_tenant_context, driver), + sts_strategy(cct, implicit_tenant_context, driver) { } const s3_main_strategy_t& get_s3_main() const { @@ -87,8 +87,8 @@ public: static std::shared_ptr create(CephContext* const cct, ImplicitTenants& implicit_tenant_context, - 
rgw::sal::Store* store) { - return std::make_shared(cct, implicit_tenant_context, store); + rgw::sal::Driver* driver) { + return std::make_shared(cct, implicit_tenant_context, driver); } }; diff --git a/src/rgw/rgw_auth_s3.h b/src/rgw/rgw_auth_s3.h index 00eddc46f30b0..241bc6858fbfc 100644 --- a/src/rgw/rgw_auth_s3.h +++ b/src/rgw/rgw_auth_s3.h @@ -35,7 +35,7 @@ class STSAuthStrategy : public rgw::auth::Strategy, public rgw::auth::LocalApplier::Factory, public rgw::auth::RoleApplier::Factory { typedef rgw::auth::IdentityApplier::aplptr_t aplptr_t; - rgw::sal::Store* store; + rgw::sal::Driver* driver; rgw::auth::ImplicitTenants& implicit_tenant_context; STSEngine sts_engine; @@ -44,8 +44,8 @@ class STSAuthStrategy : public rgw::auth::Strategy, const req_state* const s, rgw::auth::RemoteApplier::acl_strategy_t&& acl_alg, const rgw::auth::RemoteApplier::AuthInfo &info) const override { - auto apl = rgw::auth::add_sysreq(cct, store, s, - rgw::auth::RemoteApplier(cct, store, std::move(acl_alg), info, + auto apl = rgw::auth::add_sysreq(cct, driver, s, + rgw::auth::RemoteApplier(cct, driver, std::move(acl_alg), info, implicit_tenant_context, rgw::auth::ImplicitTenants::IMPLICIT_TENANTS_S3)); return aplptr_t(new decltype(apl)(std::move(apl))); @@ -57,7 +57,7 @@ class STSAuthStrategy : public rgw::auth::Strategy, const std::string& subuser, const std::optional& perm_mask, const std::string& access_key_id) const override { - auto apl = rgw::auth::add_sysreq(cct, store, s, + auto apl = rgw::auth::add_sysreq(cct, driver, s, rgw::auth::LocalApplier(cct, user_info, subuser, perm_mask, access_key_id)); return aplptr_t(new decltype(apl)(std::move(apl))); } @@ -66,19 +66,19 @@ class STSAuthStrategy : public rgw::auth::Strategy, const req_state* const s, const rgw::auth::RoleApplier::Role& role, const rgw::auth::RoleApplier::TokenAttrs& token_attrs) const override { - auto apl = rgw::auth::add_sysreq(cct, store, s, + auto apl = rgw::auth::add_sysreq(cct, driver, s, 
rgw::auth::RoleApplier(cct, role, token_attrs)); return aplptr_t(new decltype(apl)(std::move(apl))); } public: STSAuthStrategy(CephContext* const cct, - rgw::sal::Store* store, + rgw::sal::Driver* driver, rgw::auth::ImplicitTenants& implicit_tenant_context, AWSEngine::VersionAbstractor* const ver_abstractor) - : store(store), + : driver(driver), implicit_tenant_context(implicit_tenant_context), - sts_engine(cct, store, *ver_abstractor, + sts_engine(cct, driver, *ver_abstractor, static_cast(this), static_cast(this), static_cast(this)) { @@ -95,7 +95,7 @@ public: class ExternalAuthStrategy : public rgw::auth::Strategy, public rgw::auth::RemoteApplier::Factory { typedef rgw::auth::IdentityApplier::aplptr_t aplptr_t; - rgw::sal::Store* store; + rgw::sal::Driver* driver; rgw::auth::ImplicitTenants& implicit_tenant_context; using keystone_config_t = rgw::keystone::CephCtxConfig; @@ -110,8 +110,8 @@ class ExternalAuthStrategy : public rgw::auth::Strategy, const req_state* const s, rgw::auth::RemoteApplier::acl_strategy_t&& acl_alg, const rgw::auth::RemoteApplier::AuthInfo &info) const override { - auto apl = rgw::auth::add_sysreq(cct, store, s, - rgw::auth::RemoteApplier(cct, store, std::move(acl_alg), info, + auto apl = rgw::auth::add_sysreq(cct, driver, s, + rgw::auth::RemoteApplier(cct, driver, std::move(acl_alg), info, implicit_tenant_context, rgw::auth::ImplicitTenants::IMPLICIT_TENANTS_S3)); /* TODO(rzarzynski): replace with static_ptr. 
*/ @@ -120,12 +120,12 @@ class ExternalAuthStrategy : public rgw::auth::Strategy, public: ExternalAuthStrategy(CephContext* const cct, - rgw::sal::Store* store, + rgw::sal::Driver* driver, rgw::auth::ImplicitTenants& implicit_tenant_context, AWSEngine::VersionAbstractor* const ver_abstractor) - : store(store), + : driver(driver), implicit_tenant_context(implicit_tenant_context), - ldap_engine(cct, store, *ver_abstractor, + ldap_engine(cct, driver, *ver_abstractor, static_cast(this)) { if (cct->_conf->rgw_s3_auth_use_keystone && @@ -161,7 +161,7 @@ class AWSAuthStrategy : public rgw::auth::Strategy, AbstractorT>::value, "AbstractorT must be a subclass of rgw::auth::s3::VersionAbstractor"); - rgw::sal::Store* store; + rgw::sal::Driver* driver; AbstractorT ver_abstractor; S3AnonymousEngine anonymous_engine; @@ -175,7 +175,7 @@ class AWSAuthStrategy : public rgw::auth::Strategy, const std::string& subuser, const std::optional& perm_mask, const std::string& access_key_id) const override { - auto apl = rgw::auth::add_sysreq(cct, store, s, + auto apl = rgw::auth::add_sysreq(cct, driver, s, rgw::auth::LocalApplier(cct, user_info, subuser, perm_mask, access_key_id)); /* TODO(rzarzynski): replace with static_ptr. */ return aplptr_t(new decltype(apl)(std::move(apl))); @@ -220,14 +220,14 @@ public: AWSAuthStrategy(CephContext* const cct, rgw::auth::ImplicitTenants& implicit_tenant_context, - rgw::sal::Store* store) - : store(store), + rgw::sal::Driver* driver) + : driver(driver), ver_abstractor(cct), anonymous_engine(cct, static_cast(this)), - external_engines(cct, store, implicit_tenant_context, &ver_abstractor), - sts_engine(cct, store, implicit_tenant_context, &ver_abstractor), - local_engine(cct, store, ver_abstractor, + external_engines(cct, driver, implicit_tenant_context, &ver_abstractor), + sts_engine(cct, driver, implicit_tenant_context, &ver_abstractor), + local_engine(cct, driver, ver_abstractor, static_cast(this)) { /* The anonymous auth. 
*/ if (AllowAnonAccessT) { diff --git a/src/rgw/rgw_file.cc b/src/rgw/rgw_file.cc index 953cf98a1991d..eaaedd4da8b61 100644 --- a/src/rgw/rgw_file.cc +++ b/src/rgw/rgw_file.cc @@ -1524,7 +1524,7 @@ namespace rgw { return false; RGWRMdirCheck req(fs->get_context(), - g_rgwlib->get_store()->get_user(fs->get_user()->user_id), + g_rgwlib->get_driver()->get_user(fs->get_user()->user_id), this); int rc = g_rgwlib->get_fe()->execute_req(&req); if (! rc) { @@ -1580,7 +1580,7 @@ namespace rgw { } if (is_root()) { - RGWListBucketsRequest req(cct, g_rgwlib->get_store()->get_user(fs->get_user()->user_id), + RGWListBucketsRequest req(cct, g_rgwlib->get_driver()->get_user(fs->get_user()->user_id), this, rcb, cb_arg, offset); rc = g_rgwlib->get_fe()->execute_req(&req); if (! rc) { @@ -1593,7 +1593,7 @@ namespace rgw { *eof = req.eof(); } } else { - RGWReaddirRequest req(cct, g_rgwlib->get_store()->get_user(fs->get_user()->user_id), + RGWReaddirRequest req(cct, g_rgwlib->get_driver()->get_user(fs->get_user()->user_id), this, rcb, cb_arg, offset); rc = g_rgwlib->get_fe()->execute_req(&req); if (! rc) { @@ -1662,8 +1662,8 @@ namespace rgw { /* start */ std::string object_name = relative_object_name(); f->write_req = - new RGWWriteRequest(g_rgwlib->get_store(), - g_rgwlib->get_store()->get_user(fs->get_user()->user_id), + new RGWWriteRequest(g_rgwlib->get_driver(), + g_rgwlib->get_driver()->get_user(fs->get_user()->user_id), this, bucket_name(), object_name); rc = g_rgwlib->get_fe()->start_req(f->write_req); if (rc < 0) { @@ -1824,7 +1824,7 @@ namespace rgw { state->object->set_bucket(state->bucket.get()); auto compression_type = - get_store()->get_compression_type(state->bucket->get_placement_rule()); + get_driver()->get_compression_type(state->bucket->get_placement_rule()); /* not obviously supportable */ ceph_assert(! 
dlo_manifest); @@ -1862,7 +1862,7 @@ namespace rgw { version_id = state->object->get_instance(); } } - processor = get_store()->get_atomic_writer(this, state->yield, state->object->clone(), + processor = get_driver()->get_atomic_writer(this, state->yield, state->object->clone(), state->bucket_owner.get_id(), &state->dest_placement, 0, state->req_id); @@ -2052,8 +2052,8 @@ void rgwfile_version(int *major, int *minor, int *extra) sec_key, "/"); ceph_assert(new_fs); - const DoutPrefix dp(g_rgwlib->get_store()->ctx(), dout_subsys, "rgw mount: "); - rc = new_fs->authorize(&dp, g_rgwlib->get_store()); + const DoutPrefix dp(g_rgwlib->get_driver()->ctx(), dout_subsys, "rgw mount: "); + rc = new_fs->authorize(&dp, g_rgwlib->get_driver()); if (rc != 0) { delete new_fs; return -EINVAL; @@ -2095,8 +2095,8 @@ int rgw_mount2(librgw_t rgw, const char *uid, const char *acc_key, ceph_assert(new_fs); /* should we be using ceph_assert? */ - const DoutPrefix dp(g_rgwlib->get_store()->ctx(), dout_subsys, "rgw mount2: "); - rc = new_fs->authorize(&dp, g_rgwlib->get_store()); + const DoutPrefix dp(g_rgwlib->get_driver()->ctx(), dout_subsys, "rgw mount2: "); + rc = new_fs->authorize(&dp, g_rgwlib->get_driver()); if (rc != 0) { delete new_fs; return -EINVAL; @@ -2148,7 +2148,7 @@ int rgw_statfs(struct rgw_fs *rgw_fs, struct rados_cluster_stat_t stats; RGWGetClusterStatReq req(fs->get_context(), - g_rgwlib->get_store()->get_user(fs->get_user()->user_id), + g_rgwlib->get_driver()->get_user(fs->get_user()->user_id), stats); int rc = g_rgwlib->get_fe()->execute_req(&req); if (rc < 0) { @@ -2713,7 +2713,7 @@ int rgw_writev(struct rgw_fs *rgw_fs, struct rgw_file_handle *fh, } std::string oname = rgw_fh->relative_object_name(); - RGWPutObjRequest req(cct, g_rgwlib->get_store()->get_user(fs->get_user()->user_id), + RGWPutObjRequest req(cct, g_rgwlib->get_driver()->get_user(fs->get_user()->user_id), rgw_fh->bucket_name(), oname, bl); int rc = g_rgwlib->get_fe()->execute_req(&req); diff --git 
a/src/rgw/rgw_file.h b/src/rgw/rgw_file.h index f217b415cd576..ef8e7b5ce07d3 100644 --- a/src/rgw/rgw_file.h +++ b/src/rgw/rgw_file.h @@ -1000,8 +1000,8 @@ namespace rgw { (void) fh_lru.unref(fh, cohort::lru::FLAG_NONE); } - int authorize(const DoutPrefixProvider *dpp, rgw::sal::Store* store) { - int ret = store->get_user_by_access_key(dpp, key.id, null_yield, &user); + int authorize(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver) { + int ret = driver->get_user_by_access_key(dpp, key.id, null_yield, &user); if (ret == 0) { RGWAccessKey* k = user->get_info().get_key(key.id); if (!k || (k->key != key.key)) @@ -1019,12 +1019,12 @@ namespace rgw { token = std::string(""); } if (token.valid() && (ldh->auth(token.id, token.key) == 0)) { - /* try to store user if it doesn't already exist */ + /* try to store user if it doesn't already exist */ if (user->load_user(dpp, null_yield) < 0) { int ret = user->store_user(dpp, null_yield, true); if (ret < 0) { lsubdout(get_context(), rgw, 10) - << "NOTICE: failed to store new user's info: ret=" << ret + << "NOTICE: failed to store new user's info: ret=" << ret << dendl; } } @@ -1314,7 +1314,7 @@ namespace rgw { RGWUserInfo* get_user() { return &user->get_info(); } void update_user(const DoutPrefixProvider *dpp) { - (void) g_rgwlib->get_store()->get_user_by_access_key(dpp, key.id, null_yield, &user); + (void) g_rgwlib->get_driver()->get_user_by_access_key(dpp, key.id, null_yield, &user); } void close(); @@ -1375,7 +1375,7 @@ namespace rgw { public: bool only_bucket() override { return false; } int op_init() override { - // assign store, s, and dialect_handler + // assign driver, s, and dialect_handler // framework promises to call op_init after parent init - RGWOp::init(RGWHandler::store, get_state(), this); + RGWOp::init(RGWHandler::driver, get_state(), this); op = this; // assign self as op: REQUIRED return 0; } @@ -1510,9 +1510,9 @@ public: bool only_bucket() override { return true; } int op_init() override { - // assign store, s,
and dialect_handler + // assign driver, s, and dialect_handler // framework promises to call op_init after parent init - RGWOp::init(RGWHandler::store, get_state(), this); + RGWOp::init(RGWHandler::driver, get_state(), this); op = this; // assign self as op: REQUIRED return 0; } @@ -1811,9 +1811,9 @@ public: bool only_bucket() override { return true; } int op_init() override { - // assign store, s, and dialect_handler + // assign driver, s, and dialect_handler // framework promises to call op_init after parent init - RGWOp::init(RGWHandler::store, get_state(), this); + RGWOp::init(RGWHandler::driver, get_state(), this); op = this; // assign self as op: REQUIRED return 0; } @@ -1890,9 +1890,9 @@ public: } int op_init() override { - // assign store, s, and dialect_handler + // assign driver, s, and dialect_handler // framework promises to call op_init after parent init - RGWOp::init(RGWHandler::store, get_state(), this); + RGWOp::init(RGWHandler::driver, get_state(), this); op = this; // assign self as op: REQUIRED return 0; } @@ -1947,9 +1947,9 @@ public: bool only_bucket() override { return true; } int op_init() override { - // assign store, s, and dialect_handler + // assign driver, s, and dialect_handler // framework promises to call op_init after parent init - RGWOp::init(RGWHandler::store, get_state(), this); + RGWOp::init(RGWHandler::driver, get_state(), this); op = this; // assign self as op: REQUIRED return 0; } @@ -1998,9 +1998,9 @@ public: bool only_bucket() override { return true; } int op_init() override { - // assign store, s, and dialect_handler + // assign driver, s, and dialect_handler // framework promises to call op_init after parent init - RGWOp::init(RGWHandler::store, get_state(), this); + RGWOp::init(RGWHandler::driver, get_state(), this); op = this; // assign self as op: REQUIRED int rc = valid_s3_object_name(obj_name); @@ -2094,9 +2094,9 @@ public: bool only_bucket() override { return false; } int op_init() override { - // assign store, s, 
and dialect_handler + // assign driver, s, and dialect_handler // framework promises to call op_init after parent init - RGWOp::init(RGWHandler::store, get_state(), this); + RGWOp::init(RGWHandler::driver, get_state(), this); op = this; // assign self as op: REQUIRED return 0; } @@ -2175,9 +2175,9 @@ public: bool only_bucket() override { return true; } int op_init() override { - // assign store, s, and dialect_handler + // assign driver, s, and dialect_handler // framework promises to call op_init after parent init - RGWOp::init(RGWHandler::store, get_state(), this); + RGWOp::init(RGWHandler::driver, get_state(), this); op = this; // assign self as op: REQUIRED return 0; } @@ -2250,9 +2250,9 @@ public: bool only_bucket() override { return false; } int op_init() override { - // assign store, s, and dialect_handler + // assign driver, s, and dialect_handler // framework promises to call op_init after parent init - RGWOp::init(RGWHandler::store, get_state(), this); + RGWOp::init(RGWHandler::driver, get_state(), this); op = this; // assign self as op: REQUIRED return 0; } @@ -2324,9 +2324,9 @@ public: bool only_bucket() override { return false; } int op_init() override { - // assign store, s, and dialect_handler + // assign driver, s, and dialect_handler // framework promises to call op_init after parent init - RGWOp::init(RGWHandler::store, get_state(), this); + RGWOp::init(RGWHandler::driver, get_state(), this); op = this; // assign self as op: REQUIRED return 0; } @@ -2387,9 +2387,9 @@ public: bool only_bucket() override { return true; } int op_init() override { - // assign store, s, and dialect_handler + // assign driver, s, and dialect_handler // framework promises to call op_init after parent init - RGWOp::init(RGWHandler::store, get_state(), this); + RGWOp::init(RGWHandler::driver, get_state(), this); op = this; // assign self as op: REQUIRED return 0; } @@ -2489,11 +2489,11 @@ public: size_t bytes_written; bool eio; - RGWWriteRequest(rgw::sal::Store* store, + 
RGWWriteRequest(rgw::sal::Driver* driver, std::unique_ptr _user, RGWFileHandle* _fh, const std::string& _bname, const std::string& _oname) - : RGWLibContinuedReq(store->ctx(), std::move(_user)), + : RGWLibContinuedReq(driver->ctx(), std::move(_user)), bucket_name(_bname), obj_name(_oname), rgw_fh(_fh), filter(nullptr), timer_id(0), real_ofs(0), bytes_written(0), eio(false) { @@ -2509,9 +2509,9 @@ public: bool only_bucket() override { return true; } int op_init() override { - // assign store, s, and dialect_handler + // assign driver, s, and dialect_handler // framework promises to call op_init after parent init - RGWOp::init(RGWHandler::store, get_state(), this); + RGWOp::init(RGWHandler::driver, get_state(), this); op = this; // assign self as op: REQUIRED return 0; } @@ -2597,9 +2597,9 @@ public: bool only_bucket() override { return true; } int op_init() override { - // assign store, s, and dialect_handler + // assign driver, s, and dialect_handler // framework promises to call op_init after parent init - RGWOp::init(RGWHandler::store, get_state(), this); + RGWOp::init(RGWHandler::driver, get_state(), this); op = this; // assign self as op: REQUIRED return 0; @@ -2679,9 +2679,9 @@ public: virtual bool only_bucket() { return false; } virtual int op_init() { - // assign store, s, and dialect_handler + // assign driver, s, and dialect_handler // framework promises to call op_init after parent init - RGWOp::init(RGWHandler::store, get_state(), this); + RGWOp::init(RGWHandler::driver, get_state(), this); op = this; // assign self as op: REQUIRED return 0; } @@ -2730,9 +2730,9 @@ public: bool only_bucket() override { return false; } int op_init() override { - // assign store, s, and dialect_handler + // assign driver, s, and dialect_handler // framework promises to call op_init after parent init - RGWOp::init(RGWHandler::store, get_state(), this); + RGWOp::init(RGWHandler::driver, get_state(), this); op = this; // assign self as op: REQUIRED return 0; } @@ -2784,9 
+2784,9 @@ public: virtual bool only_bucket() { return false; } virtual int op_init() { - // assign store, s, and dialect_handler + // assign driver, s, and dialect_handler // framework promises to call op_init after parent init - RGWOp::init(RGWHandler::store, get_state(), this); + RGWOp::init(RGWHandler::driver, get_state(), this); op = this; // assign self as op: REQUIRED return 0; } @@ -2829,9 +2829,9 @@ public: } int op_init() override { - // assign store, s, and dialect_handler + // assign driver, s, and dialect_handler // framework promises to call op_init after parent init - RGWOp::init(RGWHandler::store, get_state(), this); + RGWOp::init(RGWHandler::driver, get_state(), this); op = this; // assign self as op: REQUIRED return 0; } diff --git a/src/rgw/rgw_frontend.h b/src/rgw/rgw_frontend.h index 12f38602b3ce9..b5c3b728cd646 100644 --- a/src/rgw/rgw_frontend.h +++ b/src/rgw/rgw_frontend.h @@ -83,7 +83,7 @@ public: virtual void join() = 0; virtual void pause_for_new_config() = 0; - virtual void unpause_with_new_config(rgw::sal::Store* store, + virtual void unpause_with_new_config(rgw::sal::Driver* driver, rgw_auth_registry_ptr_t auth_registry) = 0; }; @@ -122,11 +122,11 @@ public: pprocess->pause(); } - void unpause_with_new_config(rgw::sal::Store* const store, + void unpause_with_new_config(rgw::sal::Driver* const driver, rgw_auth_registry_ptr_t auth_registry) override { - env.store = store; + env.driver = driver; env.auth_registry = auth_registry; - pprocess->unpause_with_new_config(store, std::move(auth_registry)); + pprocess->unpause_with_new_config(driver, std::move(auth_registry)); } }; /* RGWProcessFrontend */ @@ -136,7 +136,7 @@ public: : RGWProcessFrontend(pe, _conf) {} CephContext *get_cct() const { - return env.store->ctx(); + return env.driver->ctx(); } unsigned get_subsys() const @@ -166,7 +166,7 @@ public: } rgw_user uid(uid_str); - std::unique_ptr user = env.store->get_user(uid); + std::unique_ptr user = env.driver->get_user(uid); int ret = 
user->load_user(this, null_yield); if (ret < 0) { @@ -208,16 +208,16 @@ class RGWFrontendPauser : public RGWRealmReloader::Pauser { if (pauser) pauser->pause(); } - void resume(rgw::sal::Store* store) override { + void resume(rgw::sal::Driver* driver) override { /* Initialize the registry of auth strategies which will coordinate * the dynamic reconfiguration. */ auto auth_registry = \ - rgw::auth::StrategyRegistry::create(g_ceph_context, implicit_tenants, store); + rgw::auth::StrategyRegistry::create(g_ceph_context, implicit_tenants, driver); for (auto frontend : frontends) - frontend->unpause_with_new_config(store, auth_registry); + frontend->unpause_with_new_config(driver, auth_registry); if (pauser) - pauser->resume(store); + pauser->resume(driver); } }; diff --git a/src/rgw/rgw_lc.cc b/src/rgw/rgw_lc.cc index 4ec0fae5c412b..0ef047ea93fd9 100644 --- a/src/rgw/rgw_lc.cc +++ b/src/rgw/rgw_lc.cc @@ -247,10 +247,10 @@ void *RGWLC::LCWorker::entry() { return NULL; } -void RGWLC::initialize(CephContext *_cct, rgw::sal::Store* _store) { +void RGWLC::initialize(CephContext *_cct, rgw::sal::Driver* _driver) { cct = _cct; - store = _store; - sal_lc = store->get_lifecycle(); + driver = _driver; + sal_lc = driver->get_lifecycle(); max_objs = cct->_conf->rgw_lc_max_objs; if (max_objs > HASH_PRIME) max_objs = HASH_PRIME; @@ -317,7 +317,7 @@ static bool obj_has_expired(const DoutPrefixProvider *dpp, CephContext *cct, cep return (timediff >= cmp); } -static bool pass_object_lock_check(rgw::sal::Store* store, rgw::sal::Object* obj, const DoutPrefixProvider *dpp) +static bool pass_object_lock_check(rgw::sal::Driver* driver, rgw::sal::Object* obj, const DoutPrefixProvider *dpp) { if (!obj->get_bucket()->get_info().obj_lock_enabled()) { return true; @@ -365,7 +365,7 @@ static bool pass_object_lock_check(rgw::sal::Store* store, rgw::sal::Object* obj } class LCObjsLister { - rgw::sal::Store* store; + rgw::sal::Driver* driver; rgw::sal::Bucket* bucket; rgw::sal::Bucket::ListParams 
list_params; rgw::sal::Bucket::ListResults list_results; @@ -375,11 +375,11 @@ class LCObjsLister { int64_t delay_ms; public: - LCObjsLister(rgw::sal::Store* _store, rgw::sal::Bucket* _bucket) : - store(_store), bucket(_bucket) { + LCObjsLister(rgw::sal::Driver* _driver, rgw::sal::Bucket* _bucket) : + driver(_driver), bucket(_bucket) { list_params.list_versions = bucket->versioned(); list_params.allow_unordered = true; - delay_ms = store->ctx()->_conf.get_val("rgw_lc_thread_delay"); + delay_ms = driver->ctx()->_conf.get_val("rgw_lc_thread_delay"); } void set_prefix(const string& p) { @@ -457,14 +457,14 @@ struct op_env { using LCWorker = RGWLC::LCWorker; lc_op op; - rgw::sal::Store* store; + rgw::sal::Driver* driver; LCWorker* worker; rgw::sal::Bucket* bucket; LCObjsLister& ol; - op_env(lc_op& _op, rgw::sal::Store* _store, LCWorker* _worker, + op_env(lc_op& _op, rgw::sal::Driver* _driver, LCWorker* _worker, rgw::sal::Bucket* _bucket, LCObjsLister& _ol) - : op(_op), store(_store), worker(_worker), bucket(_bucket), + : op(_op), driver(_driver), worker(_worker), bucket(_bucket), ol(_ol) {} }; /* op_env */ @@ -478,7 +478,7 @@ struct lc_op_ctx { boost::optional next_key_name; ceph::real_time effective_mtime; - rgw::sal::Store* store; + rgw::sal::Driver* driver; rgw::sal::Bucket* bucket; lc_op& op; // ok--refers to expanded env.op LCObjsLister& ol; @@ -494,10 +494,10 @@ struct lc_op_ctx { boost::optional next_key_name, ceph::real_time effective_mtime, const DoutPrefixProvider *dpp, WorkQ* wq) - : cct(env.store->ctx()), env(env), o(o), next_key_name(next_key_name), + : cct(env.driver->ctx()), env(env), o(o), next_key_name(next_key_name), effective_mtime(effective_mtime), - store(env.store), bucket(env.bucket), op(env.op), ol(env.ol), - rctx(env.store), dpp(dpp), wq(wq) + driver(env.driver), bucket(env.bucket), op(env.op), ol(env.ol), + rctx(env.driver), dpp(dpp), wq(wq) { obj = bucket->get_object(o.key); } @@ -517,7 +517,7 @@ static int remove_expired_obj( const 
DoutPrefixProvider *dpp, lc_op_ctx& oc, bool remove_indeed, rgw::notify::EventType event_type) { - auto& store = oc.store; + auto& driver = oc.driver; auto& bucket_info = oc.bucket->get_info(); auto& o = oc.o; auto obj_key = o.key; @@ -535,7 +535,7 @@ static int remove_expired_obj( std::unique_ptr bucket; std::unique_ptr obj; - ret = store->get_bucket(nullptr, bucket_info, &bucket); + ret = driver->get_bucket(nullptr, bucket_info, &bucket); if (ret < 0) { return ret; } @@ -544,7 +544,7 @@ static int remove_expired_obj( std::unique_ptr user; if (! bucket->get_owner()) { auto& bucket_info = bucket->get_info(); - user = store->get_user(bucket_info.owner); + user = driver->get_user(bucket_info.owner); // forgive me, lord if (user) { bucket->set_owner(user.get()); @@ -562,8 +562,8 @@ static int remove_expired_obj( del_op->params.unmod_since = meta.mtime; del_op->params.marker_version_id = version_id; - // notification supported only for RADOS store for now - notify = store->get_notification(dpp, obj.get(), nullptr, event_type, + // notification supported only for RADOS driver for now + notify = driver->get_notification(dpp, obj.get(), nullptr, event_type, bucket.get(), lc_id, const_cast(oc.bucket->get_tenant()), lc_req_id, null_yield); @@ -885,7 +885,7 @@ int RGWLC::handle_multipart_expiration(rgw::sal::Bucket* target, if (ret < 0) { if (ret == (-ENOENT)) return 0; - ldpp_dout(this, 0) << "ERROR: store->list_objects():" <list_objects():" <thr_name() << dendl; return is_expired && - pass_object_lock_check(oc.store, oc.obj.get(), dpp); + pass_object_lock_check(oc.driver, oc.obj.get(), dpp); } int process(lc_op_ctx& oc) { @@ -1307,7 +1307,7 @@ public: } std::string tier_type = ""; - rgw::sal::ZoneGroup& zonegroup = oc.store->get_zone()->get_zonegroup(); + rgw::sal::ZoneGroup& zonegroup = oc.driver->get_zone()->get_zonegroup(); rgw_placement_rule target_placement; target_placement.inherit_from(oc.bucket->get_placement_rule()); @@ -1318,7 +1318,7 @@ public: if (!r && 
oc.tier->get_tier_type() == "cloud-s3") { ldpp_dout(oc.dpp, 30) << "Found cloud s3 tier: " << target_placement.storage_class << dendl; if (!oc.o.is_current() && - !pass_object_lock_check(oc.store, oc.obj.get(), oc.dpp)) { + !pass_object_lock_check(oc.driver, oc.obj.get(), oc.dpp)) { /* Skip objects which has object lock enabled. */ ldpp_dout(oc.dpp, 10) << "Object(key:" << oc.o.key << ") is locked. Skipping transition to cloud-s3 tier: " << target_placement.storage_class << dendl; return 0; @@ -1331,7 +1331,7 @@ public: return r; } } else { - if (!oc.store->valid_placement(target_placement)) { + if (!oc.driver->valid_placement(target_placement)) { ldpp_dout(oc.dpp, 0) << "ERROR: non existent dest placement: " << target_placement << " bucket="<< oc.bucket @@ -1521,7 +1521,7 @@ int RGWLC::bucket_lc_process(string& shard_id, LCWorker* worker, return 0; } - int ret = store->get_bucket(this, nullptr, bucket_tenant, bucket_name, &bucket, null_yield); + int ret = driver->get_bucket(this, nullptr, bucket_tenant, bucket_name, &bucket, null_yield); if (ret < 0) { ldpp_dout(this, 0) << "LC:get_bucket for " << bucket_name << " failed" << dendl; @@ -1569,7 +1569,7 @@ int RGWLC::bucket_lc_process(string& shard_id, LCWorker* worker, } /* fetch information for zone checks */ - rgw::sal::Zone* zone = store->get_zone(); + rgw::sal::Zone* zone = driver->get_zone(); auto pf = [](RGWLC::LCWorker* wk, WorkQ* wq, WorkItem& wi) { auto wt = @@ -1620,7 +1620,7 @@ int RGWLC::bucket_lc_process(string& shard_id, LCWorker* worker, pre_marker = next_marker; } - LCObjsLister ol(store, bucket.get()); + LCObjsLister ol(driver, bucket.get()); ol.set_prefix(prefix_iter->first); if (! 
zone_check(op, zone)) { @@ -1633,11 +1633,11 @@ int RGWLC::bucket_lc_process(string& shard_id, LCWorker* worker, if (ret < 0) { if (ret == (-ENOENT)) return 0; - ldpp_dout(this, 0) << "ERROR: store->list_objects():" << dendl; + ldpp_dout(this, 0) << "ERROR: driver->list_objects():" << dendl; return ret; } - op_env oenv(op, store, worker, bucket.get(), ol); + op_env oenv(op, driver, worker, bucket.get(), ol); LCOpRule orule(oenv); orule.build(); // why can't ctor do it? rgw_bucket_dir_entry* o{nullptr}; @@ -1828,7 +1828,7 @@ int RGWLC::process(LCWorker* worker, * do need the entry {pro,epi}logue which update the state entry * for this bucket) */ auto bucket_lc_key = get_bucket_lc_key(optional_bucket->get_key()); - auto index = get_lc_index(store->ctx(), bucket_lc_key); + auto index = get_lc_index(driver->ctx(), bucket_lc_key); ret = process_bucket(index, max_secs, worker, bucket_lc_key, once); return ret; } else { @@ -2381,11 +2381,11 @@ void RGWLifecycleConfiguration::generate_test_instances( template static int guard_lc_modify(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, rgw::sal::Lifecycle* sal_lc, const rgw_bucket& bucket, const string& cookie, const F& f) { - CephContext *cct = store->ctx(); + CephContext *cct = driver->ctx(); auto bucket_lc_key = get_bucket_lc_key(bucket); string oid; @@ -2455,7 +2455,7 @@ int RGWLC::set_bucket_config(rgw::sal::Bucket* bucket, rgw_bucket& b = bucket->get_key(); - ret = guard_lc_modify(this, store, sal_lc.get(), b, cookie, + ret = guard_lc_modify(this, driver, sal_lc.get(), b, cookie, [&](rgw::sal::Lifecycle* sal_lc, const string& oid, rgw::sal::Lifecycle::LCEntry& entry) { return sal_lc->set_entry(oid, entry); @@ -2483,7 +2483,7 @@ int RGWLC::remove_bucket_config(rgw::sal::Bucket* bucket, } } - ret = guard_lc_modify(this, store, sal_lc.get(), b, cookie, + ret = guard_lc_modify(this, driver, sal_lc.get(), b, cookie, [&](rgw::sal::Lifecycle* sal_lc, const string& oid, 
rgw::sal::Lifecycle::LCEntry& entry) { return sal_lc->rm_entry(oid, entry); @@ -2501,7 +2501,7 @@ RGWLC::~RGWLC() namespace rgw::lc { int fix_lc_shard_entry(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, rgw::sal::Lifecycle* sal_lc, rgw::sal::Bucket* bucket) { @@ -2512,7 +2512,7 @@ int fix_lc_shard_entry(const DoutPrefixProvider *dpp, auto bucket_lc_key = get_bucket_lc_key(bucket->get_key()); std::string lc_oid; - get_lc_oid(store->ctx(), bucket_lc_key, &lc_oid); + get_lc_oid(driver->ctx(), bucket_lc_key, &lc_oid); std::unique_ptr entry; // There are multiple cases we need to encounter here @@ -2531,11 +2531,11 @@ int fix_lc_shard_entry(const DoutPrefixProvider *dpp, << " creating " << dendl; // TODO: we have too many ppl making cookies like this! char cookie_buf[COOKIE_LEN + 1]; - gen_rand_alphanumeric(store->ctx(), cookie_buf, sizeof(cookie_buf) - 1); + gen_rand_alphanumeric(driver->ctx(), cookie_buf, sizeof(cookie_buf) - 1); std::string cookie = cookie_buf; ret = guard_lc_modify(dpp, - store, sal_lc, bucket->get_key(), cookie, + driver, sal_lc, bucket->get_key(), cookie, [&lc_oid](rgw::sal::Lifecycle* slc, const string& oid, rgw::sal::Lifecycle::LCEntry& entry) { diff --git a/src/rgw/rgw_lc.h b/src/rgw/rgw_lc.h index 14bcb3b266654..e74b67fca6d05 100644 --- a/src/rgw/rgw_lc.h +++ b/src/rgw/rgw_lc.h @@ -515,7 +515,7 @@ WRITE_CLASS_ENCODER(RGWLifecycleConfiguration) class RGWLC : public DoutPrefixProvider { CephContext *cct; - rgw::sal::Store* store; + rgw::sal::Driver* driver; std::unique_ptr sal_lc; int max_objs{0}; std::string *obj_names{nullptr}; @@ -569,10 +569,10 @@ public: std::vector> workers; - RGWLC() : cct(nullptr), store(nullptr) {} + RGWLC() : cct(nullptr), driver(nullptr) {} virtual ~RGWLC() override; - void initialize(CephContext *_cct, rgw::sal::Store* _store); + void initialize(CephContext *_cct, rgw::sal::Driver* _driver); void finalize(); int process(LCWorker* worker, @@ -619,7 +619,7 @@ public: namespace 
rgw::lc { int fix_lc_shard_entry(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, rgw::sal::Lifecycle* sal_lc, rgw::sal::Bucket* bucket); diff --git a/src/rgw/rgw_lib.cc b/src/rgw/rgw_lib.cc index 24e7dd900d1af..04c5db8fdf247 100644 --- a/src/rgw/rgw_lib.cc +++ b/src/rgw/rgw_lib.cc @@ -211,7 +211,7 @@ namespace rgw { s->cio = io; /* XXX and -then- stash req_state pointers everywhere they are needed */ - ret = req->init(rgw_env, store, io, s); + ret = req->init(rgw_env, driver, io, s); if (ret < 0) { ldpp_dout(op, 10) << "failed to initialize request" << dendl; abort_req(s, op, ret); @@ -347,7 +347,7 @@ namespace rgw { rgw_env.set("HTTP_HOST", ""); - int ret = req->init(rgw_env, store, &io_ctx, s); + int ret = req->init(rgw_env, driver, &io_ctx, s); if (ret < 0) { ldpp_dout(op, 10) << "failed to initialize request" << dendl; abort_req(s, op, ret); @@ -499,7 +499,7 @@ namespace rgw { main.init_http_clients(); main.init_storage(); - if (! main.get_store()) { + if (! 
main.get_driver()) { mutex.lock(); init_timer.cancel_all_events(); init_timer.shutdown(); @@ -544,10 +544,10 @@ namespace rgw { return 0; } /* RGWLib::stop() */ - int RGWLibIO::set_uid(rgw::sal::Store* store, const rgw_user& uid) + int RGWLibIO::set_uid(rgw::sal::Driver* driver, const rgw_user& uid) { - const DoutPrefix dp(store->ctx(), dout_subsys, "librgw: "); - std::unique_ptr user = store->get_user(uid); + const DoutPrefix dp(driver->ctx(), dout_subsys, "librgw: "); + std::unique_ptr user = driver->get_user(uid); /* object exists, but policy is broken */ int ret = user->load_user(&dp, null_yield); if (ret < 0) { @@ -561,7 +561,7 @@ namespace rgw { int RGWLibRequest::read_permissions(RGWOp* op, optional_yield y) { /* bucket and object ops */ int ret = - rgw_build_bucket_policies(op, g_rgwlib->get_store(), get_state(), y); + rgw_build_bucket_policies(op, g_rgwlib->get_driver(), get_state(), y); if (ret < 0) { ldpp_dout(op, 10) << "read_permissions (bucket policy) on " << get_state()->bucket << ":" @@ -572,7 +572,7 @@ namespace rgw { ret = -EACCES; } else if (! 
only_bucket()) { /* object ops */ - ret = rgw_build_object_policies(op, g_rgwlib->get_store(), get_state(), + ret = rgw_build_object_policies(op, g_rgwlib->get_driver(), get_state(), op->prefetch_data(), y); if (ret < 0) { ldpp_dout(op, 10) << "read_permissions (object policy) on" diff --git a/src/rgw/rgw_lib.h b/src/rgw/rgw_lib.h index 06a69025588e1..e863aadab3c3f 100644 --- a/src/rgw/rgw_lib.h +++ b/src/rgw/rgw_lib.h @@ -29,7 +29,7 @@ namespace rgw { {} ~RGWLib() {} - rgw::sal::Store* get_store() { return main.get_store(); } + rgw::sal::Driver* get_driver() { return main.get_driver(); } RGWLibFrontend* get_fe() { return fe; } @@ -70,7 +70,7 @@ namespace rgw { return user_info; } - int set_uid(rgw::sal::Store* store, const rgw_user& uid); + int set_uid(rgw::sal::Driver* driver, const rgw_user& uid); int write_data(const char *buf, int len); int read_data(char *buf, int len); @@ -115,7 +115,7 @@ namespace rgw { RGWHandler_Lib() {} ~RGWHandler_Lib() override {} - static int init_from_header(rgw::sal::Store* store, + static int init_from_header(rgw::sal::Driver* driver, req_state *s); }; /* RGWHandler_Lib */ @@ -130,7 +130,7 @@ namespace rgw { inline req_state* get_state() { return this->RGWRequest::s; } RGWLibRequest(CephContext* _cct, std::unique_ptr _user) - : RGWRequest(g_rgwlib->get_store()->get_new_req_id()), + : RGWRequest(g_rgwlib->get_driver()->get_new_req_id()), tuser(std::move(_user)), cct(_cct) {} @@ -148,14 +148,14 @@ namespace rgw { using RGWHandler::init; - int init(const RGWEnv& rgw_env, rgw::sal::Store* _store, + int init(const RGWEnv& rgw_env, rgw::sal::Driver* _driver, RGWLibIO* io, req_state* _s) { RGWRequest::init_state(_s); - RGWHandler::init(_store, _s, io); + RGWHandler::init(_driver, _s, io); - get_state()->req_id = store->zone_unique_id(id); - get_state()->trans_id = store->zone_unique_trans_id(id); + get_state()->req_id = driver->zone_unique_id(id); + get_state()->trans_id = driver->zone_unique_trans_id(id); get_state()->bucket_tenant = 
tuser->get_tenant(); get_state()->set_user(tuser); @@ -164,7 +164,7 @@ namespace rgw { int ret = header_init(); if (ret == 0) { - ret = init_from_header(store, _s); + ret = init_from_header(driver, _s); } return ret; } @@ -188,16 +188,16 @@ namespace rgw { io_ctx.init(_cct); RGWRequest::init_state(&rstate); - RGWHandler::init(g_rgwlib->get_store(), &rstate, &io_ctx); + RGWHandler::init(g_rgwlib->get_driver(), &rstate, &io_ctx); - get_state()->req_id = store->zone_unique_id(id); - get_state()->trans_id = store->zone_unique_trans_id(id); + get_state()->req_id = driver->zone_unique_id(id); + get_state()->trans_id = driver->zone_unique_trans_id(id); ldpp_dout(get_state(), 2) << "initializing for trans_id = " << get_state()->trans_id.c_str() << dendl; } - inline rgw::sal::Store* get_store() { return store; } + inline rgw::sal::Driver* get_driver() { return driver; } inline RGWLibIO& get_io() { return io_ctx; } virtual int execute() final { ceph_abort(); } diff --git a/src/rgw/rgw_loadgen_process.cc b/src/rgw/rgw_loadgen_process.cc index 524d664c7cc2c..4a4c18e75f2d5 100644 --- a/src/rgw/rgw_loadgen_process.cc +++ b/src/rgw/rgw_loadgen_process.cc @@ -107,7 +107,7 @@ void RGWLoadGenProcess::gen_request(const string& method, int content_length, std::atomic* fail_flag) { RGWLoadGenRequest* req = - new RGWLoadGenRequest(store->get_new_req_id(), method, resource, + new RGWLoadGenRequest(driver->get_new_req_id(), method, resource, content_length, fail_flag); dout(10) << "allocated request req=" << hex << req << dec << dendl; req_throttle.get(1); @@ -133,7 +133,7 @@ void RGWLoadGenProcess::handle_request(const DoutPrefixProvider *dpp, RGWRequest RGWLoadGenIO real_client_io(&env); RGWRestfulIO client_io(cct, &real_client_io); ActiveRateLimiter ratelimit(cct); - int ret = process_request(store, rest, req, uri_prefix, + int ret = process_request(driver, rest, req, uri_prefix, *auth_registry, &client_io, olog, null_yield, nullptr, nullptr, nullptr, ratelimit.get_active(), diff --git 
a/src/rgw/rgw_log.cc b/src/rgw/rgw_log.cc index b723eb65e9980..7595f14a3a7b2 100644 --- a/src/rgw/rgw_log.cc +++ b/src/rgw/rgw_log.cc @@ -94,7 +94,7 @@ string render_log_object_name(const string& format, /* usage logger */ class UsageLogger : public DoutPrefixProvider { CephContext *cct; - rgw::sal::Store* store; + rgw::sal::Driver* driver; map usage_map; ceph::mutex lock = ceph::make_mutex("UsageLogger"); int32_t num_entries; @@ -117,7 +117,7 @@ class UsageLogger : public DoutPrefixProvider { } public: - UsageLogger(CephContext *_cct, rgw::sal::Store* _store) : cct(_cct), store(_store), num_entries(0), timer(cct, timer_lock) { + UsageLogger(CephContext *_cct, rgw::sal::Driver* _driver) : cct(_cct), driver(_driver), num_entries(0), timer(cct, timer_lock) { timer.init(); std::lock_guard l{timer_lock}; set_timer(); @@ -171,7 +171,7 @@ public: num_entries = 0; lock.unlock(); - store->log_usage(this, old_map); + driver->log_usage(this, old_map); } CephContext *get_cct() const override { return cct; } @@ -181,9 +181,9 @@ public: static UsageLogger *usage_logger = NULL; -void rgw_log_usage_init(CephContext *cct, rgw::sal::Store* store) +void rgw_log_usage_init(CephContext *cct, rgw::sal::Driver* driver) { - usage_logger = new UsageLogger(cct, store); + usage_logger = new UsageLogger(cct, driver); } void rgw_log_usage_finalize() @@ -515,7 +515,7 @@ int OpsLogSocket::log_json(req_state* s, bufferlist& bl) return 0; } -OpsLogRados::OpsLogRados(rgw::sal::Store* const& store): store(store) +OpsLogRados::OpsLogRados(rgw::sal::Driver* const& driver): driver(driver) { } @@ -535,7 +535,7 @@ int OpsLogRados::log(req_state* s, struct rgw_log_entry& entry) localtime_r(&t, &bdt); string oid = render_log_object_name(s->cct->_conf->rgw_log_object_name, &bdt, entry.bucket_id, entry.bucket); - if (store->log_op(s, oid, bl) < 0) { + if (driver->log_op(s, oid, bl) < 0) { ldpp_dout(s, 0) << "ERROR: failed to log RADOS RGW ops log entry for txn: " << s->trans_id << dendl; return -1; } diff 
--git a/src/rgw/rgw_log.h b/src/rgw/rgw_log.h index 5e8b275c6b3bb..0c97b2f8a64bb 100644 --- a/src/rgw/rgw_log.h +++ b/src/rgw/rgw_log.h @@ -272,11 +272,11 @@ public: }; class OpsLogRados : public OpsLogSink { - // main()'s Store pointer as a reference, possibly modified by RGWRealmReloader - rgw::sal::Store* const& store; + // main()'s driver pointer as a reference, possibly modified by RGWRealmReloader + rgw::sal::Driver* const& driver; public: - OpsLogRados(rgw::sal::Store* const& store); + OpsLogRados(rgw::sal::Driver* const& driver); int log(req_state* s, struct rgw_log_entry& entry) override; }; @@ -284,7 +284,7 @@ class RGWREST; int rgw_log_op(RGWREST* const rest, struct req_state* s, const RGWOp* op, OpsLogSink* olog); -void rgw_log_usage_init(CephContext* cct, rgw::sal::Store* store); +void rgw_log_usage_init(CephContext* cct, rgw::sal::Driver* driver); void rgw_log_usage_finalize(); void rgw_format_ops_log_entry(struct rgw_log_entry& entry, ceph::Formatter *formatter); diff --git a/src/rgw/rgw_lua.cc b/src/rgw/rgw_lua.cc index 30d29731604cd..28c57ddbc4306 100644 --- a/src/rgw/rgw_lua.cc +++ b/src/rgw/rgw_lua.cc @@ -97,7 +97,7 @@ int delete_script(const DoutPrefixProvider *dpp, sal::LuaManager* manager, const namespace bp = boost::process; -int add_package(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y, const std::string& package_name, bool allow_compilation) +int add_package(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, optional_yield y, const std::string& package_name, bool allow_compilation) { // verify that luarocks can load this package const auto p = bp::search_path("luarocks"); @@ -128,36 +128,36 @@ int add_package(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_ //replace previous versions of the package const std::string package_name_no_version = package_name.substr(0, package_name.find(" ")); - ret = remove_package(dpp, store, y, package_name_no_version); + ret = remove_package(dpp, driver, y, 
package_name_no_version); if (ret < 0) { return ret; } - auto lua_mgr = store->get_lua_manager(); + auto lua_mgr = driver->get_lua_manager(); return lua_mgr->add_package(dpp, y, package_name); } -int remove_package(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y, const std::string& package_name) +int remove_package(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, optional_yield y, const std::string& package_name) { - auto lua_mgr = store->get_lua_manager(); + auto lua_mgr = driver->get_lua_manager(); return lua_mgr->remove_package(dpp, y, package_name); } namespace bp = boost::process; -int list_packages(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y, packages_t& packages) +int list_packages(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, optional_yield y, packages_t& packages) { - auto lua_mgr = store->get_lua_manager(); + auto lua_mgr = driver->get_lua_manager(); return lua_mgr->list_packages(dpp, y, packages); } -int install_packages(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y, packages_t& failed_packages, std::string& output) { +int install_packages(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, optional_yield y, packages_t& failed_packages, std::string& output) { // luarocks directory cleanup std::error_code ec; - const auto& luarocks_path = store->get_luarocks_path(); + const auto& luarocks_path = driver->get_luarocks_path(); if (std::filesystem::remove_all(luarocks_path, ec) == static_cast(-1) && ec != std::errc::no_such_file_or_directory) { @@ -168,7 +168,7 @@ int install_packages(const DoutPrefixProvider *dpp, rgw::sal::Store* store, opti } packages_t packages; - auto ret = list_packages(dpp, store, y, packages); + auto ret = list_packages(dpp, driver, y, packages); if (ret == -ENOENT) { // allowlist is empty return 0; diff --git a/src/rgw/rgw_lua.h b/src/rgw/rgw_lua.h index 405492809f29c..b76795750b2e1 100644 --- a/src/rgw/rgw_lua.h +++ 
b/src/rgw/rgw_lua.h @@ -35,7 +35,7 @@ context to_context(const std::string& s); // verify a lua script bool verify(const std::string& script, std::string& err_msg); -// store a lua script in a context +// store a lua script in a context int write_script(const DoutPrefixProvider *dpp, rgw::sal::LuaManager* manager, const std::string& tenant, optional_yield y, context ctx, const std::string& script); // read the stored lua script from a context @@ -49,17 +49,17 @@ using packages_t = std::set; #ifdef WITH_RADOSGW_LUA_PACKAGES // add a lua package to the allowlist -int add_package(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y, const std::string& package_name, bool allow_compilation); +int add_package(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, optional_yield y, const std::string& package_name, bool allow_compilation); // remove a lua package from the allowlist -int remove_package(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y, const std::string& package_name); +int remove_package(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, optional_yield y, const std::string& package_name); // list lua packages in the allowlist -int list_packages(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y, packages_t& packages); +int list_packages(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, optional_yield y, packages_t& packages); // install all packages from the allowlist // return the list of packages that failed to install and the output of the install command -int install_packages(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y, packages_t& failed_packages, std::string& output); +int install_packages(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, optional_yield y, packages_t& failed_packages, std::string& output); #endif } diff --git a/src/rgw/rgw_lua_background.cc b/src/rgw/rgw_lua_background.cc index 88f4391e2d07a..35de4a7e9a9bd 100644 --- 
a/src/rgw/rgw_lua_background.cc +++ b/src/rgw/rgw_lua_background.cc @@ -56,13 +56,13 @@ int RGWTable::increment_by(lua_State* L) { return 0; } -Background::Background(rgw::sal::Store* store, +Background::Background(rgw::sal::Driver* driver, CephContext* cct, const std::string& luarocks_path, int execute_interval) : execute_interval(execute_interval), dp(cct, dout_subsys, "lua background: "), - lua_manager(store->get_lua_manager()), + lua_manager(driver->get_lua_manager()), cct(cct), luarocks_path(luarocks_path) {} @@ -96,8 +96,8 @@ void Background::pause() { cond.notify_all(); } -void Background::resume(rgw::sal::Store* store) { - lua_manager = store->get_lua_manager(); +void Background::resume(rgw::sal::Driver* driver) { + lua_manager = driver->get_lua_manager(); paused = false; cond.notify_all(); } diff --git a/src/rgw/rgw_lua_background.h b/src/rgw/rgw_lua_background.h index 671495b3c553f..e1271bceb9b0b 100644 --- a/src/rgw/rgw_lua_background.h +++ b/src/rgw/rgw_lua_background.h @@ -206,7 +206,7 @@ protected: virtual int read_script(); public: - Background(rgw::sal::Store* store, + Background(rgw::sal::Driver* driver, CephContext* cct, const std::string& luarocks_path, int execute_interval = INIT_EXECUTE_INTERVAL); @@ -223,7 +223,7 @@ public: } void pause() override; - void resume(rgw::sal::Store* _store) override; + void resume(rgw::sal::Driver* _driver) override; }; } //namepsace rgw::lua diff --git a/src/rgw/rgw_lua_request.cc b/src/rgw/rgw_lua_request.cc index 869c8f8862724..4ee66b6a816bd 100644 --- a/src/rgw/rgw_lua_request.cc +++ b/src/rgw/rgw_lua_request.cc @@ -846,7 +846,7 @@ void create_top_metatable(lua_State* L, req_state* s, const char* op_name) { } int execute( - rgw::sal::Store* store, + rgw::sal::Driver* driver, RGWREST* rest, OpsLogSink* olog, req_state* s, @@ -858,8 +858,8 @@ int execute( lua_state_guard lguard(L); open_standard_libs(L); - set_package_path(L, store ? - store->get_luarocks_path() : + set_package_path(L, driver ? 
+ driver->get_luarocks_path() : ""); create_debug_action(L, s->cct); diff --git a/src/rgw/rgw_lua_request.h b/src/rgw/rgw_lua_request.h index c52fbab8f836c..7c85ac9cd984f 100644 --- a/src/rgw/rgw_lua_request.h +++ b/src/rgw/rgw_lua_request.h @@ -16,7 +16,7 @@ void create_top_metatable(lua_State* L, req_state* s, const char* op_name); // execute a lua script in the Request context int execute( - rgw::sal::Store* store, + rgw::sal::Driver* driver, RGWREST* rest, OpsLogSink* olog, req_state *s, diff --git a/src/rgw/rgw_main.cc b/src/rgw/rgw_main.cc index b9753893f654d..6d26302516db8 100644 --- a/src/rgw/rgw_main.cc +++ b/src/rgw/rgw_main.cc @@ -136,7 +136,7 @@ int main(int argc, char *argv[]) main.init_http_clients(); main.init_storage(); - if (! main.get_store()) { + if (! main.get_driver()) { mutex.lock(); init_timer.cancel_all_events(); init_timer.shutdown(); diff --git a/src/rgw/rgw_main.h b/src/rgw/rgw_main.h index bcd45badf66bc..ca95319866424 100644 --- a/src/rgw/rgw_main.h +++ b/src/rgw/rgw_main.h @@ -43,8 +43,8 @@ public: void pause() override { std::for_each(pausers.begin(), pausers.end(), [](Pauser* p){p->pause();}); } - void resume(rgw::sal::Store* store) override { - std::for_each(pausers.begin(), pausers.end(), [store](Pauser* p){p->resume(store);}); + void resume(rgw::sal::Driver* driver) override { + std::for_each(pausers.begin(), pausers.end(), [driver](Pauser* p){p->resume(driver);}); } }; @@ -75,7 +75,7 @@ class AppMain { std::unique_ptr fe_pauser; std::unique_ptr realm_watcher; std::unique_ptr rgw_pauser; - rgw::sal::Store* store; + rgw::sal::Driver* driver; DoutPrefixProvider* dpp; public: @@ -86,8 +86,8 @@ public: void shutdown(std::function finalize_async_signals = []() { /* nada */}); - rgw::sal::Store* get_store() { - return store; + rgw::sal::Driver* get_driver() { + return driver; } rgw::LDAPHelper* get_ldh() { @@ -121,9 +121,9 @@ static inline RGWRESTMgr *set_logging(RGWRESTMgr* mgr) return mgr; } -static inline RGWRESTMgr 
*rest_filter(rgw::sal::Store* store, int dialect, RGWRESTMgr* orig) +static inline RGWRESTMgr *rest_filter(rgw::sal::Driver* driver, int dialect, RGWRESTMgr* orig) { - RGWSyncModuleInstanceRef sync_module = store->get_sync_module(); + RGWSyncModuleInstanceRef sync_module = driver->get_sync_module(); if (sync_module) { return sync_module->get_rest_filter(dialect, orig); } else { diff --git a/src/rgw/rgw_object_expirer.cc b/src/rgw/rgw_object_expirer.cc index 2e1249b85f714..fd36a49c6fb22 100644 --- a/src/rgw/rgw_object_expirer.cc +++ b/src/rgw/rgw_object_expirer.cc @@ -32,16 +32,16 @@ #define dout_subsys ceph_subsys_rgw -static rgw::sal::Store* store = NULL; +static rgw::sal::Driver* driver = NULL; class StoreDestructor { - rgw::sal::Store* store; + rgw::sal::Driver* driver; public: - explicit StoreDestructor(rgw::sal::Store* _s) : store(_s) {} + explicit StoreDestructor(rgw::sal::Driver* _s) : driver(_s) {} ~StoreDestructor() { - if (store) { - StoreManager::close_storage(store); + if (driver) { + DriverManager::close_storage(driver); } } }; @@ -80,19 +80,19 @@ int main(const int argc, const char **argv) common_init_finish(g_ceph_context); const DoutPrefix dp(cct.get(), dout_subsys, "rgw object expirer: "); - StoreManager::Config cfg; + DriverManager::Config cfg; cfg.store_name = "rados"; cfg.filter_name = "none"; - store = StoreManager::get_storage(&dp, g_ceph_context, cfg, false, false, false, false, false); - if (!store) { + driver = DriverManager::get_storage(&dp, g_ceph_context, cfg, false, false, false, false, false); + if (!driver) { std::cerr << "couldn't init storage provider" << std::endl; return EIO; } - /* Guard to not forget about closing the rados store. */ - StoreDestructor store_dtor(store); + /* Guard to not forget about closing the rados driver. 
*/ + StoreDestructor store_dtor(driver); - RGWObjectExpirer objexp(store); + RGWObjectExpirer objexp(driver); objexp.start_processor(); const utime_t interval(g_ceph_context->_conf->rgw_objexp_gc_interval, 0); diff --git a/src/rgw/rgw_op.cc b/src/rgw/rgw_op.cc index 464c04e1f0944..e6d7be01caec3 100644 --- a/src/rgw/rgw_op.cc +++ b/src/rgw/rgw_op.cc @@ -218,7 +218,7 @@ static int get_user_policy_from_attr(const DoutPrefixProvider *dpp, */ int rgw_op_get_bucket_policy_from_attr(const DoutPrefixProvider *dpp, CephContext *cct, - rgw::sal::Store* store, + rgw::sal::Driver* driver, RGWBucketInfo& bucket_info, map& bucket_attrs, RGWAccessControlPolicy *policy, @@ -232,7 +232,7 @@ int rgw_op_get_bucket_policy_from_attr(const DoutPrefixProvider *dpp, return ret; } else { ldpp_dout(dpp, 0) << "WARNING: couldn't find acl header for bucket, generating default" << dendl; - std::unique_ptr user = store->get_user(bucket_info.owner); + std::unique_ptr user = driver->get_user(bucket_info.owner); /* object exists, but policy is broken */ int r = user->load_user(dpp, y); if (r < 0) @@ -245,7 +245,7 @@ int rgw_op_get_bucket_policy_from_attr(const DoutPrefixProvider *dpp, static int get_obj_policy_from_attr(const DoutPrefixProvider *dpp, CephContext *cct, - rgw::sal::Store* store, + rgw::sal::Driver* driver, RGWBucketInfo& bucket_info, map& bucket_attrs, RGWAccessControlPolicy *policy, @@ -266,7 +266,7 @@ static int get_obj_policy_from_attr(const DoutPrefixProvider *dpp, } else if (ret == -ENODATA) { /* object exists, but policy is broken */ ldpp_dout(dpp, 0) << "WARNING: couldn't find acl header for object, generating default" << dendl; - std::unique_ptr user = store->get_user(bucket_info.owner); + std::unique_ptr user = driver->get_user(bucket_info.owner); ret = user->load_user(dpp, y); if (ret < 0) return ret; @@ -334,7 +334,7 @@ vector get_iam_user_policy_from_attr(CephContext* cct, } static int read_bucket_policy(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + 
rgw::sal::Driver* driver, req_state *s, RGWBucketInfo& bucket_info, map& bucket_attrs, @@ -352,7 +352,7 @@ static int read_bucket_policy(const DoutPrefixProvider *dpp, return 0; } - int ret = rgw_op_get_bucket_policy_from_attr(dpp, s->cct, store, bucket_info, bucket_attrs, policy, y); + int ret = rgw_op_get_bucket_policy_from_attr(dpp, s->cct, driver, bucket_info, bucket_attrs, policy, y); if (ret == -ENOENT) { ret = -ERR_NO_SUCH_BUCKET; } @@ -361,7 +361,7 @@ static int read_bucket_policy(const DoutPrefixProvider *dpp, } static int read_obj_policy(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, req_state *s, RGWBucketInfo& bucket_info, map& bucket_attrs, @@ -396,14 +396,14 @@ static int read_obj_policy(const DoutPrefixProvider *dpp, } policy = get_iam_policy_from_attr(s->cct, bucket_attrs, bucket->get_tenant()); - int ret = get_obj_policy_from_attr(dpp, s->cct, store, bucket_info, + int ret = get_obj_policy_from_attr(dpp, s->cct, driver, bucket_info, bucket_attrs, acl, storage_class, object, s->yield); if (ret == -ENOENT) { /* object does not exist checking the bucket's ACL to make sure that we send a proper error code */ RGWAccessControlPolicy bucket_policy(s->cct); - ret = rgw_op_get_bucket_policy_from_attr(dpp, s->cct, store, bucket_info, bucket_attrs, &bucket_policy, y); + ret = rgw_op_get_bucket_policy_from_attr(dpp, s->cct, driver, bucket_info, bucket_attrs, &bucket_policy, y); if (ret < 0) { return ret; } @@ -450,7 +450,7 @@ static int read_obj_policy(const DoutPrefixProvider *dpp, * only_bucket: If true, reads the user and bucket ACLs rather than the object ACL. * Returns: 0 on success, -ERR# otherwise. 
*/ -int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::Store* store, req_state* s, optional_yield y) +int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, req_state* s, optional_yield y) { int ret = 0; @@ -480,14 +480,14 @@ int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::Store* st /* check if copy source is within the current domain */ if (!s->src_bucket_name.empty()) { std::unique_ptr src_bucket; - ret = store->get_bucket(dpp, nullptr, + ret = driver->get_bucket(dpp, nullptr, rgw_bucket(s->src_tenant_name, s->src_bucket_name, s->bucket_instance_id), &src_bucket, y); if (ret == 0) { string& zonegroup = src_bucket->get_info().zonegroup; - s->local_source = store->get_zone()->get_zonegroup().equals(zonegroup); + s->local_source = driver->get_zone()->get_zonegroup().equals(zonegroup); } } @@ -504,7 +504,7 @@ int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::Store* st /* This is the only place that s->bucket is created. It should never be * overwritten. 
*/ - ret = store->get_bucket(dpp, s->user.get(), rgw_bucket(rgw_bucket_key(s->bucket_tenant, s->bucket_name, s->bucket_instance_id)), &s->bucket, y); + ret = driver->get_bucket(dpp, s->user.get(), rgw_bucket(rgw_bucket_key(s->bucket_tenant, s->bucket_name, s->bucket_instance_id)), &s->bucket, y); if (ret < 0) { if (ret != -ENOENT) { string bucket_log; @@ -522,7 +522,7 @@ int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::Store* st s->bucket_mtime = s->bucket->get_modification_time(); s->bucket_attrs = s->bucket->get_attrs(); - ret = read_bucket_policy(dpp, store, s, s->bucket->get_info(), + ret = read_bucket_policy(dpp, driver, s, s->bucket->get_info(), s->bucket->get_attrs(), s->bucket_acl.get(), s->bucket->get_key(), y); acct_acl_user = { @@ -533,7 +533,7 @@ int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::Store* st s->bucket_owner = s->bucket_acl->get_owner(); std::unique_ptr zonegroup; - int r = store->get_zonegroup(s->bucket->get_info().zonegroup, &zonegroup); + int r = driver->get_zonegroup(s->bucket->get_info().zonegroup, &zonegroup); if (!r) { s->zonegroup_endpoint = zonegroup->get_endpoint(); s->zonegroup_name = zonegroup->get_name(); @@ -542,14 +542,14 @@ int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::Store* st ret = r; } - if (!store->get_zone()->get_zonegroup().equals(s->bucket->get_info().zonegroup)) { + if (!driver->get_zone()->get_zonegroup().equals(s->bucket->get_info().zonegroup)) { ldpp_dout(dpp, 0) << "NOTICE: request for data in a different zonegroup (" << s->bucket->get_info().zonegroup << " != " - << store->get_zone()->get_zonegroup().get_id() << ")" << dendl; + << driver->get_zone()->get_zonegroup().get_id() << ")" << dendl; /* we now need to make sure that the operation actually requires copy source, that is * it's a copy operation */ - if (store->get_zone()->get_zonegroup().is_master_zonegroup() && s->system_request) { + if 
(driver->get_zone()->get_zonegroup().is_master_zonegroup() && s->system_request) { /*If this is the master, don't redirect*/ } else if (s->op_type == RGW_OP_GET_BUCKET_LOCATION ) { /* If op is get bucket location, don't redirect */ @@ -564,7 +564,7 @@ int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::Store* st s->dest_placement.storage_class = s->info.storage_class; s->dest_placement.inherit_from(s->bucket->get_placement_rule()); - if (!store->valid_placement(s->dest_placement)) { + if (!driver->valid_placement(s->dest_placement)) { ldpp_dout(dpp, 0) << "NOTICE: invalid dest placement: " << s->dest_placement.to_str() << dendl; return -EINVAL; } @@ -574,7 +574,7 @@ int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::Store* st /* handle user ACL only for those APIs which support it */ if (s->user_acl) { - std::unique_ptr acl_user = store->get_user(acct_acl_user.uid); + std::unique_ptr acl_user = driver->get_user(acct_acl_user.uid); ret = acl_user->read_attrs(dpp, y); if (!ret) { @@ -630,7 +630,7 @@ int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::Store* st ret = -EACCES; } - bool success = store->get_zone()->get_redirect_endpoint(&s->redirect_zone_endpoint); + bool success = driver->get_zone()->get_redirect_endpoint(&s->redirect_zone_endpoint); if (success) { ldpp_dout(dpp, 20) << "redirect_zone_endpoint=" << s->redirect_zone_endpoint << dendl; } @@ -644,7 +644,7 @@ int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::Store* st * only_bucket: If true, reads the bucket ACL rather than the object ACL. * Returns: 0 on success, -ERR# otherwise. 
*/ -int rgw_build_object_policies(const DoutPrefixProvider *dpp, rgw::sal::Store* store, +int rgw_build_object_policies(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, req_state *s, bool prefetch_data, optional_yield y) { int ret = 0; @@ -659,7 +659,7 @@ int rgw_build_object_policies(const DoutPrefixProvider *dpp, rgw::sal::Store* st if (prefetch_data) { s->object->set_prefetch_data(); } - ret = read_obj_policy(dpp, store, s, s->bucket->get_info(), s->bucket_attrs, + ret = read_obj_policy(dpp, driver, s, s->bucket->get_info(), s->bucket_attrs, s->object_acl.get(), nullptr, s->iam_policy, s->bucket.get(), s->object.get(), y); } @@ -848,7 +848,7 @@ static void rgw_add_grant_to_iam_environment(rgw::IAM::Environment& e, req_state } } -void rgw_build_iam_environment(rgw::sal::Store* store, +void rgw_build_iam_environment(rgw::sal::Driver* driver, req_state* s) { const auto& m = s->info.env->get_map(); @@ -997,7 +997,7 @@ int RGWOp::verify_op_mask() return -EPERM; } - if (!s->system_request && (required_mask & RGW_OP_TYPE_MODIFY) && !store->get_zone()->is_writeable()) { + if (!s->system_request && (required_mask & RGW_OP_TYPE_MODIFY) && !driver->get_zone()->is_writeable()) { ldpp_dout(this, 5) << "NOTICE: modify request to a read-only zone by a " "non-system user, permission denied" << dendl; return -EPERM; @@ -1155,7 +1155,7 @@ void RGWPutBucketTags::execute(optional_yield y) if (op_ret < 0) return; - op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; } @@ -1185,7 +1185,7 @@ int RGWDeleteBucketTags::verify_permission(optional_yield y) void RGWDeleteBucketTags::execute(optional_yield y) { bufferlist in_data; - op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, 
y); + op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -1240,7 +1240,7 @@ void RGWPutBucketReplication::execute(optional_yield y) { if (op_ret < 0) return; - op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -1282,7 +1282,7 @@ int RGWDeleteBucketReplication::verify_permission(optional_yield y) void RGWDeleteBucketReplication::execute(optional_yield y) { bufferlist in_data; - op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -1345,7 +1345,7 @@ int RGWOp::init_quota() } std::unique_ptr owner_user = - store->get_user(s->bucket->get_info().owner); + driver->get_user(s->bucket->get_info().owner); rgw::sal::User* user; if (s->user->get_id() == s->bucket_owner.get_id()) { @@ -1358,7 +1358,7 @@ int RGWOp::init_quota() } - store->get_quota(quota); + driver->get_quota(quota); if (s->bucket->get_info().quota.enabled) { quota.bucket_quota = s->bucket->get_info().quota; @@ -1650,7 +1650,7 @@ int RGWGetObj::read_user_manifest_part(rgw::sal::Bucket* bucket, static int iterate_user_manifest_parts(const DoutPrefixProvider *dpp, CephContext * const cct, - rgw::sal::Store* const store, + rgw::sal::Driver* const driver, const off_t ofs, const off_t end, rgw::sal::Bucket* bucket, @@ -1757,7 +1757,7 @@ struct rgw_slo_part { static int iterate_slo_parts(const DoutPrefixProvider *dpp, 
CephContext *cct, - rgw::sal::Store*store, + rgw::sal::Driver* driver, off_t ofs, off_t end, map& slo_parts, @@ -1875,14 +1875,14 @@ int RGWGetObj::handle_user_manifest(const char *prefix, optional_yield y) if (bucket_name.compare(s->bucket->get_name()) != 0) { map bucket_attrs; - r = store->get_bucket(this, s->user.get(), s->user->get_tenant(), bucket_name, &ubucket, y); + r = driver->get_bucket(this, s->user.get(), s->user->get_tenant(), bucket_name, &ubucket, y); if (r < 0) { ldpp_dout(this, 0) << "could not get bucket info for bucket=" << bucket_name << dendl; return r; } bucket_acl = &_bucket_acl; - r = read_bucket_policy(this, store, s, ubucket->get_info(), bucket_attrs, bucket_acl, ubucket->get_key(), y); + r = read_bucket_policy(this, driver, s, ubucket->get_info(), bucket_attrs, bucket_acl, ubucket->get_key(), y); if (r < 0) { ldpp_dout(this, 0) << "failed to read bucket policy" << dendl; return r; @@ -1900,7 +1900,7 @@ int RGWGetObj::handle_user_manifest(const char *prefix, optional_yield y) * - total length (of the parts we are going to send to client), * - overall DLO's content size, * - md5 sum of overall DLO's content (for etag of Swift API). 
*/ - r = iterate_user_manifest_parts(this, s->cct, store, ofs, end, + r = iterate_user_manifest_parts(this, s->cct, driver, ofs, end, pbucket, obj_prefix, bucket_acl, *bucket_policy, nullptr, &s->obj_size, &lo_etag, nullptr /* cb */, nullptr /* cb arg */, y); @@ -1914,7 +1914,7 @@ int RGWGetObj::handle_user_manifest(const char *prefix, optional_yield y) return r; } - r = iterate_user_manifest_parts(this, s->cct, store, ofs, end, + r = iterate_user_manifest_parts(this, s->cct, driver, ofs, end, pbucket, obj_prefix, bucket_acl, *bucket_policy, &total_len, nullptr, nullptr, nullptr, nullptr, y); @@ -1928,7 +1928,7 @@ int RGWGetObj::handle_user_manifest(const char *prefix, optional_yield y) return 0; } - r = iterate_user_manifest_parts(this, s->cct, store, ofs, end, + r = iterate_user_manifest_parts(this, s->cct, driver, ofs, end, pbucket, obj_prefix, bucket_acl, *bucket_policy, nullptr, nullptr, nullptr, get_obj_user_manifest_iterate_cb, (void *)this, y); @@ -2005,7 +2005,7 @@ int RGWGetObj::handle_slo_manifest(bufferlist& bl, optional_yield y) RGWAccessControlPolicy& _bucket_acl = allocated_acls.back(); std::unique_ptr tmp_bucket; - int r = store->get_bucket(this, s->user.get(), s->user->get_tenant(), bucket_name, &tmp_bucket, y); + int r = driver->get_bucket(this, s->user.get(), s->user->get_tenant(), bucket_name, &tmp_bucket, y); if (r < 0) { ldpp_dout(this, 0) << "could not get bucket info for bucket=" << bucket_name << dendl; @@ -2013,7 +2013,7 @@ int RGWGetObj::handle_slo_manifest(bufferlist& bl, optional_yield y) } bucket = tmp_bucket.get(); bucket_acl = &_bucket_acl; - r = read_bucket_policy(this, store, s, tmp_bucket->get_info(), tmp_bucket->get_attrs(), bucket_acl, + r = read_bucket_policy(this, driver, s, tmp_bucket->get_info(), tmp_bucket->get_attrs(), bucket_acl, tmp_bucket->get_key(), y); if (r < 0) { ldpp_dout(this, 0) << "failed to read bucket ACL for bucket " @@ -2069,7 +2069,7 @@ int RGWGetObj::handle_slo_manifest(bufferlist& bl, optional_yield y) << 
" total=" << total_len << dendl; - r = iterate_slo_parts(this, s->cct, store, ofs, end, slo_parts, + r = iterate_slo_parts(this, s->cct, driver, ofs, end, slo_parts, get_obj_user_manifest_iterate_cb, (void *)this); if (r < 0) { return r; @@ -2199,7 +2199,7 @@ void RGWGetObj::execute(optional_yield y) op_ret = -EINVAL; goto done_err; } - torrent.init(s, store); + torrent.init(s, driver); rgw_obj obj = s->object->get_obj(); op_ret = torrent.get_torrent_file(s->object.get(), total_len, bl, obj); if (op_ret < 0) @@ -2439,7 +2439,7 @@ void RGWListBuckets::execute(optional_yield y) * isn't actually used in a given account. In such situation its usage * stats would be simply full of zeros. */ std::set targets; - if (store->get_zone()->get_zonegroup().get_placement_target_names(targets)) { + if (driver->get_zone()->get_zonegroup().get_placement_target_names(targets)) { for (const auto& policy : targets) { policies_stats.emplace(policy, decltype(policies_stats)::mapped_type()); } @@ -2530,13 +2530,13 @@ void RGWGetUsage::execute(optional_yield y) } } - op_ret = rgw_user_sync_all_stats(this, store, s->user.get(), y); + op_ret = rgw_user_sync_all_stats(this, driver, s->user.get(), y); if (op_ret < 0) { ldpp_dout(this, 0) << "ERROR: failed to sync user stats" << dendl; return; } - op_ret = rgw_user_get_all_buckets_stats(this, store, s->user.get(), buckets_usage, y); + op_ret = rgw_user_get_all_buckets_stats(this, driver, s->user.get(), buckets_usage, y); if (op_ret < 0) { ldpp_dout(this, 0) << "ERROR: failed to get user's buckets stats" << dendl; return; @@ -2582,7 +2582,7 @@ void RGWStatAccount::execute(optional_yield y) * isn't actually used in a given account. In such situation its usage * stats would be simply full of zeros. 
*/ std::set names; - store->get_zone()->get_zonegroup().get_placement_target_names(names); + driver->get_zone()->get_zonegroup().get_placement_target_names(names); for (const auto& policy : names) { policies_stats.emplace(policy, decltype(policies_stats)::mapped_type()); } @@ -2698,7 +2698,7 @@ void RGWSetBucketVersioning::execute(optional_yield y) } } - op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -2787,7 +2787,7 @@ void RGWSetBucketWebsite::execute(optional_yield y) return; } - op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << " forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -2830,7 +2830,7 @@ void RGWDeleteBucketWebsite::execute(optional_yield y) bufferlist in_data; - op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "NOTICE: forward_to_master failed on bucket=" << s->bucket->get_name() << "returned err=" << op_ret << dendl; @@ -2875,7 +2875,7 @@ void RGWStatBucket::execute(optional_yield y) return; } - op_ret = store->get_bucket(this, s->user.get(), s->bucket->get_key(), &bucket, y); + op_ret = driver->get_bucket(this, s->user.get(), s->bucket->get_key(), &bucket, y); if (op_ret) { return; } @@ -3215,7 +3215,7 @@ void RGWCreateBucket::execute(optional_yield y) if (!relaxed_region_enforcement && !location_constraint.empty() && - 
!store->get_zone()->has_zonegroup_api(location_constraint)) { + !driver->get_zone()->has_zonegroup_api(location_constraint)) { ldpp_dout(this, 0) << "location constraint (" << location_constraint << ")" << " can't be found." << dendl; op_ret = -ERR_INVALID_LOCATION_CONSTRAINT; @@ -3223,10 +3223,10 @@ void RGWCreateBucket::execute(optional_yield y) return; } - if (!relaxed_region_enforcement && !store->get_zone()->get_zonegroup().is_master_zonegroup() && !location_constraint.empty() && - store->get_zone()->get_zonegroup().get_api_name() != location_constraint) { + if (!relaxed_region_enforcement && !driver->get_zone()->get_zonegroup().is_master_zonegroup() && !location_constraint.empty() && + driver->get_zone()->get_zonegroup().get_api_name() != location_constraint) { ldpp_dout(this, 0) << "location constraint (" << location_constraint << ")" - << " doesn't match zonegroup" << " (" << store->get_zone()->get_zonegroup().get_api_name() << ")" + << " doesn't match zonegroup" << " (" << driver->get_zone()->get_zonegroup().get_api_name() << ")" << dendl; op_ret = -ERR_INVALID_LOCATION_CONSTRAINT; s->err.message = "The specified location-constraint is not valid"; @@ -3234,12 +3234,12 @@ void RGWCreateBucket::execute(optional_yield y) } std::set names; - store->get_zone()->get_zonegroup().get_placement_target_names(names); + driver->get_zone()->get_zonegroup().get_placement_target_names(names); if (!placement_rule.name.empty() && !names.count(placement_rule.name)) { ldpp_dout(this, 0) << "placement target (" << placement_rule.name << ")" << " doesn't exist in the placement targets of zonegroup" - << " (" << store->get_zone()->get_zonegroup().get_api_name() << ")" << dendl; + << " (" << driver->get_zone()->get_zonegroup().get_api_name() << ")" << dendl; op_ret = -ERR_INVALID_LOCATION_CONSTRAINT; s->err.message = "The specified placement target does not exist"; return; @@ -3249,7 +3249,7 @@ void RGWCreateBucket::execute(optional_yield y) * specific request */ { 
std::unique_ptr tmp_bucket; - op_ret = store->get_bucket(this, s->user.get(), s->bucket_tenant, + op_ret = driver->get_bucket(this, s->user.get(), s->bucket_tenant, s->bucket_name, &tmp_bucket, y); if (op_ret < 0 && op_ret != -ENOENT) return; @@ -3257,7 +3257,7 @@ void RGWCreateBucket::execute(optional_yield y) if (s->bucket_exists) { if (!s->system_request && - store->get_zone()->get_zonegroup().get_id() != + driver->get_zone()->get_zonegroup().get_id() != tmp_bucket->get_info().zonegroup) { op_ret = -EEXIST; return; @@ -3275,10 +3275,10 @@ void RGWCreateBucket::execute(optional_yield y) if (s->system_request) { zonegroup_id = s->info.args.get(RGW_SYS_PARAM_PREFIX "zonegroup"); if (zonegroup_id.empty()) { - zonegroup_id = store->get_zone()->get_zonegroup().get_id(); + zonegroup_id = driver->get_zone()->get_zonegroup().get_id(); } } else { - zonegroup_id = store->get_zone()->get_zonegroup().get_id(); + zonegroup_id = driver->get_zone()->get_zonegroup().get_id(); } /* Encode special metadata first as we're using std::map::emplace under @@ -3458,7 +3458,7 @@ void RGWDeleteBucket::execute(optional_yield y) } bufferlist in_data; - op_ret = store->forward_request_to_master(this, s->user.get(), &ot.read_version, in_data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master(this, s->user.get(), &ot.read_version, in_data, nullptr, s->info, y); if (op_ret < 0) { if (op_ret == -ENOENT) { /* adjust error, we want to return with NoSuchBucket and not @@ -3528,7 +3528,7 @@ int RGWPutObj::init_processing(optional_yield y) { } } std::unique_ptr bucket; - ret = store->get_bucket(this, s->user.get(), copy_source_tenant_name, copy_source_bucket_name, + ret = driver->get_bucket(this, s->user.get(), copy_source_tenant_name, copy_source_bucket_name, &bucket, y); if (ret < 0) { ldpp_dout(this, 5) << __func__ << "(): get_bucket() returned ret=" << ret << dendl; @@ -3591,7 +3591,7 @@ int RGWPutObj::verify_permission(optional_yield y) boost::optional policy; map cs_attrs; 
std::unique_ptr cs_bucket; - int ret = store->get_bucket(NULL, copy_source_bucket_info, &cs_bucket); + int ret = driver->get_bucket(NULL, copy_source_bucket_info, &cs_bucket); if (ret < 0) return ret; @@ -3602,7 +3602,7 @@ int RGWPutObj::verify_permission(optional_yield y) cs_object->set_prefetch_data(); /* check source object permissions */ - if (ret = read_obj_policy(this, store, s, copy_source_bucket_info, cs_attrs, &cs_acl, nullptr, + if (ret = read_obj_policy(this, driver, s, copy_source_bucket_info, cs_attrs, &cs_acl, nullptr, policy, cs_bucket.get(), cs_object.get(), y, true); ret < 0) { return ret; } @@ -3792,7 +3792,7 @@ int RGWPutObj::get_data(const off_t fst, const off_t lst, bufferlist& bl) new_end = lst; std::unique_ptr bucket; - ret = store->get_bucket(nullptr, copy_source_bucket_info, &bucket); + ret = driver->get_bucket(nullptr, copy_source_bucket_info, &bucket); if (ret < 0) return ret; @@ -3965,7 +3965,7 @@ void RGWPutObj::execute(optional_yield y) // make reservation for notification if needed std::unique_ptr res - = store->get_notification( + = driver->get_notification( s->object.get(), s->src_object.get(), s, rgw::notify::ObjectCreatedPut); if(!multipart) { @@ -4009,7 +4009,7 @@ void RGWPutObj::execute(optional_yield y) op_ret = -ERR_INVALID_BUCKET_STATE; return; } - processor = store->get_append_writer(this, s->yield, s->object->clone(), + processor = driver->get_append_writer(this, s->yield, s->object->clone(), s->bucket_owner.get_id(), pdest_placement, s->req_id, position, &cur_accounted_size); @@ -4022,7 +4022,7 @@ void RGWPutObj::execute(optional_yield y) version_id = s->object->get_instance(); } } - processor = store->get_atomic_writer(this, s->yield, s->object->clone(), + processor = driver->get_atomic_writer(this, s->yield, s->object->clone(), s->bucket_owner.get_id(), pdest_placement, olh_epoch, s->req_id); } @@ -4035,7 +4035,7 @@ void RGWPutObj::execute(optional_yield y) } if ((! 
copy_source.empty()) && !copy_source_range) { std::unique_ptr bucket; - op_ret = store->get_bucket(nullptr, copy_source_bucket_info, &bucket); + op_ret = driver->get_bucket(nullptr, copy_source_bucket_info, &bucket); if (op_ret < 0) { ldpp_dout(this, 0) << "ERROR: failed to get bucket with error" << op_ret << dendl; return; @@ -4074,7 +4074,7 @@ void RGWPutObj::execute(optional_yield y) // no filters by default rgw::sal::DataProcessor *filter = processor.get(); - const auto& compression_type = store->get_compression_type(*pdest_placement); + const auto& compression_type = driver->get_compression_type(*pdest_placement); CompressorRef plugin; boost::optional compressor; @@ -4266,7 +4266,7 @@ void RGWPutObj::execute(optional_yield y) /* produce torrent */ if (s->cct->_conf->rgw_torrent_flag && (ofs == torrent.get_data_len())) { - torrent.init(s, store); + torrent.init(s, driver); torrent.set_create_date(mtime); op_ret = torrent.complete(y); if (0 != op_ret) @@ -4375,7 +4375,7 @@ void RGWPostObj::execute(optional_yield y) // make reservation for notification if needed std::unique_ptr res - = store->get_notification(s->object.get(), s->src_object.get(), s, rgw::notify::ObjectCreatedPost); + = driver->get_notification(s->object.get(), s->src_object.get(), s, rgw::notify::ObjectCreatedPost); op_ret = res->publish_reserve(this); if (op_ret < 0) { return; @@ -4419,7 +4419,7 @@ void RGWPostObj::execute(optional_yield y) } std::unique_ptr processor; - processor = store->get_atomic_writer(this, s->yield, std::move(obj), + processor = driver->get_atomic_writer(this, s->yield, std::move(obj), s->bucket_owner.get_id(), &s->dest_placement, 0, s->req_id); op_ret = processor->prepare(s->yield); @@ -4438,7 +4438,7 @@ void RGWPostObj::execute(optional_yield y) if (encrypt != nullptr) { filter = encrypt.get(); } else { - const auto& compression_type = store->get_compression_type(s->dest_placement); + const auto& compression_type = driver->get_compression_type(s->dest_placement); if 
(compression_type != "none") { plugin = Compressor::create(s->cct, compression_type); if (!plugin) { @@ -4842,7 +4842,7 @@ int RGWDeleteObj::handle_slo_manifest(bufferlist& bl, optional_yield y) try { deleter = std::unique_ptr(\ - new RGWBulkDelete::Deleter(this, store, s)); + new RGWBulkDelete::Deleter(this, driver, s)); } catch (const std::bad_alloc&) { return -ENOMEM; } @@ -5059,7 +5059,7 @@ void RGWDeleteObj::execute(optional_yield y) rgw::notify::ObjectRemovedDeleteMarkerCreated : rgw::notify::ObjectRemovedDelete; std::unique_ptr res - = store->get_notification(s->object.get(), s->src_object.get(), s, + = driver->get_notification(s->object.get(), s->src_object.get(), s, event_type); op_ret = res->publish_reserve(this); if (op_ret < 0) { @@ -5183,7 +5183,7 @@ int RGWCopyObj::verify_permission(optional_yield y) return op_ret; } - op_ret = store->get_bucket(this, s->user.get(), + op_ret = driver->get_bucket(this, s->user.get(), rgw_bucket(src_tenant_name, src_bucket_name, s->bucket_instance_id), @@ -5205,7 +5205,7 @@ int RGWCopyObj::verify_permission(optional_yield y) rgw_placement_rule src_placement; /* check source object permissions */ - op_ret = read_obj_policy(this, store, s, src_bucket->get_info(), src_bucket->get_attrs(), &src_acl, &src_placement.storage_class, + op_ret = read_obj_policy(this, driver, s, src_bucket->get_info(), src_bucket->get_attrs(), &src_acl, &src_placement.storage_class, src_policy, src_bucket.get(), s->src_object.get(), y); if (op_ret < 0) { return op_ret; @@ -5299,7 +5299,7 @@ int RGWCopyObj::verify_permission(optional_yield y) or intra region sync */ dest_bucket = src_bucket->clone(); } else { - op_ret = store->get_bucket(this, s->user.get(), dest_tenant_name, dest_bucket_name, &dest_bucket, y); + op_ret = driver->get_bucket(this, s->user.get(), dest_tenant_name, dest_bucket_name, &dest_bucket, y); if (op_ret < 0) { if (op_ret == -ENOENT) { ldpp_dout(this, 0) << "ERROR: Destination Bucket not found for user: " << 
s->user->get_id().to_str() << dendl; @@ -5313,7 +5313,7 @@ int RGWCopyObj::verify_permission(optional_yield y) dest_object->set_atomic(); /* check dest bucket permissions */ - op_ret = read_bucket_policy(this, store, s, dest_bucket->get_info(), + op_ret = read_bucket_policy(this, driver, s, dest_bucket->get_info(), dest_bucket->get_attrs(), &dest_bucket_policy, dest_bucket->get_key(), y); if (op_ret < 0) { @@ -5462,7 +5462,7 @@ void RGWCopyObj::execute(optional_yield y) // make reservation for notification if needed std::unique_ptr res - = store->get_notification( + = driver->get_notification( s->object.get(), s->src_object.get(), s, rgw::notify::ObjectCreatedCopy); op_ret = res->publish_reserve(this); @@ -5743,7 +5743,7 @@ void RGWPutACLs::execute(optional_yield y) } if (!s->canned_acl.empty() || s->has_acl_header) { - op_ret = get_policy_from_state(store, s, ss); + op_ret = get_policy_from_state(driver, s, ss); if (op_ret < 0) return; @@ -5787,7 +5787,7 @@ void RGWPutACLs::execute(optional_yield y) if (s->canned_acl.empty()) { in_data.append(data); } - op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -5800,7 +5800,7 @@ void RGWPutACLs::execute(optional_yield y) *_dout << dendl; } - op_ret = policy->rebuild(this, store, &owner, new_policy, s->err.message); + op_ret = policy->rebuild(this, driver, &owner, new_policy, s->err.message); if (op_ret < 0) return; @@ -5916,13 +5916,13 @@ void RGWPutLC::execute(optional_yield y) ldpp_dout(this, 15) << "New LifecycleConfiguration:" << ss.str() << dendl; } - op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); 
if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; } - op_ret = store->get_rgwlc()->set_bucket_config(s->bucket.get(), s->bucket_attrs, &new_config); + op_ret = driver->get_rgwlc()->set_bucket_config(s->bucket.get(), s->bucket_attrs, &new_config); if (op_ret < 0) { return; } @@ -5932,13 +5932,13 @@ void RGWPutLC::execute(optional_yield y) void RGWDeleteLC::execute(optional_yield y) { bufferlist data; - op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; } - op_ret = store->get_rgwlc()->remove_bucket_config(s->bucket.get(), s->bucket_attrs); + op_ret = driver->get_rgwlc()->remove_bucket_config(s->bucket.get(), s->bucket_attrs); if (op_ret < 0) { return; } @@ -5984,7 +5984,7 @@ void RGWPutCORS::execute(optional_yield y) if (op_ret < 0) return; - op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -6010,7 +6010,7 @@ int RGWDeleteCORS::verify_permission(optional_yield y) void RGWDeleteCORS::execute(optional_yield y) { bufferlist data; - op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -6132,7 +6132,7 @@ void RGWSetRequestPayment::execute(optional_yield y) if (op_ret < 0) return; - op_ret = store->forward_request_to_master(this, 
s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -6428,7 +6428,7 @@ void RGWCompleteMultipart::execute(optional_yield y) // make reservation for notification if needed std::unique_ptr res - = store->get_notification(meta_obj.get(), nullptr, s, rgw::notify::ObjectCreatedCompleteMultipartUpload, &s->object->get_name()); + = driver->get_notification(meta_obj.get(), nullptr, s, rgw::notify::ObjectCreatedCompleteMultipartUpload, &s->object->get_name()); op_ret = res->publish_reserve(this); if (op_ret < 0) { return; @@ -6962,7 +6962,7 @@ void RGWDeleteMultiObj::handle_individual_object(const rgw_obj_key& o, optional_ rgw::notify::ObjectRemovedDeleteMarkerCreated : rgw::notify::ObjectRemovedDelete; std::unique_ptr res - = store->get_notification(obj.get(), s->src_object.get(), s, event_type); + = driver->get_notification(obj.get(), s->src_object.get(), s, event_type); op_ret = res->publish_reserve(this); if (op_ret < 0) { send_partial_response(o, false, "", op_ret, formatter_flush_cond); @@ -7102,8 +7102,8 @@ bool RGWBulkDelete::Deleter::verify_permission(RGWBucketInfo& binfo, ACLOwner& bucket_owner /* out */, optional_yield y) { - RGWAccessControlPolicy bacl(store->ctx()); - int ret = read_bucket_policy(dpp, store, s, binfo, battrs, &bacl, binfo.bucket, y); + RGWAccessControlPolicy bacl(driver->ctx()); + int ret = read_bucket_policy(dpp, driver, s, binfo, battrs, &bacl, binfo.bucket, y); if (ret < 0) { return false; } @@ -7124,7 +7124,7 @@ bool RGWBulkDelete::Deleter::delete_single(const acct_path_t& path, optional_yie ACLOwner bowner; RGWObjVersionTracker ot; - int ret = store->get_bucket(dpp, s->user.get(), s->user->get_tenant(), path.bucket_name, &bucket, y); + int ret = driver->get_bucket(dpp, s->user.get(), s->user->get_tenant(), 
path.bucket_name, &bucket, y); if (ret < 0) { goto binfo_fail; } @@ -7228,7 +7228,7 @@ void RGWBulkDelete::pre_exec() void RGWBulkDelete::execute(optional_yield y) { - deleter = std::unique_ptr(new Deleter(this, store, s)); + deleter = std::unique_ptr(new Deleter(this, driver, s)); bool is_truncated = false; do { @@ -7359,11 +7359,11 @@ static void forward_req_info(const DoutPrefixProvider *dpp, CephContext *cct, re info.effective_uri = "/" + bucket_name; } -void RGWBulkUploadOp::init(rgw::sal::Store* const store, +void RGWBulkUploadOp::init(rgw::sal::Driver* const driver, req_state* const s, RGWHandler* const h) { - RGWOp::init(store, s, h); + RGWOp::init(driver, s, h); } int RGWBulkUploadOp::handle_dir(const std::string_view path, optional_yield y) @@ -7406,7 +7406,7 @@ int RGWBulkUploadOp::handle_dir(const std::string_view path, optional_yield y) forward_req_info(this, s->cct, info, bucket_name); op_ret = s->user->create_bucket(this, new_bucket, - store->get_zone()->get_zonegroup().get_id(), + driver->get_zone()->get_zonegroup().get_id(), placement_rule, swift_ver_location, pquota_info, policy, attrs, out_info, ep_objv, @@ -7427,8 +7427,8 @@ bool RGWBulkUploadOp::handle_file_verify_permission(RGWBucketInfo& binfo, ACLOwner& bucket_owner /* out */, optional_yield y) { - RGWAccessControlPolicy bacl(store->ctx()); - op_ret = read_bucket_policy(this, store, s, binfo, battrs, &bacl, binfo.bucket, y); + RGWAccessControlPolicy bacl(driver->ctx()); + op_ret = read_bucket_policy(this, driver, s, binfo, battrs, &bacl, binfo.bucket, y); if (op_ret < 0) { ldpp_dout(this, 20) << "cannot read_policy() for bucket" << dendl; return false; @@ -7504,7 +7504,7 @@ int RGWBulkUploadOp::handle_file(const std::string_view path, std::unique_ptr bucket; ACLOwner bowner; - op_ret = store->get_bucket(this, s->user.get(), rgw_bucket(rgw_bucket_key(s->user->get_tenant(), bucket_name)), &bucket, y); + op_ret = driver->get_bucket(this, s->user.get(), 
rgw_bucket(rgw_bucket_key(s->user->get_tenant(), bucket_name)), &bucket, y); if (op_ret < 0) { if (op_ret == -ENOENT) { ldpp_dout(this, 20) << "non existent directory=" << bucket_name << dendl; @@ -7535,7 +7535,7 @@ int RGWBulkUploadOp::handle_file(const std::string_view path, dest_placement.inherit_from(bucket->get_placement_rule()); std::unique_ptr processor; - processor = store->get_atomic_writer(this, s->yield, std::move(obj), + processor = driver->get_atomic_writer(this, s->yield, std::move(obj), bowner.get_id(), &s->dest_placement, 0, s->req_id); op_ret = processor->prepare(s->yield); @@ -7547,7 +7547,7 @@ int RGWBulkUploadOp::handle_file(const std::string_view path, /* No filters by default. */ rgw::sal::DataProcessor *filter = processor.get(); - const auto& compression_type = store->get_compression_type(dest_placement); + const auto& compression_type = driver->get_compression_type(dest_placement); CompressorRef plugin; boost::optional compressor; if (compression_type != "none") { @@ -7994,11 +7994,11 @@ RGWHandler::~RGWHandler() { } -int RGWHandler::init(rgw::sal::Store* _store, +int RGWHandler::init(rgw::sal::Driver* _driver, req_state *_s, rgw::io::BasicClient *cio) { - store = _store; + driver = _driver; s = _s; return 0; @@ -8006,14 +8006,14 @@ int RGWHandler::init(rgw::sal::Store* _store, int RGWHandler::do_init_permissions(const DoutPrefixProvider *dpp, optional_yield y) { - int ret = rgw_build_bucket_policies(dpp, store, s, y); + int ret = rgw_build_bucket_policies(dpp, driver, s, y); if (ret < 0) { ldpp_dout(dpp, 10) << "init_permissions on " << s->bucket << " failed, ret=" << ret << dendl; return ret==-ENODATA ? 
-EACCES : ret; } - rgw_build_iam_environment(store, s); + rgw_build_iam_environment(driver, s); return ret; } @@ -8023,7 +8023,7 @@ int RGWHandler::do_read_permissions(RGWOp *op, bool only_bucket, optional_yield /* already read bucket info */ return 0; } - int ret = rgw_build_object_policies(op, store, s, op->prefetch_data(), y); + int ret = rgw_build_object_policies(op, driver, s, op->prefetch_data(), y); if (ret < 0) { ldpp_dout(op, 10) << "read_permissions on " << s->bucket << ":" @@ -8105,7 +8105,7 @@ void RGWPutBucketPolicy::execute(optional_yield y) return; } - op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 20) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -8204,7 +8204,7 @@ int RGWDeleteBucketPolicy::verify_permission(optional_yield y) void RGWDeleteBucketPolicy::execute(optional_yield y) { bufferlist data; - op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -8270,7 +8270,7 @@ void RGWPutBucketObjectLock::execute(optional_yield y) return; } - op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -8564,7 +8564,7 @@ void RGWGetObjLegalHold::execute(optional_yield y) void RGWGetClusterStat::execute(optional_yield y) { - op_ret = store->cluster_stat(stats_op); + op_ret = driver->cluster_stat(stats_op); } int 
RGWGetBucketPolicyStatus::verify_permission(optional_yield y) @@ -8632,7 +8632,7 @@ void RGWPutBucketPublicAccessBlock::execute(optional_yield y) return; } - op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -8708,7 +8708,7 @@ int RGWDeleteBucketPublicAccessBlock::verify_permission(optional_yield y) void RGWDeleteBucketPublicAccessBlock::execute(optional_yield y) { bufferlist data; - op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -8763,7 +8763,7 @@ void RGWPutBucketEncryption::execute(optional_yield y) return; } - op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 20) << "forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -8818,7 +8818,7 @@ int RGWDeleteBucketEncryption::verify_permission(optional_yield y) void RGWDeleteBucketEncryption::execute(optional_yield y) { bufferlist data; - op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; diff --git a/src/rgw/rgw_op.h b/src/rgw/rgw_op.h index 3f054de097909..cc65a577632e9 100644 --- a/src/rgw/rgw_op.h +++ b/src/rgw/rgw_op.h @@ -81,7 
+81,7 @@ class StrategyRegistry; int rgw_op_get_bucket_policy_from_attr(const DoutPrefixProvider *dpp, CephContext *cct, - rgw::sal::Store* store, + rgw::sal::Driver* driver, RGWBucketInfo& bucket_info, std::map& bucket_attrs, RGWAccessControlPolicy *policy, @@ -89,7 +89,7 @@ int rgw_op_get_bucket_policy_from_attr(const DoutPrefixProvider *dpp, class RGWHandler { protected: - rgw::sal::Store* store{nullptr}; + rgw::sal::Driver* driver{nullptr}; req_state *s{nullptr}; int do_init_permissions(const DoutPrefixProvider *dpp, optional_yield y); @@ -99,7 +99,7 @@ public: RGWHandler() {} virtual ~RGWHandler(); - virtual int init(rgw::sal::Store* store, + virtual int init(rgw::sal::Driver* driver, req_state* _s, rgw::io::BasicClient* cio); @@ -177,7 +177,7 @@ class RGWOp : public DoutPrefixProvider { protected: req_state *s; RGWHandler *dialect_handler; - rgw::sal::Store* store; + rgw::sal::Driver* driver; RGWCORSConfiguration bucket_cors; bool cors_exist; RGWQuota quota; @@ -214,7 +214,7 @@ public: RGWOp() : s(nullptr), dialect_handler(nullptr), - store(nullptr), + driver(nullptr), cors_exist(false), op_ret(0) { } @@ -233,8 +233,8 @@ public: return 0; } - virtual void init(rgw::sal::Store* store, req_state *s, RGWHandler *dialect_handler) { - this->store = store; + virtual void init(rgw::sal::Driver* driver, req_state *s, RGWHandler *dialect_handler) { + this->driver = driver; this->s = s; this->dialect_handler = dialect_handler; } @@ -604,15 +604,15 @@ public: unsigned int num_unfound; std::list failures; - rgw::sal::Store* const store; + rgw::sal::Driver* const driver; req_state * const s; public: - Deleter(const DoutPrefixProvider* dpp, rgw::sal::Store* const str, req_state * const s) + Deleter(const DoutPrefixProvider* dpp, rgw::sal::Driver* const str, req_state * const s) : dpp(dpp), num_deleted(0), num_unfound(0), - store(str), + driver(str), s(s) { } @@ -718,7 +718,7 @@ public: : num_created(0) { } - void init(rgw::sal::Store* const store, + void 
init(rgw::sal::Driver* const driver, req_state* const s, RGWHandler* const h) override; @@ -929,8 +929,8 @@ public: void pre_exec() override; void execute(optional_yield y) override; - void init(rgw::sal::Store* store, req_state *s, RGWHandler *h) override { - RGWOp::init(store, s, h); + void init(rgw::sal::Driver* driver, req_state *s, RGWHandler *h) override { + RGWOp::init(driver, s, h); } virtual int get_params(optional_yield y) = 0; void send_response() override = 0; @@ -1102,8 +1102,8 @@ public: int verify_permission(optional_yield y) override; void pre_exec() override; void execute(optional_yield y) override; - void init(rgw::sal::Store* store, req_state *s, RGWHandler *h) override { - RGWOp::init(store, s, h); + void init(rgw::sal::Driver* driver, req_state *s, RGWHandler *h) override { + RGWOp::init(driver, s, h); policy.set_ctx(s->cct); relaxed_region_enforcement = s->cct->_conf.get_val("rgw_relaxed_region_enforcement"); @@ -1256,8 +1256,8 @@ public: delete obj_legal_hold; } - void init(rgw::sal::Store* store, req_state *s, RGWHandler *h) override { - RGWOp::init(store, s, h); + void init(rgw::sal::Driver* driver, req_state *s, RGWHandler *h) override { + RGWOp::init(driver, s, h); policy.set_ctx(s->cct); } @@ -1332,8 +1332,8 @@ public: attrs.emplace(std::move(key), std::move(bl)); /* key and bl are r-value refs */ } - void init(rgw::sal::Store* store, req_state *s, RGWHandler *h) override { - RGWOp::init(store, s, h); + void init(rgw::sal::Driver* driver, req_state *s, RGWHandler *h) override { + RGWOp::init(driver, s, h); policy.set_ctx(s->cct); } @@ -1371,8 +1371,8 @@ public: has_policy(false) { } - void init(rgw::sal::Store* store, req_state *s, RGWHandler *h) override { - RGWOp::init(store, s, h); + void init(rgw::sal::Driver* driver, req_state *s, RGWHandler *h) override { + RGWOp::init(driver, s, h); policy.set_ctx(s->cct); } int init_processing(optional_yield y) override; @@ -1410,8 +1410,8 @@ public: attrs.emplace(std::move(key), std::move(bl)); 
/* key and bl are r-value refs */ } - void init(rgw::sal::Store* store, req_state *s, RGWHandler *h) override { - RGWOp::init(store, s, h); + void init(rgw::sal::Driver* driver, req_state *s, RGWHandler *h) override { + RGWOp::init(driver, s, h); policy.set_ctx(s->cct); } @@ -1437,8 +1437,8 @@ public: : dlo_manifest(NULL) {} - void init(rgw::sal::Store* store, req_state *s, RGWHandler *h) override { - RGWOp::init(store, s, h); + void init(rgw::sal::Driver* driver, req_state *s, RGWHandler *h) override { + RGWOp::init(driver, s, h); policy.set_ctx(s->cct); } int verify_permission(optional_yield y) override; @@ -1568,8 +1568,8 @@ public: attrs.emplace(std::move(key), std::move(bl)); } - void init(rgw::sal::Store* store, req_state *s, RGWHandler *h) override { - RGWOp::init(store, s, h); + void init(rgw::sal::Driver* driver, req_state *s, RGWHandler *h) override { + RGWOp::init(driver, s, h); dest_policy.set_ctx(s->cct); } int verify_permission(optional_yield y) override; @@ -1621,7 +1621,7 @@ public: void pre_exec() override; void execute(optional_yield y) override; - virtual int get_policy_from_state(rgw::sal::Store* store, req_state *s, std::stringstream& ss) { return 0; } + virtual int get_policy_from_state(rgw::sal::Driver* driver, req_state *s, std::stringstream& ss) { return 0; } virtual int get_params(optional_yield y) = 0; void send_response() override = 0; const char* name() const override { return "put_acls"; } @@ -1658,11 +1658,11 @@ public: } ~RGWPutLC() override {} - void init(rgw::sal::Store* store, req_state *s, RGWHandler *dialect_handler) override { + void init(rgw::sal::Driver* driver, req_state *s, RGWHandler *dialect_handler) override { #define COOKIE_LEN 16 char buf[COOKIE_LEN + 1]; - RGWOp::init(store, s, dialect_handler); + RGWOp::init(driver, s, dialect_handler); gen_rand_alphanumeric(s->cct, buf, sizeof(buf) - 1); cookie = buf; } @@ -1671,7 +1671,7 @@ public: void pre_exec() override; void execute(optional_yield y) override; -// virtual int 
get_policy_from_state(RGWRados* store, req_state *s, std::stringstream& ss) { return 0; } +// virtual int get_policy_from_state(RGWRados* driver, req_state *s, std::stringstream& ss) { return 0; } virtual int get_params(optional_yield y) = 0; void send_response() override = 0; const char* name() const override { return "put_lifecycle"; } @@ -1851,8 +1851,8 @@ protected: public: RGWInitMultipart() {} - void init(rgw::sal::Store* store, req_state *s, RGWHandler *h) override { - RGWOp::init(store, s, h); + void init(rgw::sal::Driver* driver, req_state *s, RGWHandler *h) override { + RGWOp::init(driver, s, h); policy.set_ctx(s->cct); } int verify_permission(optional_yield y) override; @@ -1926,8 +1926,8 @@ public: truncated = false; } - void init(rgw::sal::Store* store, req_state *s, RGWHandler *h) override { - RGWOp::init(store, s, h); + void init(rgw::sal::Driver* driver, req_state *s, RGWHandler *h) override { + RGWOp::init(driver, s, h); policy = RGWAccessControlPolicy(s->cct); } int verify_permission(optional_yield y) override; @@ -1964,8 +1964,8 @@ public: default_max = 0; } - void init(rgw::sal::Store* store, req_state *s, RGWHandler *h) override { - RGWOp::init(store, s, h); + void init(rgw::sal::Driver* driver, req_state *s, RGWHandler *h) override { + RGWOp::init(driver, s, h); max_uploads = default_max; } @@ -2100,11 +2100,11 @@ public: uint32_t op_mask() override { return RGW_OP_TYPE_READ; } }; -extern int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::Store* store, +extern int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, req_state* s, optional_yield y); -extern int rgw_build_object_policies(const DoutPrefixProvider *dpp, rgw::sal::Store* store, +extern int rgw_build_object_policies(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, req_state *s, bool prefetch_data, optional_yield y); -extern void rgw_build_iam_environment(rgw::sal::Store* store, +extern void 
rgw_build_iam_environment(rgw::sal::Driver* driver, req_state* s); extern std::vector get_iam_user_policy_from_attr(CephContext* cct, std::map& attrs, @@ -2570,8 +2570,8 @@ protected: public: RGWGetClusterStat() {} - void init(rgw::sal::Store* store, req_state *s, RGWHandler *h) override { - RGWOp::init(store, s, h); + void init(rgw::sal::Driver* driver, req_state *s, RGWHandler *h) override { + RGWOp::init(driver, s, h); } int verify_permission(optional_yield) override {return 0;} virtual void send_response() override = 0; diff --git a/src/rgw/rgw_os_lib.cc b/src/rgw/rgw_os_lib.cc index 64ae8e102b1a0..55eb2fb4ba38e 100644 --- a/src/rgw/rgw_os_lib.cc +++ b/src/rgw/rgw_os_lib.cc @@ -11,7 +11,7 @@ namespace rgw { /* static */ - int RGWHandler_Lib::init_from_header(rgw::sal::Store* store, + int RGWHandler_Lib::init_from_header(rgw::sal::Driver* driver, req_state *s) { string req; @@ -52,10 +52,10 @@ namespace rgw { if (pos >= 0) { // XXX ugh, another copy string encoded_obj_str = req.substr(pos+1); - s->object = store->get_object(rgw_obj_key(encoded_obj_str, s->info.args.get("versionId"))); + s->object = driver->get_object(rgw_obj_key(encoded_obj_str, s->info.args.get("versionId"))); } } else { - s->object = store->get_object(rgw_obj_key(req_name, s->info.args.get("versionId"))); + s->object = driver->get_object(rgw_obj_key(req_name, s->info.args.get("versionId"))); } return 0; } /* init_from_header */ diff --git a/src/rgw/rgw_period_pusher.cc b/src/rgw/rgw_period_pusher.cc index 275f14619d264..d9c899e5c1c54 100644 --- a/src/rgw/rgw_period_pusher.cc +++ b/src/rgw/rgw_period_pusher.cc @@ -163,11 +163,11 @@ class RGWPeriodPusher::CRThread : public DoutPrefixProvider { }; -RGWPeriodPusher::RGWPeriodPusher(const DoutPrefixProvider *dpp, rgw::sal::Store* store, +RGWPeriodPusher::RGWPeriodPusher(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, optional_yield y) - : cct(store->ctx()), store(store) + : cct(driver->ctx()), driver(driver) { - rgw::sal::Zone* zone = 
store->get_zone(); + rgw::sal::Zone* zone = driver->get_zone(); auto& realm_id = zone->get_realm_id(); if (realm_id.empty()) // no realm configuration return; @@ -175,7 +175,7 @@ RGWPeriodPusher::RGWPeriodPusher(const DoutPrefixProvider *dpp, rgw::sal::Store* // always send out the current period on startup RGWPeriod period; // XXX dang - int r = period.init(dpp, cct, static_cast(store)->svc()->sysobj, realm_id, y, zone->get_realm_name()); + int r = period.init(dpp, cct, static_cast(driver)->svc()->sysobj, realm_id, y, zone->get_realm_name()); if (r < 0) { ldpp_dout(dpp, -1) << "failed to load period for realm " << realm_id << dendl; return; @@ -204,7 +204,7 @@ void RGWPeriodPusher::handle_notify(RGWRealmNotify type, // we can't process this notification without access to our current realm // configuration. queue it until resume() - if (store == nullptr) { + if (driver == nullptr) { pending_periods.emplace_back(std::move(info)); return; } @@ -230,7 +230,7 @@ void RGWPeriodPusher::handle_notify(RGWZonesNeedPeriod&& period) // find our zonegroup in the new period auto& zonegroups = period.get_map().zonegroups; - auto i = zonegroups.find(store->get_zone()->get_zonegroup().get_id()); + auto i = zonegroups.find(driver->get_zone()->get_zonegroup().get_id()); if (i == zonegroups.end()) { lderr(cct) << "The new period does not contain my zonegroup!" << dendl; return; @@ -238,7 +238,7 @@ void RGWPeriodPusher::handle_notify(RGWZonesNeedPeriod&& period) auto& my_zonegroup = i->second; // if we're not a master zone, we're not responsible for pushing any updates - if (my_zonegroup.master_zone != store->get_zone()->get_id()) + if (my_zonegroup.master_zone != driver->get_zone()->get_id()) return; // construct a map of the zones that need this period. the map uses the same @@ -247,11 +247,11 @@ void RGWPeriodPusher::handle_notify(RGWZonesNeedPeriod&& period) auto hint = conns.end(); // are we the master zonegroup in this period? 
- if (period.get_map().master_zonegroup == store->get_zone()->get_zonegroup().get_id()) { + if (period.get_map().master_zonegroup == driver->get_zone()->get_zonegroup().get_id()) { // update other zonegroup endpoints for (auto& zg : zonegroups) { auto& zonegroup = zg.second; - if (zonegroup.get_id() == store->get_zone()->get_zonegroup().get_id()) + if (zonegroup.get_id() == driver->get_zone()->get_zonegroup().get_id()) continue; if (zonegroup.endpoints.empty()) continue; @@ -259,14 +259,14 @@ void RGWPeriodPusher::handle_notify(RGWZonesNeedPeriod&& period) hint = conns.emplace_hint( hint, std::piecewise_construct, std::forward_as_tuple(zonegroup.get_id()), - std::forward_as_tuple(cct, store, zonegroup.get_id(), zonegroup.endpoints, zonegroup.api_name)); + std::forward_as_tuple(cct, driver, zonegroup.get_id(), zonegroup.endpoints, zonegroup.api_name)); } } // update other zone endpoints for (auto& z : my_zonegroup.zones) { auto& zone = z.second; - if (zone.id == store->get_zone()->get_id()) + if (zone.id == driver->get_zone()->get_id()) continue; if (zone.endpoints.empty()) continue; @@ -274,7 +274,7 @@ void RGWPeriodPusher::handle_notify(RGWZonesNeedPeriod&& period) hint = conns.emplace_hint( hint, std::piecewise_construct, std::forward_as_tuple(zone.id), - std::forward_as_tuple(cct, store, zone.id, zone.endpoints, my_zonegroup.api_name)); + std::forward_as_tuple(cct, driver, zone.id, zone.endpoints, my_zonegroup.api_name)); } if (conns.empty()) { @@ -297,13 +297,13 @@ void RGWPeriodPusher::pause() { ldout(cct, 4) << "paused for realm update" << dendl; std::lock_guard lock(mutex); - store = nullptr; + driver = nullptr; } -void RGWPeriodPusher::resume(rgw::sal::Store* store) +void RGWPeriodPusher::resume(rgw::sal::Driver* driver) { std::lock_guard lock(mutex); - this->store = store; + this->driver = driver; ldout(cct, 4) << "resume with " << pending_periods.size() << " periods pending" << dendl; diff --git a/src/rgw/rgw_period_pusher.h b/src/rgw/rgw_period_pusher.h 
index 975cf38013144..ae267a11e78e5 100644 --- a/src/rgw/rgw_period_pusher.h +++ b/src/rgw/rgw_period_pusher.h @@ -24,7 +24,7 @@ using RGWZonesNeedPeriod = RGWPeriod; class RGWPeriodPusher final : public RGWRealmWatcher::Watcher, public RGWRealmReloader::Pauser { public: - explicit RGWPeriodPusher(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y); + explicit RGWPeriodPusher(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, optional_yield y); ~RGWPeriodPusher() override; /// respond to realm notifications by pushing new periods to other zones @@ -35,13 +35,13 @@ class RGWPeriodPusher final : public RGWRealmWatcher::Watcher, void pause() override; /// continue processing notifications with a new RGWRados instance - void resume(rgw::sal::Store* store) override; + void resume(rgw::sal::Driver* driver) override; private: void handle_notify(RGWZonesNeedPeriod&& period); CephContext *const cct; - rgw::sal::Store* store; + rgw::sal::Driver* driver; std::mutex mutex; epoch_t realm_epoch{0}; //< the current realm epoch being sent diff --git a/src/rgw/rgw_process.cc b/src/rgw/rgw_process.cc index 2c115444c41d6..ffa9bd3354b8c 100644 --- a/src/rgw/rgw_process.cc +++ b/src/rgw/rgw_process.cc @@ -90,7 +90,7 @@ void RGWProcess::RGWWQ::_process(RGWRequest *req, ThreadPool::TPHandle &) { process->req_throttle.put(1); perfcounter->inc(l_rgw_qactive, -1); } -bool rate_limit(rgw::sal::Store* store, req_state* s) { +bool rate_limit(rgw::sal::Driver* driver, req_state* s) { // we dont want to limit health check or system or admin requests const auto& is_admin_or_system = s->user->get_info(); if ((s->op_type == RGW_OP_GET_HEALTH_CHECK) || is_admin_or_system.admin || is_admin_or_system.system) @@ -101,7 +101,7 @@ bool rate_limit(rgw::sal::Store* store, req_state* s) { RGWRateLimitInfo global_anon; RGWRateLimitInfo* bucket_ratelimit; RGWRateLimitInfo* user_ratelimit; - store->get_ratelimit(global_bucket, global_user, global_anon); + 
driver->get_ratelimit(global_bucket, global_user, global_anon); bucket_ratelimit = &global_bucket; user_ratelimit = &global_user; s->user->get_id().to_str(userfind); @@ -166,7 +166,7 @@ int rgw_process_authenticated(RGWHandler_REST * const handler, RGWRequest * const req, req_state * const s, optional_yield y, - rgw::sal::Store* store, + rgw::sal::Driver* driver, const bool skip_retarget) { ldpp_dout(op, 2) << "init permissions" << dendl; @@ -244,7 +244,7 @@ int rgw_process_authenticated(RGWHandler_REST * const handler, op->pre_exec(); ldpp_dout(op, 2) << "check rate limiting" << dendl; - if (rate_limit(store, s)) { + if (rate_limit(driver, s)) { return -ERR_RATE_LIMITED; } ldpp_dout(op, 2) << "executing" << dendl; @@ -261,7 +261,7 @@ int rgw_process_authenticated(RGWHandler_REST * const handler, return 0; } -int process_request(rgw::sal::Store* const store, +int process_request(rgw::sal::Driver* const driver, RGWREST* const rest, RGWRequest* const req, const std::string& frontend_prefix, @@ -288,7 +288,7 @@ int process_request(rgw::sal::Store* const store, req_state *s = &rstate; s->ratelimit_data = ratelimit; - std::unique_ptr u = store->get_user(rgw_user()); + std::unique_ptr u = driver->get_user(rgw_user()); s->set_user(u); if (ret < 0) { @@ -297,9 +297,9 @@ int process_request(rgw::sal::Store* const store, return ret; } - s->req_id = store->zone_unique_id(req->id); - s->trans_id = store->zone_unique_trans_id(req->id); - s->host_id = store->get_host_id(); + s->req_id = driver->zone_unique_id(req->id); + s->trans_id = driver->zone_unique_trans_id(req->id); + s->host_id = driver->get_host_id(); s->yield = yield; ldpp_dout(s, 2) << "initializing for trans_id = " << s->trans_id << dendl; @@ -308,7 +308,7 @@ int process_request(rgw::sal::Store* const store, int init_error = 0; bool should_log = false; RGWRESTMgr *mgr; - RGWHandler_REST *handler = rest->get_handler(store, s, + RGWHandler_REST *handler = rest->get_handler(driver, s, auth_registry, frontend_prefix, 
client_io, &mgr, &init_error); @@ -339,7 +339,7 @@ int process_request(rgw::sal::Store* const store, } else if (rc < 0) { ldpp_dout(op, 5) << "WARNING: failed to read pre request script. error: " << rc << dendl; } else { - rc = rgw::lua::request::execute(store, rest, olog, s, op, script); + rc = rgw::lua::request::execute(driver, rest, olog, s, op, script); if (rc < 0) { ldpp_dout(op, 5) << "WARNING: failed to execute pre request script. error: " << rc << dendl; } @@ -393,7 +393,7 @@ int process_request(rgw::sal::Store* const store, s->trace->SetAttribute(tracing::rgw::OP, op->name()); s->trace->SetAttribute(tracing::rgw::TYPE, tracing::rgw::REQUEST); - ret = rgw_process_authenticated(handler, op, req, s, yield, store); + ret = rgw_process_authenticated(handler, op, req, s, yield, driver); if (ret < 0) { abort_early(s, op, ret, handler, yield); goto done; @@ -424,7 +424,7 @@ done: } else if (rc < 0) { ldpp_dout(op, 5) << "WARNING: failed to read post request script. error: " << rc << dendl; } else { - rc = rgw::lua::request::execute(store, rest, olog, s, op, script); + rc = rgw::lua::request::execute(driver, rest, olog, s, op, script); if (rc < 0) { ldpp_dout(op, 5) << "WARNING: failed to execute post request script. 
error: " << rc << dendl; } diff --git a/src/rgw/rgw_process.h b/src/rgw/rgw_process.h index 85734447be526..db7752547fe43 100644 --- a/src/rgw/rgw_process.h +++ b/src/rgw/rgw_process.h @@ -29,13 +29,13 @@ namespace rgw::lua { } struct RGWProcessEnv { - rgw::sal::Store* store; + rgw::sal::Driver* driver; RGWREST *rest; OpsLogSink *olog; int port; std::string uri_prefix; std::shared_ptr auth_registry; - //maybe there is a better place to store the rate limit data structure + //maybe there is a better place to store the rate limit data structure ActiveRateLimiter* ratelimiting; rgw::lua::Background* lua_background; }; @@ -47,7 +47,7 @@ class RGWProcess { std::deque m_req_queue; protected: CephContext *cct; - rgw::sal::Store* store; + rgw::sal::Driver* driver; rgw_auth_registry_ptr_t auth_registry; OpsLogSink* olog; ThreadPool m_tp; @@ -100,7 +100,7 @@ public: const int num_threads, RGWFrontendConfig* const conf) : cct(cct), - store(pe->store), + driver(pe->driver), auth_registry(pe->auth_registry), olog(pe->olog), m_tp(cct, "RGWProcess::m_tp", "tp_rgw_process", num_threads), @@ -110,7 +110,7 @@ public: sock_fd(-1), uri_prefix(pe->uri_prefix), lua_background(pe->lua_background), - lua_manager(store->get_lua_manager()), + lua_manager(driver->get_lua_manager()), req_wq(this, ceph::make_timespan(g_conf()->rgw_op_thread_timeout), ceph::make_timespan(g_conf()->rgw_op_thread_suicide_timeout), @@ -126,11 +126,11 @@ public: m_tp.pause(); } - void unpause_with_new_config(rgw::sal::Store* const store, + void unpause_with_new_config(rgw::sal::Driver* const driver, rgw_auth_registry_ptr_t auth_registry) { - this->store = store; + this->driver = driver; this->auth_registry = std::move(auth_registry); - lua_manager = store->get_lua_manager(); + lua_manager = driver->get_lua_manager(); m_tp.unpause(); } @@ -168,7 +168,7 @@ public: void set_access_key(RGWAccessKey& key) { access_key = key; } }; /* process stream request */ -extern int process_request(rgw::sal::Store* store, +extern 
int process_request(rgw::sal::Driver* driver, RGWREST* rest, RGWRequest* req, const std::string& frontend_prefix, @@ -189,7 +189,7 @@ extern int rgw_process_authenticated(RGWHandler_REST* handler, RGWRequest* req, req_state* s, optional_yield y, - rgw::sal::Store* store, + rgw::sal::Driver* driver, bool skip_retarget = false); #undef dout_context diff --git a/src/rgw/rgw_quota.cc b/src/rgw/rgw_quota.cc index 51a6a44e36bfe..f1ae34f936809 100644 --- a/src/rgw/rgw_quota.cc +++ b/src/rgw/rgw_quota.cc @@ -46,7 +46,7 @@ struct RGWQuotaCacheStats { template class RGWQuotaCache { protected: - rgw::sal::Store* store; + rgw::sal::Driver* driver; lru_map stats_map; RefCountedWaitObject *async_refcount; @@ -75,7 +75,7 @@ protected: virtual void data_modified(const rgw_user& user, rgw_bucket& bucket) {} public: - RGWQuotaCache(rgw::sal::Store* _store, int size) : store(_store), stats_map(size) { + RGWQuotaCache(rgw::sal::Driver* _driver, int size) : driver(_driver), stats_map(size) { async_refcount = new RefCountedWaitObject; } virtual ~RGWQuotaCache() { @@ -93,10 +93,10 @@ public: class AsyncRefreshHandler { protected: - rgw::sal::Store* store; + rgw::sal::Driver* driver; RGWQuotaCache *cache; public: - AsyncRefreshHandler(rgw::sal::Store* _store, RGWQuotaCache *_cache) : store(_store), cache(_cache) {} + AsyncRefreshHandler(rgw::sal::Driver* _driver, RGWQuotaCache *_cache) : driver(_driver), cache(_cache) {} virtual ~AsyncRefreshHandler() {} virtual int init_fetch() = 0; @@ -134,7 +134,7 @@ int RGWQuotaCache::async_refresh(const rgw_user& user, const rgw_bucket& buck template void RGWQuotaCache::async_refresh_fail(const rgw_user& user, rgw_bucket& bucket) { - ldout(store->ctx(), 20) << "async stats refresh response for bucket=" << bucket << dendl; + ldout(driver->ctx(), 20) << "async stats refresh response for bucket=" << bucket << dendl; async_refcount->put(); } @@ -142,7 +142,7 @@ void RGWQuotaCache::async_refresh_fail(const rgw_user& user, rgw_bucket& buck template void 
RGWQuotaCache::async_refresh_response(const rgw_user& user, rgw_bucket& bucket, RGWStorageStats& stats) { - ldout(store->ctx(), 20) << "async stats refresh response for bucket=" << bucket << dendl; + ldout(driver->ctx(), 20) << "async stats refresh response for bucket=" << bucket << dendl; RGWQuotaCacheStats qs; @@ -159,8 +159,8 @@ void RGWQuotaCache::set_stats(const rgw_user& user, const rgw_bucket& bucket, qs.stats = stats; qs.expiration = ceph_clock_now(); qs.async_refresh_time = qs.expiration; - qs.expiration += store->ctx()->_conf->rgw_bucket_quota_ttl; - qs.async_refresh_time += store->ctx()->_conf->rgw_bucket_quota_ttl / 2; + qs.expiration += driver->ctx()->_conf->rgw_bucket_quota_ttl; + qs.async_refresh_time += driver->ctx()->_conf->rgw_bucket_quota_ttl / 2; map_add(user, bucket, qs); } @@ -250,9 +250,9 @@ class BucketAsyncRefreshHandler : public RGWQuotaCache::AsyncRefresh public RGWGetBucketStats_CB { rgw_user user; public: - BucketAsyncRefreshHandler(rgw::sal::Store* _store, RGWQuotaCache *_cache, + BucketAsyncRefreshHandler(rgw::sal::Driver* _driver, RGWQuotaCache *_cache, const rgw_user& _user, const rgw_bucket& _bucket) : - RGWQuotaCache::AsyncRefreshHandler(_store, _cache), + RGWQuotaCache::AsyncRefreshHandler(_driver, _cache), RGWGetBucketStats_CB(_bucket), user(_user) {} void drop_reference() override { put(); } @@ -264,8 +264,8 @@ int BucketAsyncRefreshHandler::init_fetch() { std::unique_ptr rbucket; - const DoutPrefix dp(store->ctx(), dout_subsys, "rgw bucket async refresh handler: "); - int r = store->get_bucket(&dp, nullptr, bucket, &rbucket, null_yield); + const DoutPrefix dp(driver->ctx(), dout_subsys, "rgw bucket async refresh handler: "); + int r = driver->get_bucket(&dp, nullptr, bucket, &rbucket, null_yield); if (r < 0) { ldpp_dout(&dp, 0) << "could not get bucket info for bucket=" << bucket << " r=" << r << dendl; return r; @@ -292,7 +292,7 @@ int BucketAsyncRefreshHandler::init_fetch() void 
BucketAsyncRefreshHandler::handle_response(const int r) { if (r < 0) { - ldout(store->ctx(), 20) << "AsyncRefreshHandler::handle_response() r=" << r << dendl; + ldout(driver->ctx(), 20) << "AsyncRefreshHandler::handle_response() r=" << r << dendl; cache->async_refresh_fail(user, bucket); return; } @@ -327,20 +327,20 @@ protected: int fetch_stats_from_storage(const rgw_user& user, const rgw_bucket& bucket, RGWStorageStats& stats, optional_yield y, const DoutPrefixProvider *dpp) override; public: - explicit RGWBucketStatsCache(rgw::sal::Store* _store) : RGWQuotaCache(_store, _store->ctx()->_conf->rgw_bucket_quota_cache_size) { + explicit RGWBucketStatsCache(rgw::sal::Driver* _driver) : RGWQuotaCache(_driver, _driver->ctx()->_conf->rgw_bucket_quota_cache_size) { } AsyncRefreshHandler *allocate_refresh_handler(const rgw_user& user, const rgw_bucket& bucket) override { - return new BucketAsyncRefreshHandler(store, this, user, bucket); + return new BucketAsyncRefreshHandler(driver, this, user, bucket); } }; int RGWBucketStatsCache::fetch_stats_from_storage(const rgw_user& _u, const rgw_bucket& _b, RGWStorageStats& stats, optional_yield y, const DoutPrefixProvider *dpp) { - std::unique_ptr user = store->get_user(_u); + std::unique_ptr user = driver->get_user(_u); std::unique_ptr bucket; - int r = store->get_bucket(dpp, user.get(), _b, &bucket, y); + int r = driver->get_bucket(dpp, user.get(), _b, &bucket, y); if (r < 0) { ldpp_dout(dpp, 0) << "could not get bucket info for bucket=" << _b << " r=" << r << dendl; return r; @@ -381,9 +381,9 @@ class UserAsyncRefreshHandler : public RGWQuotaCache::AsyncRefreshHand const DoutPrefixProvider *dpp; rgw_bucket bucket; public: - UserAsyncRefreshHandler(const DoutPrefixProvider *_dpp, rgw::sal::Store* _store, RGWQuotaCache *_cache, + UserAsyncRefreshHandler(const DoutPrefixProvider *_dpp, rgw::sal::Driver* _driver, RGWQuotaCache *_cache, const rgw_user& _user, const rgw_bucket& _bucket) : - RGWQuotaCache::AsyncRefreshHandler(_store, 
_cache), + RGWQuotaCache::AsyncRefreshHandler(_driver, _cache), RGWGetUserStats_CB(_user), dpp(_dpp), bucket(_bucket) {} @@ -395,7 +395,7 @@ public: int UserAsyncRefreshHandler::init_fetch() { - std::unique_ptr ruser = store->get_user(user); + std::unique_ptr ruser = driver->get_user(user); ldpp_dout(dpp, 20) << "initiating async quota refresh for user=" << user << dendl; int r = ruser->read_stats_async(dpp, this); @@ -412,7 +412,7 @@ int UserAsyncRefreshHandler::init_fetch() void UserAsyncRefreshHandler::handle_response(int r) { if (r < 0) { - ldout(store->ctx(), 20) << "AsyncRefreshHandler::handle_response() r=" << r << dendl; + ldout(driver->ctx(), 20) << "AsyncRefreshHandler::handle_response() r=" << r << dendl; cache->async_refresh_fail(user, bucket); return; } @@ -557,13 +557,13 @@ protected: } public: - RGWUserStatsCache(const DoutPrefixProvider *dpp, rgw::sal::Store* _store, bool quota_threads) - : RGWQuotaCache(_store, _store->ctx()->_conf->rgw_bucket_quota_cache_size), dpp(dpp) + RGWUserStatsCache(const DoutPrefixProvider *dpp, rgw::sal::Driver* _driver, bool quota_threads) + : RGWQuotaCache(_driver, _driver->ctx()->_conf->rgw_bucket_quota_cache_size), dpp(dpp) { if (quota_threads) { - buckets_sync_thread = new BucketsSyncThread(store->ctx(), this); + buckets_sync_thread = new BucketsSyncThread(driver->ctx(), this); buckets_sync_thread->create("rgw_buck_st_syn"); - user_sync_thread = new UserSyncThread(store->ctx(), this); + user_sync_thread = new UserSyncThread(driver->ctx(), this); user_sync_thread->create("rgw_user_st_syn"); } else { buckets_sync_thread = NULL; @@ -575,7 +575,7 @@ public: } AsyncRefreshHandler *allocate_refresh_handler(const rgw_user& user, const rgw_bucket& bucket) override { - return new UserAsyncRefreshHandler(dpp, store, this, user, bucket); + return new UserAsyncRefreshHandler(dpp, driver, this, user, bucket); } bool going_down() { @@ -598,7 +598,7 @@ int RGWUserStatsCache::fetch_stats_from_storage(const rgw_user& _u, 
optional_yield y, const DoutPrefixProvider *dpp) { - std::unique_ptr user = store->get_user(_u); + std::unique_ptr user = driver->get_user(_u); int r = user->read_stats(dpp, y, &stats); if (r < 0) { ldpp_dout(dpp, 0) << "could not get user stats for user=" << user << dendl; @@ -610,10 +610,10 @@ int RGWUserStatsCache::fetch_stats_from_storage(const rgw_user& _u, int RGWUserStatsCache::sync_bucket(const rgw_user& _u, rgw_bucket& _b, optional_yield y, const DoutPrefixProvider *dpp) { - std::unique_ptr user = store->get_user(_u); + std::unique_ptr user = driver->get_user(_u); std::unique_ptr bucket; - int r = store->get_bucket(dpp, user.get(), _b, &bucket, y); + int r = driver->get_bucket(dpp, user.get(), _b, &bucket, y); if (r < 0) { ldpp_dout(dpp, 0) << "could not get bucket info for bucket=" << _b << " r=" << r << dendl; return r; @@ -633,7 +633,7 @@ int RGWUserStatsCache::sync_user(const DoutPrefixProvider *dpp, const rgw_user& RGWStorageStats stats; ceph::real_time last_stats_sync; ceph::real_time last_stats_update; - std::unique_ptr user = store->get_user(rgw_user(_u.to_str())); + std::unique_ptr user = driver->get_user(rgw_user(_u.to_str())); int ret = user->read_stats(dpp, y, &stats, &last_stats_sync, &last_stats_update); if (ret < 0) { @@ -641,19 +641,19 @@ int RGWUserStatsCache::sync_user(const DoutPrefixProvider *dpp, const rgw_user& return ret; } - if (!store->ctx()->_conf->rgw_user_quota_sync_idle_users && + if (!driver->ctx()->_conf->rgw_user_quota_sync_idle_users && last_stats_update < last_stats_sync) { ldpp_dout(dpp, 20) << "user is idle, not doing a full sync (user=" << user << ")" << dendl; return 0; } real_time when_need_full_sync = last_stats_sync; - when_need_full_sync += make_timespan(store->ctx()->_conf->rgw_user_quota_sync_wait_time); + when_need_full_sync += make_timespan(driver->ctx()->_conf->rgw_user_quota_sync_wait_time); // check if enough time passed since last full sync /* FIXME: missing check? 
*/ - ret = rgw_user_sync_all_stats(dpp, store, user.get(), y); + ret = rgw_user_sync_all_stats(dpp, driver, user.get(), y); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: failed user stats sync, ret=" << ret << dendl; return ret; @@ -667,7 +667,7 @@ int RGWUserStatsCache::sync_all_users(const DoutPrefixProvider *dpp, optional_yi string key = "user"; void *handle; - int ret = store->meta_list_keys_init(dpp, key, string(), &handle); + int ret = driver->meta_list_keys_init(dpp, key, string(), &handle); if (ret < 0) { ldpp_dout(dpp, 10) << "ERROR: can't get key: ret=" << ret << dendl; return ret; @@ -678,7 +678,7 @@ int RGWUserStatsCache::sync_all_users(const DoutPrefixProvider *dpp, optional_yi do { list keys; - ret = store->meta_list_keys_next(dpp, handle, max, keys, &truncated); + ret = driver->meta_list_keys_next(dpp, handle, max, keys, &truncated); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: lists_keys_next(): ret=" << ret << dendl; goto done; @@ -700,7 +700,7 @@ int RGWUserStatsCache::sync_all_users(const DoutPrefixProvider *dpp, optional_yi ret = 0; done: - store->meta_list_keys_complete(handle); + driver->meta_list_keys_complete(handle); return ret; } @@ -873,7 +873,7 @@ const RGWQuotaInfoApplier& RGWQuotaInfoApplier::get_instance( class RGWQuotaHandlerImpl : public RGWQuotaHandler { - rgw::sal::Store* store; + rgw::sal::Driver* driver; RGWBucketStatsCache bucket_stats_cache; RGWUserStatsCache user_stats_cache; @@ -908,9 +908,9 @@ class RGWQuotaHandlerImpl : public RGWQuotaHandler { return 0; } public: - RGWQuotaHandlerImpl(const DoutPrefixProvider *dpp, rgw::sal::Store* _store, bool quota_threads) : store(_store), - bucket_stats_cache(_store), - user_stats_cache(dpp, _store, quota_threads) {} + RGWQuotaHandlerImpl(const DoutPrefixProvider *dpp, rgw::sal::Driver* _driver, bool quota_threads) : driver(_driver), + bucket_stats_cache(_driver), + user_stats_cache(dpp, _driver, quota_threads) {} int check_quota(const DoutPrefixProvider *dpp, const rgw_user& user, @@ 
-930,7 +930,7 @@ public: * fetch that info and not rely on cached data */ - const DoutPrefix dp(store->ctx(), dout_subsys, "rgw quota handler: "); + const DoutPrefix dp(driver->ctx(), dout_subsys, "rgw quota handler: "); if (quota.bucket_quota.enabled) { RGWStorageStats bucket_stats; int ret = bucket_stats_cache.get_stats(user, bucket, bucket_stats, y, &dp); @@ -987,9 +987,9 @@ public: }; -RGWQuotaHandler *RGWQuotaHandler::generate_handler(const DoutPrefixProvider *dpp, rgw::sal::Store* store, bool quota_threads) +RGWQuotaHandler *RGWQuotaHandler::generate_handler(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, bool quota_threads) { - return new RGWQuotaHandlerImpl(dpp, store, quota_threads); + return new RGWQuotaHandlerImpl(dpp, driver, quota_threads); } void RGWQuotaHandler::free_handler(RGWQuotaHandler *handler) diff --git a/src/rgw/rgw_quota.h b/src/rgw/rgw_quota.h index 5978721098c48..48cddb85b0ac3 100644 --- a/src/rgw/rgw_quota.h +++ b/src/rgw/rgw_quota.h @@ -110,7 +110,7 @@ public: virtual void update_stats(const rgw_user& bucket_owner, rgw_bucket& bucket, int obj_delta, uint64_t added_bytes, uint64_t removed_bytes) = 0; - static RGWQuotaHandler *generate_handler(const DoutPrefixProvider *dpp, rgw::sal::Store* store, bool quota_threads); + static RGWQuotaHandler *generate_handler(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, bool quota_threads); static void free_handler(RGWQuotaHandler *handler); }; diff --git a/src/rgw/rgw_rados.cc b/src/rgw/rgw_rados.cc index 54751b0d5c2a3..e3234751f4ab4 100644 --- a/src/rgw/rgw_rados.cc +++ b/src/rgw/rgw_rados.cc @@ -116,11 +116,11 @@ static string default_storage_extra_pool_suffix = "rgw.buckets.non-ec"; static RGWObjCategory main_category = RGWObjCategory::Main; #define RGW_USAGE_OBJ_PREFIX "usage." 
-rgw_raw_obj rgw_obj_select::get_raw_obj(rgw::sal::RadosStore* store) const +rgw_raw_obj rgw_obj_select::get_raw_obj(rgw::sal::RadosStore* driver) const { if (!is_raw) { rgw_raw_obj r; - store->get_raw_obj(placement_rule, obj, &r); + driver->get_raw_obj(placement_rule, obj, &r); return r; } return raw_obj; @@ -227,7 +227,7 @@ class RGWMetaNotifierManager : public RGWCoroutinesManager { RGWHTTPManager http_manager; public: - RGWMetaNotifierManager(RGWRados *_store) : RGWCoroutinesManager(_store->ctx(), _store->get_cr_registry()), store(_store), + RGWMetaNotifierManager(RGWRados *_driver) : RGWCoroutinesManager(_driver->ctx(), _driver->get_cr_registry()), store(_driver), http_manager(store->ctx(), completion_mgr) { http_manager.start(); } @@ -254,7 +254,7 @@ class RGWDataNotifierManager : public RGWCoroutinesManager { RGWHTTPManager http_manager; public: - RGWDataNotifierManager(RGWRados *_store) : RGWCoroutinesManager(_store->ctx(), _store->get_cr_registry()), store(_store), + RGWDataNotifierManager(RGWRados *_driver) : RGWCoroutinesManager(_driver->ctx(), _driver->get_cr_registry()), store(_driver), http_manager(store->ctx(), completion_mgr) { http_manager.start(); } @@ -342,8 +342,8 @@ class RGWMetaNotifier : public RGWRadosThread { notify_mgr.stop(); } public: - RGWMetaNotifier(RGWRados *_store, RGWMetadataLog* log) - : RGWRadosThread(_store, "meta-notifier"), notify_mgr(_store), log(log) {} + RGWMetaNotifier(RGWRados *_driver, RGWMetadataLog* log) + : RGWRadosThread(_driver, "meta-notifier"), notify_mgr(_driver), log(log) {} int process(const DoutPrefixProvider *dpp) override; }; @@ -378,7 +378,7 @@ class RGWDataNotifier : public RGWRadosThread { notify_mgr.stop(); } public: - RGWDataNotifier(RGWRados *_store) : RGWRadosThread(_store, "data-notifier"), notify_mgr(_store) {} + RGWDataNotifier(RGWRados *_driver) : RGWRadosThread(_driver, "data-notifier"), notify_mgr(_driver) {} int process(const DoutPrefixProvider *dpp) override; }; @@ -411,8 +411,8 @@ int 
RGWDataNotifier::process(const DoutPrefixProvider *dpp) class RGWSyncProcessorThread : public RGWRadosThread { public: - RGWSyncProcessorThread(RGWRados *_store, const string& thread_name = "radosgw") : RGWRadosThread(_store, thread_name) {} - RGWSyncProcessorThread(RGWRados *_store) : RGWRadosThread(_store) {} + RGWSyncProcessorThread(RGWRados *_driver, const string& thread_name = "radosgw") : RGWRadosThread(_driver, thread_name) {} + RGWSyncProcessorThread(RGWRados *_driver) : RGWRadosThread(_driver) {} ~RGWSyncProcessorThread() override {} int init(const DoutPrefixProvider *dpp) override = 0 ; int process(const DoutPrefixProvider *dpp) override = 0; @@ -429,8 +429,8 @@ class RGWMetaSyncProcessorThread : public RGWSyncProcessorThread sync.stop(); } public: - RGWMetaSyncProcessorThread(rgw::sal::RadosStore* _store, RGWAsyncRadosProcessor *async_rados) - : RGWSyncProcessorThread(_store->getRados(), "meta-sync"), sync(_store, async_rados) {} + RGWMetaSyncProcessorThread(rgw::sal::RadosStore* _driver, RGWAsyncRadosProcessor *async_rados) + : RGWSyncProcessorThread(_driver->getRados(), "meta-sync"), sync(_driver, async_rados) {} void wakeup_sync_shards(set& shard_ids) { for (set::iterator iter = shard_ids.begin(); iter != shard_ids.end(); ++iter) { @@ -472,11 +472,11 @@ class RGWDataSyncProcessorThread : public RGWSyncProcessorThread sync.stop(); } public: - RGWDataSyncProcessorThread(rgw::sal::RadosStore* _store, RGWAsyncRadosProcessor *async_rados, + RGWDataSyncProcessorThread(rgw::sal::RadosStore* _driver, RGWAsyncRadosProcessor *async_rados, const RGWZone* source_zone) - : RGWSyncProcessorThread(_store->getRados(), "data-sync"), + : RGWSyncProcessorThread(_driver->getRados(), "data-sync"), counters(sync_counters::build(store->ctx(), std::string("data-sync-from-") + source_zone->name)), - sync(_store, async_rados, source_zone->id, counters.get()), + sync(_driver, async_rados, source_zone->id, counters.get()), initialized(false) {} void 
wakeup_sync_shards(bc::flat_map >& entries) { @@ -793,8 +793,8 @@ class RGWIndexCompletionManager { } public: - RGWIndexCompletionManager(RGWRados *_store) : - store(_store), + RGWIndexCompletionManager(RGWRados *_driver) : + store(_driver), num_shards(store->ctx()->_conf->rgw_thread_pool_size), locks{ceph::make_lock_container( num_shards, @@ -1184,7 +1184,7 @@ int RGWRados::init_complete(const DoutPrefixProvider *dpp) ldpp_dout(dpp, 5) << "note: GC not initialized" << dendl; } - obj_expirer = new RGWObjectExpirer(this->store); + obj_expirer = new RGWObjectExpirer(this->driver); if (use_gc_thread && use_gc) { gc->start_processor(); @@ -1226,7 +1226,7 @@ int RGWRados::init_complete(const DoutPrefixProvider *dpp) } auto async_processor = svc.rados->get_async_processor(); std::lock_guard l{meta_sync_thread_lock}; - meta_sync_processor_thread = new RGWMetaSyncProcessorThread(this->store, async_processor); + meta_sync_processor_thread = new RGWMetaSyncProcessorThread(this->driver, async_processor); ret = meta_sync_processor_thread->init(dpp); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: failed to initialize meta sync thread" << dendl; @@ -1238,7 +1238,7 @@ int RGWRados::init_complete(const DoutPrefixProvider *dpp) rgw::BucketTrimConfig config; rgw::configure_bucket_trim(cct, config); - bucket_trim.emplace(this->store, config); + bucket_trim.emplace(this->driver, config); ret = bucket_trim->init(); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: failed to start bucket trim manager" << dendl; @@ -1249,7 +1249,7 @@ int RGWRados::init_complete(const DoutPrefixProvider *dpp) std::lock_guard dl{data_sync_thread_lock}; for (auto source_zone : svc.zone->get_data_sync_source_zones()) { ldpp_dout(dpp, 5) << "starting data sync thread for zone " << source_zone->name << dendl; - auto *thread = new RGWDataSyncProcessorThread(this->store, svc.rados->get_async_processor(), source_zone); + auto *thread = new RGWDataSyncProcessorThread(this->driver, svc.rados->get_async_processor(), 
source_zone); ret = thread->init(dpp); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: failed to initialize data sync thread" << dendl; @@ -1260,7 +1260,7 @@ int RGWRados::init_complete(const DoutPrefixProvider *dpp) } auto interval = cct->_conf->rgw_sync_log_trim_interval; if (interval > 0) { - sync_log_trimmer = new RGWSyncLogTrimThread(this->store, &*bucket_trim, interval); + sync_log_trimmer = new RGWSyncLogTrimThread(this->driver, &*bucket_trim, interval); ret = sync_log_trimmer->init(dpp); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: failed to initialize sync log trim thread" << dendl; @@ -1278,12 +1278,12 @@ int RGWRados::init_complete(const DoutPrefixProvider *dpp) binfo_cache->init(svc.cache); lc = new RGWLC(); - lc->initialize(cct, this->store); + lc->initialize(cct, this->driver); if (use_lc_thread) lc->start_processor(); - quota_handler = RGWQuotaHandler::generate_handler(dpp, this->store, quota_threads); + quota_handler = RGWQuotaHandler::generate_handler(dpp, this->driver, quota_threads); bucket_index_max_shards = (cct->_conf->rgw_override_bucket_index_max_shards ? 
cct->_conf->rgw_override_bucket_index_max_shards : zone.bucket_index_max_shards); @@ -1302,7 +1302,7 @@ int RGWRados::init_complete(const DoutPrefixProvider *dpp) reshard_wait = std::make_shared(); - reshard = new RGWReshard(this->store); + reshard = new RGWReshard(this->driver); // disable reshard thread based on zone/zonegroup support run_reshard_thread = run_reshard_thread && svc.zone->can_reshard(); @@ -1312,7 +1312,7 @@ int RGWRados::init_complete(const DoutPrefixProvider *dpp) } index_completion_manager = new RGWIndexCompletionManager(this); - ret = rgw::notify::init(cct, store, dpp); + ret = rgw::notify::init(cct, driver, dpp); if (ret < 0 ) { ldpp_dout(dpp, 1) << "ERROR: failed to initialize notification manager" << dendl; } @@ -1331,7 +1331,7 @@ int RGWRados::init_svc(bool raw, const DoutPrefixProvider *dpp) int RGWRados::init_ctl(const DoutPrefixProvider *dpp) { - return ctl.init(&svc, store, dpp); + return ctl.init(&svc, driver, dpp); } /** @@ -2606,7 +2606,7 @@ int RGWRados::fix_tail_obj_locator(const DoutPrefixProvider *dpp, bool fix, bool *need_fix, optional_yield y) { std::unique_ptr bucket; - store->get_bucket(nullptr, bucket_info, &bucket); + driver->get_bucket(nullptr, bucket_info, &bucket); std::unique_ptr obj = bucket->get_object(key); if (need_fix) { @@ -2621,7 +2621,7 @@ int RGWRados::fix_tail_obj_locator(const DoutPrefixProvider *dpp, RGWObjState *astate = nullptr; RGWObjManifest* manifest = nullptr; - RGWObjectCtx rctx(this->store); + RGWObjectCtx rctx(this->driver); r = get_obj_state(dpp, &rctx, bucket_info, obj.get(), &astate, &manifest, false, y); if (r < 0) return r; @@ -2629,7 +2629,7 @@ int RGWRados::fix_tail_obj_locator(const DoutPrefixProvider *dpp, if (manifest) { RGWObjManifest::obj_iterator miter; for (miter = manifest->obj_begin(dpp); miter != manifest->obj_end(dpp); ++miter) { - rgw_raw_obj raw_loc = miter.get_location().get_raw_obj(store); + rgw_raw_obj raw_loc = miter.get_location().get_raw_obj(driver); rgw_obj loc; string 
oid; string locator; @@ -2846,8 +2846,8 @@ int RGWRados::swift_versioning_copy(RGWObjectCtx& obj_ctx, return -ERR_PRECONDITION_FAILED; } - rgw::sal::RadosBucket dest_bucket(store, dest_bucket_info); - rgw::sal::RadosObject dest_obj(store, rgw_obj_key(buf), &dest_bucket); + rgw::sal::RadosBucket dest_bucket(driver, dest_bucket_info); + rgw::sal::RadosObject dest_obj(driver, rgw_obj_key(buf), &dest_bucket); if (dest_bucket_info.versioning_enabled()){ dest_obj.gen_rand_obj_instance_name(); @@ -2942,8 +2942,8 @@ int RGWRados::swift_versioning_restore(RGWObjectCtx& obj_ctx, * irrelevant and may be safely skipped. */ std::map no_attrs; - rgw::sal::RadosBucket archive_bucket(store, archive_binfo); - rgw::sal::RadosObject archive_obj(store, entry.key, &archive_bucket); + rgw::sal::RadosBucket archive_bucket(driver, archive_binfo); + rgw::sal::RadosObject archive_obj(driver, entry.key, &archive_bucket); if (bucket->versioning_enabled()){ obj->gen_rand_obj_instance_name(); @@ -3557,7 +3557,7 @@ static void set_copy_attrs(map& src_attrs, int RGWRados::rewrite_obj(rgw::sal::Object* obj, const DoutPrefixProvider *dpp, optional_yield y) { - RGWObjectCtx rctx(this->store); + RGWObjectCtx rctx(this->driver); rgw::sal::Attrs attrset; uint64_t obj_size; ceph::real_time mtime; @@ -3576,10 +3576,10 @@ int RGWRados::rewrite_obj(rgw::sal::Object* obj, const DoutPrefixProvider *dpp, attrset.erase(RGW_ATTR_TAIL_TAG); attrset.erase(RGW_ATTR_STORAGE_CLASS); - return store->getRados()->copy_obj_data(rctx, obj->get_bucket(), - obj->get_bucket()->get_info().placement_rule, - read_op, obj_size - 1, obj, NULL, mtime, - attrset, 0, real_time(), NULL, dpp, y); + return this->copy_obj_data(rctx, obj->get_bucket(), + obj->get_bucket()->get_info().placement_rule, + read_op, obj_size - 1, obj, NULL, mtime, + attrset, 0, real_time(), NULL, dpp, y); } struct obj_time_weight { @@ -3858,7 +3858,7 @@ int RGWRados::fetch_remote_obj(RGWObjectCtx& obj_ctx, rgw::BlockingAioThrottle 
aio(cct->_conf->rgw_put_obj_min_window_size); using namespace rgw::putobj; - AtomicObjectProcessor processor(&aio, this->store, nullptr, user_id, + AtomicObjectProcessor processor(&aio, this->driver, nullptr, user_id, obj_ctx, dest_obj->clone(), olh_epoch, tag, dpp, null_yield); RGWRESTConn *conn; @@ -4445,7 +4445,7 @@ int RGWRados::copy_obj(RGWObjectCtx& obj_ctx, ref_tag = tag + '\0'; cls_refcount_get(op, ref_tag, true); - auto obj = svc.rados->obj(miter.get_location().get_raw_obj(store)); + auto obj = svc.rados->obj(miter.get_location().get_raw_obj(driver)); ret = obj.open(dpp); if (ret < 0) { ldpp_dout(dpp, 0) << "failed to open rados context for " << obj << dendl; @@ -4563,7 +4563,7 @@ int RGWRados::copy_obj_data(RGWObjectCtx& obj_ctx, using namespace rgw::putobj; // do not change the null_yield in the initialization of this AtomicObjectProcessor // it causes crashes in the ragweed tests - AtomicObjectProcessor processor(&aio, this->store, &dest_placement, + AtomicObjectProcessor processor(&aio, this->driver, &dest_placement, bucket->get_info().owner, obj_ctx, dest_obj->clone(), olh_epoch, tag, dpp, null_yield); @@ -4901,7 +4901,7 @@ void RGWRados::update_gc_chain(const DoutPrefixProvider *dpp, rgw_obj head_obj, rgw_raw_obj raw_head; obj_to_raw(manifest.get_head_placement_rule(), head_obj, &raw_head); for (iter = manifest.obj_begin(dpp); iter != manifest.obj_end(dpp); ++iter) { - const rgw_raw_obj& mobj = iter.get_location().get_raw_obj(store); + const rgw_raw_obj& mobj = iter.get_location().get_raw_obj(driver); if (mobj == raw_head) continue; cls_rgw_obj_key key(mobj.oid); @@ -5300,7 +5300,7 @@ int RGWRados::Object::Delete::delete_obj(optional_yield y, const DoutPrefixProvi return 0; } -int RGWRados::delete_obj(rgw::sal::Store* store, +int RGWRados::delete_obj(rgw::sal::Driver* store, const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, @@ -5372,7 +5372,7 @@ int RGWRados::delete_obj_index(const rgw_obj& obj, ceph::real_time 
mtime, const return index_op.complete_del(dpp, -1 /* pool */, 0, mtime, NULL); } -static void generate_fake_tag(const DoutPrefixProvider *dpp, rgw::sal::Store* store, map& attrset, RGWObjManifest& manifest, bufferlist& manifest_bl, bufferlist& tag_bl) +static void generate_fake_tag(const DoutPrefixProvider *dpp, rgw::sal::Driver* store, map& attrset, RGWObjManifest& manifest, bufferlist& manifest_bl, bufferlist& tag_bl) { string tag; @@ -5434,7 +5434,7 @@ int RGWRados::get_olh_target_state(const DoutPrefixProvider *dpp, RGWObjectCtx& } std::unique_ptr bucket; - store->get_bucket(nullptr, bucket_info, &bucket); + driver->get_bucket(nullptr, bucket_info, &bucket); std::unique_ptr target_obj = bucket->get_object(target.key); r = get_obj_state(dpp, &obj_ctx, bucket_info, target_obj.get(), target_state, @@ -5565,7 +5565,7 @@ int RGWRados::get_obj_state_impl(const DoutPrefixProvider *dpp, RGWObjectCtx *rc sm->manifest->has_explicit_objs()) { RGWObjManifest::obj_iterator mi; for (mi = sm->manifest->obj_begin(dpp); mi != sm->manifest->obj_end(dpp); ++mi) { - ldpp_dout(dpp, 20) << "manifest: ofs=" << mi.get_ofs() << " loc=" << mi.get_location().get_raw_obj(store) << dendl; + ldpp_dout(dpp, 20) << "manifest: ofs=" << mi.get_ofs() << " loc=" << mi.get_location().get_raw_obj(driver) << dendl; } } @@ -5574,7 +5574,7 @@ int RGWRados::get_obj_state_impl(const DoutPrefixProvider *dpp, RGWObjectCtx *rc * Uh oh, something's wrong, object with manifest should have tag. 
Let's * create one out of the manifest, would be unique */ - generate_fake_tag(dpp, store, s->attrset, *sm->manifest, manifest_bl, s->obj_tag); + generate_fake_tag(dpp, driver, s->attrset, *sm->manifest, manifest_bl, s->obj_tag); s->fake_tag = true; } } @@ -6385,7 +6385,7 @@ int RGWRados::Object::Read::read(int64_t ofs, int64_t end, RGWObjManifest::obj_iterator iter = manifest->obj_find(dpp, ofs); uint64_t stripe_ofs = iter.get_stripe_ofs(); - read_obj = iter.get_location().get_raw_obj(store->store); + read_obj = iter.get_location().get_raw_obj(store->driver); len = std::min(len, iter.get_stripe_size() - (ofs - stripe_ofs)); read_ofs = iter.location_ofs() + (ofs - stripe_ofs); reading_from_head = (read_obj == state.head_obj); @@ -6621,7 +6621,7 @@ int RGWRados::iterate_obj(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx, off_t next_stripe_ofs = stripe_ofs + iter.get_stripe_size(); while (ofs < next_stripe_ofs && ofs <= end) { - read_obj = iter.get_location().get_raw_obj(store); + read_obj = iter.get_location().get_raw_obj(driver); uint64_t read_len = std::min(len, iter.get_stripe_size() - (ofs - stripe_ofs)); read_ofs = iter.location_ofs() + (ofs - stripe_ofs); @@ -6924,10 +6924,10 @@ int RGWRados::block_while_resharding(RGWRados::BucketShard *bs, // since we expect to do this rarely, we'll do our work in a // block and erase our work after each try - RGWObjectCtx obj_ctx(this->store); + RGWObjectCtx obj_ctx(this->driver); const rgw_bucket& b = bs->bucket; std::string bucket_id = b.get_key(); - RGWBucketReshardLock reshard_lock(this->store, bucket_info, true); + RGWBucketReshardLock reshard_lock(this->driver, bucket_info, true); ret = reshard_lock.lock(dpp); if (ret == -ENOENT) { continue; @@ -6951,7 +6951,7 @@ int RGWRados::block_while_resharding(RGWRados::BucketShard *bs, continue; // try again } - ret = RGWBucketReshard::clear_resharding(this->store, bucket_info, bucket_attrs, dpp); + ret = RGWBucketReshard::clear_resharding(this->driver, bucket_info, 
bucket_attrs, dpp); reshard_lock.unlock(); if (ret == -ENOENT) { ldpp_dout(dpp, 5) << __func__ << @@ -8408,7 +8408,7 @@ int RGWRados::list_lc_progress(string& marker, uint32_t max_entries, int RGWRados::process_lc(const std::unique_ptr& optional_bucket) { RGWLC lc; - lc.initialize(cct, this->store); + lc.initialize(cct, this->driver); RGWLC::LCWorker worker(&lc, cct, &lc, 0); auto ret = lc.process(&worker, optional_bucket, true /* once */); lc.stop_processor(); // sets down_flag, but returns immediately @@ -9264,7 +9264,7 @@ int RGWRados::check_disk_state(const DoutPrefixProvider *dpp, bucket_info.bucket << " dir_entry=" << list_state.key << dendl_bitx; std::unique_ptr bucket; - store->get_bucket(nullptr, bucket_info, &bucket); + driver->get_bucket(nullptr, bucket_info, &bucket); uint8_t suggest_flag = (svc.zone->get_zone().log_data ? CEPH_RGW_DIR_SUGGEST_LOG_OP : 0); std::string loc; @@ -9287,7 +9287,7 @@ int RGWRados::check_disk_state(const DoutPrefixProvider *dpp, RGWObjState *astate = NULL; RGWObjManifest *manifest = nullptr; - RGWObjectCtx rctx(this->store); + RGWObjectCtx rctx(this->driver); int r = get_obj_state(dpp, &rctx, bucket_info, obj.get(), &astate, &manifest, false, y); if (r < 0) return r; @@ -9349,7 +9349,7 @@ int RGWRados::check_disk_state(const DoutPrefixProvider *dpp, if (manifest) { RGWObjManifest::obj_iterator miter; for (miter = manifest->obj_begin(dpp); miter != manifest->obj_end(dpp); ++miter) { - const rgw_raw_obj& raw_loc = miter.get_location().get_raw_obj(store); + const rgw_raw_obj& raw_loc = miter.get_location().get_raw_obj(driver); rgw_obj loc; RGWSI_Tier_RADOS::raw_obj_to_obj(manifest->get_obj().bucket, raw_loc, &loc); @@ -9507,7 +9507,7 @@ int RGWRados::check_bucket_shards(const RGWBucketInfo& bucket_info, int RGWRados::add_bucket_to_reshard(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, uint32_t new_num_shards) { - RGWReshard reshard(this->store, dpp); + RGWReshard reshard(this->driver, dpp); uint32_t 
num_source_shards = rgw::current_num_shards(bucket_info.layout); diff --git a/src/rgw/rgw_rados.h b/src/rgw/rgw_rados.h index 933c3e4a3dd8a..432b0a8034386 100644 --- a/src/rgw/rgw_rados.h +++ b/src/rgw/rgw_rados.h @@ -184,20 +184,20 @@ struct RGWObjStateManifest { }; class RGWObjectCtx { - rgw::sal::Store* store; + rgw::sal::Driver* driver; ceph::shared_mutex lock = ceph::make_shared_mutex("RGWObjectCtx"); std::map objs_state; public: - explicit RGWObjectCtx(rgw::sal::Store* _store) : store(_store) {} + explicit RGWObjectCtx(rgw::sal::Driver* _driver) : driver(_driver) {} RGWObjectCtx(RGWObjectCtx& _o) { std::unique_lock wl{lock}; - this->store = _o.store; + this->driver = _o.driver; this->objs_state = _o.objs_state; } - rgw::sal::Store* get_store() { - return store; + rgw::sal::Driver* get_driver() { + return driver; } RGWObjStateManifest *get_state(const rgw_obj& obj); @@ -358,7 +358,7 @@ class RGWRados ceph::mutex lock = ceph::make_mutex("rados_timer_lock"); SafeTimer *timer; - rgw::sal::RadosStore* store = nullptr; + rgw::sal::RadosStore* driver = nullptr; RGWGC *gc = nullptr; RGWLC *lc; RGWObjectExpirer *obj_expirer; @@ -524,8 +524,8 @@ public: void set_context(CephContext *_cct) { cct = _cct; } - void set_store(rgw::sal::RadosStore* _store) { - store = _store; + void set_store(rgw::sal::RadosStore* _driver) { + driver = _driver; } RGWServices svc; @@ -1213,7 +1213,7 @@ public: int bucket_suspended(const DoutPrefixProvider *dpp, rgw_bucket& bucket, bool *suspended); /** Delete an object.*/ - int delete_obj(rgw::sal::Store* store, + int delete_obj(rgw::sal::Driver* driver, const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_owner, const rgw_obj& src_obj, diff --git a/src/rgw/rgw_realm_reloader.cc b/src/rgw/rgw_realm_reloader.cc index 1bcfeb15ac821..e82b01fe27609 100644 --- a/src/rgw/rgw_realm_reloader.cc +++ b/src/rgw/rgw_realm_reloader.cc @@ -26,12 +26,12 @@ static constexpr bool USE_SAFE_TIMER_CALLBACKS = false; 
-RGWRealmReloader::RGWRealmReloader(rgw::sal::Store*& store, std::map& service_map_meta, +RGWRealmReloader::RGWRealmReloader(rgw::sal::Driver*& driver, std::map& service_map_meta, Pauser* frontends) - : store(store), + : driver(driver), service_map_meta(service_map_meta), frontends(frontends), - timer(store->ctx(), mutex, USE_SAFE_TIMER_CALLBACKS), + timer(driver->ctx(), mutex, USE_SAFE_TIMER_CALLBACKS), mutex(ceph::make_mutex("RGWRealmReloader")), reload_scheduled(nullptr) { @@ -54,12 +54,12 @@ class RGWRealmReloader::C_Reload : public Context { void RGWRealmReloader::handle_notify(RGWRealmNotify type, bufferlist::const_iterator& p) { - if (!store) { + if (!driver) { /* we're in the middle of reload */ return; } - CephContext *const cct = store->ctx(); + CephContext *const cct = driver->ctx(); std::lock_guard lock{mutex}; if (reload_scheduled) { @@ -79,7 +79,7 @@ void RGWRealmReloader::handle_notify(RGWRealmNotify type, void RGWRealmReloader::reload() { - CephContext *const cct = store->ctx(); + CephContext *const cct = driver->ctx(); const DoutPrefix dp(cct, dout_subsys, "rgw realm reloader: "); ldpp_dout(&dp, 1) << "Pausing frontends for realm update..." << dendl; @@ -90,11 +90,11 @@ void RGWRealmReloader::reload() // TODO: make RGWRados responsible for rgw_log_usage lifetime rgw_log_usage_finalize(); - // destroy the existing store - StoreManager::close_storage(store); - store = nullptr; + // destroy the existing driver + DriverManager::close_storage(driver); + driver = nullptr; - ldpp_dout(&dp, 1) << "Store closed" << dendl; + ldpp_dout(&dp, 1) << "driver closed" << dendl; { // allow a new notify to reschedule us. 
it's important that we do this // before we start loading the new realm, or we could miss some updates @@ -103,13 +103,13 @@ void RGWRealmReloader::reload() } - while (!store) { - // recreate and initialize a new store - StoreManager::Config cfg; + while (!driver) { + // recreate and initialize a new driver + DriverManager::Config cfg; cfg.store_name = "rados"; cfg.filter_name = "none"; - store = - StoreManager::get_storage(&dp, cct, + driver = + DriverManager::get_storage(&dp, cct, cfg, cct->_conf->rgw_enable_gc_threads, cct->_conf->rgw_enable_lc_threads, @@ -118,9 +118,9 @@ void RGWRealmReloader::reload() cct->_conf.get_val("rgw_dynamic_resharding"), cct->_conf->rgw_cache_enabled); - ldpp_dout(&dp, 1) << "Creating new store" << dendl; + ldpp_dout(&dp, 1) << "Creating new driver" << dendl; - rgw::sal::Store* store_cleanup = nullptr; + rgw::sal::Driver* store_cleanup = nullptr; { std::unique_lock lock{mutex}; @@ -128,7 +128,7 @@ void RGWRealmReloader::reload() // don't want to assert or abort the entire cluster. instead, just // sleep until we get another notification, and retry until we get // a working configuration - if (store == nullptr) { + if (driver == nullptr) { ldpp_dout(&dp, -1) << "Failed to reinitialize RGWRados after a realm " "configuration update. Waiting for a new update." << dendl; @@ -143,9 +143,9 @@ void RGWRealmReloader::reload() timer.cancel_event(reload_scheduled); reload_scheduled = nullptr; - // if we successfully created a store, clean it up outside of the lock, + // if we successfully created a driver, clean it up outside of the lock, // then continue to loop and recreate another - std::swap(store, store_cleanup); + std::swap(driver, store_cleanup); } } @@ -153,25 +153,25 @@ void RGWRealmReloader::reload() ldpp_dout(&dp, 4) << "Got another notification, restarting RGWRados " "initialization." 
<< dendl; - StoreManager::close_storage(store_cleanup); + DriverManager::close_storage(store_cleanup); } } - int r = store->register_to_service_map(&dp, "rgw", service_map_meta); + int r = driver->register_to_service_map(&dp, "rgw", service_map_meta); if (r < 0) { ldpp_dout(&dp, -1) << "ERROR: failed to register to service map: " << cpp_strerror(-r) << dendl; /* ignore error */ } - ldpp_dout(&dp, 1) << "Finishing initialization of new store" << dendl; - // finish initializing the new store + ldpp_dout(&dp, 1) << "Finishing initialization of new driver" << dendl; + // finish initializing the new driver ldpp_dout(&dp, 1) << " - REST subsystem init" << dendl; - rgw_rest_init(cct, store->get_zone()->get_zonegroup()); + rgw_rest_init(cct, driver->get_zone()->get_zonegroup()); ldpp_dout(&dp, 1) << " - usage subsystem init" << dendl; - rgw_log_usage_init(cct, store); + rgw_log_usage_init(cct, driver); ldpp_dout(&dp, 1) << "Resuming frontends with new realm configuration." << dendl; - frontends->resume(store); + frontends->resume(driver); } diff --git a/src/rgw/rgw_realm_reloader.h b/src/rgw/rgw_realm_reloader.h index baf8dce97050a..20538ad65bdf8 100644 --- a/src/rgw/rgw_realm_reloader.h +++ b/src/rgw/rgw_realm_reloader.h @@ -28,10 +28,10 @@ class RGWRealmReloader : public RGWRealmWatcher::Watcher { /// pause all frontends while realm reconfiguration is in progress virtual void pause() = 0; /// resume all frontends with the given RGWRados instance - virtual void resume(rgw::sal::Store* store) = 0; + virtual void resume(rgw::sal::Driver* driver) = 0; }; - RGWRealmReloader(rgw::sal::Store*& store, std::map& service_map_meta, + RGWRealmReloader(rgw::sal::Driver*& driver, std::map& service_map_meta, Pauser* frontends); ~RGWRealmReloader() override; @@ -44,8 +44,8 @@ class RGWRealmReloader : public RGWRealmWatcher::Watcher { class C_Reload; //< Context that calls reload() - /// main()'s Store pointer as a reference, modified by reload() - rgw::sal::Store*& store; + /// main()'s 
driver pointer as a reference, modified by reload() + rgw::sal::Driver*& driver; std::map& service_map_meta; Pauser *const frontends; diff --git a/src/rgw/rgw_rest.cc b/src/rgw/rgw_rest.cc index 80538467393ed..19336657fa857 100644 --- a/src/rgw/rgw_rest.cc +++ b/src/rgw/rgw_rest.cc @@ -1701,7 +1701,7 @@ RGWOp* RGWHandler_REST::get_op(void) } if (op) { - op->init(store, s, this); + op->init(driver, s, this); } return op; } /* get_op */ @@ -1881,7 +1881,7 @@ int RGWHandler_REST::init_permissions(RGWOp* op, optional_yield y) ldpp_dout(op, -1) << "Error reading IAM User Policy: " << e.what() << dendl; } } - rgw_build_iam_environment(store, s); + rgw_build_iam_environment(driver, s); return 0; } @@ -2291,7 +2291,7 @@ int RGWREST::preprocess(req_state *s, rgw::io::BasicClient* cio) } RGWHandler_REST* RGWREST::get_handler( - rgw::sal::Store* const store, + rgw::sal::Driver* const driver, req_state* const s, const rgw::auth::StrategyRegistry& auth_registry, const std::string& frontend_prefix, @@ -2315,12 +2315,12 @@ RGWHandler_REST* RGWREST::get_handler( *pmgr = m; } - RGWHandler_REST* handler = m->get_handler(store, s, auth_registry, frontend_prefix); + RGWHandler_REST* handler = m->get_handler(driver, s, auth_registry, frontend_prefix); if (! 
handler) { *init_error = -ERR_METHOD_NOT_ALLOWED; return NULL; } - *init_error = handler->init(store, s, rio); + *init_error = handler->init(driver, s, rio); if (*init_error < 0) { m->put_handler(handler); return nullptr; diff --git a/src/rgw/rgw_rest.h b/src/rgw/rgw_rest.h index 0b7defa2f113f..3780ed423e42b 100644 --- a/src/rgw/rgw_rest.h +++ b/src/rgw/rgw_rest.h @@ -122,8 +122,8 @@ protected: public: RGWGetObj_ObjStore() : sent_header(false) {} - void init(rgw::sal::Store* store, req_state *s, RGWHandler *h) override { - RGWGetObj::init(store, s, h); + void init(rgw::sal::Driver* driver, req_state *s, RGWHandler *h) override { + RGWGetObj::init(driver, s, h); sent_header = false; } @@ -523,9 +523,9 @@ protected: RGWRESTFlusher flusher; public: - void init(rgw::sal::Store* store, req_state *s, + void init(rgw::sal::Driver* driver, req_state *s, RGWHandler *dialect_handler) override { - RGWOp::init(store, s, dialect_handler); + RGWOp::init(driver, s, dialect_handler); flusher.init(s, this); } void send_response() override; @@ -616,7 +616,7 @@ public: } virtual RGWHandler_REST* get_handler( - rgw::sal::Store* store, + rgw::sal::Driver* driver, req_state* const s, const rgw::auth::StrategyRegistry& auth_registry, const std::string& frontend_prefix @@ -648,7 +648,7 @@ class RGWREST { static int preprocess(req_state *s, rgw::io::BasicClient* rio); public: RGWREST() {} - RGWHandler_REST *get_handler(rgw::sal::Store* store, + RGWHandler_REST *get_handler(rgw::sal::Driver* driver, req_state *s, const rgw::auth::StrategyRegistry& auth_registry, const std::string& frontend_prefix, @@ -656,7 +656,7 @@ public: RGWRESTMgr **pmgr, int *init_error); #if 0 - RGWHandler *get_handler(RGWRados *store, req_state *s, + RGWHandler *get_handler(RGWRados *driver, req_state *s, RGWLibIO *io, RGWRESTMgr **pmgr, int *init_error); #endif diff --git a/src/rgw/rgw_rest_bucket.cc b/src/rgw/rgw_rest_bucket.cc index 748b983cd2e1b..bc3be370a6afd 100644 --- a/src/rgw/rgw_rest_bucket.cc +++ 
b/src/rgw/rgw_rest_bucket.cc @@ -49,7 +49,7 @@ void RGWOp_Bucket_Info::execute(optional_yield y) op_state.set_bucket_name(bucket); op_state.set_fetch_stats(fetch_stats); - op_ret = RGWBucketAdminOp::info(store, op_state, flusher, y, this); + op_ret = RGWBucketAdminOp::info(driver, op_state, flusher, y, this); } class RGWOp_Get_Policy : public RGWRESTOp { @@ -79,7 +79,7 @@ void RGWOp_Get_Policy::execute(optional_yield y) op_state.set_bucket_name(bucket); op_state.set_object(object); - op_ret = RGWBucketAdminOp::get_policy(store, op_state, flusher, this); + op_ret = RGWBucketAdminOp::get_policy(driver, op_state, flusher, this); } class RGWOp_Check_Bucket_Index : public RGWRESTOp { @@ -113,7 +113,7 @@ void RGWOp_Check_Bucket_Index::execute(optional_yield y) op_state.set_fix_index(fix_index); op_state.set_check_objects(check_objects); - op_ret = RGWBucketAdminOp::check_index(store, op_state, flusher, s->yield, s); + op_ret = RGWBucketAdminOp::check_index(driver, op_state, flusher, s->yield, s); } class RGWOp_Bucket_Link : public RGWRESTOp { @@ -151,12 +151,12 @@ void RGWOp_Bucket_Link::execute(optional_yield y) op_state.set_new_bucket_name(new_bucket_name); bufferlist data; - op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; } - op_ret = RGWBucketAdminOp::link(store, op_state, s); + op_ret = RGWBucketAdminOp::link(driver, op_state, s); } class RGWOp_Bucket_Unlink : public RGWRESTOp { @@ -189,12 +189,12 @@ void RGWOp_Bucket_Unlink::execute(optional_yield y) op_state.set_bucket_name(bucket); bufferlist data; - op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) 
{ ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; } - op_ret = RGWBucketAdminOp::unlink(store, op_state, s); + op_ret = RGWBucketAdminOp::unlink(driver, op_state, s); } class RGWOp_Bucket_Remove : public RGWRESTOp { @@ -222,7 +222,7 @@ void RGWOp_Bucket_Remove::execute(optional_yield y) /* FIXME We're abusing the owner of the bucket to pass the user, so that it can be forwarded to * the master. This user is actually the OP caller, not the bucket owner. */ - op_ret = store->get_bucket(s, s->user.get(), string(), bucket_name, &bucket, y); + op_ret = driver->get_bucket(s, s->user.get(), string(), bucket_name, &bucket, y); if (op_ret < 0) { ldpp_dout(this, 0) << "get_bucket returned ret=" << op_ret << dendl; if (op_ret == -ENOENT) { @@ -279,7 +279,7 @@ void RGWOp_Set_Bucket_Quota::execute(optional_yield y) RGWQuotaInfo quota; if (!use_http_params) { bool empty; - op_ret = get_json_input(store->ctx(), s, quota, QUOTA_INPUT_MAX_LEN, &empty); + op_ret = get_json_input(driver->ctx(), s, quota, QUOTA_INPUT_MAX_LEN, &empty); if (op_ret < 0) { if (!empty) return; @@ -289,7 +289,7 @@ void RGWOp_Set_Bucket_Quota::execute(optional_yield y) } if (use_http_params) { std::unique_ptr bucket; - op_ret = store->get_bucket(s, nullptr, uid.tenant, bucket_name, &bucket, s->yield); + op_ret = driver->get_bucket(s, nullptr, uid.tenant, bucket_name, &bucket, s->yield); if (op_ret < 0) { return; } @@ -310,7 +310,7 @@ void RGWOp_Set_Bucket_Quota::execute(optional_yield y) op_state.set_bucket_name(bucket_name); op_state.set_quota(quota); - op_ret = RGWBucketAdminOp::set_quota(store, op_state, s); + op_ret = RGWBucketAdminOp::set_quota(driver, op_state, s); } class RGWOp_Sync_Bucket : public RGWRESTOp { @@ -342,7 +342,7 @@ void RGWOp_Sync_Bucket::execute(optional_yield y) op_state.set_tenant(tenant); op_state.set_sync_bucket(sync_bucket); - op_ret = RGWBucketAdminOp::sync_bucket(store, op_state, s); + op_ret = RGWBucketAdminOp::sync_bucket(driver, 
op_state, s); } class RGWOp_Object_Remove: public RGWRESTOp { @@ -372,7 +372,7 @@ void RGWOp_Object_Remove::execute(optional_yield y) op_state.set_bucket_name(bucket); op_state.set_object(object); - op_ret = RGWBucketAdminOp::remove_object(store, op_state, s); + op_ret = RGWBucketAdminOp::remove_object(driver, op_state, s); } @@ -410,4 +410,4 @@ RGWOp *RGWHandler_Bucket::op_delete() return new RGWOp_Object_Remove; return new RGWOp_Bucket_Remove; -} \ No newline at end of file +} diff --git a/src/rgw/rgw_rest_bucket.h b/src/rgw/rgw_rest_bucket.h index 3f3d5a6ba9b63..00f0b64397a23 100644 --- a/src/rgw/rgw_rest_bucket.h +++ b/src/rgw/rgw_rest_bucket.h @@ -27,7 +27,7 @@ public: RGWRESTMgr_Bucket() = default; ~RGWRESTMgr_Bucket() override = default; - RGWHandler_REST* get_handler(rgw::sal::Store* store, + RGWHandler_REST* get_handler(rgw::sal::Driver* driver, req_state*, const rgw::auth::StrategyRegistry& auth_registry, const std::string&) override { diff --git a/src/rgw/rgw_rest_config.cc b/src/rgw/rgw_rest_config.cc index dec8655c00a50..a3b93ea3afcf7 100644 --- a/src/rgw/rgw_rest_config.cc +++ b/src/rgw/rgw_rest_config.cc @@ -33,7 +33,7 @@ using namespace std; void RGWOp_ZoneConfig_Get::send_response() { - const RGWZoneParams& zone_params = static_cast(store)->svc()->zone->get_zone_params(); + const RGWZoneParams& zone_params = static_cast(driver)->svc()->zone->get_zone_params(); set_req_state_err(s, op_ret); dump_errno(s); diff --git a/src/rgw/rgw_rest_config.h b/src/rgw/rgw_rest_config.h index 81717cc0d80e2..5a0feb533b2f4 100644 --- a/src/rgw/rgw_rest_config.h +++ b/src/rgw/rgw_rest_config.h @@ -30,7 +30,7 @@ public: int verify_permission(optional_yield) override { return check_caps(s->user->get_caps()); } - void execute(optional_yield) override {} /* store already has the info we need, just need to send response */ + void execute(optional_yield) override {} /* driver already has the info we need, just need to send response */ void send_response() override ; const 
char* name() const override { return "get_zone_config"; @@ -55,7 +55,7 @@ public: RGWRESTMgr_Config() = default; ~RGWRESTMgr_Config() override = default; - RGWHandler_REST* get_handler(rgw::sal::Store* , + RGWHandler_REST* get_handler(rgw::sal::Driver* , req_state*, const rgw::auth::StrategyRegistry& auth_registry, const std::string&) override { diff --git a/src/rgw/rgw_rest_conn.cc b/src/rgw/rgw_rest_conn.cc index 7f02097ca30ae..6a753f7dc1de4 100644 --- a/src/rgw/rgw_rest_conn.cc +++ b/src/rgw/rgw_rest_conn.cc @@ -10,7 +10,7 @@ using namespace std; -RGWRESTConn::RGWRESTConn(CephContext *_cct, rgw::sal::Store* store, +RGWRESTConn::RGWRESTConn(CephContext *_cct, rgw::sal::Driver* driver, const string& _remote_id, const list& remote_endpoints, std::optional _api_name, @@ -21,9 +21,9 @@ RGWRESTConn::RGWRESTConn(CephContext *_cct, rgw::sal::Store* store, api_name(_api_name), host_style(_host_style) { - if (store) { - key = store->get_zone()->get_system_key(); - self_zone_group = store->get_zone()->get_zonegroup().get_id(); + if (driver) { + key = driver->get_zone()->get_system_key(); + self_zone_group = driver->get_zone()->get_zonegroup().get_id(); } } diff --git a/src/rgw/rgw_rest_conn.h b/src/rgw/rgw_rest_conn.h index a612cf0cc3051..63a3878691737 100644 --- a/src/rgw/rgw_rest_conn.h +++ b/src/rgw/rgw_rest_conn.h @@ -79,7 +79,7 @@ class RGWRESTConn public: RGWRESTConn(CephContext *_cct, - rgw::sal::Store* store, + rgw::sal::Driver* driver, const std::string& _remote_id, const std::list& endpoints, std::optional _api_name, @@ -227,8 +227,8 @@ class S3RESTConn : public RGWRESTConn { public: - S3RESTConn(CephContext *_cct, rgw::sal::Store* store, const std::string& _remote_id, const std::list& endpoints, std::optional _api_name, HostStyle _host_style = PathStyle) : - RGWRESTConn(_cct, store, _remote_id, endpoints, _api_name, _host_style) {} + S3RESTConn(CephContext *_cct, rgw::sal::Driver* driver, const std::string& _remote_id, const std::list& endpoints, std::optional 
_api_name, HostStyle _host_style = PathStyle) : + RGWRESTConn(_cct, driver, _remote_id, endpoints, _api_name, _host_style) {} S3RESTConn(CephContext *_cct, const std::string& _remote_id, const std::list& endpoints, RGWAccessKey _cred, std::string _zone_group, std::optional _api_name, HostStyle _host_style = PathStyle): RGWRESTConn(_cct, _remote_id, endpoints, _cred, _zone_group, _api_name, _host_style) {} ~S3RESTConn() override = default; diff --git a/src/rgw/rgw_rest_iam.cc b/src/rgw/rgw_rest_iam.cc index d71b5b3180812..029267e67f913 100644 --- a/src/rgw/rgw_rest_iam.cc +++ b/src/rgw/rgw_rest_iam.cc @@ -93,7 +93,7 @@ RGWOp *RGWHandler_REST_IAM::op_post() return nullptr; } -int RGWHandler_REST_IAM::init(rgw::sal::Store* store, +int RGWHandler_REST_IAM::init(rgw::sal::Driver* driver, req_state *s, rgw::io::BasicClient *cio) { @@ -104,12 +104,12 @@ int RGWHandler_REST_IAM::init(rgw::sal::Store* store, return ret; } - return RGWHandler_REST::init(store, s, cio); + return RGWHandler_REST::init(driver, s, cio); } int RGWHandler_REST_IAM::authorize(const DoutPrefixProvider* dpp, optional_yield y) { - return RGW_Auth_S3::authorize(dpp, store, auth_registry, s, y); + return RGW_Auth_S3::authorize(dpp, driver, auth_registry, s, y); } int RGWHandler_REST_IAM::init_from_header(req_state* s, @@ -155,7 +155,7 @@ int RGWHandler_REST_IAM::init_from_header(req_state* s, } RGWHandler_REST* -RGWRESTMgr_IAM::get_handler(rgw::sal::Store* store, +RGWRESTMgr_IAM::get_handler(rgw::sal::Driver* driver, req_state* const s, const rgw::auth::StrategyRegistry& auth_registry, const std::string& frontend_prefix) diff --git a/src/rgw/rgw_rest_iam.h b/src/rgw/rgw_rest_iam.h index 698758e836a33..1a25362f4ae2c 100644 --- a/src/rgw/rgw_rest_iam.h +++ b/src/rgw/rgw_rest_iam.h @@ -23,7 +23,7 @@ public: bl_post_body(bl_post_body) {} ~RGWHandler_REST_IAM() override = default; - int init(rgw::sal::Store* store, + int init(rgw::sal::Driver* driver, req_state *s, rgw::io::BasicClient *cio) override; int 
authorize(const DoutPrefixProvider* dpp, optional_yield y) override; @@ -41,7 +41,7 @@ public: return this; } - RGWHandler_REST* get_handler(rgw::sal::Store* store, + RGWHandler_REST* get_handler(rgw::sal::Driver* driver, req_state*, const rgw::auth::StrategyRegistry&, const std::string&) override; diff --git a/src/rgw/rgw_rest_info.cc b/src/rgw/rgw_rest_info.cc index 4276819a394b5..65323dd004b83 100644 --- a/src/rgw/rgw_rest_info.cc +++ b/src/rgw/rgw_rest_info.cc @@ -33,8 +33,8 @@ void RGWOp_Info_Get::execute(optional_yield y) { formatter->open_array_section("storage_backends"); // for now, just return the backend that is accessible formatter->open_object_section("dummy"); - formatter->dump_string("name", store->get_name()); - formatter->dump_string("cluster_id", store->get_cluster_id(this, y)); + formatter->dump_string("name", driver->get_name()); + formatter->dump_string("cluster_id", driver->get_cluster_id(this, y)); formatter->close_section(); formatter->close_section(); formatter->close_section(); diff --git a/src/rgw/rgw_rest_info.h b/src/rgw/rgw_rest_info.h index f109af01f65d4..0c4467073f421 100644 --- a/src/rgw/rgw_rest_info.h +++ b/src/rgw/rgw_rest_info.h @@ -24,7 +24,7 @@ public: RGWRESTMgr_Info() = default; ~RGWRESTMgr_Info() override = default; - RGWHandler_REST* get_handler(rgw::sal::Store* store, + RGWHandler_REST* get_handler(rgw::sal::Driver* driver, req_state*, const rgw::auth::StrategyRegistry& auth_registry, const std::string&) override { diff --git a/src/rgw/rgw_rest_log.cc b/src/rgw/rgw_rest_log.cc index 81622cab5b41c..3563cf051bd7b 100644 --- a/src/rgw/rgw_rest_log.cc +++ b/src/rgw/rgw_rest_log.cc @@ -78,7 +78,7 @@ void RGWOp_MDLog_List::execute(optional_yield y) { if (period.empty()) { ldpp_dout(this, 5) << "Missing period id trying to use current" << dendl; - period = store->get_zone()->get_current_period_id(); + period = driver->get_zone()->get_current_period_id(); if (period.empty()) { ldpp_dout(this, 5) << "Missing period id" << dendl; 
op_ret = -EINVAL; @@ -86,7 +86,7 @@ void RGWOp_MDLog_List::execute(optional_yield y) { } } - RGWMetadataLog meta_log{s->cct, static_cast(store)->svc()->zone, static_cast(store)->svc()->cls, period}; + RGWMetadataLog meta_log{s->cct, static_cast(driver)->svc()->zone, static_cast(driver)->svc()->cls, period}; meta_log.init_list_entries(shard_id, {}, {}, marker, &handle); @@ -112,7 +112,7 @@ void RGWOp_MDLog_List::send_response() { for (list::iterator iter = entries.begin(); iter != entries.end(); ++iter) { cls_log_entry& entry = *iter; - static_cast(store)->ctl()->meta.mgr->dump_log_entry(entry, s->formatter); + static_cast(driver)->ctl()->meta.mgr->dump_log_entry(entry, s->formatter); flusher.flush(); } s->formatter->close_section(); @@ -123,7 +123,7 @@ void RGWOp_MDLog_List::send_response() { void RGWOp_MDLog_Info::execute(optional_yield y) { num_objects = s->cct->_conf->rgw_md_log_max_shards; - period = static_cast(store)->svc()->mdlog->read_oldest_log_period(y, s); + period = static_cast(driver)->svc()->mdlog->read_oldest_log_period(y, s); op_ret = period.get_error(); } @@ -156,7 +156,7 @@ void RGWOp_MDLog_ShardInfo::execute(optional_yield y) { if (period.empty()) { ldpp_dout(this, 5) << "Missing period id trying to use current" << dendl; - period = store->get_zone()->get_current_period_id(); + period = driver->get_zone()->get_current_period_id(); if (period.empty()) { ldpp_dout(this, 5) << "Missing period id" << dendl; @@ -164,7 +164,7 @@ void RGWOp_MDLog_ShardInfo::execute(optional_yield y) { return; } } - RGWMetadataLog meta_log{s->cct, static_cast(store)->svc()->zone, static_cast(store)->svc()->cls, period}; + RGWMetadataLog meta_log{s->cct, static_cast(driver)->svc()->zone, static_cast(driver)->svc()->cls, period}; op_ret = meta_log.get_info(this, shard_id, &info); } @@ -222,7 +222,7 @@ void RGWOp_MDLog_Delete::execute(optional_yield y) { if (period.empty()) { ldpp_dout(this, 5) << "Missing period id trying to use current" << dendl; - period = 
store->get_zone()->get_current_period_id(); + period = driver->get_zone()->get_current_period_id(); if (period.empty()) { ldpp_dout(this, 5) << "Missing period id" << dendl; @@ -230,7 +230,7 @@ void RGWOp_MDLog_Delete::execute(optional_yield y) { return; } } - RGWMetadataLog meta_log{s->cct, static_cast(store)->svc()->zone, static_cast(store)->svc()->cls, period}; + RGWMetadataLog meta_log{s->cct, static_cast(driver)->svc()->zone, static_cast(driver)->svc()->cls, period}; op_ret = meta_log.trim(this, shard_id, {}, {}, {}, marker); } @@ -249,7 +249,7 @@ void RGWOp_MDLog_Lock::execute(optional_yield y) { if (period.empty()) { ldpp_dout(this, 5) << "Missing period id trying to use current" << dendl; - period = store->get_zone()->get_current_period_id(); + period = driver->get_zone()->get_current_period_id(); } if (period.empty() || @@ -270,7 +270,7 @@ void RGWOp_MDLog_Lock::execute(optional_yield y) { return; } - RGWMetadataLog meta_log{s->cct, static_cast(store)->svc()->zone, static_cast(store)->svc()->cls, period}; + RGWMetadataLog meta_log{s->cct, static_cast(driver)->svc()->zone, static_cast(driver)->svc()->cls, period}; unsigned dur; dur = (unsigned)strict_strtol(duration_str.c_str(), 10, &err); if (!err.empty() || dur <= 0) { @@ -297,7 +297,7 @@ void RGWOp_MDLog_Unlock::execute(optional_yield y) { if (period.empty()) { ldpp_dout(this, 5) << "Missing period id trying to use current" << dendl; - period = store->get_zone()->get_current_period_id(); + period = driver->get_zone()->get_current_period_id(); } if (period.empty() || @@ -317,7 +317,7 @@ void RGWOp_MDLog_Unlock::execute(optional_yield y) { return; } - RGWMetadataLog meta_log{s->cct, static_cast(store)->svc()->zone, static_cast(store)->svc()->cls, period}; + RGWMetadataLog meta_log{s->cct, static_cast(driver)->svc()->zone, static_cast(driver)->svc()->cls, period}; op_ret = meta_log.unlock(s, shard_id, zone_id, locker_id); } @@ -352,13 +352,13 @@ void RGWOp_MDLog_Notify::execute(optional_yield y) { return; } 
- if (store->ctx()->_conf->subsys.should_gather()) { + if (driver->ctx()->_conf->subsys.should_gather()) { for (set::iterator iter = updated_shards.begin(); iter != updated_shards.end(); ++iter) { ldpp_dout(this, 20) << __func__ << "(): updated shard=" << *iter << dendl; } } - store->wakeup_meta_sync_shards(updated_shards); + driver->wakeup_meta_sync_shards(updated_shards); op_ret = 0; } @@ -414,7 +414,7 @@ void RGWOp_BILog_List::execute(optional_yield y) { b.name = bn; b.bucket_id = bucket_instance; } - op_ret = store->get_bucket(s, nullptr, b, &bucket, y); + op_ret = driver->get_bucket(s, nullptr, b, &bucket, y); if (op_ret < 0) { ldpp_dout(this, 5) << "could not get bucket info for bucket=" << bucket_name << dendl; return; @@ -451,7 +451,7 @@ void RGWOp_BILog_List::execute(optional_yield y) { send_response(); do { list entries; - int ret = static_cast(store)->svc()->bilog_rados->log_list(s, bucket->get_info(), log_layout, shard_id, + int ret = static_cast(driver)->svc()->bilog_rados->log_list(s, bucket->get_info(), log_layout, shard_id, marker, max_entries - count, entries, &truncated); if (ret < 0) { @@ -541,7 +541,7 @@ void RGWOp_BILog_Info::execute(optional_yield y) { b.name = bn; b.bucket_id = bucket_instance; } - op_ret = store->get_bucket(s, nullptr, b, &bucket, y); + op_ret = driver->get_bucket(s, nullptr, b, &bucket, y); if (op_ret < 0) { ldpp_dout(this, 5) << "could not get bucket info for bucket=" << bucket_name << dendl; return; @@ -635,13 +635,13 @@ void RGWOp_BILog_Delete::execute(optional_yield y) { b.name = bn; b.bucket_id = bucket_instance; } - op_ret = store->get_bucket(s, nullptr, b, &bucket, y); + op_ret = driver->get_bucket(s, nullptr, b, &bucket, y); if (op_ret < 0) { ldpp_dout(this, 5) << "could not get bucket info for bucket=" << bucket_name << dendl; return; } - op_ret = bilog_trim(this, static_cast(store), + op_ret = bilog_trim(this, static_cast(driver), bucket->get_info(), gen, shard_id, start_marker, end_marker); if (op_ret < 0) { @@ 
-688,7 +688,7 @@ void RGWOp_DATALog_List::execute(optional_yield y) { // Note that last_marker is updated to be the marker of the last // entry listed - op_ret = static_cast(store)->svc()->datalog_rados->list_entries(this, shard_id, + op_ret = static_cast(driver)->svc()->datalog_rados->list_entries(this, shard_id, max_entries, entries, marker, &last_marker, &truncated); @@ -749,7 +749,7 @@ void RGWOp_DATALog_ShardInfo::execute(optional_yield y) { return; } - op_ret = static_cast(store)->svc()->datalog_rados->get_info(this, shard_id, &info); + op_ret = static_cast(driver)->svc()->datalog_rados->get_info(this, shard_id, &info); } void RGWOp_DATALog_ShardInfo::send_response() { @@ -794,7 +794,7 @@ void RGWOp_DATALog_Notify::execute(optional_yield y) { return; } - if (store->ctx()->_conf->subsys.should_gather()) { + if (driver->ctx()->_conf->subsys.should_gather()) { for (bc::flat_map >::iterator iter = updated_shards.begin(); iter != updated_shards.end(); ++iter) { ldpp_dout(this, 20) << __func__ << "(): updated shard=" << iter->first << dendl; bc::flat_set& entries = iter->second; @@ -805,7 +805,7 @@ void RGWOp_DATALog_Notify::execute(optional_yield y) { } } - store->wakeup_data_sync_shards(this, source_zone, updated_shards); + driver->wakeup_data_sync_shards(this, source_zone, updated_shards); op_ret = 0; } @@ -842,7 +842,7 @@ void RGWOp_DATALog_Notify2::execute(optional_yield y) { return; } - if (store->ctx()->_conf->subsys.should_gather()) { + if (driver->ctx()->_conf->subsys.should_gather()) { for (bc::flat_map >::iterator iter = updated_shards.begin(); iter != updated_shards.end(); ++iter) { ldpp_dout(this, 20) << __func__ << "(): updated shard=" << iter->first << dendl; @@ -854,7 +854,7 @@ void RGWOp_DATALog_Notify2::execute(optional_yield y) { } } - store->wakeup_data_sync_shards(this, source_zone, updated_shards); + driver->wakeup_data_sync_shards(this, source_zone, updated_shards); op_ret = 0; } @@ -898,7 +898,7 @@ void 
RGWOp_DATALog_Delete::execute(optional_yield y) { return; } - op_ret = static_cast(store)->svc()->datalog_rados->trim_entries(this, shard_id, marker); + op_ret = static_cast(driver)->svc()->datalog_rados->trim_entries(this, shard_id, marker); } // not in header to avoid pulling in rgw_sync.h @@ -918,7 +918,7 @@ public: void RGWOp_MDLog_Status::execute(optional_yield y) { - auto sync = static_cast(store)->getRados()->get_meta_sync_manager(); + auto sync = static_cast(driver)->getRados()->get_meta_sync_manager(); if (sync == nullptr) { ldpp_dout(this, 1) << "no sync manager" << dendl; op_ret = -ENOENT; @@ -984,7 +984,7 @@ void RGWOp_BILog_Status::execute(optional_yield y) // read the bucket instance info for num_shards std::unique_ptr bucket; - op_ret = store->get_bucket(s, nullptr, b, &bucket, y); + op_ret = driver->get_bucket(s, nullptr, b, &bucket, y); if (op_ret < 0) { ldpp_dout(this, 4) << "failed to read bucket info: " << cpp_strerror(op_ret) << dendl; return; @@ -1003,7 +1003,7 @@ void RGWOp_BILog_Status::execute(optional_yield y) } } - const auto& local_zone_id = store->get_zone()->get_id(); + const auto& local_zone_id = driver->get_zone()->get_id(); if (!merge) { rgw_sync_bucket_pipe pipe; @@ -1016,7 +1016,7 @@ void RGWOp_BILog_Status::execute(optional_yield y) op_ret = rgw_read_bucket_full_sync_status( this, - static_cast(store), + static_cast(driver), pipe, &status.sync_status, s->yield); @@ -1028,7 +1028,7 @@ void RGWOp_BILog_Status::execute(optional_yield y) op_ret = rgw_read_bucket_inc_sync_status( this, - static_cast(store), + static_cast(driver), pipe, status.sync_status.incremental_gen, &status.inc_status); @@ -1041,7 +1041,7 @@ void RGWOp_BILog_Status::execute(optional_yield y) rgw_zone_id source_zone_id(source_zone); RGWBucketSyncPolicyHandlerRef source_handler; - op_ret = store->get_sync_policy_handler(s, source_zone_id, source_bucket, &source_handler, y); + op_ret = driver->get_sync_policy_handler(s, source_zone_id, source_bucket, 
&source_handler, y); if (op_ret < 0) { ldpp_dout(this, -1) << "could not get bucket sync policy handler (r=" << op_ret << ")" << dendl; return; @@ -1068,7 +1068,7 @@ void RGWOp_BILog_Status::execute(optional_yield y) if (*pipe.dest.bucket != pinfo->bucket) { opt_dest_info.emplace(); std::unique_ptr dest_bucket; - op_ret = store->get_bucket(s, nullptr, *pipe.dest.bucket, &dest_bucket, y); + op_ret = driver->get_bucket(s, nullptr, *pipe.dest.bucket, &dest_bucket, y); if (op_ret < 0) { ldpp_dout(this, 4) << "failed to read target bucket info (bucket=: " << cpp_strerror(op_ret) << dendl; return; @@ -1081,7 +1081,7 @@ void RGWOp_BILog_Status::execute(optional_yield y) op_ret = rgw_read_bucket_full_sync_status( this, - static_cast(store), + static_cast(driver), pipe, &status.sync_status, s->yield); @@ -1091,7 +1091,7 @@ void RGWOp_BILog_Status::execute(optional_yield y) } current_status.resize(status.sync_status.shards_done_with_gen.size()); - int r = rgw_read_bucket_inc_sync_status(this, static_cast(store), + int r = rgw_read_bucket_inc_sync_status(this, static_cast(driver), pipe, status.sync_status.incremental_gen, ¤t_status); if (r < 0) { ldpp_dout(this, -1) << "ERROR: rgw_read_bucket_inc_sync_status() on pipe=" << pipe << " returned ret=" << r << dendl; @@ -1157,7 +1157,7 @@ public: void RGWOp_DATALog_Status::execute(optional_yield y) { const auto source_zone = s->info.args.get("source-zone"); - auto sync = store->get_data_sync_manager(source_zone); + auto sync = driver->get_data_sync_manager(source_zone); if (sync == nullptr) { ldpp_dout(this, 1) << "no sync manager for source-zone " << source_zone << dendl; op_ret = -ENOENT; diff --git a/src/rgw/rgw_rest_log.h b/src/rgw/rgw_rest_log.h index 36936f1eb4f7b..c8a0c4df07beb 100644 --- a/src/rgw/rgw_rest_log.h +++ b/src/rgw/rgw_rest_log.h @@ -328,7 +328,7 @@ public: RGWRESTMgr_Log() = default; ~RGWRESTMgr_Log() override = default; - RGWHandler_REST* get_handler(rgw::sal::Store* store, + RGWHandler_REST* 
get_handler(rgw::sal::Driver* driver, req_state* const, const rgw::auth::StrategyRegistry& auth_registry, const std::string& frontend_prefixs) override { diff --git a/src/rgw/rgw_rest_metadata.cc b/src/rgw/rgw_rest_metadata.cc index 52303b871dcca..23f78819c645a 100644 --- a/src/rgw/rgw_rest_metadata.cc +++ b/src/rgw/rgw_rest_metadata.cc @@ -56,7 +56,7 @@ void RGWOp_Metadata_Get::execute(optional_yield y) { frame_metadata_key(s, metadata_key); - auto meta_mgr = static_cast(store)->ctl()->meta.mgr; + auto meta_mgr = static_cast(driver)->ctl()->meta.mgr; /* Get keys */ op_ret = meta_mgr->get(metadata_key, s->formatter, s->yield, s); @@ -125,7 +125,7 @@ void RGWOp_Metadata_List::execute(optional_yield y) { marker = "3:bf885d8f:root::sorry_janefonda_665:head"; */ - op_ret = store->meta_list_keys_init(this, metadata_key, marker, &handle); + op_ret = driver->meta_list_keys_init(this, metadata_key, marker, &handle); if (op_ret < 0) { ldpp_dout(this, 5) << "ERROR: can't get key: " << cpp_strerror(op_ret) << dendl; return; @@ -144,7 +144,7 @@ void RGWOp_Metadata_List::execute(optional_yield y) { do { list keys; left = (max_entries_specified ? 
max_entries - count : max); - op_ret = store->meta_list_keys_next(this, handle, left, keys, &truncated); + op_ret = driver->meta_list_keys_next(this, handle, left, keys, &truncated); if (op_ret < 0) { ldpp_dout(this, 5) << "ERROR: lists_keys_next(): " << cpp_strerror(op_ret) << dendl; @@ -166,12 +166,12 @@ void RGWOp_Metadata_List::execute(optional_yield y) { encode_json("count", count, s->formatter); if (truncated) { string esc_marker = - rgw::to_base64(store->meta_get_marker(handle)); + rgw::to_base64(driver->meta_get_marker(handle)); encode_json("marker", esc_marker, s->formatter); } s->formatter->close_section(); } - store->meta_list_keys_complete(handle); + driver->meta_list_keys_complete(handle); op_ret = 0; } @@ -263,7 +263,7 @@ void RGWOp_Metadata_Put::execute(optional_yield y) { } } - op_ret = static_cast(store)->ctl()->meta.mgr->put(metadata_key, bl, s->yield, s, sync_type, + op_ret = static_cast(driver)->ctl()->meta.mgr->put(metadata_key, bl, s->yield, s, sync_type, false, &ondisk_version); if (op_ret < 0) { ldpp_dout(s, 5) << "ERROR: can't put key: " << cpp_strerror(op_ret) << dendl; @@ -294,7 +294,7 @@ void RGWOp_Metadata_Delete::execute(optional_yield y) { string metadata_key; frame_metadata_key(s, metadata_key); - op_ret = static_cast(store)->ctl()->meta.mgr->remove(metadata_key, s->yield, s); + op_ret = static_cast(driver)->ctl()->meta.mgr->remove(metadata_key, s->yield, s); if (op_ret < 0) { ldpp_dout(s, 5) << "ERROR: can't remove key: " << cpp_strerror(op_ret) << dendl; return; diff --git a/src/rgw/rgw_rest_metadata.h b/src/rgw/rgw_rest_metadata.h index 9dc29953a05ee..c741aefcb332c 100644 --- a/src/rgw/rgw_rest_metadata.h +++ b/src/rgw/rgw_rest_metadata.h @@ -98,7 +98,7 @@ public: RGWRESTMgr_Metadata() = default; ~RGWRESTMgr_Metadata() override = default; - RGWHandler_REST* get_handler(rgw::sal::Store* store, + RGWHandler_REST* get_handler(rgw::sal::Driver* driver, req_state* const s, const rgw::auth::StrategyRegistry& auth_registry, const 
std::string& frontend_prefix) override { diff --git a/src/rgw/rgw_rest_oidc_provider.cc b/src/rgw/rgw_rest_oidc_provider.cc index 1a4dca23c8adc..2fc0bab25a61e 100644 --- a/src/rgw/rgw_rest_oidc_provider.cc +++ b/src/rgw/rgw_rest_oidc_provider.cc @@ -123,7 +123,7 @@ void RGWCreateOIDCProvider::execute(optional_yield y) return; } - std::unique_ptr provider = store->get_oidc_provider(); + std::unique_ptr provider = driver->get_oidc_provider(); provider->set_url(provider_url); provider->set_tenant(s->user->get_tenant()); provider->set_client_ids(client_ids); @@ -145,7 +145,7 @@ void RGWCreateOIDCProvider::execute(optional_yield y) void RGWDeleteOIDCProvider::execute(optional_yield y) { - std::unique_ptr provider = store->get_oidc_provider(); + std::unique_ptr provider = driver->get_oidc_provider(); provider->set_arn(provider_arn); provider->set_tenant(s->user->get_tenant()); op_ret = provider->delete_obj(s, y); @@ -165,7 +165,7 @@ void RGWDeleteOIDCProvider::execute(optional_yield y) void RGWGetOIDCProvider::execute(optional_yield y) { - std::unique_ptr provider = store->get_oidc_provider(); + std::unique_ptr provider = driver->get_oidc_provider(); provider->set_arn(provider_arn); provider->set_tenant(s->user->get_tenant()); op_ret = provider->get(s); @@ -209,7 +209,7 @@ int RGWListOIDCProviders::verify_permission(optional_yield y) void RGWListOIDCProviders::execute(optional_yield y) { vector> result; - op_ret = store->get_oidc_providers(s, s->user->get_tenant(), result); + op_ret = driver->get_oidc_providers(s, s->user->get_tenant(), result); if (op_ret == 0) { s->formatter->open_array_section("ListOpenIDConnectProvidersResponse"); diff --git a/src/rgw/rgw_rest_ratelimit.cc b/src/rgw/rgw_rest_ratelimit.cc index df6ba0f44ffdc..b482b4f82c49a 100644 --- a/src/rgw/rgw_rest_ratelimit.cc +++ b/src/rgw/rgw_rest_ratelimit.cc @@ -36,7 +36,7 @@ void RGWOp_Ratelimit_Info::execute(optional_yield y) if (ratelimit_scope == "bucket" && !bucket_name.empty() && !global) { 
std::unique_ptr bucket; - int r = store->get_bucket(s, nullptr, tenant_name, bucket_name, &bucket, y); + int r = driver->get_bucket(s, nullptr, tenant_name, bucket_name, &bucket, y); if (r != 0) { op_ret = r; ldpp_dout(this, 0) << "Error on getting bucket info" << dendl; @@ -66,7 +66,7 @@ void RGWOp_Ratelimit_Info::execute(optional_yield y) RGWRateLimitInfo ratelimit_info; rgw_user user(uid_str); std::unique_ptr user_sal; - user_sal = store->get_user(user); + user_sal = driver->get_user(user); if (!rgw::sal::User::empty(user_sal)) { op_ret = user_sal->load_user(this, y); if (op_ret) { @@ -98,9 +98,9 @@ void RGWOp_Ratelimit_Info::execute(optional_yield y) flusher.flush(); } if (global) { - std::string realm_id = store->get_zone()->get_realm_id(); + std::string realm_id = driver->get_zone()->get_realm_id(); RGWPeriodConfig period_config; - op_ret = period_config.read(this, static_cast(store)->svc()->sysobj, realm_id, y); + op_ret = period_config.read(this, static_cast(driver)->svc()->sysobj, realm_id, y); if (op_ret && op_ret != -ENOENT) { ldpp_dout(this, 0) << "Error on period config read" << dendl; return; @@ -235,7 +235,7 @@ void RGWOp_Ratelimit_Set::execute(optional_yield y) if (ratelimit_scope == "user" && !uid_str.empty() && !global) { rgw_user user(uid_str); std::unique_ptr user_sal; - user_sal = store->get_user(user); + user_sal = driver->get_user(user); if (!rgw::sal::User::empty(user_sal)) { op_ret = user_sal->load_user(this, y); if (op_ret) { @@ -273,7 +273,7 @@ void RGWOp_Ratelimit_Set::execute(optional_yield y) if (ratelimit_scope == "bucket" && !bucket_name.empty() && !global) { ldpp_dout(this, 0) << "getting bucket info" << dendl; std::unique_ptr bucket; - op_ret = store->get_bucket(this, nullptr, tenant_name, bucket_name, &bucket, y); + op_ret = driver->get_bucket(this, nullptr, tenant_name, bucket_name, &bucket, y); if (op_ret) { ldpp_dout(this, 0) << "Error on getting bucket info" << dendl; return; @@ -301,9 +301,9 @@ void 
RGWOp_Ratelimit_Set::execute(optional_yield y) return; } if (global) { - std::string realm_id = store->get_zone()->get_realm_id(); + std::string realm_id = driver->get_zone()->get_realm_id(); RGWPeriodConfig period_config; - op_ret = period_config.read(s, static_cast(store)->svc()->sysobj, realm_id, y); + op_ret = period_config.read(s, static_cast(driver)->svc()->sysobj, realm_id, y); if (op_ret && op_ret != -ENOENT) { ldpp_dout(this, 0) << "Error on period config read" << dendl; return; @@ -314,7 +314,7 @@ void RGWOp_Ratelimit_Set::execute(optional_yield y) have_max_read_bytes, max_read_bytes, have_max_write_bytes, max_write_bytes, have_enabled, enabled, ratelimit_configured, ratelimit_info); period_config.bucket_ratelimit = ratelimit_info; - op_ret = period_config.write(s, static_cast(store)->svc()->sysobj, realm_id, y); + op_ret = period_config.write(s, static_cast(driver)->svc()->sysobj, realm_id, y); return; } if (ratelimit_scope == "anon") { @@ -323,7 +323,7 @@ void RGWOp_Ratelimit_Set::execute(optional_yield y) have_max_read_bytes, max_read_bytes, have_max_write_bytes, max_write_bytes, have_enabled, enabled, ratelimit_configured, ratelimit_info); period_config.anon_ratelimit = ratelimit_info; - op_ret = period_config.write(s, static_cast(store)->svc()->sysobj, realm_id, y); + op_ret = period_config.write(s, static_cast(driver)->svc()->sysobj, realm_id, y); return; } if (ratelimit_scope == "user") { @@ -332,7 +332,7 @@ void RGWOp_Ratelimit_Set::execute(optional_yield y) have_max_read_bytes, max_read_bytes, have_max_write_bytes, max_write_bytes, have_enabled, enabled, ratelimit_configured, ratelimit_info); period_config.user_ratelimit = ratelimit_info; - op_ret = period_config.write(s, static_cast(store)->svc()->sysobj, realm_id, y); + op_ret = period_config.write(s, static_cast(driver)->svc()->sysobj, realm_id, y); return; } } diff --git a/src/rgw/rgw_rest_ratelimit.h b/src/rgw/rgw_rest_ratelimit.h index 0f2f594203b48..c3a942b197bb6 100644 --- 
a/src/rgw/rgw_rest_ratelimit.h +++ b/src/rgw/rgw_rest_ratelimit.h @@ -25,10 +25,10 @@ public: RGWRESTMgr_Ratelimit() = default; ~RGWRESTMgr_Ratelimit() override = default; - RGWHandler_REST *get_handler(rgw::sal::Store* store, + RGWHandler_REST *get_handler(rgw::sal::Driver* driver, req_state*, const rgw::auth::StrategyRegistry& auth_registry, const std::string&) override { return new RGWHandler_Ratelimit(auth_registry); } -}; \ No newline at end of file +}; diff --git a/src/rgw/rgw_rest_role.cc b/src/rgw/rgw_rest_role.cc index 0fe6981cb27e9..e397910a0fbf0 100644 --- a/src/rgw/rgw_rest_role.cc +++ b/src/rgw/rgw_rest_role.cc @@ -29,7 +29,7 @@ int RGWRestRole::verify_permission(optional_yield y) } string role_name = s->info.args.get("RoleName"); - std::unique_ptr role = store->get_role(role_name, + std::unique_ptr role = driver->get_role(role_name, s->user->get_tenant()); if (op_ret = role->get(s, y); op_ret < 0) { if (op_ret == -ENOENT) { @@ -191,7 +191,7 @@ void RGWCreateRole::execute(optional_yield y) return; } std::string user_tenant = s->user->get_tenant(); - std::unique_ptr role = store->get_role(role_name, + std::unique_ptr role = driver->get_role(role_name, user_tenant, role_path, trust_policy, @@ -206,7 +206,7 @@ void RGWCreateRole::execute(optional_yield y) std::string role_id; - if (!store->is_meta_master()) { + if (!driver->is_meta_master()) { RGWXMLDecoder::XMLParser parser; if (!parser.init()) { ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl; @@ -236,7 +236,7 @@ void RGWCreateRole::execute(optional_yield y) RGWAccessKey cred = it->second; key.key = cred.key; } - op_ret = store->forward_iam_request_to_master(s, key, nullptr, bl_post_body, &parser, s->info, y); + op_ret = driver->forward_iam_request_to_master(s, key, nullptr, bl_post_body, &parser, s->info, y); if (op_ret < 0) { ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << op_ret << dendl; return; @@ -319,7 +319,7 @@ void 
RGWDeleteRole::execute(optional_yield y) return; } - if (!store->is_meta_master()) { + if (!driver->is_meta_master()) { is_master = false; RGWXMLDecoder::XMLParser parser; if (!parser.init()) { @@ -340,7 +340,7 @@ void RGWDeleteRole::execute(optional_yield y) RGWAccessKey cred = it->second; key.key = cred.key; } - master_op_ret = store->forward_iam_request_to_master(s, key, nullptr, bl_post_body, &parser, s->info, y); + master_op_ret = driver->forward_iam_request_to_master(s, key, nullptr, bl_post_body, &parser, s->info, y); if (master_op_ret < 0) { op_ret = master_op_ret; ldpp_dout(this, 0) << "forward_iam_request_to_master returned ret=" << op_ret << dendl; @@ -413,7 +413,7 @@ void RGWGetRole::execute(optional_yield y) if (op_ret < 0) { return; } - std::unique_ptr role = store->get_role(role_name, + std::unique_ptr role = driver->get_role(role_name, s->user->get_tenant()); op_ret = role->get(s, y); @@ -463,7 +463,7 @@ void RGWModifyRoleTrustPolicy::execute(optional_yield y) return; } - if (!store->is_meta_master()) { + if (!driver->is_meta_master()) { RGWXMLDecoder::XMLParser parser; if (!parser.init()) { ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl; @@ -485,7 +485,7 @@ void RGWModifyRoleTrustPolicy::execute(optional_yield y) RGWAccessKey cred = it->second; key.key = cred.key; } - op_ret = store->forward_iam_request_to_master(s, key, nullptr, bl_post_body, &parser, s->info, y); + op_ret = driver->forward_iam_request_to_master(s, key, nullptr, bl_post_body, &parser, s->info, y); if (op_ret < 0) { ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << op_ret << dendl; return; @@ -536,7 +536,7 @@ void RGWListRoles::execute(optional_yield y) return; } vector> result; - op_ret = store->get_roles(s, y, path_prefix, s->user->get_tenant(), result); + op_ret = driver->get_roles(s, y, path_prefix, s->user->get_tenant(), result); if (op_ret == 0) { s->formatter->open_array_section("ListRolesResponse"); @@ -584,7 
+584,7 @@ void RGWPutRolePolicy::execute(optional_yield y) return; } - if (!store->is_meta_master()) { + if (!driver->is_meta_master()) { RGWXMLDecoder::XMLParser parser; if (!parser.init()) { ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl; @@ -607,7 +607,7 @@ void RGWPutRolePolicy::execute(optional_yield y) RGWAccessKey cred = it->second; key.key = cred.key; } - op_ret = store->forward_iam_request_to_master(s, key, nullptr, bl_post_body, &parser, s->info, y); + op_ret = driver->forward_iam_request_to_master(s, key, nullptr, bl_post_body, &parser, s->info, y); if (op_ret < 0) { ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << op_ret << dendl; return; @@ -717,7 +717,7 @@ void RGWDeleteRolePolicy::execute(optional_yield y) return; } - if (!store->is_meta_master()) { + if (!driver->is_meta_master()) { RGWXMLDecoder::XMLParser parser; if (!parser.init()) { ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl; @@ -739,7 +739,7 @@ void RGWDeleteRolePolicy::execute(optional_yield y) RGWAccessKey cred = it->second; key.key = cred.key; } - op_ret = store->forward_iam_request_to_master(s, key, nullptr, bl_post_body, &parser, s->info, y); + op_ret = driver->forward_iam_request_to_master(s, key, nullptr, bl_post_body, &parser, s->info, y); if (op_ret < 0) { ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << op_ret << dendl; return; @@ -786,7 +786,7 @@ void RGWTagRole::execute(optional_yield y) return; } - if (!store->is_meta_master()) { + if (!driver->is_meta_master()) { RGWXMLDecoder::XMLParser parser; if (!parser.init()) { ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl; @@ -813,7 +813,7 @@ void RGWTagRole::execute(optional_yield y) RGWAccessKey cred = it->second; key.key = cred.key; } - op_ret = store->forward_iam_request_to_master(s, key, nullptr, bl_post_body, &parser, s->info, y); + op_ret = 
driver->forward_iam_request_to_master(s, key, nullptr, bl_post_body, &parser, s->info, y); if (op_ret < 0) { ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << op_ret << dendl; return; @@ -900,7 +900,7 @@ void RGWUntagRole::execute(optional_yield y) return; } - if (!store->is_meta_master()) { + if (!driver->is_meta_master()) { RGWXMLDecoder::XMLParser parser; if (!parser.init()) { ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl; @@ -931,7 +931,7 @@ void RGWUntagRole::execute(optional_yield y) RGWAccessKey cred = it->second; key.key = cred.key; } - op_ret = store->forward_iam_request_to_master(s, key, nullptr, bl_post_body, &parser, s->info, y); + op_ret = driver->forward_iam_request_to_master(s, key, nullptr, bl_post_body, &parser, s->info, y); if (op_ret < 0) { ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << op_ret << dendl; return; @@ -970,7 +970,7 @@ void RGWUpdateRole::execute(optional_yield y) return; } - if (!store->is_meta_master()) { + if (!driver->is_meta_master()) { RGWXMLDecoder::XMLParser parser; if (!parser.init()) { ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl; @@ -992,7 +992,7 @@ void RGWUpdateRole::execute(optional_yield y) RGWAccessKey cred = it->second; key.key = cred.key; } - op_ret = store->forward_iam_request_to_master(s, key, nullptr, bl_post_body, &parser, s->info, y); + op_ret = driver->forward_iam_request_to_master(s, key, nullptr, bl_post_body, &parser, s->info, y); if (op_ret < 0) { ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << op_ret << dendl; return; @@ -1013,4 +1013,4 @@ void RGWUpdateRole::execute(optional_yield y) s->formatter->dump_string("RequestId", s->trans_id); s->formatter->close_section(); s->formatter->close_section(); -} \ No newline at end of file +} diff --git a/src/rgw/rgw_rest_s3.cc b/src/rgw/rgw_rest_s3.cc index 2576e31b7eec2..6a2ec4a0b9f6e 100644 
--- a/src/rgw/rgw_rest_s3.cc +++ b/src/rgw/rgw_rest_s3.cc @@ -788,7 +788,7 @@ int RGWPutBucketTags_ObjStore_S3::get_params(const DoutPrefixProvider *dpp, opti ldpp_dout(dpp, 20) << "Read " << obj_tags.count() << "tags" << dendl; // forward bucket tags requests to meta master zone - if (!store->is_meta_master()) { + if (!driver->is_meta_master()) { /* only need to keep this data around if we're not meta master */ in_data = std::move(data); } @@ -1058,13 +1058,13 @@ struct ReplicationConfiguration { } }; - set get_zone_ids_from_names(rgw::sal::Store* store, + set get_zone_ids_from_names(rgw::sal::Driver* driver, const vector& zone_names) const { set ids; for (auto& name : zone_names) { std::unique_ptr zone; - int ret = store->get_zone()->get_zonegroup().get_zone_by_name(name, &zone); + int ret = driver->get_zone()->get_zonegroup().get_zone_by_name(name, &zone); if (ret >= 0) { rgw_zone_id id = zone->get_id(); ids.insert(std::move(id)); @@ -1074,13 +1074,13 @@ struct ReplicationConfiguration { return ids; } - vector get_zone_names_from_ids(rgw::sal::Store* store, + vector get_zone_names_from_ids(rgw::sal::Driver* driver, const set& zone_ids) const { vector names; for (auto& id : zone_ids) { std::unique_ptr zone; - int ret = store->get_zone()->get_zonegroup().get_zone_by_id(id.id, &zone); + int ret = driver->get_zone()->get_zonegroup().get_zone_by_id(id.id, &zone); if (ret >= 0) { names.emplace_back(zone->get_name()); } @@ -1145,7 +1145,7 @@ struct ReplicationConfiguration { return true; } - int to_sync_policy_pipe(req_state *s, rgw::sal::Store* store, + int to_sync_policy_pipe(req_state *s, rgw::sal::Driver* driver, rgw_sync_bucket_pipes *pipe, bool *enabled) const { if (!is_valid(s->cct)) { @@ -1161,12 +1161,12 @@ struct ReplicationConfiguration { destination.bucket); if (source && !source->zone_names.empty()) { - pipe->source.zones = get_zone_ids_from_names(store, source->zone_names); + pipe->source.zones = get_zone_ids_from_names(driver, source->zone_names); } else 
{ pipe->source.set_all_zones(true); } if (!destination.zone_names.empty()) { - pipe->dest.zones = get_zone_ids_from_names(store, destination.zone_names); + pipe->dest.zones = get_zone_ids_from_names(driver, destination.zone_names); } else { pipe->dest.set_all_zones(true); } @@ -1196,7 +1196,7 @@ struct ReplicationConfiguration { return 0; } - void from_sync_policy_pipe(rgw::sal::Store* store, + void from_sync_policy_pipe(rgw::sal::Driver* driver, const rgw_sync_bucket_pipes& pipe, bool enabled) { id = pipe.id; @@ -1207,12 +1207,12 @@ struct ReplicationConfiguration { source.reset(); } else if (pipe.source.zones) { source.emplace(); - source->zone_names = get_zone_names_from_ids(store, *pipe.source.zones); + source->zone_names = get_zone_names_from_ids(driver, *pipe.source.zones); } if (!pipe.dest.all_zones && pipe.dest.zones) { - destination.zone_names = get_zone_names_from_ids(store, *pipe.dest.zones); + destination.zone_names = get_zone_names_from_ids(driver, *pipe.dest.zones); } if (pipe.params.dest.acl_translation) { @@ -1249,7 +1249,7 @@ struct ReplicationConfiguration { encode_xml("Rule", rules, f); } - int to_sync_policy_groups(req_state *s, rgw::sal::Store* store, + int to_sync_policy_groups(req_state *s, rgw::sal::Driver* driver, vector *result) const { result->resize(2); @@ -1264,7 +1264,7 @@ struct ReplicationConfiguration { for (auto& rule : rules) { rgw_sync_bucket_pipes pipe; bool enabled; - int r = rule.to_sync_policy_pipe(s, store, &pipe, &enabled); + int r = rule.to_sync_policy_pipe(s, driver, &pipe, &enabled); if (r < 0) { ldpp_dout(s, 5) << "NOTICE: failed to convert replication configuration into sync policy pipe (rule.id=" << rule.id << "): " << cpp_strerror(-r) << dendl; return r; @@ -1279,14 +1279,14 @@ struct ReplicationConfiguration { return 0; } - void from_sync_policy_group(rgw::sal::Store* store, + void from_sync_policy_group(rgw::sal::Driver* driver, const rgw_sync_policy_group& group) { bool enabled = (group.status == 
rgw_sync_policy_group::Status::ENABLED); for (auto& pipe : group.pipes) { auto& rule = rules.emplace_back(); - rule.from_sync_policy_pipe(store, pipe, enabled); + rule.from_sync_policy_pipe(driver, pipe, enabled); } } }; @@ -1308,11 +1308,11 @@ void RGWGetBucketReplication_ObjStore_S3::send_response_data() auto iter = policy->groups.find(enabled_group_id); if (iter != policy->groups.end()) { - conf.from_sync_policy_group(store, iter->second); + conf.from_sync_policy_group(driver, iter->second); } iter = policy->groups.find(disabled_group_id); if (iter != policy->groups.end()) { - conf.from_sync_policy_group(store, iter->second); + conf.from_sync_policy_group(driver, iter->second); } } @@ -1354,13 +1354,13 @@ int RGWPutBucketReplication_ObjStore_S3::get_params(optional_yield y) return -ERR_MALFORMED_XML; } - r = conf.to_sync_policy_groups(s, store, &sync_policy_groups); + r = conf.to_sync_policy_groups(s, driver, &sync_policy_groups); if (r < 0) { return r; } // forward requests to meta master zone - if (!store->is_meta_master()) { + if (!driver->is_meta_master()) { /* only need to keep this data around if we're not meta master */ in_data = std::move(data); } @@ -2051,7 +2051,7 @@ void RGWGetBucketLocation_ObjStore_S3::send_response() std::unique_ptr zonegroup; string api_name; - int ret = store->get_zonegroup(s->bucket->get_info().zonegroup, &zonegroup); + int ret = driver->get_zonegroup(s->bucket->get_info().zonegroup, &zonegroup); if (ret >= 0) { api_name = zonegroup->get_api_name(); } else { @@ -2147,7 +2147,7 @@ int RGWSetBucketVersioning_ObjStore_S3::get_params(optional_yield y) return -EINVAL; } - if (!store->is_meta_master()) { + if (!driver->is_meta_master()) { /* only need to keep this data around if we're not meta master */ in_data.append(data); } @@ -2319,7 +2319,7 @@ void RGWStatBucket_ObjStore_S3::send_response() dump_start(s); } -static int create_s3_policy(req_state *s, rgw::sal::Store* store, +static int create_s3_policy(req_state *s, 
rgw::sal::Driver* driver, RGWAccessControlPolicy_S3& s3policy, ACLOwner& owner) { @@ -2327,7 +2327,7 @@ static int create_s3_policy(req_state *s, rgw::sal::Store* store, if (!s->canned_acl.empty()) return -ERR_INVALID_REQUEST; - return s3policy.create_from_headers(s, store, s->info.env, owner); + return s3policy.create_from_headers(s, driver, s->info.env, owner); } return s3policy.create_canned(owner, s->bucket_owner, s->canned_acl); @@ -2393,7 +2393,7 @@ int RGWCreateBucket_ObjStore_S3::get_params(optional_yield y) if (r) return r; } - r = create_s3_policy(s, store, s3policy, s->owner); + r = create_s3_policy(s, driver, s3policy, s->owner); if (r < 0) return r; @@ -2519,7 +2519,7 @@ int RGWPutObj_ObjStore_S3::get_params(optional_yield y) } RGWAccessControlPolicy_S3 s3policy(s->cct); - ret = create_s3_policy(s, store, s3policy, s->owner); + ret = create_s3_policy(s, driver, s3policy, s->owner); if (ret < 0) return ret; @@ -2872,7 +2872,7 @@ int RGWPostObj_ObjStore_S3::get_params(optional_yield y) return -EINVAL; } - s->object = store->get_object(rgw_obj_key(object_str)); + s->object = driver->get_object(rgw_obj_key(object_str)); rebuild_key(s->object.get()); @@ -2895,7 +2895,7 @@ int RGWPostObj_ObjStore_S3::get_params(optional_yield y) if (! 
storage_class.empty()) { s->dest_placement.storage_class = storage_class; - if (!store->valid_placement(s->dest_placement)) { + if (!driver->valid_placement(s->dest_placement)) { ldpp_dout(this, 0) << "NOTICE: invalid dest placement: " << s->dest_placement.to_str() << dendl; err_msg = "The storage class you specified is not valid"; return -EINVAL; @@ -3361,7 +3361,7 @@ int RGWCopyObj_ObjStore_S3::init_dest_policy() RGWAccessControlPolicy_S3 s3policy(s->cct); /* build a policy for the target object */ - int r = create_s3_policy(s, store, s3policy, s->owner); + int r = create_s3_policy(s, driver, s3policy, s->owner); if (r < 0) return r; @@ -3532,7 +3532,7 @@ int RGWPutACLs_ObjStore_S3::get_params(optional_yield y) return ret; } -int RGWPutACLs_ObjStore_S3::get_policy_from_state(rgw::sal::Store* store, +int RGWPutACLs_ObjStore_S3::get_policy_from_state(rgw::sal::Driver* driver, req_state *s, stringstream& ss) { @@ -3544,7 +3544,7 @@ int RGWPutACLs_ObjStore_S3::get_policy_from_state(rgw::sal::Store* store, s->canned_acl.clear(); } - int r = create_s3_policy(s, store, s3policy, owner); + int r = create_s3_policy(s, driver, s3policy, owner); if (r < 0) return r; @@ -3693,7 +3693,7 @@ int RGWPutCORS_ObjStore_S3::get_params(optional_yield y) } // forward bucket cors requests to meta master zone - if (!store->is_meta_master()) { + if (!driver->is_meta_master()) { /* only need to keep this data around if we're not meta master */ in_data.append(data); } @@ -3877,7 +3877,7 @@ void RGWSetRequestPayment_ObjStore_S3::send_response() int RGWInitMultipart_ObjStore_S3::get_params(optional_yield y) { RGWAccessControlPolicy_S3 s3policy(s->cct); - op_ret = create_s3_policy(s, store, s3policy, s->owner); + op_ret = create_s3_policy(s, driver, s3policy, s->owner); if (op_ret < 0) return op_ret; @@ -4490,7 +4490,7 @@ RGWOp *RGWHandler_REST_Service_S3::op_post() if (isSTSEnabled) { RGWHandler_REST_STS sts_handler(auth_registry, post_body); - sts_handler.init(store, s, s->cio); + 
sts_handler.init(driver, s, s->cio); auto op = sts_handler.get_op(); if (op) { return op; @@ -4499,7 +4499,7 @@ RGWOp *RGWHandler_REST_Service_S3::op_post() if (isIAMEnabled) { RGWHandler_REST_IAM iam_handler(auth_registry, data); - iam_handler.init(store, s, s->cio); + iam_handler.init(driver, s, s->cio); auto op = iam_handler.get_op(); if (op) { return op; @@ -4508,7 +4508,7 @@ RGWOp *RGWHandler_REST_Service_S3::op_post() if (isPSEnabled) { RGWHandler_REST_PSTopic_AWS topic_handler(auth_registry, post_body); - topic_handler.init(store, s, s->cio); + topic_handler.init(driver, s, s->cio); auto op = topic_handler.get_op(); if (op) { return op; @@ -4634,7 +4634,7 @@ RGWOp *RGWHandler_REST_Bucket_S3::op_put() return RGWHandler_REST_PSNotifs_S3::create_put_op(); } else if (is_replication_op()) { RGWBucketSyncPolicyHandlerRef sync_policy_handler; - int ret = store->get_sync_policy_handler(s, nullopt, nullopt, + int ret = driver->get_sync_policy_handler(s, nullopt, nullopt, &sync_policy_handler, null_yield); if (ret < 0 || !sync_policy_handler || sync_policy_handler->is_legacy_config()) { @@ -4791,7 +4791,7 @@ RGWOp *RGWHandler_REST_Obj_S3::op_options() return new RGWOptionsCORS_ObjStore_S3; } -int RGWHandler_REST_S3::init_from_header(rgw::sal::Store* store, +int RGWHandler_REST_S3::init_from_header(rgw::sal::Driver* driver, req_state* s, RGWFormat default_formatter, bool configurable_format) @@ -4856,14 +4856,14 @@ int RGWHandler_REST_S3::init_from_header(rgw::sal::Store* store, if (s->bucket) { s->object = s->bucket->get_object(rgw_obj_key(encoded_obj_str, s->info.args.get("versionId"))); } else { - s->object = store->get_object(rgw_obj_key(encoded_obj_str, s->info.args.get("versionId"))); + s->object = driver->get_object(rgw_obj_key(encoded_obj_str, s->info.args.get("versionId"))); } } } else { if (s->bucket) { s->object = s->bucket->get_object(rgw_obj_key(req_name, s->info.args.get("versionId"))); } else { - s->object = store->get_object(rgw_obj_key(req_name, 
s->info.args.get("versionId"))); + s->object = driver->get_object(rgw_obj_key(req_name, s->info.args.get("versionId"))); } } return 0; @@ -4915,7 +4915,7 @@ int RGWHandler_REST_S3::postauth_init(optional_yield y) return 0; } -int RGWHandler_REST_S3::init(rgw::sal::Store* store, req_state *s, +int RGWHandler_REST_S3::init(rgw::sal::Driver* driver, req_state *s, rgw::io::BasicClient *cio) { int ret; @@ -4951,7 +4951,7 @@ int RGWHandler_REST_S3::init(rgw::sal::Store* store, req_state *s, ldpp_dout(s, 0) << "failed to parse copy location" << dendl; return -EINVAL; // XXX why not -ERR_INVALID_BUCKET_NAME or -ERR_BAD_URL? } - s->src_object = store->get_object(key); + s->src_object = driver->get_object(key); } const char *sc = s->info.env->get("HTTP_X_AMZ_STORAGE_CLASS"); @@ -4959,15 +4959,15 @@ int RGWHandler_REST_S3::init(rgw::sal::Store* store, req_state *s, s->info.storage_class = sc; } - return RGWHandler_REST::init(store, s, cio); + return RGWHandler_REST::init(driver, s, cio); } int RGWHandler_REST_S3::authorize(const DoutPrefixProvider *dpp, optional_yield y) { if (s->info.args.exists("Action") && s->info.args.get("Action") == "AssumeRoleWithWebIdentity") { - return RGW_Auth_STS::authorize(dpp, store, auth_registry, s, y); + return RGW_Auth_STS::authorize(dpp, driver, auth_registry, s, y); } - return RGW_Auth_S3::authorize(dpp, store, auth_registry, s, y); + return RGW_Auth_S3::authorize(dpp, driver, auth_registry, s, y); } enum class AwsVersion { @@ -5025,15 +5025,15 @@ discover_aws_flavour(const req_info& info) * it tries AWS v4 before AWS v2 */ int RGW_Auth_S3::authorize(const DoutPrefixProvider *dpp, - rgw::sal::Store* const store, + rgw::sal::Driver* const driver, const rgw::auth::StrategyRegistry& auth_registry, req_state* const s, optional_yield y) { /* neither keystone and rados enabled; warn and exit! 
*/ - if (!store->ctx()->_conf->rgw_s3_auth_use_rados && - !store->ctx()->_conf->rgw_s3_auth_use_keystone && - !store->ctx()->_conf->rgw_s3_auth_use_ldap) { + if (!driver->ctx()->_conf->rgw_s3_auth_use_rados && + !driver->ctx()->_conf->rgw_s3_auth_use_keystone && + !driver->ctx()->_conf->rgw_s3_auth_use_ldap) { ldpp_dout(dpp, 0) << "WARNING: no authorization backend enabled! Users will never authenticate." << dendl; return -EPERM; } @@ -5047,24 +5047,24 @@ int RGW_Auth_S3::authorize(const DoutPrefixProvider *dpp, return ret; } -int RGWHandler_Auth_S3::init(rgw::sal::Store* store, req_state *state, +int RGWHandler_Auth_S3::init(rgw::sal::Driver* driver, req_state *state, rgw::io::BasicClient *cio) { - int ret = RGWHandler_REST_S3::init_from_header(store, state, RGWFormat::JSON, true); + int ret = RGWHandler_REST_S3::init_from_header(driver, state, RGWFormat::JSON, true); if (ret < 0) return ret; - return RGWHandler_REST::init(store, state, cio); + return RGWHandler_REST::init(driver, state, cio); } -RGWHandler_REST* RGWRESTMgr_S3::get_handler(rgw::sal::Store* store, +RGWHandler_REST* RGWRESTMgr_S3::get_handler(rgw::sal::Driver* driver, req_state* const s, const rgw::auth::StrategyRegistry& auth_registry, const std::string& frontend_prefix) { bool is_s3website = enable_s3website && (s->prot_flags & RGW_REST_WEBSITE); int ret = - RGWHandler_REST_S3::init_from_header(store, s, + RGWHandler_REST_S3::init_from_header(driver, s, is_s3website ? 
RGWFormat::HTML : RGWFormat::XML, true); if (ret < 0) @@ -5124,7 +5124,7 @@ bool RGWHandler_REST_S3Website::web_dir() const { return state->exists; } -int RGWHandler_REST_S3Website::init(rgw::sal::Store* store, req_state *s, +int RGWHandler_REST_S3Website::init(rgw::sal::Driver* driver, req_state *s, rgw::io::BasicClient* cio) { // save the original object name before retarget() replaces it with the @@ -5136,7 +5136,7 @@ int RGWHandler_REST_S3Website::init(rgw::sal::Store* store, req_state *s, original_object_name = ""; } - return RGWHandler_REST_S3::init(store, s, cio); + return RGWHandler_REST_S3::init(driver, s, cio); } int RGWHandler_REST_S3Website::retarget(RGWOp* op, RGWOp** new_op, optional_yield y) { @@ -5220,7 +5220,7 @@ int RGWHandler_REST_S3Website::serve_errordoc(const DoutPrefixProvider *dpp, int if (getop.get() == NULL) { return -1; // Trigger double error handler } - getop->init(store, s, this); + getop->init(driver, s, this); getop->range_str = NULL; getop->if_mod = NULL; getop->if_unmod = NULL; @@ -5229,7 +5229,7 @@ int RGWHandler_REST_S3Website::serve_errordoc(const DoutPrefixProvider *dpp, int /* This is okay. It's an error, so nothing will run after this, and it can be * called by abort_early(), which can be called before s->object or s->bucket * are set up. Note, it won't have bucket. */ - s->object = store->get_object(errordoc_key); + s->object = driver->get_object(errordoc_key); ret = init_permissions(getop.get(), y); if (ret < 0) { @@ -6022,7 +6022,7 @@ rgw::auth::s3::LDAPEngine::authenticate( //return error. 
/*RGWUserInfo user_info; user_info.user_id = base64_token.id; - if (rgw_get_user_info_by_uid(store, user_info.user_id, user_info) >= 0) { + if (rgw_get_user_info_by_uid(driver, user_info.user_id, user_info) >= 0) { if (user_info.type != TYPE_LDAP) { ldpp_dout(dpp, 10) << "ERROR: User id of type: " << user_info.type << " is already present" << dendl; return nullptr; @@ -6062,7 +6062,7 @@ rgw::auth::s3::LocalEngine::authenticate( std::unique_ptr user; const std::string access_key_id(_access_key_id); /* TODO(rzarzynski): we need to have string-view taking variant. */ - if (store->get_user_by_access_key(dpp, access_key_id, y, &user) < 0) { + if (driver->get_user_by_access_key(dpp, access_key_id, y, &user) < 0) { ldpp_dout(dpp, 5) << "error reading user info, uid=" << access_key_id << " can't authenticate" << dendl; return result_t::deny(-ERR_INVALID_ACCESS_KEY); @@ -6239,7 +6239,7 @@ rgw::auth::s3::STSEngine::authenticate( rgw::auth::RoleApplier::Role r; rgw::auth::RoleApplier::TokenAttrs t_attrs; if (! token.roleId.empty()) { - std::unique_ptr role = store->get_role(token.roleId); + std::unique_ptr role = driver->get_role(token.roleId); if (role->get_by_id(dpp, y) < 0) { return result_t::deny(-EPERM); } @@ -6256,7 +6256,7 @@ rgw::auth::s3::STSEngine::authenticate( } } - user = store->get_user(token.user); + user = driver->get_user(token.user); if (! 
token.user.empty() && token.acct_type != TYPE_ROLE) { // get user info int ret = user->load_user(dpp, y); diff --git a/src/rgw/rgw_rest_s3.h b/src/rgw/rgw_rest_s3.h index 6b8ef5392507f..7c1829aa7eb70 100644 --- a/src/rgw/rgw_rest_s3.h +++ b/src/rgw/rgw_rest_s3.h @@ -355,7 +355,7 @@ public: RGWPutACLs_ObjStore_S3() {} ~RGWPutACLs_ObjStore_S3() override {} - int get_policy_from_state(rgw::sal::Store* store, req_state *s, std::stringstream& ss) override; + int get_policy_from_state(rgw::sal::Driver* driver, req_state *s, std::stringstream& ss) override; void send_response() override; int get_params(optional_yield y) override; }; @@ -616,7 +616,7 @@ public: class RGW_Auth_S3 { public: static int authorize(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, const rgw::auth::StrategyRegistry& auth_registry, req_state *s, optional_yield y); }; @@ -636,11 +636,11 @@ public: static int validate_bucket_name(const std::string& bucket); static int validate_object_name(const std::string& bucket); - int init(rgw::sal::Store* store, + int init(rgw::sal::Driver* driver, req_state *s, rgw::io::BasicClient *cio) override; int authorize(const DoutPrefixProvider *dpp, optional_yield y) override { - return RGW_Auth_S3::authorize(dpp, store, auth_registry, s, y); + return RGW_Auth_S3::authorize(dpp, driver, auth_registry, s, y); } int postauth_init(optional_yield) override { return 0; } }; @@ -650,7 +650,7 @@ class RGWHandler_REST_S3 : public RGWHandler_REST { protected: const rgw::auth::StrategyRegistry& auth_registry; public: - static int init_from_header(rgw::sal::Store* store, req_state *s, RGWFormat default_formatter, + static int init_from_header(rgw::sal::Driver* driver, req_state *s, RGWFormat default_formatter, bool configurable_format); explicit RGWHandler_REST_S3(const rgw::auth::StrategyRegistry& auth_registry) @@ -659,7 +659,7 @@ public: } ~RGWHandler_REST_S3() override = default; - int init(rgw::sal::Store* store, + int 
init(rgw::sal::Driver* driver, req_state *s, rgw::io::BasicClient *cio) override; int authorize(const DoutPrefixProvider *dpp, optional_yield y) override; @@ -794,7 +794,7 @@ public: ~RGWRESTMgr_S3() override = default; - RGWHandler_REST *get_handler(rgw::sal::Store* store, + RGWHandler_REST *get_handler(rgw::sal::Driver* driver, req_state* s, const rgw::auth::StrategyRegistry& auth_registry, const std::string& frontend_prefix) override; @@ -1093,7 +1093,7 @@ class LDAPEngine : public AWSEngine { using result_t = rgw::auth::Engine::result_t; protected: - rgw::sal::Store* store; + rgw::sal::Driver* driver; const rgw::auth::RemoteApplier::Factory* const apl_factory; acl_strategy_t get_acl_strategy() const; @@ -1110,11 +1110,11 @@ protected: optional_yield y) const override; public: LDAPEngine(CephContext* const cct, - rgw::sal::Store* store, + rgw::sal::Driver* driver, const VersionAbstractor& ver_abstractor, const rgw::auth::RemoteApplier::Factory* const apl_factory) : AWSEngine(cct, ver_abstractor), - store(store), + driver(driver), apl_factory(apl_factory) { init(cct); } @@ -1130,7 +1130,7 @@ public: }; class LocalEngine : public AWSEngine { - rgw::sal::Store* store; + rgw::sal::Driver* driver; const rgw::auth::LocalApplier::Factory* const apl_factory; result_t authenticate(const DoutPrefixProvider* dpp, @@ -1144,11 +1144,11 @@ class LocalEngine : public AWSEngine { optional_yield y) const override; public: LocalEngine(CephContext* const cct, - rgw::sal::Store* store, + rgw::sal::Driver* driver, const VersionAbstractor& ver_abstractor, const rgw::auth::LocalApplier::Factory* const apl_factory) : AWSEngine(cct, ver_abstractor), - store(store), + driver(driver), apl_factory(apl_factory) { } @@ -1160,7 +1160,7 @@ public: }; class STSEngine : public AWSEngine { - rgw::sal::Store* store; + rgw::sal::Driver* driver; const rgw::auth::LocalApplier::Factory* const local_apl_factory; const rgw::auth::RemoteApplier::Factory* const remote_apl_factory; const 
rgw::auth::RoleApplier::Factory* const role_apl_factory; @@ -1185,13 +1185,13 @@ class STSEngine : public AWSEngine { optional_yield y) const override; public: STSEngine(CephContext* const cct, - rgw::sal::Store* store, + rgw::sal::Driver* driver, const VersionAbstractor& ver_abstractor, const rgw::auth::LocalApplier::Factory* const local_apl_factory, const rgw::auth::RemoteApplier::Factory* const remote_apl_factory, const rgw::auth::RoleApplier::Factory* const role_apl_factory) : AWSEngine(cct, ver_abstractor), - store(store), + driver(driver), local_apl_factory(local_apl_factory), remote_apl_factory(remote_apl_factory), role_apl_factory(role_apl_factory) { diff --git a/src/rgw/rgw_rest_s3website.h b/src/rgw/rgw_rest_s3website.h index 33d816aed30a1..03bc64e47e4b3 100644 --- a/src/rgw/rgw_rest_s3website.h +++ b/src/rgw/rgw_rest_s3website.h @@ -40,7 +40,7 @@ public: using RGWHandler_REST_S3::RGWHandler_REST_S3; ~RGWHandler_REST_S3Website() override = default; - int init(rgw::sal::Store* store, req_state *s, rgw::io::BasicClient* cio) override; + int init(rgw::sal::Driver* driver, req_state *s, rgw::io::BasicClient* cio) override; int error_handler(int err_no, std::string *error_content, optional_yield y) override; }; diff --git a/src/rgw/rgw_rest_sts.cc b/src/rgw/rgw_rest_sts.cc index 026f9a28934fc..5ae85fa9efcbe 100644 --- a/src/rgw/rgw_rest_sts.cc +++ b/src/rgw/rgw_rest_sts.cc @@ -101,7 +101,7 @@ WebTokenEngine::get_provider(const DoutPrefixProvider *dpp, const string& role_a } auto provider_arn = rgw::ARN(idp_url, "oidc-provider", tenant); string p_arn = provider_arn.to_string(); - std::unique_ptr provider = store->get_oidc_provider(); + std::unique_ptr provider = driver->get_oidc_provider(); provider->set_arn(p_arn); provider->set_tenant(tenant); auto ret = provider->get(dpp); @@ -490,7 +490,7 @@ WebTokenEngine::authenticate( const DoutPrefixProvider* dpp, string role_arn = s->info.args.get("RoleArn"); string role_tenant = get_role_tenant(role_arn); string 
role_name = get_role_name(role_arn); - std::unique_ptr role = store->get_role(role_name, role_tenant); + std::unique_ptr role = driver->get_role(role_name, role_tenant); int ret = role->get(dpp, y); if (ret < 0) { ldpp_dout(dpp, 0) << "Role not found: name:" << role_name << " tenant: " << role_tenant << dendl; @@ -511,7 +511,7 @@ WebTokenEngine::authenticate( const DoutPrefixProvider* dpp, int RGWREST_STS::verify_permission(optional_yield y) { - STS::STSService _sts(s->cct, store, s->user->get_id(), s->auth.identity.get()); + STS::STSService _sts(s->cct, driver, s->user->get_id(), s->auth.identity.get()); sts = std::move(_sts); string rArn = s->info.args.get("RoleArn"); @@ -608,7 +608,7 @@ void RGWSTSGetSessionToken::execute(optional_yield y) return; } - STS::STSService sts(s->cct, store, s->user->get_id(), s->auth.identity.get()); + STS::STSService sts(s->cct, driver, s->user->get_id(), s->auth.identity.get()); STS::GetSessionTokenRequest req(duration, serialNumber, tokenCode); const auto& [ret, creds] = sts.getSessionToken(this, req); @@ -741,7 +741,7 @@ void RGWSTSAssumeRole::execute(optional_yield y) } int RGW_Auth_STS::authorize(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, const rgw::auth::StrategyRegistry& auth_registry, req_state *s, optional_yield y) { @@ -787,7 +787,7 @@ RGWOp *RGWHandler_REST_STS::op_post() return nullptr; } -int RGWHandler_REST_STS::init(rgw::sal::Store* store, +int RGWHandler_REST_STS::init(rgw::sal::Driver* driver, req_state *s, rgw::io::BasicClient *cio) { @@ -798,15 +798,15 @@ int RGWHandler_REST_STS::init(rgw::sal::Store* store, return ret; } - return RGWHandler_REST::init(store, s, cio); + return RGWHandler_REST::init(driver, s, cio); } int RGWHandler_REST_STS::authorize(const DoutPrefixProvider* dpp, optional_yield y) { if (s->info.args.exists("Action") && s->info.args.get("Action") == "AssumeRoleWithWebIdentity") { - return RGW_Auth_STS::authorize(dpp, store, auth_registry, s, y); + return 
RGW_Auth_STS::authorize(dpp, driver, auth_registry, s, y); } - return RGW_Auth_S3::authorize(dpp, store, auth_registry, s, y); + return RGW_Auth_S3::authorize(dpp, driver, auth_registry, s, y); } int RGWHandler_REST_STS::init_from_header(req_state* s, @@ -852,7 +852,7 @@ int RGWHandler_REST_STS::init_from_header(req_state* s, } RGWHandler_REST* -RGWRESTMgr_STS::get_handler(rgw::sal::Store* store, +RGWRESTMgr_STS::get_handler(rgw::sal::Driver* driver, req_state* const s, const rgw::auth::StrategyRegistry& auth_registry, const std::string& frontend_prefix) diff --git a/src/rgw/rgw_rest_sts.h b/src/rgw/rgw_rest_sts.h index a129074b48e94..9083bf64533f8 100644 --- a/src/rgw/rgw_rest_sts.h +++ b/src/rgw/rgw_rest_sts.h @@ -24,7 +24,7 @@ namespace rgw::auth::sts { class WebTokenEngine : public rgw::auth::Engine { static constexpr std::string_view princTagsNamespace = "https://aws.amazon.com/tags"; CephContext* const cct; - rgw::sal::Store* store; + rgw::sal::Driver* driver; using result_t = rgw::auth::Engine::result_t; using Pair = std::pair; @@ -63,11 +63,11 @@ class WebTokenEngine : public rgw::auth::Engine { public: WebTokenEngine(CephContext* const cct, - rgw::sal::Store* store, + rgw::sal::Driver* driver, const rgw::auth::TokenExtractor* const extractor, const rgw::auth::WebIdentityApplier::Factory* const apl_factory) : cct(cct), - store(store), + driver(driver), extractor(extractor), apl_factory(apl_factory) { } @@ -84,7 +84,7 @@ public: class DefaultStrategy : public rgw::auth::Strategy, public rgw::auth::TokenExtractor, public rgw::auth::WebIdentityApplier::Factory { - rgw::sal::Store* store; + rgw::sal::Driver* driver; ImplicitTenants& implicit_tenant_context; /* The engine. 
*/ @@ -104,18 +104,18 @@ class DefaultStrategy : public rgw::auth::Strategy, const std::unordered_multimap& token, boost::optional> role_tags, boost::optional>> principal_tags) const override { - auto apl = rgw::auth::add_sysreq(cct, store, s, - rgw::auth::WebIdentityApplier(cct, store, role_session, role_tenant, token, role_tags, principal_tags)); + auto apl = rgw::auth::add_sysreq(cct, driver, s, + rgw::auth::WebIdentityApplier(cct, driver, role_session, role_tenant, token, role_tags, principal_tags)); return aplptr_t(new decltype(apl)(std::move(apl))); } public: DefaultStrategy(CephContext* const cct, ImplicitTenants& implicit_tenant_context, - rgw::sal::Store* store) - : store(store), + rgw::sal::Driver* driver) + : driver(driver), implicit_tenant_context(implicit_tenant_context), - web_token_engine(cct, store, + web_token_engine(cct, driver, static_cast(this), static_cast(this)) { /* When the constructor's body is being executed, all member engines @@ -192,7 +192,7 @@ public: class RGW_Auth_STS { public: static int authorize(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, const rgw::auth::StrategyRegistry& auth_registry, req_state *s, optional_yield y); }; @@ -212,7 +212,7 @@ public: post_body(post_body) {} ~RGWHandler_REST_STS() override = default; - int init(rgw::sal::Store* store, + int init(rgw::sal::Driver* driver, req_state *s, rgw::io::BasicClient *cio) override; int authorize(const DoutPrefixProvider* dpp, optional_yield y) override; @@ -230,7 +230,7 @@ public: return this; } - RGWHandler_REST* get_handler(rgw::sal::Store* store, + RGWHandler_REST* get_handler(rgw::sal::Driver* driver, req_state*, const rgw::auth::StrategyRegistry&, const std::string&) override; diff --git a/src/rgw/rgw_rest_swift.cc b/src/rgw/rgw_rest_swift.cc index 6ae2fa7efd951..4e62c3bc15a27 100644 --- a/src/rgw/rgw_rest_swift.cc +++ b/src/rgw/rgw_rest_swift.cc @@ -587,7 +587,7 @@ void RGWStatBucket_ObjStore_SWIFT::send_response() } static int 
get_swift_container_settings(req_state * const s, - rgw::sal::Store* const store, + rgw::sal::Driver* const driver, RGWAccessControlPolicy * const policy, bool * const has_policy, uint32_t * rw_mask, @@ -601,7 +601,7 @@ static int get_swift_container_settings(req_state * const s, if (read_list || write_list) { RGWAccessControlPolicy_SWIFT swift_policy(s->cct); - const auto r = swift_policy.create(s, store, + const auto r = swift_policy.create(s, driver, s->user->get_id(), s->user->get_display_name(), read_list, @@ -704,7 +704,7 @@ int RGWCreateBucket_ObjStore_SWIFT::get_params(optional_yield y) bool has_policy; uint32_t policy_rw_mask = 0; - int r = get_swift_container_settings(s, store, &policy, &has_policy, + int r = get_swift_container_settings(s, driver, &policy, &has_policy, &policy_rw_mask, &cors_config, &has_cors); if (r < 0) { return r; @@ -714,7 +714,7 @@ int RGWCreateBucket_ObjStore_SWIFT::get_params(optional_yield y) policy.create_default(s->user->get_id(), s->user->get_display_name()); } - location_constraint = store->get_zone()->get_zonegroup().get_api_name(); + location_constraint = driver->get_zone()->get_zonegroup().get_api_name(); get_rmattrs_from_headers(s, CONT_PUT_ATTR_PREFIX, CONT_REMOVE_ATTR_PREFIX, rmattr_names); placement_rule.init(s->info.env->get("HTTP_X_STORAGE_POLICY", ""), s->info.storage_class); @@ -850,7 +850,7 @@ int RGWPutObj_ObjStore_SWIFT::update_slo_segment_size(rgw_slo_entry& entry) { std::unique_ptr bucket; if (bucket_name.compare(s->bucket->get_name()) != 0) { - r = store->get_bucket(s, s->user.get(), s->user->get_id().tenant, bucket_name, &bucket, s->yield); + r = driver->get_bucket(s, s->user.get(), s->user->get_id().tenant, bucket_name, &bucket, s->yield); if (r < 0) { ldpp_dout(this, 0) << "could not get bucket info for bucket=" << bucket_name << dendl; @@ -1044,7 +1044,7 @@ void RGWPutObj_ObjStore_SWIFT::send_response() } static int get_swift_account_settings(req_state * const s, - rgw::sal::Store* const store, + 
rgw::sal::Driver* const driver, RGWAccessControlPolicy_SWIFTAcct* const policy, bool * const has_policy) { @@ -1053,7 +1053,7 @@ static int get_swift_account_settings(req_state * const s, const char * const acl_attr = s->info.env->get("HTTP_X_ACCOUNT_ACCESS_CONTROL"); if (acl_attr) { RGWAccessControlPolicy_SWIFTAcct swift_acct_policy(s->cct); - const bool r = swift_acct_policy.create(s, store, + const bool r = swift_acct_policy.create(s, driver, s->user->get_id(), s->user->get_display_name(), string(acl_attr)); @@ -1075,7 +1075,7 @@ int RGWPutMetadataAccount_ObjStore_SWIFT::get_params(optional_yield y) } int ret = get_swift_account_settings(s, - store, + driver, // FIXME: we need to carry unique_ptr in generic class // and allocate appropriate ACL class in the ctor static_cast(&policy), @@ -1112,7 +1112,7 @@ int RGWPutMetadataBucket_ObjStore_SWIFT::get_params(optional_yield y) return -EINVAL; } - int r = get_swift_container_settings(s, store, &policy, &has_policy, + int r = get_swift_container_settings(s, driver, &policy, &has_policy, &policy_rw_mask, &cors_config, &has_cors); if (r < 0) { return r; @@ -1850,7 +1850,7 @@ void RGWInfo_ObjStore_SWIFT::execute(optional_yield y) s->formatter->close_section(); } else { - pair.second.list_data(*(s->formatter), s->cct->_conf, store); + pair.second.list_data(*(s->formatter), s->cct->_conf, driver); } } @@ -1870,7 +1870,7 @@ void RGWInfo_ObjStore_SWIFT::send_response() void RGWInfo_ObjStore_SWIFT::list_swift_data(Formatter& formatter, const ConfigProxy& config, - rgw::sal::Store* store) + rgw::sal::Driver* driver) { formatter.open_object_section("swift"); formatter.dump_int("max_file_size", config->rgw_max_put_size); @@ -1899,7 +1899,7 @@ void RGWInfo_ObjStore_SWIFT::list_swift_data(Formatter& formatter, } formatter.open_array_section("policies"); - const rgw::sal::ZoneGroup& zonegroup = store->get_zone()->get_zonegroup(); + const rgw::sal::ZoneGroup& zonegroup = driver->get_zone()->get_zonegroup(); std::set targets; if 
(zonegroup.get_placement_target_names(targets)) { @@ -1921,7 +1921,7 @@ void RGWInfo_ObjStore_SWIFT::list_swift_data(Formatter& formatter, void RGWInfo_ObjStore_SWIFT::list_tempauth_data(Formatter& formatter, const ConfigProxy& config, - rgw::sal::Store* store) + rgw::sal::Driver* driver) { formatter.open_object_section("tempauth"); formatter.dump_bool("account_acls", true); @@ -1929,7 +1929,7 @@ void RGWInfo_ObjStore_SWIFT::list_tempauth_data(Formatter& formatter, } void RGWInfo_ObjStore_SWIFT::list_tempurl_data(Formatter& formatter, const ConfigProxy& config, - rgw::sal::Store* store) + rgw::sal::Driver* driver) { formatter.open_object_section("tempurl"); formatter.open_array_section("methods"); @@ -1944,7 +1944,7 @@ void RGWInfo_ObjStore_SWIFT::list_tempurl_data(Formatter& formatter, void RGWInfo_ObjStore_SWIFT::list_slo_data(Formatter& formatter, const ConfigProxy& config, - rgw::sal::Store* store) + rgw::sal::Driver* driver) { formatter.open_object_section("slo"); formatter.dump_int("max_manifest_segments", config->rgw_max_slo_entries); @@ -1971,14 +1971,14 @@ bool RGWInfo_ObjStore_SWIFT::is_expired(const std::string& expires, const DoutPr } -void RGWFormPost::init(rgw::sal::Store* const store, +void RGWFormPost::init(rgw::sal::Driver* const driver, req_state* const s, RGWHandler* const dialect_handler) { prefix = std::move(s->object->get_name()); s->object->set_key(rgw_obj_key()); - return RGWPostObj_ObjStore::init(store, s, dialect_handler); + return RGWPostObj_ObjStore::init(driver, s, dialect_handler); } std::size_t RGWFormPost::get_max_file_size() /*const*/ @@ -2086,7 +2086,7 @@ void RGWFormPost::get_owner_info(const req_state* const s, const rgw_user uid(s->account_name); if (uid.tenant.empty()) { const rgw_user tenanted_uid(uid.id, uid.id); - user = store->get_user(tenanted_uid); + user = driver->get_user(tenanted_uid); if (user->load_user(s, s->yield) >= 0) { /* Succeeded. 
*/ @@ -2095,7 +2095,7 @@ void RGWFormPost::get_owner_info(const req_state* const s, } if (!found) { - user = store->get_user(uid); + user = driver->get_user(uid); if (user->load_user(s, s->yield) < 0) { throw -EPERM; } @@ -2104,7 +2104,7 @@ void RGWFormPost::get_owner_info(const req_state* const s, /* Need to get user info of bucket owner. */ std::unique_ptr bucket; - int ret = store->get_bucket(s, user.get(), user->get_tenant(), bucket_name, &bucket, s->yield); + int ret = driver->get_bucket(s, user.get(), user->get_tenant(), bucket_name, &bucket, s->yield); if (ret < 0) { throw ret; } @@ -2112,7 +2112,7 @@ void RGWFormPost::get_owner_info(const req_state* const s, ldpp_dout(this, 20) << "temp url user (bucket owner): " << bucket->get_info().owner << dendl; - user = store->get_user(bucket->get_info().owner); + user = driver->get_user(bucket->get_info().owner); if (user->load_user(s, s->yield) < 0) { throw -EPERM; } @@ -2172,7 +2172,7 @@ int RGWFormPost::get_params(optional_yield y) * only. They will be picked up by ::get_data(). */ break; } else { - /* Control part ahead. Receive, parse and store for later usage. */ + /* Control part ahead. Receive, parse and store for later usage. */ bool boundary; ret = read_data(part.data, s->cct->_conf->rgw_max_chunk_size, boundary, stream_done); @@ -2349,13 +2349,13 @@ int RGWSwiftWebsiteHandler::serve_errordoc(const int http_ret, class RGWGetErrorPage : public RGWGetObj_ObjStore_SWIFT { public: - RGWGetErrorPage(rgw::sal::Store* const store, + RGWGetErrorPage(rgw::sal::Driver* const driver, RGWHandler_REST* const handler, req_state* const s, const int http_ret) { /* Calling a virtual from the base class is safe as the subobject should * be properly initialized and we haven't overridden the init method.
*/ - init(store, s, handler); + init(driver, s, handler); set_get_data(true); set_custom_http_response(http_ret); } @@ -2367,7 +2367,7 @@ int RGWSwiftWebsiteHandler::serve_errordoc(const int http_ret, * fault situation by sending the original message. */ return 0; } - } get_errpage_op(store, handler, s, http_ret); + } get_errpage_op(driver, handler, s, http_ret); /* This is okay. It's an error, so nothing will run after this, and it can be * called by abort_early(), which can be called before s->object or s->bucket @@ -2375,12 +2375,12 @@ int RGWSwiftWebsiteHandler::serve_errordoc(const int http_ret, if (!rgw::sal::Bucket::empty(s->bucket.get())) { s->object = s->bucket->get_object(rgw_obj_key(std::to_string(http_ret) + error_doc)); } else { - s->object = store->get_object(rgw_obj_key(std::to_string(http_ret) + error_doc)); + s->object = driver->get_object(rgw_obj_key(std::to_string(http_ret) + error_doc)); } RGWOp* newop = &get_errpage_op; RGWRequest req(0); - return rgw_process_authenticated(handler, newop, &req, s, y, store, true); + return rgw_process_authenticated(handler, newop, &req, s, y, driver, true); } int RGWSwiftWebsiteHandler::error_handler(const int err_no, @@ -2631,7 +2631,7 @@ int RGWSwiftWebsiteHandler::retarget_bucket(RGWOp* op, RGWOp** new_op) if (op_override) { handler->put_op(op); - op_override->init(store, s, handler); + op_override->init(driver, s, handler); *new_op = op_override; } else { @@ -2670,7 +2670,7 @@ int RGWSwiftWebsiteHandler::retarget_object(RGWOp* op, RGWOp** new_op) if (op_override) { handler->put_op(op); - op_override->init(store, s, handler); + op_override->init(driver, s, handler); *new_op = op_override; } else { @@ -2815,7 +2815,7 @@ int RGWHandler_REST_SWIFT::postauth_init(optional_yield y) if (!s->object) { /* Need an object, even an empty one */ - s->object = store->get_object(rgw_obj_key()); + s->object = driver->get_object(rgw_obj_key()); } ldpp_dout(s, 10) << "s->object=" << @@ -2908,7 +2908,7 @@ static void 
next_tok(string& str, string& tok, char delim) } } -int RGWHandler_REST_SWIFT::init_from_header(rgw::sal::Store* store, +int RGWHandler_REST_SWIFT::init_from_header(rgw::sal::Driver* driver, req_state* const s, const std::string& frontend_prefix) { @@ -3026,7 +3026,7 @@ int RGWHandler_REST_SWIFT::init_from_header(rgw::sal::Store* store, s->init_state.url_bucket = first; if (req.size()) { - s->object = store->get_object( + s->object = driver->get_object( rgw_obj_key(req, s->info.env->get("HTTP_X_OBJECT_VERSION_ID", ""))); /* rgw swift extension */ s->info.effective_uri.append("/" + s->object->get_name()); } @@ -3034,7 +3034,7 @@ int RGWHandler_REST_SWIFT::init_from_header(rgw::sal::Store* store, return 0; } -int RGWHandler_REST_SWIFT::init(rgw::sal::Store* store, req_state* s, +int RGWHandler_REST_SWIFT::init(rgw::sal::Driver* driver, req_state* s, rgw::io::BasicClient *cio) { struct req_init_state *t = &s->init_state; @@ -3047,7 +3047,7 @@ int RGWHandler_REST_SWIFT::init(rgw::sal::Store* store, req_state* s, bool result = RGWCopyObj::parse_copy_location(copy_source, t->src_bucket, key, s); if (!result) return -ERR_BAD_URL; - s->src_object = store->get_object(key); + s->src_object = driver->get_object(key); if (!s->src_object) return -ERR_BAD_URL; } @@ -3077,16 +3077,16 @@ int RGWHandler_REST_SWIFT::init(rgw::sal::Store* store, req_state* s, s->info.storage_class = s->info.env->get("HTTP_X_OBJECT_STORAGE_CLASS", ""); - return RGWHandler_REST::init(store, s, cio); + return RGWHandler_REST::init(driver, s, cio); } RGWHandler_REST* -RGWRESTMgr_SWIFT::get_handler(rgw::sal::Store* store, +RGWRESTMgr_SWIFT::get_handler(rgw::sal::Driver* driver, req_state* const s, const rgw::auth::StrategyRegistry& auth_registry, const std::string& frontend_prefix) { - int ret = RGWHandler_REST_SWIFT::init_from_header(store, s, frontend_prefix); + int ret = RGWHandler_REST_SWIFT::init_from_header(driver, s, frontend_prefix); if (ret < 0) { ldpp_dout(s, 10) << "init_from_header returned 
err=" << ret << dendl; return nullptr; @@ -3106,7 +3106,7 @@ RGWRESTMgr_SWIFT::get_handler(rgw::sal::Store* store, } RGWHandler_REST* RGWRESTMgr_SWIFT_Info::get_handler( - rgw::sal::Store* store, + rgw::sal::Driver* driver, req_state* const s, const rgw::auth::StrategyRegistry& auth_registry, const std::string& frontend_prefix) diff --git a/src/rgw/rgw_rest_swift.h b/src/rgw/rgw_rest_swift.h index c4842f70635f2..89873131cfbb7 100644 --- a/src/rgw/rgw_rest_swift.h +++ b/src/rgw/rgw_rest_swift.h @@ -235,7 +235,7 @@ protected: struct info { bool is_admin_info; - std::function list_data; + std::function list_data; }; static const std::vector> swift_info; @@ -245,10 +245,10 @@ public: void execute(optional_yield y) override; void send_response() override; - static void list_swift_data(Formatter& formatter, const ConfigProxy& config, rgw::sal::Store* store); - static void list_tempauth_data(Formatter& formatter, const ConfigProxy& config, rgw::sal::Store* store); - static void list_tempurl_data(Formatter& formatter, const ConfigProxy& config, rgw::sal::Store* store); - static void list_slo_data(Formatter& formatter, const ConfigProxy& config, rgw::sal::Store* store); + static void list_swift_data(Formatter& formatter, const ConfigProxy& config, rgw::sal::Driver* driver); + static void list_tempauth_data(Formatter& formatter, const ConfigProxy& config, rgw::sal::Driver* driver); + static void list_tempurl_data(Formatter& formatter, const ConfigProxy& config, rgw::sal::Driver* driver); + static void list_slo_data(Formatter& formatter, const ConfigProxy& config, rgw::sal::Driver* driver); static bool is_expired(const std::string& expires, const DoutPrefixProvider* dpp); }; @@ -273,7 +273,7 @@ public: RGWFormPost() = default; ~RGWFormPost() = default; - void init(rgw::sal::Store* store, + void init(rgw::sal::Driver* driver, req_state* s, RGWHandler* dialect_handler) override; @@ -344,7 +344,7 @@ public: class RGWSwiftWebsiteHandler { - rgw::sal::Store* const store; + 
rgw::sal::Driver* const driver; req_state* const s; RGWHandler_REST* const handler; @@ -359,10 +359,10 @@ class RGWSwiftWebsiteHandler { RGWOp* get_ws_index_op(); RGWOp* get_ws_listing_op(); public: - RGWSwiftWebsiteHandler(rgw::sal::Store* const store, + RGWSwiftWebsiteHandler(rgw::sal::Driver* const driver, req_state* const s, RGWHandler_REST* const handler) - : store(store), + : driver(driver), s(s), handler(handler) { } @@ -385,7 +385,7 @@ protected: return false; } - static int init_from_header(rgw::sal::Store* store, req_state* s, + static int init_from_header(rgw::sal::Driver* driver, req_state* s, const std::string& frontend_prefix); public: explicit RGWHandler_REST_SWIFT(const rgw::auth::Strategy& auth_strategy) @@ -395,7 +395,7 @@ public: int validate_bucket_name(const std::string& bucket); - int init(rgw::sal::Store* store, req_state *s, rgw::io::BasicClient *cio) override; + int init(rgw::sal::Driver* driver, req_state *s, rgw::io::BasicClient *cio) override; int authorize(const DoutPrefixProvider *dpp, optional_yield y) override; int postauth_init(optional_yield y) override; @@ -443,11 +443,11 @@ public: return website_handler->retarget_bucket(op, new_op); } - int init(rgw::sal::Store* const store, + int init(rgw::sal::Driver* const driver, req_state* const s, rgw::io::BasicClient* const cio) override { - website_handler = boost::in_place(store, s, this); - return RGWHandler_REST_SWIFT::init(store, s, cio); + website_handler = boost::in_place(driver, s, this); + return RGWHandler_REST_SWIFT::init(driver, s, cio); } }; @@ -482,11 +482,11 @@ public: return website_handler->retarget_object(op, new_op); } - int init(rgw::sal::Store* const store, + int init(rgw::sal::Driver* const driver, req_state* const s, rgw::io::BasicClient* const cio) override { - website_handler = boost::in_place(store, s, this); - return RGWHandler_REST_SWIFT::init(store, s, cio); + website_handler = boost::in_place(driver, s, this); + return RGWHandler_REST_SWIFT::init(driver, s, 
cio); } }; @@ -502,7 +502,7 @@ public: RGWRESTMgr_SWIFT() = default; ~RGWRESTMgr_SWIFT() override = default; - RGWHandler_REST *get_handler(rgw::sal::Store* store, + RGWHandler_REST *get_handler(rgw::sal::Driver* driver, req_state *s, const rgw::auth::StrategyRegistry& auth_registry, const std::string& frontend_prefix) override; @@ -536,14 +536,14 @@ public: return new RGWGetCrossDomainPolicy_ObjStore_SWIFT(); } - int init(rgw::sal::Store* const store, + int init(rgw::sal::Driver* const driver, req_state* const state, rgw::io::BasicClient* const cio) override { state->dialect = "swift"; state->formatter = new JSONFormatter; state->format = RGWFormat::JSON; - return RGWHandler::init(store, state, cio); + return RGWHandler::init(driver, state, cio); } int authorize(const DoutPrefixProvider *dpp, optional_yield) override { @@ -574,7 +574,7 @@ public: RGWRESTMgr_SWIFT_CrossDomain() = default; ~RGWRESTMgr_SWIFT_CrossDomain() override = default; - RGWHandler_REST* get_handler(rgw::sal::Store* store, + RGWHandler_REST* get_handler(rgw::sal::Driver* driver, req_state* const s, const rgw::auth::StrategyRegistry&, const std::string&) override { @@ -593,14 +593,14 @@ public: return new RGWGetHealthCheck_ObjStore_SWIFT(); } - int init(rgw::sal::Store* const store, + int init(rgw::sal::Driver* const driver, req_state* const state, rgw::io::BasicClient* const cio) override { state->dialect = "swift"; state->formatter = new JSONFormatter; state->format = RGWFormat::JSON; - return RGWHandler::init(store, state, cio); + return RGWHandler::init(driver, state, cio); } int authorize(const DoutPrefixProvider *dpp, optional_yield y) override { @@ -631,7 +631,7 @@ public: RGWRESTMgr_SWIFT_HealthCheck() = default; ~RGWRESTMgr_SWIFT_HealthCheck() override = default; - RGWHandler_REST* get_handler(rgw::sal::Store* store, + RGWHandler_REST* get_handler(rgw::sal::Driver* driver, req_state* const s, const rgw::auth::StrategyRegistry&, const std::string&) override { @@ -650,14 +650,14 @@ 
public: return new RGWInfo_ObjStore_SWIFT(); } - int init(rgw::sal::Store* const store, + int init(rgw::sal::Driver* const driver, req_state* const state, rgw::io::BasicClient* const cio) override { state->dialect = "swift"; state->formatter = new JSONFormatter; state->format = RGWFormat::JSON; - return RGWHandler::init(store, state, cio); + return RGWHandler::init(driver, state, cio); } int authorize(const DoutPrefixProvider *dpp, optional_yield) override { @@ -678,7 +678,7 @@ public: RGWRESTMgr_SWIFT_Info() = default; ~RGWRESTMgr_SWIFT_Info() override = default; - RGWHandler_REST *get_handler(rgw::sal::Store* store, + RGWHandler_REST *get_handler(rgw::sal::Driver* driver, req_state* s, const rgw::auth::StrategyRegistry& auth_registry, const std::string& frontend_prefix) override; diff --git a/src/rgw/rgw_rest_usage.cc b/src/rgw/rgw_rest_usage.cc index 1de0abdd9a55c..104dc193884de 100644 --- a/src/rgw/rgw_rest_usage.cc +++ b/src/rgw/rgw_rest_usage.cc @@ -36,11 +36,11 @@ void RGWOp_Usage_Get::execute(optional_yield y) { RESTArgs::get_string(s, "uid", uid_str, &uid_str); RESTArgs::get_string(s, "bucket", bucket_name, &bucket_name); - std::unique_ptr user = store->get_user(rgw_user(uid_str)); + std::unique_ptr user = driver->get_user(rgw_user(uid_str)); std::unique_ptr bucket; if (!bucket_name.empty()) { - store->get_bucket(nullptr, user.get(), std::string(), bucket_name, &bucket, null_yield); + driver->get_bucket(nullptr, user.get(), std::string(), bucket_name, &bucket, null_yield); } RESTArgs::get_epoch(s, "start", 0, &start); @@ -60,7 +60,7 @@ void RGWOp_Usage_Get::execute(optional_yield y) { } } - op_ret = RGWUsage::show(this, store, user.get(), bucket.get(), start, end, show_entries, show_summary, &categories, flusher); + op_ret = RGWUsage::show(this, driver, user.get(), bucket.get(), start, end, show_entries, show_summary, &categories, flusher); } class RGWOp_Usage_Delete : public RGWRESTOp { @@ -83,11 +83,11 @@ void RGWOp_Usage_Delete::execute(optional_yield 
y) { RESTArgs::get_string(s, "uid", uid_str, &uid_str); RESTArgs::get_string(s, "bucket", bucket_name, &bucket_name); - std::unique_ptr user = store->get_user(rgw_user(uid_str)); + std::unique_ptr user = driver->get_user(rgw_user(uid_str)); std::unique_ptr bucket; if (!bucket_name.empty()) { - store->get_bucket(nullptr, user.get(), std::string(), bucket_name, &bucket, null_yield); + driver->get_bucket(nullptr, user.get(), std::string(), bucket_name, &bucket, null_yield); } RESTArgs::get_epoch(s, "start", 0, &start); @@ -105,7 +105,7 @@ void RGWOp_Usage_Delete::execute(optional_yield y) { } } - op_ret = RGWUsage::trim(this, store, user.get(), bucket.get(), start, end); + op_ret = RGWUsage::trim(this, driver, user.get(), bucket.get(), start, end); } RGWOp *RGWHandler_Usage::op_get() diff --git a/src/rgw/rgw_rest_usage.h b/src/rgw/rgw_rest_usage.h index a6c1a8493cb93..f68edb0ecf7ba 100644 --- a/src/rgw/rgw_rest_usage.h +++ b/src/rgw/rgw_rest_usage.h @@ -25,7 +25,7 @@ public: RGWRESTMgr_Usage() = default; ~RGWRESTMgr_Usage() override = default; - RGWHandler_REST* get_handler(rgw::sal::Store* store, + RGWHandler_REST* get_handler(rgw::sal::Driver* driver, req_state*, const rgw::auth::StrategyRegistry& auth_registry, const std::string&) override { diff --git a/src/rgw/rgw_rest_user_policy.cc b/src/rgw/rgw_rest_user_policy.cc index 9606ae7b59d63..5eee746f5925e 100644 --- a/src/rgw/rgw_rest_user_policy.cc +++ b/src/rgw/rgw_rest_user_policy.cc @@ -119,7 +119,7 @@ void RGWPutUserPolicy::execute(optional_yield y) bufferlist bl = bufferlist::static_from_string(policy); - std::unique_ptr user = store->get_user(rgw_user(user_name)); + std::unique_ptr user = driver->get_user(rgw_user(user_name)); op_ret = user->load_user(s, s->yield); if (op_ret < 0) { @@ -134,7 +134,7 @@ void RGWPutUserPolicy::execute(optional_yield y) } ceph::bufferlist in_data; - op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = 
driver->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "ERROR: forward_request_to_master returned ret=" << op_ret << dendl; return; @@ -199,7 +199,7 @@ void RGWGetUserPolicy::execute(optional_yield y) return; } - std::unique_ptr user = store->get_user(rgw_user(user_name)); + std::unique_ptr user = driver->get_user(rgw_user(user_name)); op_ret = user->read_attrs(s, s->yield); if (op_ret == -ENOENT) { ldpp_dout(this, 0) << "ERROR: attrs not found for user" << user_name << dendl; @@ -268,7 +268,7 @@ void RGWListUserPolicies::execute(optional_yield y) return; } - std::unique_ptr user = store->get_user(rgw_user(user_name)); + std::unique_ptr user = driver->get_user(rgw_user(user_name)); op_ret = user->read_attrs(s, s->yield); if (op_ret == -ENOENT) { ldpp_dout(this, 0) << "ERROR: attrs not found for user" << user_name << dendl; @@ -335,7 +335,7 @@ void RGWDeleteUserPolicy::execute(optional_yield y) return; } - std::unique_ptr user = store->get_user(rgw_user(user_name)); + std::unique_ptr user = driver->get_user(rgw_user(user_name)); op_ret = user->load_user(s, s->yield); if (op_ret < 0) { op_ret = -ERR_NO_SUCH_ENTITY; @@ -349,7 +349,7 @@ void RGWDeleteUserPolicy::execute(optional_yield y) } ceph::bufferlist in_data; - op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); if (op_ret < 0) { // a policy might've been uploaded to this site when there was no sync // req. 
in earlier releases, proceed deletion diff --git a/src/rgw/rgw_role.cc b/src/rgw/rgw_role.cc index b6dde215f9470..fb188e7f80fa3 100644 --- a/src/rgw/rgw_role.cc +++ b/src/rgw/rgw_role.cc @@ -335,10 +335,10 @@ const string& RGWRole::get_path_oid_prefix() return role_path_oid_prefix; } -RGWRoleMetadataHandler::RGWRoleMetadataHandler(Store* store, +RGWRoleMetadataHandler::RGWRoleMetadataHandler(Driver* driver, RGWSI_Role_RADOS *role_svc) { - this->store = store; + this->driver = driver; base_init(role_svc->ctx(), role_svc->get_be_handler()); } @@ -354,7 +354,7 @@ RGWMetadataObject *RGWRoleMetadataHandler::get_meta_obj(JSONObj *jo, return nullptr; } - return new RGWRoleMetadataObject(info, objv, mtime, store); + return new RGWRoleMetadataObject(info, objv, mtime, driver); } int RGWRoleMetadataHandler::do_get(RGWSI_MetaBackend_Handler::Op *op, @@ -363,7 +363,7 @@ int RGWRoleMetadataHandler::do_get(RGWSI_MetaBackend_Handler::Op *op, optional_yield y, const DoutPrefixProvider *dpp) { - std::unique_ptr role = store->get_role(entry); + std::unique_ptr role = driver->get_role(entry); int ret = role->read_info(dpp, y); if (ret < 0) { return ret; @@ -374,7 +374,7 @@ int RGWRoleMetadataHandler::do_get(RGWSI_MetaBackend_Handler::Op *op, RGWRoleInfo info = role->get_info(); RGWRoleMetadataObject *rdo = new RGWRoleMetadataObject(info, objv_tracker.read_version, - mtime, store); + mtime, driver); *obj = rdo; return 0; @@ -386,7 +386,7 @@ int RGWRoleMetadataHandler::do_remove(RGWSI_MetaBackend_Handler::Op *op, optional_yield y, const DoutPrefixProvider *dpp) { - std::unique_ptr role = store->get_role(entry); + std::unique_ptr role = driver->get_role(entry); int ret = role->read_info(dpp, y); if (ret < 0) { return ret == -ENOENT? 
0 : ret; @@ -416,9 +416,9 @@ public: int put_checked(const DoutPrefixProvider *dpp) override { auto& info = mdo->get_role_info(); auto mtime = mdo->get_mtime(); - auto* store = mdo->get_store(); + auto* driver = mdo->get_driver(); info.mtime = mtime; - std::unique_ptr role = store->get_role(info); + std::unique_ptr role = driver->get_role(info); int ret = role->create(dpp, true, info.id, y); if (ret == -EEXIST) { ret = role->update(dpp, y); diff --git a/src/rgw/rgw_role.h b/src/rgw/rgw_role.h index eda04ef714e04..868578924f517 100644 --- a/src/rgw/rgw_role.h +++ b/src/rgw/rgw_role.h @@ -151,13 +151,13 @@ public: class RGWRoleMetadataObject: public RGWMetadataObject { RGWRoleInfo info; - Store* store; + Driver* driver; public: RGWRoleMetadataObject() = default; RGWRoleMetadataObject(RGWRoleInfo& info, const obj_version& v, real_time m, - Store* store) : RGWMetadataObject(v,m), info(info), store(store) {} + Driver* driver) : RGWMetadataObject(v,m), info(info), driver(driver) {} void dump(Formatter *f) const override { info.dump(f); @@ -167,15 +167,15 @@ public: return info; } - Store* get_store() { - return store; + Driver* get_driver() { + return driver; } }; class RGWRoleMetadataHandler: public RGWMetadataHandler_GenericMetaBE { public: - RGWRoleMetadataHandler(Store* store, RGWSI_Role_RADOS *role_svc); + RGWRoleMetadataHandler(Driver* driver, RGWSI_Role_RADOS *role_svc); std::string get_type() final { return "roles"; } @@ -205,7 +205,7 @@ public: bool from_remote_zone) override; private: - Store* store; + Driver* driver; }; } } // namespace rgw::sal diff --git a/src/rgw/rgw_sal.cc b/src/rgw/rgw_sal.cc index 90786ac49c996..0eee88fd674a5 100644 --- a/src/rgw/rgw_sal.cc +++ b/src/rgw/rgw_sal.cc @@ -43,17 +43,17 @@ #define dout_subsys ceph_subsys_rgw extern "C" { -extern rgw::sal::Store* newStore(void); +extern rgw::sal::Driver* newRadosStore(void); #ifdef WITH_RADOSGW_DBSTORE -extern rgw::sal::Store* newDBStore(CephContext *cct); +extern rgw::sal::Driver* 
newDBStore(CephContext *cct); #endif #ifdef WITH_RADOSGW_MOTR -extern rgw::sal::Store* newMotrStore(CephContext *cct); +extern rgw::sal::Driver* newMotrStore(CephContext *cct); #endif #ifdef WITH_RADOSGW_DAOS -extern rgw::sal::Store* newDaosStore(CephContext *cct); +extern rgw::sal::Driver* newDaosStore(CephContext *cct); #endif -extern rgw::sal::Store* newBaseFilter(rgw::sal::Store* next); +extern rgw::sal::Driver* newBaseFilter(rgw::sal::Driver* next); } @@ -92,7 +92,7 @@ RGWObjState::RGWObjState(const RGWObjState& rhs) : obj (rhs.obj) { compressed = rhs.compressed; } -rgw::sal::Store* StoreManager::init_storage_provider(const DoutPrefixProvider* dpp, +rgw::sal::Driver* DriverManager::init_storage_provider(const DoutPrefixProvider* dpp, CephContext* cct, const Config& cfg, bool use_gc_thread, @@ -103,11 +103,11 @@ rgw::sal::Store* StoreManager::init_storage_provider(const DoutPrefixProvider* d bool use_cache, bool use_gc) { - rgw::sal::Store* store{nullptr}; + rgw::sal::Driver* driver{nullptr}; if (cfg.store_name.compare("rados") == 0) { - store = newStore(); - RGWRados* rados = static_cast(store)->getRados(); + driver = newRadosStore(); + RGWRados* rados = static_cast(driver)->getRados(); if ((*rados).set_use_cache(use_cache) .set_use_datacache(false) @@ -118,23 +118,23 @@ rgw::sal::Store* StoreManager::init_storage_provider(const DoutPrefixProvider* d .set_run_sync_thread(run_sync_thread) .set_run_reshard_thread(run_reshard_thread) .init_begin(cct, dpp) < 0) { - delete store; + delete driver; return nullptr; } - if (store->initialize(cct, dpp) < 0) { - delete store; + if (driver->initialize(cct, dpp) < 0) { + delete driver; return nullptr; } if (rados->init_complete(dpp) < 0) { - delete store; + delete driver; return nullptr; } } else if (cfg.store_name.compare("d3n") == 0) { - store = new rgw::sal::RadosStore(); + driver = new rgw::sal::RadosStore(); RGWRados* rados = new D3nRGWDataCache; - dynamic_cast(store)->setRados(rados); - 
rados->set_store(static_cast(store)); + dynamic_cast(driver)->setRados(rados); + rados->set_store(static_cast(driver)); if ((*rados).set_use_cache(use_cache) .set_use_datacache(true) @@ -144,15 +144,15 @@ rgw::sal::Store* StoreManager::init_storage_provider(const DoutPrefixProvider* d .set_run_sync_thread(run_sync_thread) .set_run_reshard_thread(run_reshard_thread) .init_begin(cct, dpp) < 0) { - delete store; + delete driver; return nullptr; } - if (store->initialize(cct, dpp) < 0) { - delete store; + if (driver->initialize(cct, dpp) < 0) { + delete driver; return nullptr; } if (rados->init_complete(dpp) < 0) { - delete store; + delete driver; return nullptr; } @@ -171,11 +171,11 @@ rgw::sal::Store* StoreManager::init_storage_provider(const DoutPrefixProvider* d } #ifdef WITH_RADOSGW_DBSTORE else if (cfg.store_name.compare("dbstore") == 0) { - store = newDBStore(cct); + driver = newDBStore(cct); - if ((*(rgw::sal::DBStore*)store).set_run_lc_thread(use_lc_thread) + if ((*(rgw::sal::DBStore*)driver).set_run_lc_thread(use_lc_thread) .initialize(cct, dpp) < 0) { - delete store; + delete driver; return nullptr; } } @@ -183,16 +183,16 @@ rgw::sal::Store* StoreManager::init_storage_provider(const DoutPrefixProvider* d #ifdef WITH_RADOSGW_MOTR else if (cfg.store_name.compare("motr") == 0) { - store = newMotrStore(cct); - if (store == nullptr) { + driver = newMotrStore(cct); + if (driver == nullptr) { ldpp_dout(dpp, 0) << "newMotrStore() failed!" 
<< dendl; - return store; + return driver; } - ((rgw::sal::MotrStore *)store)->init_metadata_cache(dpp, cct); + ((rgw::sal::MotrStore *)driver)->init_metadata_cache(dpp, cct); /* XXX: temporary - create testid user */ rgw_user testid_user("tenant", "tester", "ns"); - std::unique_ptr user = store->get_user(testid_user); + std::unique_ptr user = driver->get_user(testid_user); user->get_info().user_id = testid_user; user->get_info().display_name = "Motr Explorer"; user->get_info().user_email = "tester@seagate.com"; @@ -207,7 +207,7 @@ rgw::sal::Store* StoreManager::init_storage_provider(const DoutPrefixProvider* d // Read user info and compare. rgw_user ruser("", "tester", ""); - std::unique_ptr suser = store->get_user(ruser); + std::unique_ptr suser = driver->get_user(ruser); suser->get_info().user_id = ruser; rc = suser->load_user(dpp, null_yield); if (rc != 0) { @@ -223,115 +223,115 @@ rgw::sal::Store* StoreManager::init_storage_provider(const DoutPrefixProvider* d #ifdef WITH_RADOSGW_DAOS else if (cfg.store_name.compare("daos") == 0) { - store = newDaosStore(cct); - if (store == nullptr) { + driver = newDaosStore(cct); + if (driver == nullptr) { ldpp_dout(dpp, 0) << "newDaosStore() failed!" 
<< dendl; - return store; + return driver; } - int ret = store->initialize(cct, dpp); + int ret = driver->initialize(cct, dpp); if (ret != 0) { ldpp_dout(dpp, 20) << "ERROR: store->initialize() failed: " << ret << dendl; - delete store; + delete driver; return nullptr; } } #endif if (cfg.filter_name.compare("base") == 0) { - rgw::sal::Store* next = store; - store = newBaseFilter(next); + rgw::sal::Driver* next = driver; + driver = newBaseFilter(next); - if (store->initialize(cct, dpp) < 0) { - delete store; + if (driver->initialize(cct, dpp) < 0) { + delete driver; delete next; return nullptr; } } - return store; + return driver; } -rgw::sal::Store* StoreManager::init_raw_storage_provider(const DoutPrefixProvider* dpp, CephContext* cct, const Config& cfg) +rgw::sal::Driver* DriverManager::init_raw_storage_provider(const DoutPrefixProvider* dpp, CephContext* cct, const Config& cfg) { - rgw::sal::Store* store = nullptr; + rgw::sal::Driver* driver = nullptr; if (cfg.store_name.compare("rados") == 0) { - store = newStore(); - RGWRados* rados = static_cast(store)->getRados(); + driver = newRadosStore(); + RGWRados* rados = static_cast(driver)->getRados(); rados->set_context(cct); int ret = rados->init_svc(true, dpp); if (ret < 0) { ldout(cct, 0) << "ERROR: failed to init services (ret=" << cpp_strerror(-ret) << ")" << dendl; - delete store; + delete driver; return nullptr; } if (rados->init_rados() < 0) { - delete store; + delete driver; return nullptr; } - if (store->initialize(cct, dpp) < 0) { - delete store; + if (driver->initialize(cct, dpp) < 0) { + delete driver; return nullptr; } } else if (cfg.store_name.compare("dbstore") == 0) { #ifdef WITH_RADOSGW_DBSTORE - store = newDBStore(cct); + driver = newDBStore(cct); - if ((*(rgw::sal::DBStore*)store).initialize(cct, dpp) < 0) { - delete store; + if ((*(rgw::sal::DBStore*)driver).initialize(cct, dpp) < 0) { + delete driver; return nullptr; } #else - store = nullptr; + driver = nullptr; #endif } else if 
(cfg.store_name.compare("motr") == 0) { #ifdef WITH_RADOSGW_MOTR - store = newMotrStore(cct); + driver = newMotrStore(cct); #else - store = nullptr; + driver = nullptr; #endif } else if (cfg.store_name.compare("daos") == 0) { #ifdef WITH_RADOSGW_DAOS - store = newDaosStore(cct); + driver = newDaosStore(cct); - if (store->initialize(cct, dpp) < 0) { - delete store; + if (driver->initialize(cct, dpp) < 0) { + delete driver; return nullptr; } #else - store = nullptr; + driver = nullptr; #endif } if (cfg.filter_name.compare("base") == 0) { - rgw::sal::Store* next = store; - store = newBaseFilter(next); + rgw::sal::Driver* next = driver; + driver = newBaseFilter(next); - if (store->initialize(cct, dpp) < 0) { - delete store; + if (driver->initialize(cct, dpp) < 0) { + delete driver; delete next; return nullptr; } } - return store; + return driver; } -void StoreManager::close_storage(rgw::sal::Store* store) +void DriverManager::close_storage(rgw::sal::Driver* driver) { - if (!store) + if (!driver) return; - store->finalize(); + driver->finalize(); - delete store; + delete driver; } -StoreManager::Config StoreManager::get_config(bool admin, CephContext* cct) +DriverManager::Config DriverManager::get_config(bool admin, CephContext* cct) { - StoreManager::Config cfg; + DriverManager::Config cfg; // Get the store backend const auto& config_store = g_conf().get_val("rgw_backend_store"); @@ -377,7 +377,7 @@ StoreManager::Config StoreManager::get_config(bool admin, CephContext* cct) return cfg; } -auto StoreManager::create_config_store(const DoutPrefixProvider* dpp, +auto DriverManager::create_config_store(const DoutPrefixProvider* dpp, std::string_view type) -> std::unique_ptr { diff --git a/src/rgw/rgw_sal.h b/src/rgw/rgw_sal.h index c0ea2e631963d..1a24045adf184 100644 --- a/src/rgw/rgw_sal.h +++ b/src/rgw/rgw_sal.h @@ -258,48 +258,48 @@ class Completions { /** * @brief Base singleton representing a Store or Filter * - * The Store is the base abstraction of the SAL layer. 
It represents a base storage + * mechanism, or a intermediate stacking layer. There is a single instance of a given - * Store per RGW, and this Store mediates all access to it's backing. + * Driver per RGW, and this Driver mediates all access to its backing. * - * A store contains, loosely, @a User, @a Bucket, and @a Object entities. The @a Object + * A Driver contains, loosely, @a User, @a Bucket, and @a Object entities. The @a Object * contains data, and it's associated metadata. The @a Bucket contains Objects, and * metadata about the bucket. Both Buckets and Objects are owned by a @a User, which is * the basic unit of access control. * - * A store also has metadata and some global responsibilities. For example, a store is + * A Driver also has metadata and some global responsibilities. For example, a driver is * responsible for managing the LifeCycle activities for it's data. */ -class Store { +class Driver { public: - Store() {} - virtual ~Store() = default; + Driver() {} + virtual ~Driver() = default; - /** Post-creation initialization of store */ + /** Post-creation initialization of driver */ virtual int initialize(CephContext *cct, const DoutPrefixProvider *dpp) = 0; - /** Name of this store provider (e.g., "rados") */ + /** Name of this driver provider (e.g., "rados") */ virtual const std::string get_name() const = 0; /** Get cluster unique identifier */ virtual std::string get_cluster_id(const DoutPrefixProvider* dpp, optional_yield y) = 0; - /** Get a User from a rgw_user. Does not query store for user info, so quick */ + /** Get a User from a rgw_user. Does not query driver for user info, so quick */ virtual std::unique_ptr get_user(const rgw_user& u) = 0; - /** Lookup a User by access key. Queries store for user info. */ + /** Lookup a User by access key. Queries driver for user info. */ virtual int get_user_by_access_key(const DoutPrefixProvider* dpp, const std::string& key, optional_yield y, std::unique_ptr* user) = 0; - /** Lookup a User by email address. Queries store for user info. */ + /** Lookup a User by email address. Queries driver for user info. */ virtual int get_user_by_email(const DoutPrefixProvider* dpp, const std::string& email, optional_yield y, std::unique_ptr* user) = 0; - /** Lookup a User by swift username. Queries store for user info. */ + /** Lookup a User by swift username. Queries driver for user info. 
*/ virtual int get_user_by_access_key(const DoutPrefixProvider* dpp, const std::string& key, optional_yield y, std::unique_ptr* user) = 0; - /** Lookup a User by email address. Queries store for user info. */ + /** Lookup a User by email address. Queries driver for user info. */ virtual int get_user_by_email(const DoutPrefixProvider* dpp, const std::string& email, optional_yield y, std::unique_ptr* user) = 0; - /** Lookup a User by swift username. Queries store for user info. */ + /** Lookup a User by swift username. Queries driver for user info. */ virtual int get_user_by_swift(const DoutPrefixProvider* dpp, const std::string& user_str, optional_yield y, std::unique_ptr* user) = 0; /** Get a basic Object. This Object is not looked up, and is incomplete, since is * does not have a bucket. This should only be used when an Object is needed before * there is a Bucket, otherwise use the get_object() in the Bucket class. */ virtual std::unique_ptr get_object(const rgw_obj_key& k) = 0; - /** Get a Bucket by info. Does not query the store, just uses the give bucket info. */ + /** Get a Bucket by info. Does not query the driver, just uses the give bucket info. */ virtual int get_bucket(User* u, const RGWBucketInfo& i, std::unique_ptr* bucket) = 0; - /** Lookup a Bucket by key. Queries store for bucket info. */ + /** Lookup a Bucket by key. Queries driver for bucket info. */ virtual int get_bucket(const DoutPrefixProvider* dpp, User* u, const rgw_bucket& b, std::unique_ptr* bucket, optional_yield y) = 0; - /** Lookup a Bucket by name. Queries store for bucket info. */ + /** Lookup a Bucket by name. Queries driver for bucket info. 
*/ virtual int get_bucket(const DoutPrefixProvider* dpp, User* u, const std::string& tenant, const std::string& name, std::unique_ptr* bucket, optional_yield y) = 0; - /** For multisite, this Store is the zone's master */ + /** For multisite, this driver is the zone's master */ virtual bool is_meta_master() = 0; /** For multisite, forward an OP to the zone's master */ virtual int forward_request_to_master(const DoutPrefixProvider *dpp, User* user, obj_version* objv, @@ -309,7 +309,7 @@ class Store { bufferlist& in_data, RGWXMLDecoder::XMLParser* parser, req_info& info, optional_yield y) = 0; - /** Get zone info for this store */ + /** Get zone info for this driver */ virtual Zone* get_zone() = 0; /** Get a unique ID specific to this zone. */ virtual std::string zone_unique_id(uint64_t unique_num) = 0; @@ -319,7 +319,7 @@ class Store { virtual int get_zonegroup(const std::string& id, std::unique_ptr* zonegroup) = 0; /** List all zones in all zone groups by ID */ virtual int list_all_zones(const DoutPrefixProvider* dpp, std::list& zone_ids) = 0; - /** Get statistics about the cluster represented by this Store */ + /** Get statistics about the cluster represented by this driver */ virtual int cluster_stat(RGWClusterStat& stats) = 0; /** Get a @a Lifecycle object. Used to manage/run lifecycle transitions */ virtual std::unique_ptr get_lifecycle(void) = 0; @@ -342,12 +342,12 @@ class Store { /** Get access to the coroutine registry. Used to create new coroutine managers */ virtual RGWCoroutinesManagerRegistry* get_cr_registry() = 0; - /** Log usage data to the store. Usage data is things like bytes sent/received and + /** Log usage data to the driver. Usage data is things like bytes sent/received and * op count */ virtual int log_usage(const DoutPrefixProvider *dpp, std::map& usage_info) = 0; - /** Log OP data to the store. Data is opaque to SAL */ + /** Log OP data to the driver. 
Data is opaque to SAL */ virtual int log_op(const DoutPrefixProvider *dpp, std::string& oid, bufferlist& bl) = 0; - /** Register this Store to the service map. Somewhat Rados specific; may be removed*/ + /** Register this driver to the service map. Somewhat Rados specific; may be removed*/ virtual int register_to_service_map(const DoutPrefixProvider *dpp, const std::string& daemon_type, const std::map& meta) = 0; /** Get default quota info. Used as fallback if a user or bucket has no quota set*/ @@ -442,17 +442,17 @@ class Store { /** Check to see if this placement rule is valid */ virtual bool valid_placement(const rgw_placement_rule& rule) = 0; - /** Clean up a store for termination */ + /** Clean up a driver for termination */ virtual void finalize(void) = 0; - /** Get the Ceph context associated with this store. May be removed. */ + /** Get the Ceph context associated with this driver. May be removed. */ virtual CephContext* ctx(void) = 0; /** Get the location of where lua packages are installed */ virtual const std::string& get_luarocks_path() const = 0; /** Set the location of where lua packages are installed */ virtual void set_luarocks_path(const std::string& path) = 0; - /** Register admin APIs unique to this store */ + /** Register admin APIs unique to this driver */ virtual void register_admin_apis(RGWRESTMgr* mgr) = 0; }; @@ -1020,7 +1020,7 @@ class Object { optional_yield y) = 0; /** Check to see if two placement rules match */ virtual bool placement_rules_match(rgw_placement_rule& r1, rgw_placement_rule& r2) = 0; - /** Dump store-specific object layout info in JSON */ + /** Dump driver-specific object layout info in JSON */ virtual int dump_obj_layout(const DoutPrefixProvider *dpp, optional_yield y, Formatter* f) = 0; /** Get the cached attributes for this object */ @@ -1550,14 +1550,14 @@ public: } } // namespace rgw::sal /** - * @brief A manager for Stores + * @brief A manager for Drivers * - * This will manage the singleton instances of the various 
stores. Stores come in two - * varieties: Full and Raw. A full store is suitable for use in a radosgw daemon. It - * has full access to the cluster, if any. A raw store is a stripped down store, used + * This will manage the singleton instances of the various drivers. Drivers come in two + * varieties: Full and Raw. A full driver is suitable for use in a radosgw daemon. It + * has full access to the cluster, if any. A raw driver is a stripped down driver, used * for admin commands. */ -class StoreManager { +class DriverManager { public: struct Config { /** Name of store to create */ @@ -1566,9 +1566,9 @@ public: std::string filter_name; }; - StoreManager() {} - /** Get a full store by service name */ - static rgw::sal::Store* get_storage(const DoutPrefixProvider* dpp, + DriverManager() {} + /** Get a full driver by service name */ + static rgw::sal::Driver* get_storage(const DoutPrefixProvider* dpp, CephContext* cct, const Config& cfg, bool use_gc_thread, @@ -1578,22 +1578,22 @@ public: bool run_reshard_thread, bool use_cache = true, bool use_gc = true) { - rgw::sal::Store* store = init_storage_provider(dpp, cct, cfg, use_gc_thread, + rgw::sal::Driver* driver = init_storage_provider(dpp, cct, cfg, use_gc_thread, use_lc_thread, quota_threads, run_sync_thread, run_reshard_thread, use_cache, use_gc); - return store; + return driver; } - /** Get a stripped down store by service name */ - static rgw::sal::Store* get_raw_storage(const DoutPrefixProvider* dpp, + /** Get a stripped down driver by service name */ + static rgw::sal::Driver* get_raw_storage(const DoutPrefixProvider* dpp, CephContext* cct, const Config& cfg) { - rgw::sal::Store* store = init_raw_storage_provider(dpp, cct, cfg); - return store; + rgw::sal::Driver* driver = init_raw_storage_provider(dpp, cct, cfg); + return driver; } - /** Initialize a new full Store */ - static rgw::sal::Store* init_storage_provider(const DoutPrefixProvider* dpp, + /** Initialize a new full Driver */ + static rgw::sal::Driver* 
init_storage_provider(const DoutPrefixProvider* dpp, CephContext* cct, const Config& cfg, bool use_gc_thread, @@ -1603,14 +1603,14 @@ public: bool run_reshard_thread, bool use_metadata_cache, bool use_gc); - /** Initialize a new raw Store */ - static rgw::sal::Store* init_raw_storage_provider(const DoutPrefixProvider* dpp, + /** Initialize a new raw Driver */ + static rgw::sal::Driver* init_raw_storage_provider(const DoutPrefixProvider* dpp, CephContext* cct, const Config& cfg); - /** Close a Store when it's no longer needed */ - static void close_storage(rgw::sal::Store* store); + /** Close a Driver when it's no longer needed */ + static void close_storage(rgw::sal::Driver* driver); - /** Get the config for stores/filters */ + /** Get the config for Drivers */ static Config get_config(bool admin, CephContext* cct); /** Create a ConfigStore */ diff --git a/src/rgw/rgw_sal_daos.h b/src/rgw/rgw_sal_daos.h index 8dab0cad05933..f172a8abe3bf9 100644 --- a/src/rgw/rgw_sal_daos.h +++ b/src/rgw/rgw_sal_daos.h @@ -879,7 +879,7 @@ class DaosMultipartUpload : public StoreMultipartUpload { const std::string& get_bucket_name() { return bucket->get_name(); } }; -class DaosStore : public StoreStore { +class DaosStore : public StoreDriver { private: std::string luarocks_path; DaosZone zone; diff --git a/src/rgw/rgw_sal_dbstore.cc b/src/rgw/rgw_sal_dbstore.cc index e47548ef4ab03..a99a412d4dd50 100644 --- a/src/rgw/rgw_sal_dbstore.cc +++ b/src/rgw/rgw_sal_dbstore.cc @@ -1239,12 +1239,12 @@ namespace rgw::sal { optional_yield y, MultipartUpload* upload, std::unique_ptr _head_obj, - DBStore* _store, + DBStore* _driver, const rgw_user& _owner, const rgw_placement_rule *_ptail_placement_rule, uint64_t _part_num, const std::string& _part_num_str): StoreWriter(dpp, y), - store(_store), + store(_driver), owner(_owner), ptail_placement_rule(_ptail_placement_rule), head_obj(std::move(_head_obj)), @@ -1253,7 +1253,7 @@ namespace rgw::sal { oid(head_obj->get_name() + "." + upload_id + "." 
+ std::to_string(part_num)), meta_obj(((DBMultipartUpload*)upload)->get_meta_obj()), - op_target(_store->getDB(), head_obj->get_bucket()->get_info(), head_obj->get_obj(), upload_id), + op_target(_driver->getDB(), head_obj->get_bucket()->get_info(), head_obj->get_obj(), upload_id), parent_op(&op_target), part_num_str(_part_num_str) {} @@ -1388,19 +1388,19 @@ namespace rgw::sal { DBAtomicWriter::DBAtomicWriter(const DoutPrefixProvider *dpp, optional_yield y, std::unique_ptr _head_obj, - DBStore* _store, + DBStore* _driver, const rgw_user& _owner, const rgw_placement_rule *_ptail_placement_rule, uint64_t _olh_epoch, const std::string& _unique_tag) : StoreWriter(dpp, y), - store(_store), + store(_driver), owner(_owner), ptail_placement_rule(_ptail_placement_rule), olh_epoch(_olh_epoch), unique_tag(_unique_tag), - obj(_store, _head_obj->get_key(), _head_obj->get_bucket()), - op_target(_store->getDB(), obj.get_bucket()->get_info(), obj.get_obj()), + obj(_driver, _head_obj->get_key(), _head_obj->get_bucket()), + op_target(_driver->getDB(), obj.get_bucket()->get_info(), obj.get_obj()), parent_op(&op_target) {} int DBAtomicWriter::prepare(optional_yield y) @@ -2024,22 +2024,22 @@ extern "C" { void *newDBStore(CephContext *cct) { - rgw::sal::DBStore *store = new rgw::sal::DBStore(); + rgw::sal::DBStore *driver = new rgw::sal::DBStore(); DBStoreManager *dbsm = new DBStoreManager(cct); DB *db = dbsm->getDB(); if (!db) { delete dbsm; - delete store; + delete driver; return nullptr; } - store->setDBStoreManager(dbsm); - store->setDB(db); - db->set_store((rgw::sal::Store*)store); + driver->setDBStoreManager(dbsm); + driver->setDB(db); + db->set_driver((rgw::sal::Driver*)driver); db->set_context(cct); - return store; + return driver; } } diff --git a/src/rgw/rgw_sal_dbstore.h b/src/rgw/rgw_sal_dbstore.h index 33ad7c48c6238..c46fb6f842e09 100644 --- a/src/rgw/rgw_sal_dbstore.h +++ b/src/rgw/rgw_sal_dbstore.h @@ -752,7 +752,7 @@ public: optional_yield y) override; }; - class DBStore 
: public StoreStore { + class DBStore : public StoreDriver { private: /* DBStoreManager is used in case multiple * connections are needed one for each tenant. diff --git a/src/rgw/rgw_sal_filter.cc b/src/rgw/rgw_sal_filter.cc index 6c415786e3bd2..ba5ed02322708 100644 --- a/src/rgw/rgw_sal_filter.cc +++ b/src/rgw/rgw_sal_filter.cc @@ -89,31 +89,31 @@ int FilterZoneGroup::get_zone_by_name(const std::string& name, std::unique_ptr(next->get_zone()->clone()); return 0; } -const std::string FilterStore::get_name() const +const std::string FilterDriver::get_name() const { std::string name = "filter<" + next->get_name() + ">"; return name; } -std::string FilterStore::get_cluster_id(const DoutPrefixProvider* dpp, optional_yield y) +std::string FilterDriver::get_cluster_id(const DoutPrefixProvider* dpp, optional_yield y) { return next->get_cluster_id(dpp, y); } -std::unique_ptr FilterStore::get_user(const rgw_user &u) +std::unique_ptr FilterDriver::get_user(const rgw_user &u) { std::unique_ptr user = next->get_user(u); return std::make_unique(std::move(user)); } -int FilterStore::get_user_by_access_key(const DoutPrefixProvider* dpp, const std::string& key, optional_yield y, std::unique_ptr* user) +int FilterDriver::get_user_by_access_key(const DoutPrefixProvider* dpp, const std::string& key, optional_yield y, std::unique_ptr* user) { std::unique_ptr nu; int ret; @@ -127,7 +127,7 @@ int FilterStore::get_user_by_access_key(const DoutPrefixProvider* dpp, const std return 0; } -int FilterStore::get_user_by_email(const DoutPrefixProvider* dpp, const std::string& email, optional_yield y, std::unique_ptr* user) +int FilterDriver::get_user_by_email(const DoutPrefixProvider* dpp, const std::string& email, optional_yield y, std::unique_ptr* user) { std::unique_ptr nu; int ret; @@ -141,7 +141,7 @@ int FilterStore::get_user_by_email(const DoutPrefixProvider* dpp, const std::str return 0; } -int FilterStore::get_user_by_swift(const DoutPrefixProvider* dpp, const std::string& user_str, 
optional_yield y, std::unique_ptr* user) +int FilterDriver::get_user_by_swift(const DoutPrefixProvider* dpp, const std::string& user_str, optional_yield y, std::unique_ptr* user) { std::unique_ptr nu; int ret; @@ -155,13 +155,13 @@ int FilterStore::get_user_by_swift(const DoutPrefixProvider* dpp, const std::str return 0; } -std::unique_ptr FilterStore::get_object(const rgw_obj_key& k) +std::unique_ptr FilterDriver::get_object(const rgw_obj_key& k) { std::unique_ptr o = next->get_object(k); return std::make_unique(std::move(o)); } -int FilterStore::get_bucket(const DoutPrefixProvider* dpp, User* u, const rgw_bucket& b, std::unique_ptr* bucket, optional_yield y) +int FilterDriver::get_bucket(const DoutPrefixProvider* dpp, User* u, const rgw_bucket& b, std::unique_ptr* bucket, optional_yield y) { std::unique_ptr nb; int ret; @@ -176,7 +176,7 @@ int FilterStore::get_bucket(const DoutPrefixProvider* dpp, User* u, const rgw_bu return 0; } -int FilterStore::get_bucket(User* u, const RGWBucketInfo& i, std::unique_ptr* bucket) +int FilterDriver::get_bucket(User* u, const RGWBucketInfo& i, std::unique_ptr* bucket) { std::unique_ptr nb; int ret; @@ -191,7 +191,7 @@ int FilterStore::get_bucket(User* u, const RGWBucketInfo& i, std::unique_ptr* bucket, optional_yield y) +int FilterDriver::get_bucket(const DoutPrefixProvider* dpp, User* u, const std::string& tenant, const std::string& name, std::unique_ptr* bucket, optional_yield y) { std::unique_ptr nb; int ret; @@ -206,12 +206,12 @@ int FilterStore::get_bucket(const DoutPrefixProvider* dpp, User* u, const std::s return 0; } -bool FilterStore::is_meta_master() +bool FilterDriver::is_meta_master() { return next->is_meta_master(); } -int FilterStore::forward_request_to_master(const DoutPrefixProvider *dpp, +int FilterDriver::forward_request_to_master(const DoutPrefixProvider *dpp, User* user, obj_version* objv, bufferlist& in_data, JSONParser* jp, req_info& info, @@ -220,7 +220,7 @@ int FilterStore::forward_request_to_master(const 
DoutPrefixProvider *dpp, return next->forward_request_to_master(dpp, user, objv, in_data, jp, info, y); } -int FilterStore::forward_iam_request_to_master(const DoutPrefixProvider *dpp, +int FilterDriver::forward_iam_request_to_master(const DoutPrefixProvider *dpp, const RGWAccessKey& key, obj_version* objv, bufferlist& in_data, @@ -231,17 +231,17 @@ int FilterStore::forward_iam_request_to_master(const DoutPrefixProvider *dpp, return next->forward_iam_request_to_master(dpp, key, objv, in_data, parser, info, y); } -std::string FilterStore::zone_unique_id(uint64_t unique_num) +std::string FilterDriver::zone_unique_id(uint64_t unique_num) { return next->zone_unique_id(unique_num); } -std::string FilterStore::zone_unique_trans_id(uint64_t unique_num) +std::string FilterDriver::zone_unique_trans_id(uint64_t unique_num) { return next->zone_unique_trans_id(unique_num); } -int FilterStore::get_zonegroup(const std::string& id, +int FilterDriver::get_zonegroup(const std::string& id, std::unique_ptr* zonegroup) { std::unique_ptr ngz; @@ -256,24 +256,24 @@ int FilterStore::get_zonegroup(const std::string& id, return 0; } -int FilterStore::cluster_stat(RGWClusterStat& stats) +int FilterDriver::cluster_stat(RGWClusterStat& stats) { return next->cluster_stat(stats); } -std::unique_ptr FilterStore::get_lifecycle(void) +std::unique_ptr FilterDriver::get_lifecycle(void) { std::unique_ptr lc = next->get_lifecycle(); return std::make_unique(std::move(lc)); } -std::unique_ptr FilterStore::get_completions(void) +std::unique_ptr FilterDriver::get_completions(void) { std::unique_ptr c = next->get_completions(); return std::make_unique(std::move(c)); } -std::unique_ptr FilterStore::get_notification(rgw::sal::Object* obj, +std::unique_ptr FilterDriver::get_notification(rgw::sal::Object* obj, rgw::sal::Object* src_obj, req_state* s, rgw::notify::EventType event_type, const std::string* object_name) @@ -285,7 +285,7 @@ std::unique_ptr FilterStore::get_notification(rgw::sal::Object* ob return 
std::make_unique(std::move(n)); } -std::unique_ptr FilterStore::get_notification(const DoutPrefixProvider* dpp, +std::unique_ptr FilterDriver::get_notification(const DoutPrefixProvider* dpp, rgw::sal::Object* obj, rgw::sal::Object* src_obj, rgw::notify::EventType event_type, rgw::sal::Bucket* _bucket, std::string& _user_id, @@ -302,57 +302,57 @@ std::unique_ptr FilterStore::get_notification(const DoutPrefixProv return std::make_unique(std::move(n)); } -RGWLC* FilterStore::get_rgwlc() +RGWLC* FilterDriver::get_rgwlc() { return next->get_rgwlc(); } -RGWCoroutinesManagerRegistry* FilterStore::get_cr_registry() +RGWCoroutinesManagerRegistry* FilterDriver::get_cr_registry() { return next->get_cr_registry(); } -int FilterStore::log_usage(const DoutPrefixProvider *dpp, std::map& usage_info) +int FilterDriver::log_usage(const DoutPrefixProvider *dpp, std::map& usage_info) { return next->log_usage(dpp, usage_info); } -int FilterStore::log_op(const DoutPrefixProvider *dpp, std::string& oid, bufferlist& bl) +int FilterDriver::log_op(const DoutPrefixProvider *dpp, std::string& oid, bufferlist& bl) { return next->log_op(dpp, oid, bl); } -int FilterStore::register_to_service_map(const DoutPrefixProvider *dpp, +int FilterDriver::register_to_service_map(const DoutPrefixProvider *dpp, const std::string& daemon_type, const std::map& meta) { return next->register_to_service_map(dpp, daemon_type, meta); } -void FilterStore::get_quota(RGWQuota& quota) +void FilterDriver::get_quota(RGWQuota& quota) { return next->get_quota(quota); } -void FilterStore::get_ratelimit(RGWRateLimitInfo& bucket_ratelimit, +void FilterDriver::get_ratelimit(RGWRateLimitInfo& bucket_ratelimit, RGWRateLimitInfo& user_ratelimit, RGWRateLimitInfo& anon_ratelimit) { return next->get_ratelimit(bucket_ratelimit, user_ratelimit, anon_ratelimit); } -int FilterStore::set_buckets_enabled(const DoutPrefixProvider* dpp, +int FilterDriver::set_buckets_enabled(const DoutPrefixProvider* dpp, std::vector& buckets, bool 
enabled) { return next->set_buckets_enabled(dpp, buckets, enabled); } -uint64_t FilterStore::get_new_req_id() +uint64_t FilterDriver::get_new_req_id() { return next->get_new_req_id(); } -int FilterStore::get_sync_policy_handler(const DoutPrefixProvider* dpp, +int FilterDriver::get_sync_policy_handler(const DoutPrefixProvider* dpp, std::optional zone, std::optional bucket, RGWBucketSyncPolicyHandlerRef* phandler, @@ -361,29 +361,29 @@ int FilterStore::get_sync_policy_handler(const DoutPrefixProvider* dpp, return next->get_sync_policy_handler(dpp, zone, bucket, phandler, y); } -RGWDataSyncStatusManager* FilterStore::get_data_sync_manager(const rgw_zone_id& source_zone) +RGWDataSyncStatusManager* FilterDriver::get_data_sync_manager(const rgw_zone_id& source_zone) { return next->get_data_sync_manager(source_zone); } -void FilterStore::wakeup_meta_sync_shards(std::set& shard_ids) +void FilterDriver::wakeup_meta_sync_shards(std::set& shard_ids) { return next->wakeup_meta_sync_shards(shard_ids); } -void FilterStore::wakeup_data_sync_shards(const DoutPrefixProvider *dpp, +void FilterDriver::wakeup_data_sync_shards(const DoutPrefixProvider *dpp, const rgw_zone_id& source_zone, boost::container::flat_map>& shard_ids) { return next->wakeup_data_sync_shards(dpp, source_zone, shard_ids); } -int FilterStore::clear_usage(const DoutPrefixProvider *dpp) +int FilterDriver::clear_usage(const DoutPrefixProvider *dpp) { return next->clear_usage(dpp); } -int FilterStore::read_all_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, +int FilterDriver::read_all_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, bool* is_truncated, RGWUsageIter& usage_iter, std::map& usage) @@ -392,60 +392,60 @@ int FilterStore::read_all_usage(const DoutPrefixProvider *dpp, uint64_t start_ep is_truncated, usage_iter, usage); } -int FilterStore::trim_all_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, +int FilterDriver::trim_all_usage(const 
DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch) { return next->trim_all_usage(dpp, start_epoch, end_epoch); } -int FilterStore::get_config_key_val(std::string name, bufferlist* bl) +int FilterDriver::get_config_key_val(std::string name, bufferlist* bl) { return next->get_config_key_val(name, bl); } -int FilterStore::meta_list_keys_init(const DoutPrefixProvider *dpp, +int FilterDriver::meta_list_keys_init(const DoutPrefixProvider *dpp, const std::string& section, const std::string& marker, void** phandle) { return next->meta_list_keys_init(dpp, section, marker, phandle); } -int FilterStore::meta_list_keys_next(const DoutPrefixProvider *dpp, void* handle, +int FilterDriver::meta_list_keys_next(const DoutPrefixProvider *dpp, void* handle, int max, std::list& keys, bool* truncated) { return next->meta_list_keys_next(dpp, handle, max, keys, truncated); } -void FilterStore::meta_list_keys_complete(void* handle) +void FilterDriver::meta_list_keys_complete(void* handle) { next->meta_list_keys_complete(handle); } -std::string FilterStore::meta_get_marker(void* handle) +std::string FilterDriver::meta_get_marker(void* handle) { return next->meta_get_marker(handle); } -int FilterStore::meta_remove(const DoutPrefixProvider* dpp, std::string& metadata_key, +int FilterDriver::meta_remove(const DoutPrefixProvider* dpp, std::string& metadata_key, optional_yield y) { return next->meta_remove(dpp, metadata_key, y); } -const RGWSyncModuleInstanceRef& FilterStore::get_sync_module() +const RGWSyncModuleInstanceRef& FilterDriver::get_sync_module() { return next->get_sync_module(); } -std::unique_ptr FilterStore::get_lua_manager() +std::unique_ptr FilterDriver::get_lua_manager() { std::unique_ptr nm = next->get_lua_manager(); return std::make_unique(std::move(nm)); } -std::unique_ptr FilterStore::get_role(std::string name, +std::unique_ptr FilterDriver::get_role(std::string name, std::string tenant, std::string path, std::string trust_policy, @@ -455,17 +455,17 @@ 
std::unique_ptr FilterStore::get_role(std::string name, return next->get_role(name, tenant, path, trust_policy, max_session_duration_str, tags); } -std::unique_ptr FilterStore::get_role(std::string id) +std::unique_ptr FilterDriver::get_role(std::string id) { return next->get_role(id); } -std::unique_ptr FilterStore::get_role(const RGWRoleInfo& info) +std::unique_ptr FilterDriver::get_role(const RGWRoleInfo& info) { return next->get_role(info); } -int FilterStore::get_roles(const DoutPrefixProvider *dpp, +int FilterDriver::get_roles(const DoutPrefixProvider *dpp, optional_yield y, const std::string& path_prefix, const std::string& tenant, @@ -474,19 +474,19 @@ int FilterStore::get_roles(const DoutPrefixProvider *dpp, return next->get_roles(dpp, y, path_prefix, tenant, roles); } -std::unique_ptr FilterStore::get_oidc_provider() +std::unique_ptr FilterDriver::get_oidc_provider() { return next->get_oidc_provider(); } -int FilterStore::get_oidc_providers(const DoutPrefixProvider *dpp, +int FilterDriver::get_oidc_providers(const DoutPrefixProvider *dpp, const std::string& tenant, std::vector>& providers) { return next->get_oidc_providers(dpp, tenant, providers); } -std::unique_ptr FilterStore::get_append_writer(const DoutPrefixProvider *dpp, +std::unique_ptr FilterDriver::get_append_writer(const DoutPrefixProvider *dpp, optional_yield y, std::unique_ptr _head_obj, const rgw_user& owner, @@ -505,7 +505,7 @@ std::unique_ptr FilterStore::get_append_writer(const DoutPrefixProvider return std::make_unique(std::move(writer), std::move(_head_obj)); } -std::unique_ptr FilterStore::get_atomic_writer(const DoutPrefixProvider *dpp, +std::unique_ptr FilterDriver::get_atomic_writer(const DoutPrefixProvider *dpp, optional_yield y, std::unique_ptr _head_obj, const rgw_user& owner, @@ -522,32 +522,32 @@ std::unique_ptr FilterStore::get_atomic_writer(const DoutPrefixProvider return std::make_unique(std::move(writer), std::move(_head_obj)); } -const std::string& 
FilterStore::get_compression_type(const rgw_placement_rule& rule) +const std::string& FilterDriver::get_compression_type(const rgw_placement_rule& rule) { return next->get_compression_type(rule); } -bool FilterStore::valid_placement(const rgw_placement_rule& rule) +bool FilterDriver::valid_placement(const rgw_placement_rule& rule) { return next->valid_placement(rule); } -void FilterStore::finalize(void) +void FilterDriver::finalize(void) { next->finalize(); } -CephContext* FilterStore::ctx(void) +CephContext* FilterDriver::ctx(void) { return next->ctx(); } -const std::string& FilterStore::get_luarocks_path() const +const std::string& FilterDriver::get_luarocks_path() const { return next->get_luarocks_path(); } -void FilterStore::set_luarocks_path(const std::string& path) +void FilterDriver::set_luarocks_path(const std::string& path) { next->set_luarocks_path(path); } @@ -1373,11 +1373,11 @@ int FilterLuaManager::list_packages(const DoutPrefixProvider* dpp, optional_yiel extern "C" { -rgw::sal::Store* newBaseFilter(rgw::sal::Store* next) +rgw::sal::Driver* newBaseFilter(rgw::sal::Driver* next) { - rgw::sal::FilterStore* store = new rgw::sal::FilterStore(next); + rgw::sal::FilterDriver* driver = new rgw::sal::FilterDriver(next); - return store; + return driver; } } diff --git a/src/rgw/rgw_sal_filter.h b/src/rgw/rgw_sal_filter.h index 501b380daaa7c..88bd81ee10d9e 100644 --- a/src/rgw/rgw_sal_filter.h +++ b/src/rgw/rgw_sal_filter.h @@ -144,15 +144,15 @@ public: } }; -class FilterStore : public Store { +class FilterDriver : public Driver { protected: - Store* next; + Driver* next; private: std::unique_ptr zone; public: - FilterStore(Store* _next) : next(_next) {} - virtual ~FilterStore() = default; + FilterDriver(Driver* _next) : next(_next) {} + virtual ~FilterDriver() = default; virtual int initialize(CephContext *cct, const DoutPrefixProvider *dpp) override; virtual const std::string get_name() const override; diff --git a/src/rgw/rgw_sal_fwd.h 
b/src/rgw/rgw_sal_fwd.h index 23d132cf3d4ff..08866c2bea287 100644 --- a/src/rgw/rgw_sal_fwd.h +++ b/src/rgw/rgw_sal_fwd.h @@ -18,7 +18,7 @@ namespace rgw { namespace sal { - class Store; + class Driver; class User; class Bucket; class BucketList; diff --git a/src/rgw/rgw_sal_motr.h b/src/rgw/rgw_sal_motr.h index cb190e715783a..ad9328b951975 100644 --- a/src/rgw/rgw_sal_motr.h +++ b/src/rgw/rgw_sal_motr.h @@ -897,7 +897,7 @@ public: int delete_parts(const DoutPrefixProvider *dpp); }; -class MotrStore : public StoreStore { +class MotrStore : public StoreDriver { private: std::string luarocks_path; MotrZone zone; diff --git a/src/rgw/rgw_sal_store.h b/src/rgw/rgw_sal_store.h index 5e2e2b7bcce9b..78b32021fcb30 100644 --- a/src/rgw/rgw_sal_store.h +++ b/src/rgw/rgw_sal_store.h @@ -19,10 +19,10 @@ namespace rgw { namespace sal { -class StoreStore : public Store { +class StoreDriver : public Driver { public: - StoreStore() {} - virtual ~StoreStore() = default; + StoreDriver() {} + virtual ~StoreDriver() = default; virtual uint64_t get_new_req_id() override { return ceph::util::generate_random_number(); diff --git a/src/rgw/rgw_sts.cc b/src/rgw/rgw_sts.cc index 4c3d75925fe5e..b3926f5f7625f 100644 --- a/src/rgw/rgw_sts.cc +++ b/src/rgw/rgw_sts.cc @@ -159,7 +159,7 @@ void AssumedRoleUser::dump(Formatter *f) const } int AssumedRoleUser::generateAssumedRoleUser(CephContext* cct, - rgw::sal::Store* store, + rgw::sal::Driver* driver, const string& roleId, const rgw::ARN& roleArn, const string& roleSessionName) @@ -290,7 +290,7 @@ std::tuple STSService::getRoleInfo(const DoutPrefixProv if (auto r_arn = rgw::ARN::parse(arn); r_arn) { auto pos = r_arn->resource.find_last_of('/'); string roleName = r_arn->resource.substr(pos + 1); - std::unique_ptr role = store->get_role(roleName, r_arn->account); + std::unique_ptr role = driver->get_role(roleName, r_arn->account); if (int ret = role->get(dpp, y); ret < 0) { if (ret == -ENOENT) { ldpp_dout(dpp, 0) << "Role doesn't exist: " << 
roleName << dendl; @@ -322,7 +322,7 @@ std::tuple STSService::getRoleInfo(const DoutPrefixProv int STSService::storeARN(const DoutPrefixProvider *dpp, string& arn, optional_yield y) { int ret = 0; - std::unique_ptr user = store->get_user(user_id); + std::unique_ptr user = driver->get_user(user_id); if ((ret = user->load_user(dpp, y)) < 0) { return -ERR_NO_SUCH_ENTITY; } @@ -376,7 +376,7 @@ AssumeRoleWithWebIdentityResponse STSService::assumeRoleWithWebIdentity(const Do //Generate Assumed Role User response.assumeRoleResp.retCode = response.assumeRoleResp.user.generateAssumedRoleUser(cct, - store, + driver, roleId, r_arn.get(), req.getRoleSessionName()); @@ -430,7 +430,7 @@ AssumeRoleResponse STSService::assumeRole(const DoutPrefixProvider *dpp, response.packedPolicySize = (policy.size() / req.getMaxPolicySize()) * 100; //Generate Assumed Role User - response.retCode = response.user.generateAssumedRoleUser(cct, store, roleId, r_arn.get(), req.getRoleSessionName()); + response.retCode = response.user.generateAssumedRoleUser(cct, driver, roleId, r_arn.get(), req.getRoleSessionName()); if (response.retCode < 0) { return response; } diff --git a/src/rgw/rgw_sts.h b/src/rgw/rgw_sts.h index 05bd328bd1155..f73be37658972 100644 --- a/src/rgw/rgw_sts.h +++ b/src/rgw/rgw_sts.h @@ -115,7 +115,7 @@ class AssumedRoleUser { std::string assumeRoleId; public: int generateAssumedRoleUser( CephContext* cct, - rgw::sal::Store* store, + rgw::sal::Driver* driver, const std::string& roleId, const rgw::ARN& roleArn, const std::string& roleSessionName); @@ -235,16 +235,16 @@ using AssumeRoleWithWebIdentityResponse = struct AssumeRoleWithWebIdentityRespon class STSService { CephContext* cct; - rgw::sal::Store* store; + rgw::sal::Driver* driver; rgw_user user_id; std::unique_ptr role; rgw::auth::Identity* identity; int storeARN(const DoutPrefixProvider *dpp, std::string& arn, optional_yield y); public: STSService() = default; - STSService(CephContext* cct, rgw::sal::Store* store, rgw_user 
user_id, + STSService(CephContext* cct, rgw::sal::Driver* driver, rgw_user user_id, rgw::auth::Identity* identity) - : cct(cct), store(store), user_id(user_id), identity(identity) {} + : cct(cct), driver(driver), user_id(user_id), identity(identity) {} std::tuple getRoleInfo(const DoutPrefixProvider *dpp, const std::string& arn, optional_yield y); AssumeRoleResponse assumeRole(const DoutPrefixProvider *dpp, AssumeRoleRequest& req, optional_yield y); GetSessionTokenResponse getSessionToken(const DoutPrefixProvider *dpp, GetSessionTokenRequest& req); diff --git a/src/rgw/rgw_swift_auth.cc b/src/rgw/rgw_swift_auth.cc index 8415a41e3a5db..197c4e19dd689 100644 --- a/src/rgw/rgw_swift_auth.cc +++ b/src/rgw/rgw_swift_auth.cc @@ -98,7 +98,7 @@ void TempURLEngine::get_owner_info(const DoutPrefixProvider* dpp, const req_stat rgw_user uid(s->account_name); if (uid.tenant.empty()) { rgw_user tenanted_uid(uid.id, uid.id); - user = store->get_user(tenanted_uid); + user = driver->get_user(tenanted_uid); if (user->load_user(dpp, s->yield) >= 0) { /* Succeeded */ found = true; @@ -106,7 +106,7 @@ void TempURLEngine::get_owner_info(const DoutPrefixProvider* dpp, const req_stat } if (!found) { - user = store->get_user(uid); + user = driver->get_user(uid); if (user->load_user(dpp, s->yield) < 0) { throw -EPERM; } @@ -119,7 +119,7 @@ void TempURLEngine::get_owner_info(const DoutPrefixProvider* dpp, const req_stat b.tenant = std::move(bucket_tenant); b.name = std::move(bucket_name); std::unique_ptr bucket; - int ret = store->get_bucket(dpp, nullptr, b, &bucket, s->yield); + int ret = driver->get_bucket(dpp, nullptr, b, &bucket, s->yield); if (ret < 0) { throw ret; } @@ -128,7 +128,7 @@ void TempURLEngine::get_owner_info(const DoutPrefixProvider* dpp, const req_stat << dendl; std::unique_ptr user; - user = store->get_user(bucket->get_info().owner); + user = driver->get_user(bucket->get_info().owner); if (user->load_user(dpp, s->yield) < 0) { throw -EPERM; } @@ -459,7 +459,7 @@ 
ExternalTokenEngine::authenticate(const DoutPrefixProvider* dpp, ldpp_dout(dpp, 10) << "swift user=" << swift_user << dendl; std::unique_ptr user; - ret = store->get_user_by_swift(dpp, swift_user, s->yield, &user); + ret = driver->get_user_by_swift(dpp, swift_user, s->yield, &user); if (ret < 0) { ldpp_dout(dpp, 0) << "NOTICE: couldn't map swift user" << dendl; throw ret; @@ -580,7 +580,7 @@ SignedTokenEngine::authenticate(const DoutPrefixProvider* dpp, } std::unique_ptr user; - ret = store->get_user_by_swift(dpp, swift_user, s->yield, &user); + ret = driver->get_user_by_swift(dpp, swift_user, s->yield, &user); if (ret < 0) { throw ret; } @@ -697,7 +697,7 @@ void RGW_SWIFT_Auth_Get::execute(optional_yield y) user_str = user_name; - ret = store->get_user_by_swift(s, user_str, s->yield, &user); + ret = driver->get_user_by_swift(s, user_str, s->yield, &user); if (ret < 0) { ret = -EACCES; goto done; @@ -751,14 +751,14 @@ done: end_header(s); } -int RGWHandler_SWIFT_Auth::init(rgw::sal::Store* store, req_state *state, +int RGWHandler_SWIFT_Auth::init(rgw::sal::Driver* driver, req_state *state, rgw::io::BasicClient *cio) { state->dialect = "swift-auth"; state->formatter = new JSONFormatter; state->format = RGWFormat::JSON; - return RGWHandler::init(store, state, cio); + return RGWHandler::init(driver, state, cio); } int RGWHandler_SWIFT_Auth::authorize(const DoutPrefixProvider *dpp, optional_yield) diff --git a/src/rgw/rgw_swift_auth.h b/src/rgw/rgw_swift_auth.h index 1faf8c9db2ac7..8ff731e061f6a 100644 --- a/src/rgw/rgw_swift_auth.h +++ b/src/rgw/rgw_swift_auth.h @@ -43,7 +43,7 @@ class TempURLEngine : public rgw::auth::Engine { using result_t = rgw::auth::Engine::result_t; CephContext* const cct; - rgw::sal::Store* store; + rgw::sal::Driver* driver; const TempURLApplier::Factory* const apl_factory; /* Helper methods. 
*/ @@ -61,10 +61,10 @@ class TempURLEngine : public rgw::auth::Engine { public: TempURLEngine(CephContext* const cct, - rgw::sal::Store* _store , + rgw::sal::Driver* _driver , const TempURLApplier::Factory* const apl_factory) : cct(cct), - store(_store), + driver(_driver), apl_factory(apl_factory) { } @@ -82,7 +82,7 @@ class SignedTokenEngine : public rgw::auth::Engine { using result_t = rgw::auth::Engine::result_t; CephContext* const cct; - rgw::sal::Store* store; + rgw::sal::Driver* driver; const rgw::auth::TokenExtractor* const extractor; const rgw::auth::LocalApplier::Factory* const apl_factory; @@ -94,11 +94,11 @@ class SignedTokenEngine : public rgw::auth::Engine { public: SignedTokenEngine(CephContext* const cct, - rgw::sal::Store* _store, + rgw::sal::Driver* _driver, const rgw::auth::TokenExtractor* const extractor, const rgw::auth::LocalApplier::Factory* const apl_factory) : cct(cct), - store(_store), + driver(_driver), extractor(extractor), apl_factory(apl_factory) { } @@ -119,7 +119,7 @@ class ExternalTokenEngine : public rgw::auth::Engine { using result_t = rgw::auth::Engine::result_t; CephContext* const cct; - rgw::sal::Store* store; + rgw::sal::Driver* driver; const rgw::auth::TokenExtractor* const extractor; const rgw::auth::LocalApplier::Factory* const apl_factory; @@ -130,11 +130,11 @@ class ExternalTokenEngine : public rgw::auth::Engine { public: ExternalTokenEngine(CephContext* const cct, - rgw::sal::Store* _store, + rgw::sal::Driver* _driver, const rgw::auth::TokenExtractor* const extractor, const rgw::auth::LocalApplier::Factory* const apl_factory) : cct(cct), - store(_store), + driver(_driver), extractor(extractor), apl_factory(apl_factory) { } @@ -185,7 +185,7 @@ class DefaultStrategy : public rgw::auth::Strategy, public rgw::auth::RemoteApplier::Factory, public rgw::auth::LocalApplier::Factory, public rgw::auth::swift::TempURLApplier::Factory { - rgw::sal::Store* store; + rgw::sal::Driver* driver; ImplicitTenants& implicit_tenant_context; /* 
The engines. */ @@ -221,9 +221,9 @@ class DefaultStrategy : public rgw::auth::Strategy, acl_strategy_t&& extra_acl_strategy, const rgw::auth::RemoteApplier::AuthInfo &info) const override { auto apl = \ - rgw::auth::add_3rdparty(store, rgw_user(s->account_name), - rgw::auth::add_sysreq(cct, store, s, - rgw::auth::RemoteApplier(cct, store, std::move(extra_acl_strategy), info, + rgw::auth::add_3rdparty(driver, rgw_user(s->account_name), + rgw::auth::add_sysreq(cct, driver, s, + rgw::auth::RemoteApplier(cct, driver, std::move(extra_acl_strategy), info, implicit_tenant_context, rgw::auth::ImplicitTenants::IMPLICIT_TENANTS_SWIFT))); /* TODO(rzarzynski): replace with static_ptr. */ @@ -237,8 +237,8 @@ class DefaultStrategy : public rgw::auth::Strategy, const std::optional& perm_mask, const std::string& access_key_id) const override { auto apl = \ - rgw::auth::add_3rdparty(store, rgw_user(s->account_name), - rgw::auth::add_sysreq(cct, store, s, + rgw::auth::add_3rdparty(driver, rgw_user(s->account_name), + rgw::auth::add_sysreq(cct, driver, s, rgw::auth::LocalApplier(cct, user_info, subuser, perm_mask, access_key_id))); /* TODO(rzarzynski): replace with static_ptr. 
*/ return aplptr_t(new decltype(apl)(std::move(apl))); @@ -256,18 +256,18 @@ class DefaultStrategy : public rgw::auth::Strategy, public: DefaultStrategy(CephContext* const cct, ImplicitTenants& implicit_tenant_context, - rgw::sal::Store* _store) - : store(_store), + rgw::sal::Driver* _driver) + : driver(_driver), implicit_tenant_context(implicit_tenant_context), tempurl_engine(cct, - store, + driver, static_cast(this)), signed_engine(cct, - store, + driver, static_cast(&auth_token_extractor), static_cast(this)), external_engine(cct, - store, + driver, static_cast(&auth_token_extractor), static_cast(this)), anon_engine(cct, @@ -326,7 +326,7 @@ public: ~RGWHandler_SWIFT_Auth() override {} RGWOp *op_get() override; - int init(rgw::sal::Store* store, req_state *state, rgw::io::BasicClient *cio) override; + int init(rgw::sal::Driver* driver, req_state *state, rgw::io::BasicClient *cio) override; int authorize(const DoutPrefixProvider *dpp, optional_yield y) override; int postauth_init(optional_yield) override { return 0; } int read_permissions(RGWOp *op, optional_yield) override { return 0; } @@ -346,7 +346,7 @@ public: return this; } - RGWHandler_REST* get_handler(rgw::sal::Store* store, + RGWHandler_REST* get_handler(rgw::sal::Driver* driver, req_state*, const rgw::auth::StrategyRegistry&, const std::string&) override { diff --git a/src/rgw/rgw_torrent.cc b/src/rgw/rgw_torrent.cc index 5670bc1a0d90b..e1a1417a5a6fd 100644 --- a/src/rgw/rgw_torrent.cc +++ b/src/rgw/rgw_torrent.cc @@ -34,13 +34,13 @@ seed::~seed() seed::info.sha1_bl.clear(); bl.clear(); s = NULL; - store = NULL; + driver = NULL; } -void seed::init(req_state *p_req, rgw::sal::Store* p_store) +void seed::init(req_state *_req, rgw::sal::Driver* _driver) { - s = p_req; - store = p_store; + s = _req; + driver = _driver; } int seed::get_torrent_file(rgw::sal::Object* object, diff --git a/src/rgw/rgw_torrent.h b/src/rgw/rgw_torrent.h index 7e225b771400d..1f62ced351799 100644 --- a/src/rgw/rgw_torrent.h +++ 
b/src/rgw/rgw_torrent.h @@ -107,7 +107,7 @@ private: bufferlist bl; // bufflist ready to send req_state *s{nullptr}; - rgw::sal::Store* store{nullptr}; + rgw::sal::Driver* driver{nullptr}; SHA1 h; TorrentBencode dencode; @@ -116,7 +116,7 @@ public: ~seed(); int get_params(); - void init(req_state *p_req, rgw::sal::Store* p_store); + void init(req_state *p_req, rgw::sal::Driver* _driver); int get_torrent_file(rgw::sal::Object* object, uint64_t &total_len, ceph::bufferlist &bl_data, diff --git a/src/rgw/rgw_usage.cc b/src/rgw/rgw_usage.cc index cf0cac66cd629..ca7ca20eb12cf 100644 --- a/src/rgw/rgw_usage.cc +++ b/src/rgw/rgw_usage.cc @@ -30,7 +30,7 @@ static void dump_usage_categories_info(Formatter *formatter, const rgw_usage_log formatter->close_section(); // categories } -int RGWUsage::show(const DoutPrefixProvider *dpp, rgw::sal::Store* store, +int RGWUsage::show(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, rgw::sal::User* user , rgw::sal::Bucket* bucket, uint64_t start_epoch, uint64_t end_epoch, bool show_log_entries, bool show_log_sum, @@ -64,7 +64,7 @@ int RGWUsage::show(const DoutPrefixProvider *dpp, rgw::sal::Store* store, ret = user->read_usage(dpp, start_epoch, end_epoch, max_entries, &is_truncated, usage_iter, usage); } else { - ret = store->read_all_usage(dpp, start_epoch, end_epoch, max_entries, &is_truncated, + ret = driver->read_all_usage(dpp, start_epoch, end_epoch, max_entries, &is_truncated, usage_iter, usage); } @@ -152,7 +152,7 @@ int RGWUsage::show(const DoutPrefixProvider *dpp, rgw::sal::Store* store, return 0; } -int RGWUsage::trim(const DoutPrefixProvider *dpp, rgw::sal::Store* store, +int RGWUsage::trim(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, rgw::sal::User* user , rgw::sal::Bucket* bucket, uint64_t start_epoch, uint64_t end_epoch) { @@ -161,11 +161,11 @@ int RGWUsage::trim(const DoutPrefixProvider *dpp, rgw::sal::Store* store, } else if (user) { return user->trim_usage(dpp, start_epoch, end_epoch); } else { - 
return store->trim_all_usage(dpp, start_epoch, end_epoch); + return driver->trim_all_usage(dpp, start_epoch, end_epoch); } } -int RGWUsage::clear(const DoutPrefixProvider *dpp, rgw::sal::Store* store) +int RGWUsage::clear(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver) { - return store->clear_usage(dpp); + return driver->clear_usage(dpp); } diff --git a/src/rgw/rgw_usage.h b/src/rgw/rgw_usage.h index 474b45e7cb8e7..ec596ed75469f 100644 --- a/src/rgw/rgw_usage.h +++ b/src/rgw/rgw_usage.h @@ -17,17 +17,17 @@ class RGWUsage { public: - static int show(const DoutPrefixProvider *dpp, rgw::sal::Store* store, + static int show(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, rgw::sal::User* user , rgw::sal::Bucket* bucket, uint64_t start_epoch, uint64_t end_epoch, bool show_log_entries, bool show_log_sum, std::map *categories, RGWFormatterFlusher& flusher); - static int trim(const DoutPrefixProvider *dpp, rgw::sal::Store* store, + static int trim(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, rgw::sal::User* user , rgw::sal::Bucket* bucket, uint64_t start_epoch, uint64_t end_epoch); - static int clear(const DoutPrefixProvider *dpp, rgw::sal::Store* store); + static int clear(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver); }; diff --git a/src/rgw/rgw_user.cc b/src/rgw/rgw_user.cc index 954c7fd246d53..e5e07cbc492d2 100644 --- a/src/rgw/rgw_user.cc +++ b/src/rgw/rgw_user.cc @@ -13,12 +13,12 @@ using namespace std; -int rgw_user_sync_all_stats(const DoutPrefixProvider *dpp, rgw::sal::Store* store, +int rgw_user_sync_all_stats(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, rgw::sal::User* user, optional_yield y) { rgw::sal::BucketList user_buckets; - CephContext *cct = store->ctx(); + CephContext *cct = driver->ctx(); size_t max_entries = cct->_conf->rgw_list_buckets_max_chunk; string marker; int ret; @@ -62,12 +62,12 @@ int rgw_user_sync_all_stats(const DoutPrefixProvider *dpp, rgw::sal::Store* stor } int 
rgw_user_get_all_buckets_stats(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, rgw::sal::User* user, map& buckets_usage_map, optional_yield y) { - CephContext *cct = store->ctx(); + CephContext *cct = driver->ctx(); size_t max_entries = cct->_conf->rgw_list_buckets_max_chunk; bool done; string marker; diff --git a/src/rgw/rgw_zone.cc b/src/rgw/rgw_zone.cc index 0f4f7d1104d83..d886b25a6b4ed 100644 --- a/src/rgw/rgw_zone.cc +++ b/src/rgw/rgw_zone.cc @@ -567,7 +567,7 @@ int RGWZoneParams::fix_pool_names(const DoutPrefixProvider *dpp, optional_yield list zones; int r = zone_svc->list_zones(dpp, zones); if (r < 0) { - ldpp_dout(dpp, 10) << "WARNING: store->list_zones() returned r=" << r << dendl; + ldpp_dout(dpp, 10) << "WARNING: driver->list_zones() returned r=" << r << dendl; } set pools; diff --git a/src/rgw/store/dbstore/common/dbstore.h b/src/rgw/store/dbstore/common/dbstore.h index 606066c2be657..12ab3f0600ddd 100644 --- a/src/rgw/store/dbstore/common/dbstore.h +++ b/src/rgw/store/dbstore/common/dbstore.h @@ -1486,7 +1486,7 @@ WRITE_CLASS_ENCODER(DBOLHInfo) class DB { private: const std::string db_name; - rgw::sal::Store* store; + rgw::sal::Driver* driver; const std::string user_table; const std::string bucket_table; const std::string quota_table; @@ -1549,8 +1549,8 @@ class DB { struct DBOps dbops; // DB operations, make it private? 
- void set_store(rgw::sal::Store* _store) { - store = _store; + void set_driver(rgw::sal::Driver* _driver) { + driver = _driver; } void set_context(CephContext *_cct) { diff --git a/src/rgw/store/rados/rgw_bucket.cc b/src/rgw/store/rados/rgw_bucket.cc index ba81411050823..7f600fe457e2b 100644 --- a/src/rgw/store/rados/rgw_bucket.cc +++ b/src/rgw/store/rados/rgw_bucket.cc @@ -81,7 +81,7 @@ static void dump_mulipart_index_results(list& objs_to_unlink, } } -void check_bad_user_bucket_mapping(rgw::sal::Store* store, rgw::sal::User* user, +void check_bad_user_bucket_mapping(rgw::sal::Driver* driver, rgw::sal::User* user, bool fix, optional_yield y, const DoutPrefixProvider *dpp) @@ -89,14 +89,14 @@ void check_bad_user_bucket_mapping(rgw::sal::Store* store, rgw::sal::User* user, rgw::sal::BucketList user_buckets; string marker; - CephContext *cct = store->ctx(); + CephContext *cct = driver->ctx(); size_t max_entries = cct->_conf->rgw_list_buckets_max_chunk; do { int ret = user->list_buckets(dpp, marker, string(), max_entries, false, user_buckets, y); if (ret < 0) { - ldout(store->ctx(), 0) << "failed to read user buckets: " + ldout(driver->ctx(), 0) << "failed to read user buckets: " << cpp_strerror(-ret) << dendl; return; } @@ -110,9 +110,9 @@ void check_bad_user_bucket_mapping(rgw::sal::Store* store, rgw::sal::User* user, auto& bucket = i->second; std::unique_ptr actual_bucket; - int r = store->get_bucket(dpp, user, user->get_tenant(), bucket->get_name(), &actual_bucket, null_yield); + int r = driver->get_bucket(dpp, user, user->get_tenant(), bucket->get_name(), &actual_bucket, null_yield); if (r < 0) { - ldout(store->ctx(), 0) << "could not get bucket info for bucket=" << bucket << dendl; + ldout(driver->ctx(), 0) << "could not get bucket info for bucket=" << bucket << dendl; continue; } @@ -142,7 +142,7 @@ bool rgw_bucket_object_check_filter(const std::string& oid) return rgw_obj_key::oid_to_key_in_ns(oid, &key, empty_ns); } -int rgw_remove_object(const 
DoutPrefixProvider *dpp, rgw::sal::Store* store, rgw::sal::Bucket* bucket, rgw_obj_key& key) +int rgw_remove_object(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, rgw::sal::Bucket* bucket, rgw_obj_key& key) { if (key.instance.empty()) { key.instance = "null"; @@ -159,22 +159,22 @@ static void set_err_msg(std::string *sink, std::string msg) *sink = msg; } -int RGWBucket::init(rgw::sal::Store* _store, RGWBucketAdminOpState& op_state, +int RGWBucket::init(rgw::sal::Driver* _driver, RGWBucketAdminOpState& op_state, optional_yield y, const DoutPrefixProvider *dpp, std::string *err_msg) { - if (!_store) { + if (!_driver) { set_err_msg(err_msg, "no storage!"); return -EINVAL; } - store = _store; + driver = _driver; std::string bucket_name = op_state.get_bucket_name(); if (bucket_name.empty() && op_state.get_user_id().empty()) return -EINVAL; - user = store->get_user(op_state.get_user_id()); + user = driver->get_user(op_state.get_user_id()); std::string tenant = user->get_tenant(); // split possible tenant/name @@ -184,7 +184,7 @@ int RGWBucket::init(rgw::sal::Store* _store, RGWBucketAdminOpState& op_state, bucket_name = bucket_name.substr(pos + 1); } - int r = store->get_bucket(dpp, user.get(), tenant, bucket_name, &bucket, y); + int r = driver->get_bucket(dpp, user.get(), tenant, bucket_name, &bucket, y); if (r < 0) { set_err_msg(err_msg, "failed to fetch bucket info for bucket=" + bucket_name); return r; @@ -206,25 +206,25 @@ int RGWBucket::init(rgw::sal::Store* _store, RGWBucketAdminOpState& op_state, return 0; } -bool rgw_find_bucket_by_id(const DoutPrefixProvider *dpp, CephContext *cct, rgw::sal::Store* store, +bool rgw_find_bucket_by_id(const DoutPrefixProvider *dpp, CephContext *cct, rgw::sal::Driver* driver, const string& marker, const string& bucket_id, rgw_bucket* bucket_out) { void *handle = NULL; bool truncated = false; string s; - int ret = store->meta_list_keys_init(dpp, "bucket.instance", marker, &handle); + int ret = 
driver->meta_list_keys_init(dpp, "bucket.instance", marker, &handle); if (ret < 0) { cerr << "ERROR: can't get key: " << cpp_strerror(-ret) << std::endl; - store->meta_list_keys_complete(handle); + driver->meta_list_keys_complete(handle); return -ret; } do { list keys; - ret = store->meta_list_keys_next(dpp, handle, 1000, keys, &truncated); + ret = driver->meta_list_keys_next(dpp, handle, 1000, keys, &truncated); if (ret < 0) { cerr << "ERROR: lists_keys_next(): " << cpp_strerror(-ret) << std::endl; - store->meta_list_keys_complete(handle); + driver->meta_list_keys_complete(handle); return -ret; } for (list::iterator iter = keys.begin(); iter != keys.end(); ++iter) { @@ -234,12 +234,12 @@ bool rgw_find_bucket_by_id(const DoutPrefixProvider *dpp, CephContext *cct, rgw: continue; } if (bucket_id == bucket_out->bucket_id) { - store->meta_list_keys_complete(handle); + driver->meta_list_keys_complete(handle); return true; } } } while (truncated); - store->meta_list_keys_complete(handle); + driver->meta_list_keys_complete(handle); return false; } @@ -275,7 +275,7 @@ int RGWBucket::remove_object(const DoutPrefixProvider *dpp, RGWBucketAdminOpStat bucket = op_state.get_bucket()->clone(); - int ret = rgw_remove_object(dpp, store, bucket.get(), key); + int ret = rgw_remove_object(dpp, driver, bucket.get(), key); if (ret < 0) { set_err_msg(err_msg, "unable to remove object" + cpp_strerror(-ret)); return ret; @@ -491,7 +491,7 @@ int RGWBucket::check_index(const DoutPrefixProvider *dpp, int RGWBucket::sync(RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, std::string *err_msg) { - if (!store->is_meta_master()) { + if (!driver->is_meta_master()) { set_err_msg(err_msg, "ERROR: failed to update bucket sync: only allowed on meta master zone"); return -EINVAL; } @@ -519,14 +519,14 @@ int RGWBucket::policy_bl_to_stream(bufferlist& bl, ostream& o) RGWAccessControlPolicy_S3 policy(g_ceph_context); int ret = decode_bl(bl, policy); if (ret < 0) { - ldout(store->ctx(),0) << 
"failed to decode RGWAccessControlPolicy" << dendl; + ldout(driver->ctx(),0) << "failed to decode RGWAccessControlPolicy" << dendl; } policy.to_xml(o); return 0; } int rgw_object_get_attr(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, rgw::sal::Object* obj, + rgw::sal::Driver* driver, rgw::sal::Object* obj, const char* attr_name, bufferlist& out_bl, optional_yield y) { std::unique_ptr rop = obj->get_read_op(); @@ -545,14 +545,14 @@ int RGWBucket::get_policy(RGWBucketAdminOpState& op_state, RGWAccessControlPolic bufferlist bl; std::unique_ptr obj = bucket->get_object(rgw_obj_key(object_name)); - ret = rgw_object_get_attr(dpp, store, obj.get(), RGW_ATTR_ACL, bl, y); + ret = rgw_object_get_attr(dpp, driver, obj.get(), RGW_ATTR_ACL, bl, y); if (ret < 0){ return ret; } ret = decode_bl(bl, policy); if (ret < 0) { - ldout(store->ctx(),0) << "failed to decode RGWAccessControlPolicy" << dendl; + ldout(driver->ctx(),0) << "failed to decode RGWAccessControlPolicy" << dendl; } return ret; } @@ -564,19 +564,19 @@ int RGWBucket::get_policy(RGWBucketAdminOpState& op_state, RGWAccessControlPolic ret = decode_bl(aiter->second, policy); if (ret < 0) { - ldout(store->ctx(),0) << "failed to decode RGWAccessControlPolicy" << dendl; + ldout(driver->ctx(),0) << "failed to decode RGWAccessControlPolicy" << dendl; } return ret; } -int RGWBucketAdminOp::get_policy(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, +int RGWBucketAdminOp::get_policy(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, RGWAccessControlPolicy& policy, const DoutPrefixProvider *dpp) { RGWBucket bucket; - int ret = bucket.init(store, op_state, null_yield, dpp); + int ret = bucket.init(driver, op_state, null_yield, dpp); if (ret < 0) return ret; @@ -590,12 +590,12 @@ int RGWBucketAdminOp::get_policy(rgw::sal::Store* store, RGWBucketAdminOpState& /* Wrappers to facilitate RESTful interface */ -int RGWBucketAdminOp::get_policy(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, +int 
RGWBucketAdminOp::get_policy(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, const DoutPrefixProvider *dpp) { - RGWAccessControlPolicy policy(store->ctx()); + RGWAccessControlPolicy policy(driver->ctx()); - int ret = get_policy(store, op_state, policy, dpp); + int ret = get_policy(driver, op_state, policy, dpp); if (ret < 0) return ret; @@ -612,12 +612,12 @@ int RGWBucketAdminOp::get_policy(rgw::sal::Store* store, RGWBucketAdminOpState& return 0; } -int RGWBucketAdminOp::dump_s3_policy(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, +int RGWBucketAdminOp::dump_s3_policy(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, ostream& os, const DoutPrefixProvider *dpp) { - RGWAccessControlPolicy_S3 policy(store->ctx()); + RGWAccessControlPolicy_S3 policy(driver->ctx()); - int ret = get_policy(store, op_state, policy, dpp); + int ret = get_policy(driver, op_state, policy, dpp); if (ret < 0) return ret; @@ -626,18 +626,18 @@ int RGWBucketAdminOp::dump_s3_policy(rgw::sal::Store* store, RGWBucketAdminOpSta return 0; } -int RGWBucketAdminOp::unlink(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp) +int RGWBucketAdminOp::unlink(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp) { RGWBucket bucket; - int ret = bucket.init(store, op_state, null_yield, dpp); + int ret = bucket.init(driver, op_state, null_yield, dpp); if (ret < 0) return ret; - return static_cast(store)->ctl()->bucket->unlink_bucket(op_state.get_user_id(), op_state.get_bucket()->get_info().bucket, null_yield, dpp, true); + return static_cast(driver)->ctl()->bucket->unlink_bucket(op_state.get_user_id(), op_state.get_bucket()->get_info().bucket, null_yield, dpp, true); } -int RGWBucketAdminOp::link(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, string *err) +int RGWBucketAdminOp::link(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, 
const DoutPrefixProvider *dpp, string *err) { if (!op_state.is_user_op()) { set_err_msg(err, "empty user id"); @@ -645,7 +645,7 @@ int RGWBucketAdminOp::link(rgw::sal::Store* store, RGWBucketAdminOpState& op_sta } RGWBucket bucket; - int ret = bucket.init(store, op_state, null_yield, dpp, err); + int ret = bucket.init(driver, op_state, null_yield, dpp, err); if (ret < 0) return ret; @@ -700,7 +700,7 @@ int RGWBucketAdminOp::link(rgw::sal::Store* store, RGWBucketAdminOpState& op_sta return -EIO; } - int r = static_cast(store)->ctl()->bucket->unlink_bucket(owner.get_id(), old_bucket->get_info().bucket, null_yield, dpp, false); + int r = static_cast(driver)->ctl()->bucket->unlink_bucket(owner.get_id(), old_bucket->get_info().bucket, null_yield, dpp, false); if (r < 0) { set_err_msg(err, "could not unlink policy from user " + owner.get_id().to_str()); return r; @@ -741,7 +741,7 @@ int RGWBucketAdminOp::link(rgw::sal::Store* store, RGWBucketAdminOpState& op_sta rgw::sal::Attrs ep_attrs; rgw_ep_info ep_data{ep, ep_attrs}; - r = static_cast(store)->ctl()->bucket->link_bucket(op_state.get_user_id(), loc_bucket->get_info().bucket, loc_bucket->get_info().creation_time, null_yield, dpp, true, &ep_data); + r = static_cast(driver)->ctl()->bucket->link_bucket(op_state.get_user_id(), loc_bucket->get_info().bucket, loc_bucket->get_info().creation_time, null_yield, dpp, true, &ep_data); if (r < 0) { set_err_msg(err, "failed to relink bucket"); return r; @@ -749,7 +749,7 @@ int RGWBucketAdminOp::link(rgw::sal::Store* store, RGWBucketAdminOpState& op_sta if (*loc_bucket != *old_bucket) { // like RGWRados::delete_bucket -- excepting no bucket_index work. 
- r = static_cast(store)->ctl()->bucket->remove_bucket_entrypoint_info( + r = static_cast(driver)->ctl()->bucket->remove_bucket_entrypoint_info( old_bucket->get_key(), null_yield, dpp, RGWBucketCtl::Bucket::RemoveParams() .set_objv_tracker(&ep_data.ep_objv)); @@ -757,7 +757,7 @@ int RGWBucketAdminOp::link(rgw::sal::Store* store, RGWBucketAdminOpState& op_sta set_err_msg(err, "failed to unlink old bucket " + old_bucket->get_tenant() + "/" + old_bucket->get_name()); return r; } - r = static_cast(store)->ctl()->bucket->remove_bucket_instance_info( + r = static_cast(driver)->ctl()->bucket->remove_bucket_instance_info( old_bucket->get_key(), old_bucket->get_info(), null_yield, dpp, RGWBucketCtl::BucketInstance::RemoveParams() @@ -771,11 +771,11 @@ int RGWBucketAdminOp::link(rgw::sal::Store* store, RGWBucketAdminOpState& op_sta return 0; } -int RGWBucketAdminOp::chown(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, const string& marker, const DoutPrefixProvider *dpp, string *err) +int RGWBucketAdminOp::chown(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const string& marker, const DoutPrefixProvider *dpp, string *err) { RGWBucket bucket; - int ret = bucket.init(store, op_state, null_yield, dpp, err); + int ret = bucket.init(driver, op_state, null_yield, dpp, err); if (ret < 0) return ret; @@ -783,7 +783,7 @@ int RGWBucketAdminOp::chown(rgw::sal::Store* store, RGWBucketAdminOpState& op_st } -int RGWBucketAdminOp::check_index(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, +int RGWBucketAdminOp::check_index(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y, const DoutPrefixProvider *dpp) { int ret; @@ -793,7 +793,7 @@ int RGWBucketAdminOp::check_index(rgw::sal::Store* store, RGWBucketAdminOpState& RGWBucket bucket; - ret = bucket.init(store, op_state, null_yield, dpp); + ret = bucket.init(driver, op_state, null_yield, dpp); if (ret < 0) return ret; @@ -818,14 +818,14 @@ int 
RGWBucketAdminOp::check_index(rgw::sal::Store* store, RGWBucketAdminOpState& return 0; } -int RGWBucketAdminOp::remove_bucket(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, +int RGWBucketAdminOp::remove_bucket(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, optional_yield y, const DoutPrefixProvider *dpp, bool bypass_gc, bool keep_index_consistent) { std::unique_ptr bucket; - std::unique_ptr user = store->get_user(op_state.get_user_id()); + std::unique_ptr user = driver->get_user(op_state.get_user_id()); - int ret = store->get_bucket(dpp, user.get(), user->get_tenant(), op_state.get_bucket_name(), + int ret = driver->get_bucket(dpp, user.get(), user->get_tenant(), op_state.get_bucket_name(), &bucket, y); if (ret < 0) return ret; @@ -839,21 +839,21 @@ int RGWBucketAdminOp::remove_bucket(rgw::sal::Store* store, RGWBucketAdminOpStat return ret; } -int RGWBucketAdminOp::remove_object(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp) +int RGWBucketAdminOp::remove_object(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp) { RGWBucket bucket; - int ret = bucket.init(store, op_state, null_yield, dpp); + int ret = bucket.init(driver, op_state, null_yield, dpp); if (ret < 0) return ret; return bucket.remove_object(dpp, op_state); } -int RGWBucketAdminOp::sync_bucket(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, string *err_msg) +int RGWBucketAdminOp::sync_bucket(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, string *err_msg) { RGWBucket bucket; - int ret = bucket.init(store, op_state, null_yield, dpp, err_msg); + int ret = bucket.init(driver, op_state, null_yield, dpp, err_msg); if (ret < 0) { return ret; @@ -861,7 +861,7 @@ int RGWBucketAdminOp::sync_bucket(rgw::sal::Store* store, RGWBucketAdminOpState& return bucket.sync(op_state, dpp, err_msg); } -static int bucket_stats(rgw::sal::Store* 
store, +static int bucket_stats(rgw::sal::Driver* driver, const std::string& tenant_name, const std::string& bucket_name, Formatter *formatter, @@ -871,7 +871,7 @@ static int bucket_stats(rgw::sal::Store* store, map stats; real_time mtime; - int ret = store->get_bucket(dpp, nullptr, tenant_name, bucket_name, &bucket, null_yield); + int ret = driver->get_bucket(dpp, nullptr, tenant_name, bucket_name, &bucket, null_yield); if (ret < 0) { return ret; } @@ -934,7 +934,7 @@ static int bucket_stats(rgw::sal::Store* store, return 0; } -int RGWBucketAdminOp::limit_check(rgw::sal::Store* store, +int RGWBucketAdminOp::limit_check(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const std::list& user_ids, RGWFormatterFlusher& flusher, optional_yield y, @@ -943,13 +943,13 @@ int RGWBucketAdminOp::limit_check(rgw::sal::Store* store, { int ret = 0; const size_t max_entries = - store->ctx()->_conf->rgw_list_buckets_max_chunk; + driver->ctx()->_conf->rgw_list_buckets_max_chunk; const size_t safe_max_objs_per_shard = - store->ctx()->_conf->rgw_safe_max_objects_per_shard; + driver->ctx()->_conf->rgw_safe_max_objects_per_shard; uint16_t shard_warn_pct = - store->ctx()->_conf->rgw_shard_warning_threshold; + driver->ctx()->_conf->rgw_shard_warning_threshold; if (shard_warn_pct > 100) shard_warn_pct = 90; @@ -967,7 +967,7 @@ int RGWBucketAdminOp::limit_check(rgw::sal::Store* store, string marker; rgw::sal::BucketList buckets; do { - std::unique_ptr user = store->get_user(rgw_user(user_id)); + std::unique_ptr user = driver->get_user(rgw_user(user_id)); ret = user->list_buckets(dpp, marker, string(), max_entries, false, buckets, y); @@ -1050,7 +1050,7 @@ int RGWBucketAdminOp::limit_check(rgw::sal::Store* store, return ret; } /* RGWBucketAdminOp::limit_check */ -int RGWBucketAdminOp::info(rgw::sal::Store* store, +int RGWBucketAdminOp::info(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y, @@ -1060,7 +1060,7 @@ int 
RGWBucketAdminOp::info(rgw::sal::Store* store, int ret = 0; const std::string& bucket_name = op_state.get_bucket_name(); if (!bucket_name.empty()) { - ret = bucket.init(store, op_state, null_yield, dpp); + ret = bucket.init(driver, op_state, null_yield, dpp); if (-ENOENT == ret) return -ERR_NO_SUCH_BUCKET; else if (ret < 0) @@ -1070,7 +1070,7 @@ int RGWBucketAdminOp::info(rgw::sal::Store* store, Formatter *formatter = flusher.get_formatter(); flusher.start(0); - CephContext *cct = store->ctx(); + CephContext *cct = driver->ctx(); const size_t max_entries = cct->_conf->rgw_list_buckets_max_chunk; @@ -1080,7 +1080,7 @@ int RGWBucketAdminOp::info(rgw::sal::Store* store, formatter->open_array_section("buckets"); rgw::sal::BucketList buckets; - std::unique_ptr user = store->get_user(op_state.get_user_id()); + std::unique_ptr user = driver->get_user(op_state.get_user_id()); std::string marker; const std::string empty_end_marker; constexpr bool no_need_stats = false; // set need_stats to false @@ -1102,7 +1102,7 @@ int RGWBucketAdminOp::info(rgw::sal::Store* store, } if (show_stats) { - bucket_stats(store, user_id.tenant, obj_name, formatter, dpp); + bucket_stats(driver, user_id.tenant, obj_name, formatter, dpp); } else { formatter->dump_string("bucket", obj_name); } @@ -1118,7 +1118,7 @@ int RGWBucketAdminOp::info(rgw::sal::Store* store, formatter->close_section(); } else if (!bucket_name.empty()) { - ret = bucket_stats(store, user_id.tenant, bucket_name, formatter, dpp); + ret = bucket_stats(driver, user_id.tenant, bucket_name, formatter, dpp); if (ret < 0) { return ret; } @@ -1127,21 +1127,21 @@ int RGWBucketAdminOp::info(rgw::sal::Store* store, bool truncated = true; formatter->open_array_section("buckets"); - ret = store->meta_list_keys_init(dpp, "bucket", string(), &handle); + ret = driver->meta_list_keys_init(dpp, "bucket", string(), &handle); while (ret == 0 && truncated) { std::list buckets; constexpr int max_keys = 1000; - ret = store->meta_list_keys_next(dpp, 
handle, max_keys, buckets, + ret = driver->meta_list_keys_next(dpp, handle, max_keys, buckets, &truncated); for (auto& bucket_name : buckets) { if (show_stats) { - bucket_stats(store, user_id.tenant, bucket_name, formatter, dpp); + bucket_stats(driver, user_id.tenant, bucket_name, formatter, dpp); } else { formatter->dump_string("bucket", bucket_name); } } } - store->meta_list_keys_complete(handle); + driver->meta_list_keys_complete(handle); formatter->close_section(); } @@ -1151,11 +1151,11 @@ int RGWBucketAdminOp::info(rgw::sal::Store* store, return 0; } -int RGWBucketAdminOp::set_quota(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp) +int RGWBucketAdminOp::set_quota(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp) { RGWBucket bucket; - int ret = bucket.init(store, op_state, null_yield, dpp); + int ret = bucket.init(driver, op_state, null_yield, dpp); if (ret < 0) return ret; return bucket.set_quota(op_state, dpp); @@ -1170,7 +1170,7 @@ inline auto split_tenant(const std::string& bucket_name){ } using bucket_instance_ls = std::vector; -void get_stale_instances(rgw::sal::Store* store, const std::string& bucket_name, +void get_stale_instances(rgw::sal::Driver* driver, const std::string& bucket_name, const vector& lst, bucket_instance_ls& stale_instances, const DoutPrefixProvider *dpp) @@ -1183,8 +1183,8 @@ void get_stale_instances(rgw::sal::Store* store, const std::string& bucket_name, RGWBucketInfo binfo; std::unique_ptr bucket; rgw_bucket rbucket; - rgw_bucket_parse_bucket_key(store->ctx(), bucket_instance, &rbucket, nullptr); - int r = store->get_bucket(dpp, nullptr, rbucket, &bucket, null_yield); + rgw_bucket_parse_bucket_key(driver->ctx(), bucket_instance, &rbucket, nullptr); + int r = driver->get_bucket(dpp, nullptr, rbucket, &bucket, null_yield); if (r < 0){ // this can only happen if someone deletes us right when we're processing ldpp_dout(dpp, -1) << "Bucket instance is 
invalid: " << bucket_instance @@ -1204,7 +1204,7 @@ void get_stale_instances(rgw::sal::Store* store, const std::string& bucket_name, auto [tenant, bname] = split_tenant(bucket_name); RGWBucketInfo cur_bucket_info; std::unique_ptr cur_bucket; - int r = store->get_bucket(dpp, nullptr, tenant, bname, &cur_bucket, null_yield); + int r = driver->get_bucket(dpp, nullptr, tenant, bname, &cur_bucket, null_yield); if (r < 0) { if (r == -ENOENT) { // bucket doesn't exist, everything is stale then @@ -1241,7 +1241,7 @@ void get_stale_instances(rgw::sal::Store* store, const std::string& bucket_name, // bucket and walk through these instances to make sure no one else interferes // with these { - RGWBucketReshardLock reshard_lock(static_cast(store), cur_bucket->get_info(), true); + RGWBucketReshardLock reshard_lock(static_cast(driver), cur_bucket->get_info(), true); r = reshard_lock.lock(dpp); if (r < 0) { // most likely bucket is under reshard, return the sureshot stale instances @@ -1260,19 +1260,19 @@ void get_stale_instances(rgw::sal::Store* store, const std::string& bucket_name, return; } -static int process_stale_instances(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, +static int process_stale_instances(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, const DoutPrefixProvider *dpp, std::function process_f) + rgw::sal::Driver*)> process_f) { std::string marker; void *handle; Formatter *formatter = flusher.get_formatter(); static constexpr auto default_max_keys = 1000; - int ret = store->meta_list_keys_init(dpp, "bucket.instance", marker, &handle); + int ret = driver->meta_list_keys_init(dpp, "bucket.instance", marker, &handle); if (ret < 0) { cerr << "ERROR: can't get key: " << cpp_strerror(-ret) << std::endl; return ret; @@ -1281,8 +1281,8 @@ static int process_stale_instances(rgw::sal::Store* store, RGWBucketAdminOpState bool truncated; formatter->open_array_section("keys"); - auto g = make_scope_guard([&store, &handle, 
&formatter]() { - store->meta_list_keys_complete(handle); + auto g = make_scope_guard([&driver, &handle, &formatter]() { + driver->meta_list_keys_complete(handle); formatter->close_section(); // keys formatter->flush(cout); }); @@ -1290,7 +1290,7 @@ static int process_stale_instances(rgw::sal::Store* store, RGWBucketAdminOpState do { list keys; - ret = store->meta_list_keys_next(dpp, handle, default_max_keys, keys, &truncated); + ret = driver->meta_list_keys_next(dpp, handle, default_max_keys, keys, &truncated); if (ret < 0 && ret != -ENOENT) { cerr << "ERROR: lists_keys_next(): " << cpp_strerror(-ret) << std::endl; return ret; @@ -1305,8 +1305,8 @@ static int process_stale_instances(rgw::sal::Store* store, RGWBucketAdminOpState } for (const auto& kv: bucket_instance_map) { bucket_instance_ls stale_lst; - get_stale_instances(store, kv.first, kv.second, stale_lst, dpp); - process_f(stale_lst, formatter, store); + get_stale_instances(driver, kv.first, kv.second, stale_lst, dpp); + process_f(stale_lst, formatter, driver); } } } while (truncated); @@ -1314,36 +1314,36 @@ static int process_stale_instances(rgw::sal::Store* store, RGWBucketAdminOpState return 0; } -int RGWBucketAdminOp::list_stale_instances(rgw::sal::Store* store, +int RGWBucketAdminOp::list_stale_instances(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, const DoutPrefixProvider *dpp) { auto process_f = [](const bucket_instance_ls& lst, Formatter *formatter, - rgw::sal::Store*){ + rgw::sal::Driver*){ for (const auto& binfo: lst) formatter->dump_string("key", binfo.bucket.get_key()); }; - return process_stale_instances(store, op_state, flusher, dpp, process_f); + return process_stale_instances(driver, op_state, flusher, dpp, process_f); } -int RGWBucketAdminOp::clear_stale_instances(rgw::sal::Store* store, +int RGWBucketAdminOp::clear_stale_instances(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, const DoutPrefixProvider 
*dpp) { auto process_f = [dpp](const bucket_instance_ls& lst, Formatter *formatter, - rgw::sal::Store* store){ + rgw::sal::Driver* driver){ for (const auto &binfo: lst) { std::unique_ptr bucket; - store->get_bucket(nullptr, binfo, &bucket); + driver->get_bucket(nullptr, binfo, &bucket); int ret = bucket->purge_instance(dpp); if (ret == 0){ auto md_key = "bucket.instance:" + binfo.bucket.get_key(); - ret = store->meta_remove(dpp, md_key, null_yield); + ret = driver->meta_remove(dpp, md_key, null_yield); } formatter->open_object_section("delete_status"); formatter->dump_string("bucket_instance", binfo.bucket.get_key()); @@ -1352,23 +1352,23 @@ int RGWBucketAdminOp::clear_stale_instances(rgw::sal::Store* store, } }; - return process_stale_instances(store, op_state, flusher, dpp, process_f); + return process_stale_instances(driver, op_state, flusher, dpp, process_f); } -static int fix_single_bucket_lc(rgw::sal::Store* store, +static int fix_single_bucket_lc(rgw::sal::Driver* driver, const std::string& tenant_name, const std::string& bucket_name, const DoutPrefixProvider *dpp) { std::unique_ptr bucket; - int ret = store->get_bucket(dpp, nullptr, tenant_name, bucket_name, &bucket, null_yield); + int ret = driver->get_bucket(dpp, nullptr, tenant_name, bucket_name, &bucket, null_yield); if (ret < 0) { // TODO: Should we handle the case where the bucket could've been removed between // listing and fetching? 
return ret; } - return rgw::lc::fix_lc_shard_entry(dpp, store, store->get_rgwlc()->get_lc(), bucket.get()); + return rgw::lc::fix_lc_shard_entry(dpp, driver, driver->get_rgwlc()->get_lc(), bucket.get()); } static void format_lc_status(Formatter* formatter, @@ -1383,17 +1383,17 @@ static void format_lc_status(Formatter* formatter, formatter->close_section(); // bucket_entry } -static void process_single_lc_entry(rgw::sal::Store* store, +static void process_single_lc_entry(rgw::sal::Driver* driver, Formatter *formatter, const std::string& tenant_name, const std::string& bucket_name, const DoutPrefixProvider *dpp) { - int ret = fix_single_bucket_lc(store, tenant_name, bucket_name, dpp); + int ret = fix_single_bucket_lc(driver, tenant_name, bucket_name, dpp); format_lc_status(formatter, tenant_name, bucket_name, -ret); } -int RGWBucketAdminOp::fix_lc_shards(rgw::sal::Store* store, +int RGWBucketAdminOp::fix_lc_shards(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, const DoutPrefixProvider *dpp) @@ -1407,10 +1407,10 @@ int RGWBucketAdminOp::fix_lc_shards(rgw::sal::Store* store, if (const std::string& bucket_name = op_state.get_bucket_name(); ! 
bucket_name.empty()) { const rgw_user user_id = op_state.get_user_id(); - process_single_lc_entry(store, formatter, user_id.tenant, bucket_name, dpp); + process_single_lc_entry(driver, formatter, user_id.tenant, bucket_name, dpp); formatter->flush(cout); } else { - int ret = store->meta_list_keys_init(dpp, "bucket", marker, &handle); + int ret = driver->meta_list_keys_init(dpp, "bucket", marker, &handle); if (ret < 0) { std::cerr << "ERROR: can't get key: " << cpp_strerror(-ret) << std::endl; return ret; @@ -1418,21 +1418,21 @@ int RGWBucketAdminOp::fix_lc_shards(rgw::sal::Store* store, { formatter->open_array_section("lc_fix_status"); - auto sg = make_scope_guard([&store, &handle, &formatter](){ - store->meta_list_keys_complete(handle); + auto sg = make_scope_guard([&driver, &handle, &formatter](){ + driver->meta_list_keys_complete(handle); formatter->close_section(); // lc_fix_status formatter->flush(cout); }); do { list keys; - ret = store->meta_list_keys_next(dpp, handle, default_max_keys, keys, &truncated); + ret = driver->meta_list_keys_next(dpp, handle, default_max_keys, keys, &truncated); if (ret < 0 && ret != -ENOENT) { std::cerr << "ERROR: lists_keys_next(): " << cpp_strerror(-ret) << std::endl; return ret; } if (ret != -ENOENT) { for (const auto &key:keys) { auto [tenant_name, bucket_name] = split_tenant(key); - process_single_lc_entry(store, formatter, tenant_name, bucket_name, dpp); + process_single_lc_entry(driver, formatter, tenant_name, bucket_name, dpp); } } formatter->flush(cout); // regularly flush every 1k entries @@ -1445,14 +1445,14 @@ int RGWBucketAdminOp::fix_lc_shards(rgw::sal::Store* store, } static bool has_object_expired(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, rgw::sal::Bucket* bucket, const rgw_obj_key& key, utime_t& delete_at) { std::unique_ptr obj = bucket->get_object(key); bufferlist delete_at_bl; - int ret = rgw_object_get_attr(dpp, store, obj.get(), RGW_ATTR_DELETE_AT, delete_at_bl, 
null_yield); + int ret = rgw_object_get_attr(dpp, driver, obj.get(), RGW_ATTR_DELETE_AT, delete_at_bl, null_yield); if (ret < 0) { return false; // no delete at attr, proceed } @@ -1470,7 +1470,7 @@ static bool has_object_expired(const DoutPrefixProvider *dpp, } static int fix_bucket_obj_expiry(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, rgw::sal::Bucket* bucket, RGWFormatterFlusher& flusher, bool dry_run) { @@ -1501,13 +1501,13 @@ static int fix_bucket_obj_expiry(const DoutPrefixProvider *dpp, for (const auto& obj : results.objs) { rgw_obj_key key(obj.key); utime_t delete_at; - if (has_object_expired(dpp, store, bucket, key, delete_at)) { + if (has_object_expired(dpp, driver, bucket, key, delete_at)) { formatter->open_object_section("object_status"); formatter->dump_string("object", key.name); formatter->dump_stream("delete_at") << delete_at; if (!dry_run) { - ret = rgw_remove_object(dpp, store, bucket, key); + ret = rgw_remove_object(dpp, driver, bucket, key); formatter->dump_int("status", ret); } @@ -1520,24 +1520,24 @@ static int fix_bucket_obj_expiry(const DoutPrefixProvider *dpp, return 0; } -int RGWBucketAdminOp::fix_obj_expiry(rgw::sal::Store* store, +int RGWBucketAdminOp::fix_obj_expiry(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, const DoutPrefixProvider *dpp, bool dry_run) { RGWBucket admin_bucket; - int ret = admin_bucket.init(store, op_state, null_yield, dpp); + int ret = admin_bucket.init(driver, op_state, null_yield, dpp); if (ret < 0) { ldpp_dout(dpp, -1) << "failed to initialize bucket" << dendl; return ret; } std::unique_ptr bucket; - ret = store->get_bucket(nullptr, admin_bucket.get_bucket_info(), &bucket); + ret = driver->get_bucket(nullptr, admin_bucket.get_bucket_info(), &bucket); if (ret < 0) { return ret; } - return fix_bucket_obj_expiry(dpp, store, bucket.get(), flusher, dry_run); + return fix_bucket_obj_expiry(dpp, driver, bucket.get(), flusher, dry_run); 
} void RGWBucketCompleteInfo::dump(Formatter *f) const { @@ -1978,10 +1978,10 @@ public: RGWSI_BucketIndex *bi{nullptr}; } svc; - rgw::sal::Store* store; + rgw::sal::Driver* driver; - RGWBucketInstanceMetadataHandler(rgw::sal::Store* store) - : store(store) {} + RGWBucketInstanceMetadataHandler(rgw::sal::Driver* driver) + : driver(driver) {} void init(RGWSI_Zone *zone_svc, RGWSI_Bucket *bucket_svc, @@ -2216,7 +2216,7 @@ int RGWMetadataHandlerPut_BucketInstance::put_post(const DoutPrefixProvider *dpp /* update lifecyle policy */ { std::unique_ptr bucket; - ret = bihandler->store->get_bucket(nullptr, bci.info, &bucket); + ret = bihandler->driver->get_bucket(nullptr, bci.info, &bucket); if (ret < 0) { ldpp_dout(dpp, 0) << __func__ << " failed to get_bucket(...) for " << bci.info.bucket.name @@ -2224,7 +2224,7 @@ int RGWMetadataHandlerPut_BucketInstance::put_post(const DoutPrefixProvider *dpp return ret; } - auto lc = bihandler->store->get_rgwlc(); + auto lc = bihandler->driver->get_rgwlc(); auto lc_it = bci.attrs.find(RGW_ATTR_LC); if (lc_it != bci.attrs.end()) { @@ -2254,8 +2254,8 @@ int RGWMetadataHandlerPut_BucketInstance::put_post(const DoutPrefixProvider *dpp class RGWArchiveBucketInstanceMetadataHandler : public RGWBucketInstanceMetadataHandler { public: - RGWArchiveBucketInstanceMetadataHandler(rgw::sal::Store* store) - : RGWBucketInstanceMetadataHandler(store) {} + RGWArchiveBucketInstanceMetadataHandler(rgw::sal::Driver* driver) + : RGWBucketInstanceMetadataHandler(driver) {} // N.B. 
replication of lifecycle policy relies on logic in RGWBucketInstanceMetadataHandler::do_put(...), override with caution @@ -2654,7 +2654,7 @@ int RGWBucketCtl::do_link_bucket(RGWSI_Bucket_EP_Ctx& ctx, nullptr, &attrs, y, dpp); if (ret < 0 && ret != -ENOENT) { - ldpp_dout(dpp, 0) << "ERROR: store->get_bucket_entrypoint_info() returned: " + ldpp_dout(dpp, 0) << "ERROR: read_bucket_entrypoint_info() returned: " << cpp_strerror(-ret) << dendl; } pattrs = &attrs; @@ -2739,7 +2739,7 @@ int RGWBucketCtl::do_unlink_bucket(RGWSI_Bucket_EP_Ctx& ctx, } // TODO: remove RGWRados dependency for bucket listing -int RGWBucketCtl::chown(rgw::sal::Store* store, rgw::sal::Bucket* bucket, +int RGWBucketCtl::chown(rgw::sal::Driver* driver, rgw::sal::Bucket* bucket, const rgw_user& user_id, const std::string& display_name, const std::string& marker, optional_yield y, const DoutPrefixProvider *dpp) { @@ -2758,7 +2758,7 @@ int RGWBucketCtl::chown(rgw::sal::Store* store, rgw::sal::Bucket* bucket, //Loop through objects and update object acls to point to bucket owner do { - RGWObjectCtx obj_ctx(store); + RGWObjectCtx obj_ctx(driver); results.objs.clear(); int ret = bucket->list(dpp, params, max_entries, results, y); if (ret < 0) { @@ -2783,7 +2783,7 @@ int RGWBucketCtl::chown(rgw::sal::Store* store, rgw::sal::Bucket* bucket, continue; } else { bufferlist& bl = aiter->second; - RGWAccessControlPolicy policy(store->ctx()); + RGWAccessControlPolicy policy(driver->ctx()); ACLOwner owner; try { decode(policy, bl); @@ -2916,9 +2916,9 @@ RGWBucketMetadataHandlerBase* RGWBucketMetaHandlerAllocator::alloc() return new RGWBucketMetadataHandler(); } -RGWBucketInstanceMetadataHandlerBase* RGWBucketInstanceMetaHandlerAllocator::alloc(rgw::sal::Store* store) +RGWBucketInstanceMetadataHandlerBase* RGWBucketInstanceMetaHandlerAllocator::alloc(rgw::sal::Driver* driver) { - return new RGWBucketInstanceMetadataHandler(store); + return new RGWBucketInstanceMetadataHandler(driver); } 
RGWBucketMetadataHandlerBase* RGWArchiveBucketMetaHandlerAllocator::alloc() @@ -2926,9 +2926,9 @@ RGWBucketMetadataHandlerBase* RGWArchiveBucketMetaHandlerAllocator::alloc() { return new RGWArchiveBucketMetadataHandler(); } -RGWBucketInstanceMetadataHandlerBase* RGWArchiveBucketInstanceMetaHandlerAllocator::alloc(rgw::sal::Store* store) +RGWBucketInstanceMetadataHandlerBase* RGWArchiveBucketInstanceMetaHandlerAllocator::alloc(rgw::sal::Driver* driver) { - return new RGWArchiveBucketInstanceMetadataHandler(store); + return new RGWArchiveBucketInstanceMetadataHandler(driver); } diff --git a/src/rgw/store/rados/rgw_bucket.h b/src/rgw/store/rados/rgw_bucket.h index fccae7eaeb5aa..636a1f2f6c029 100644 --- a/src/rgw/store/rados/rgw_bucket.h +++ b/src/rgw/store/rados/rgw_bucket.h @@ -121,7 +121,7 @@ public: }; /** - * Store a list of the user's buckets, with associated functinos. + * store a list of the user's buckets, with associated functions. */ class RGWUserBuckets { std::map buckets; @@ -204,7 +204,7 @@ public: class RGWBucketInstanceMetaHandlerAllocator { public: - static RGWBucketInstanceMetadataHandlerBase *alloc(rgw::sal::Store* store); + static RGWBucketInstanceMetadataHandlerBase *alloc(rgw::sal::Driver* driver); }; class RGWArchiveBucketMetaHandlerAllocator { @@ -214,16 +214,16 @@ public: class RGWArchiveBucketInstanceMetaHandlerAllocator { public: - static RGWBucketInstanceMetadataHandlerBase *alloc(rgw::sal::Store* store); + static RGWBucketInstanceMetadataHandlerBase *alloc(rgw::sal::Driver* driver); }; -extern int rgw_remove_object(const DoutPrefixProvider *dpp, rgw::sal::Store* store, rgw::sal::Bucket* bucket, rgw_obj_key& key); +extern int rgw_remove_object(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, rgw::sal::Bucket* bucket, rgw_obj_key& key); -extern int rgw_object_get_attr(rgw::sal::Store* store, rgw::sal::Object* obj, +extern int rgw_object_get_attr(rgw::sal::Driver* driver, rgw::sal::Object* obj, const char* attr_name, bufferlist& out_bl, 
optional_yield y); -extern void check_bad_user_bucket_mapping(rgw::sal::Store* store, rgw::sal::User* user, bool fix, optional_yield y, const DoutPrefixProvider *dpp); +extern void check_bad_user_bucket_mapping(rgw::sal::Driver* driver, rgw::sal::User* user, bool fix, optional_yield y, const DoutPrefixProvider *dpp); struct RGWBucketAdminOpState { rgw_user uid; @@ -318,7 +318,7 @@ struct RGWBucketAdminOpState { */ class RGWBucket { RGWUserBuckets buckets; - rgw::sal::Store* store; + rgw::sal::Driver* driver; RGWAccessHandle handle; std::unique_ptr bucket; @@ -329,8 +329,8 @@ class RGWBucket { RGWObjVersionTracker ep_objv; // entrypoint object version public: - RGWBucket() : store(NULL), handle(NULL), failure(false) {} - int init(rgw::sal::Store* storage, RGWBucketAdminOpState& op_state, optional_yield y, + RGWBucket() : driver(NULL), handle(NULL), failure(false) {} + int init(rgw::sal::Driver* storage, RGWBucketAdminOpState& op_state, optional_yield y, const DoutPrefixProvider *dpp, std::string *err_msg = NULL); int check_bad_index_multipart(RGWBucketAdminOpState& op_state, @@ -365,42 +365,42 @@ public: class RGWBucketAdminOp { public: - static int get_policy(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, + static int get_policy(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, const DoutPrefixProvider *dpp); - static int get_policy(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, + static int get_policy(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, RGWAccessControlPolicy& policy, const DoutPrefixProvider *dpp); - static int dump_s3_policy(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, + static int dump_s3_policy(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, std::ostream& os, const DoutPrefixProvider *dpp); - static int unlink(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp); - static int link(rgw::sal::Store* store, 
RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, std::string *err_msg = NULL); - static int chown(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, const std::string& marker, const DoutPrefixProvider *dpp, std::string *err_msg = NULL); + static int unlink(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp); + static int link(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, std::string *err_msg = NULL); + static int chown(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const std::string& marker, const DoutPrefixProvider *dpp, std::string *err_msg = NULL); - static int check_index(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, + static int check_index(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y, const DoutPrefixProvider *dpp); - static int remove_bucket(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, optional_yield y, + static int remove_bucket(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, optional_yield y, const DoutPrefixProvider *dpp, bool bypass_gc = false, bool keep_index_consistent = true); - static int remove_object(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp); - static int info(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y, const DoutPrefixProvider *dpp); - static int limit_check(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, + static int remove_object(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp); + static int info(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y, const DoutPrefixProvider *dpp); + static int limit_check(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const std::list& user_ids, RGWFormatterFlusher& flusher, optional_yield 
y, const DoutPrefixProvider *dpp, bool warnings_only = false); - static int set_quota(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp); + static int set_quota(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp); - static int list_stale_instances(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, + static int list_stale_instances(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, const DoutPrefixProvider *dpp); - static int clear_stale_instances(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, + static int clear_stale_instances(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, const DoutPrefixProvider *dpp); - static int fix_lc_shards(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, + static int fix_lc_shards(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, const DoutPrefixProvider *dpp); - static int fix_obj_expiry(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, + static int fix_obj_expiry(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, const DoutPrefixProvider *dpp, bool dry_run = false); - static int sync_bucket(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, std::string *err_msg = NULL); + static int sync_bucket(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, std::string *err_msg = NULL); }; struct rgw_ep_info { @@ -688,7 +688,7 @@ public: const DoutPrefixProvider *dpp, bool update_entrypoint = true); - int chown(rgw::sal::Store* store, rgw::sal::Bucket* bucket, + int chown(rgw::sal::Driver* driver, rgw::sal::Bucket* bucket, const rgw_user& user_id, const std::string& display_name, const std::string& marker, optional_yield y, const DoutPrefixProvider *dpp); @@ -761,5 +761,5 @@ private: }; -bool 
rgw_find_bucket_by_id(const DoutPrefixProvider *dpp, CephContext *cct, rgw::sal::Store* store, const std::string& marker, +bool rgw_find_bucket_by_id(const DoutPrefixProvider *dpp, CephContext *cct, rgw::sal::Driver* driver, const std::string& marker, const std::string& bucket_id, rgw_bucket* bucket_out); diff --git a/src/rgw/store/rados/rgw_data_sync.cc b/src/rgw/store/rados/rgw_data_sync.cc index c207927ca5e99..47573b765da84 100644 --- a/src/rgw/store/rados/rgw_data_sync.cc +++ b/src/rgw/store/rados/rgw_data_sync.cc @@ -165,7 +165,7 @@ bool RGWReadDataSyncRecoveringShardsCR::spawn_next() string error_oid = RGWDataSyncStatusManager::shard_obj_name(sc->source_zone, shard_id) + ".retry"; auto& shard_keys = omapkeys[shard_id]; shard_keys = std::make_shared(); - spawn(new RGWRadosGetOmapKeysCR(env->store, rgw_raw_obj(env->svc->zone->get_zone_params().log_pool, error_oid), + spawn(new RGWRadosGetOmapKeysCR(env->driver, rgw_raw_obj(env->svc->zone->get_zone_params().log_pool, error_oid), marker, max_entries, shard_keys), false); ++shard_id; @@ -520,7 +520,7 @@ class RGWInitDataSyncStatusCoroutine : public RGWCoroutine { static constexpr uint32_t lock_duration = 30; RGWDataSyncCtx *sc; RGWDataSyncEnv *sync_env; - rgw::sal::RadosStore* store; // RGWDataSyncEnv also has a pointer to store + rgw::sal::RadosStore* driver; // RGWDataSyncEnv also has a pointer to driver const rgw_pool& pool; const uint32_t num_shards; @@ -537,7 +537,7 @@ public: uint64_t instance_id, RGWSyncTraceNodeRef& _tn_parent, rgw_data_sync_status *status) - : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env), store(sync_env->store), + : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env), driver(sync_env->driver), pool(sync_env->svc->zone->get_zone_params().log_pool), num_shards(num_shards), status(status), tn(sync_env->sync_tracer->add_node(_tn_parent, "init_data_sync_status")) { @@ -559,7 +559,7 @@ public: int ret; reenter(this) { using LockCR = RGWSimpleRadosLockCR; - yield call(new 
LockCR(sync_env->async_rados, store, + yield call(new LockCR(sync_env->async_rados, driver, rgw_raw_obj{pool, sync_status_oid}, lock_name, cookie, lock_duration)); if (retcode < 0) { @@ -576,7 +576,7 @@ public: } /* take lock again, we just recreated the object */ - yield call(new LockCR(sync_env->async_rados, store, + yield call(new LockCR(sync_env->async_rados, driver, rgw_raw_obj{pool, sync_status_oid}, lock_name, cookie, lock_duration)); if (retcode < 0) { @@ -632,7 +632,7 @@ public: tn->log(0, SSTR("ERROR: failed to write sync status info with " << retcode)); return set_cr_error(retcode); } - yield call(new RGWSimpleRadosUnlockCR(sync_env->async_rados, store, + yield call(new RGWSimpleRadosUnlockCR(sync_env->async_rados, driver, rgw_raw_obj{pool, sync_status_oid}, lock_name, cookie)); return set_cr_done(); @@ -642,13 +642,13 @@ public: }; RGWRemoteDataLog::RGWRemoteDataLog(const DoutPrefixProvider *dpp, - rgw::sal::RadosStore* store, + rgw::sal::RadosStore* driver, RGWAsyncRadosProcessor *async_rados) - : RGWCoroutinesManager(store->ctx(), store->getRados()->get_cr_registry()), - dpp(dpp), store(store), - cct(store->ctx()), cr_registry(store->getRados()->get_cr_registry()), + : RGWCoroutinesManager(driver->ctx(), driver->getRados()->get_cr_registry()), + dpp(dpp), driver(driver), + cct(driver->ctx()), cr_registry(driver->getRados()->get_cr_registry()), async_rados(async_rados), - http_manager(store->ctx(), completion_mgr), + http_manager(driver->ctx(), completion_mgr), data_sync_cr(NULL), initialized(false) { @@ -690,7 +690,7 @@ int RGWRemoteDataLog::init(const rgw_zone_id& _source_zone, RGWRESTConn *_conn, RGWSyncTraceManager *_sync_tracer, RGWSyncModuleInstanceRef& _sync_module, PerfCounters* counters) { - sync_env.init(dpp, cct, store, store->svc(), async_rados, &http_manager, _error_logger, + sync_env.init(dpp, cct, driver, driver->svc(), async_rados, &http_manager, _error_logger, _sync_tracer, _sync_module, counters); sc.init(&sync_env, _conn, 
_source_zone); @@ -872,7 +872,7 @@ class RGWListBucketIndexesCR : public RGWCoroutine { RGWDataSyncCtx *sc; RGWDataSyncEnv *sync_env = sc->env; - rgw::sal::RadosStore* store = sync_env->store; + rgw::sal::RadosStore* driver = sync_env->driver; rgw_data_sync_status *sync_status; @@ -902,7 +902,7 @@ public: int operate(const DoutPrefixProvider *dpp) override { reenter(this) { entries_index = std::make_unique( - sync_env->async_rados, store, this, + sync_env->async_rados, driver, this, cct->_conf->rgw_data_log_num_shards, sync_env->svc->zone->get_zone_params().log_pool, oid_prefix); @@ -988,7 +988,7 @@ public: if (ret < 0) { yield call(sync_env->error_logger->log_error_cr( dpp, sc->conn->get_remote_id(), "data.init", "", - -ret, string("failed to store sync status: ") + + -ret, string("failed to store sync status: ") + cpp_strerror(-ret))); req_ret = ret; } @@ -1381,7 +1381,7 @@ public: } if (complete->timestamp != ceph::real_time{}) { tn->log(10, SSTR("writing " << *complete << " to error repo for retry")); - yield call(rgw::error_repo::write_cr(sync_env->store->svc()->rados, error_repo, + yield call(rgw::error_repo::write_cr(sync_env->driver->svc()->rados, error_repo, rgw::error_repo::encode_key(complete->bs, complete->gen), complete->timestamp)); if (retcode < 0) { @@ -1389,7 +1389,7 @@ public: } } } else if (complete->retry) { - yield call(rgw::error_repo::remove_cr(sync_env->store->svc()->rados, error_repo, + yield call(rgw::error_repo::remove_cr(sync_env->driver->svc()->rados, error_repo, rgw::error_repo::encode_key(complete->bs, complete->gen), complete->timestamp)); if (retcode < 0) { @@ -1414,9 +1414,9 @@ public: } }; -rgw_raw_obj datalog_oid_for_error_repo(RGWDataSyncCtx *sc, rgw::sal::RadosStore* store, +rgw_raw_obj datalog_oid_for_error_repo(RGWDataSyncCtx *sc, rgw::sal::RadosStore* driver, rgw_pool& pool, rgw_bucket_shard& bs) { - int datalog_shard = store->svc()->datalog_rados->choose_oid(bs); + int datalog_shard = 
driver->svc()->datalog_rados->choose_oid(bs); string oid = RGWDataSyncStatusManager::shard_obj_name(sc->source_zone, datalog_shard); return rgw_raw_obj(pool, oid + ".retry"); } @@ -1456,9 +1456,9 @@ public: for (sid = 0; sid < each->num_shards; sid++) { bs.bucket = source_bs.bucket; bs.shard_id = sid; - error_repo = datalog_oid_for_error_repo(sc, sync_env->store, pool, source_bs); + error_repo = datalog_oid_for_error_repo(sc, sync_env->driver, pool, source_bs); tn->log(10, SSTR("writing shard_id " << sid << " of gen " << each->gen << " to error repo for retry")); - yield_spawn_window(rgw::error_repo::write_cr(sync_env->store->svc()->rados, error_repo, + yield_spawn_window(rgw::error_repo::write_cr(sync_env->driver->svc()->rados, error_repo, rgw::error_repo::encode_key(bs, each->gen), timestamp), cct->_conf->rgw_data_sync_spawn_window, [&](uint64_t stack_id, int ret) { @@ -1477,7 +1477,7 @@ public: }); // once everything succeeds, remove the full sync obligation from the error repo - yield call(rgw::error_repo::remove_cr(sync_env->store->svc()->rados, error_repo, + yield call(rgw::error_repo::remove_cr(sync_env->driver->svc()->rados, error_repo, error_marker, timestamp)); return set_cr_done(); } @@ -1502,10 +1502,10 @@ RGWCoroutine* data_sync_single_entry(RGWDataSyncCtx *sc, const rgw_bucket_shard& lease_cr.get(), tn); } -static ceph::real_time timestamp_for_bucket_shard(rgw::sal::RadosStore* store, +static ceph::real_time timestamp_for_bucket_shard(rgw::sal::RadosStore* driver, const rgw_data_sync_status& sync_status, const rgw_bucket_shard& bs) { - int datalog_shard = store->svc()->datalog_rados->choose_oid(bs); + int datalog_shard = driver->svc()->datalog_rados->choose_oid(bs); auto status = sync_status.sync_markers.find(datalog_shard); if (status == sync_status.sync_markers.end()) { return ceph::real_clock::zero(); @@ -1562,7 +1562,7 @@ public: if (retcode < 0) { tn->log(10, SSTR("full sync: failed to read remote bucket info. 
Writing " << source_bs.shard_id << " to error repo for retry")); - yield call(rgw::error_repo::write_cr(sync_env->store->svc()->rados, error_repo, + yield call(rgw::error_repo::write_cr(sync_env->driver->svc()->rados, error_repo, rgw::error_repo::encode_key(source_bs, std::nullopt), timestamp)); if (retcode < 0) { @@ -1580,11 +1580,11 @@ public: for (sid = 0; sid < each->num_shards; sid++) { source_bs.shard_id = sid; // use the error repo and sync status timestamp from the datalog shard corresponding to source_bs - error_repo = datalog_oid_for_error_repo(sc, sync_env->store, pool, source_bs); - timestamp = timestamp_for_bucket_shard(sync_env->store, sync_status, source_bs); + error_repo = datalog_oid_for_error_repo(sc, sync_env->driver, pool, source_bs); + timestamp = timestamp_for_bucket_shard(sync_env->driver, sync_status, source_bs); if (retcode < 0) { tn->log(10, SSTR("Write " << source_bs.shard_id << " to error repo for retry")); - yield_spawn_window(rgw::error_repo::write_cr(sync_env->store->svc()->rados, error_repo, + yield_spawn_window(rgw::error_repo::write_cr(sync_env->driver->svc()->rados, error_repo, rgw::error_repo::encode_key(source_bs, each->gen), timestamp), cct->_conf->rgw_data_sync_spawn_window, std::nullopt); } else { @@ -1693,7 +1693,7 @@ public: return set_cr_error(-ECANCELED); } omapvals = std::make_shared(); - yield call(new RGWRadosGetOmapValsCR(sc->env->store, + yield call(new RGWRadosGetOmapValsCR(sc->env->driver, rgw_raw_obj(pool, oid), sync_marker.marker, OMAP_GET_MAX_ENTRIES, omapvals)); @@ -1752,7 +1752,7 @@ public: } // clean up full sync index, ignoring errors - yield call(new RGWRadosRemoveCR(sc->env->store, {pool, oid})); + yield call(new RGWRadosRemoveCR(sc->env->driver, {pool, oid})); // transition to incremental sync return set_cr_done(); @@ -1856,7 +1856,7 @@ public: if (error_retry_time <= ceph::coarse_real_clock::now()) { /* process bucket shards that previously failed */ omapvals = std::make_shared(); - yield call(new 
RGWRadosGetOmapValsCR(sc->env->store, error_repo, + yield call(new RGWRadosGetOmapValsCR(sc->env->driver, error_repo, error_marker, max_error_entries, omapvals)); error_entries = std::move(omapvals->entries); @@ -1873,7 +1873,7 @@ public: } if (retcode < 0) { tn->log(1, SSTR("failed to parse bucket shard: " << error_marker)); - spawn(rgw::error_repo::remove_cr(sc->env->store->svc()->rados, + spawn(rgw::error_repo::remove_cr(sc->env->driver->svc()->rados, error_repo, error_marker, entry_timestamp), false); @@ -2085,8 +2085,8 @@ public: if (lease_cr) { lease_cr->abort(); } - auto store = sync_env->store; - lease_cr.reset(new RGWContinuousLeaseCR(sync_env->async_rados, store, + auto driver = sync_env->driver; + lease_cr.reset(new RGWContinuousLeaseCR(sync_env->async_rados, driver, rgw_raw_obj(pool, status_oid), lock_name, lock_duration, this)); lease_stack.reset(spawn(lease_cr.get(), false)); @@ -2329,7 +2329,7 @@ class RGWUserPermHandler { uid(handler->uid), info(handler->info) {} int operate() override { - auto user_ctl = sync_env->store->getRados()->ctl.user; + auto user_ctl = sync_env->driver->getRados()->ctl.user; ret = user_ctl->get_info_by_uid(sync_env->dpp, uid, &info->user_info, null_yield); if (ret < 0) { @@ -2648,7 +2648,7 @@ public: * the correct policy configuration. 
This can happen if there are multiple * policy rules, and some depend on the object tagging */ yield call(new RGWStatRemoteObjCR(sync_env->async_rados, - sync_env->store, + sync_env->driver, sc->source_zone, sync_pipe.info.source_bs.bucket, key, @@ -2733,7 +2733,7 @@ public: std::move(dest_params), need_retry); - call(new RGWFetchRemoteObjCR(sync_env->async_rados, sync_env->store, sc->source_zone, + call(new RGWFetchRemoteObjCR(sync_env->async_rados, sync_env->driver, sc->source_zone, nullopt, sync_pipe.info.source_bs.bucket, std::nullopt, sync_pipe.dest_bucket_info, @@ -2769,7 +2769,7 @@ RGWCoroutine *RGWDefaultDataSyncModule::remove_object(const DoutPrefixProvider * real_time& mtime, bool versioned, uint64_t versioned_epoch, rgw_zone_set *zones_trace) { auto sync_env = sc->env; - return new RGWRemoveObjCR(sync_env->dpp, sync_env->async_rados, sync_env->store, sc->source_zone, + return new RGWRemoveObjCR(sync_env->dpp, sync_env->async_rados, sync_env->driver, sc->source_zone, sync_pipe.dest_bucket_info, key, versioned, versioned_epoch, NULL, NULL, false, &mtime, zones_trace); } @@ -2778,7 +2778,7 @@ RGWCoroutine *RGWDefaultDataSyncModule::create_delete_marker(const DoutPrefixPro rgw_bucket_entry_owner& owner, bool versioned, uint64_t versioned_epoch, rgw_zone_set *zones_trace) { auto sync_env = sc->env; - return new RGWRemoveObjCR(sync_env->dpp, sync_env->async_rados, sync_env->store, sc->source_zone, + return new RGWRemoveObjCR(sync_env->dpp, sync_env->async_rados, sync_env->driver, sc->source_zone, sync_pipe.dest_bucket_info, key, versioned, versioned_epoch, &owner.id, &owner.display_name, true, &mtime, zones_trace); } @@ -2803,8 +2803,8 @@ public: RGWMetadataHandler *alloc_bucket_meta_handler() override { return RGWArchiveBucketMetaHandlerAllocator::alloc(); } - RGWBucketInstanceMetadataHandlerBase *alloc_bucket_instance_meta_handler(rgw::sal::Store* store) override { - return RGWArchiveBucketInstanceMetaHandlerAllocator::alloc(store); + 
RGWBucketInstanceMetadataHandlerBase *alloc_bucket_instance_meta_handler(rgw::sal::Driver* driver) override { + return RGWArchiveBucketInstanceMetaHandlerAllocator::alloc(driver); } }; @@ -2822,7 +2822,7 @@ RGWCoroutine *RGWArchiveDataSyncModule::sync_object(const DoutPrefixProvider *dp (sync_pipe.dest_bucket_info.flags & BUCKET_VERSIONS_SUSPENDED)) { ldout(sc->cct, 0) << "SYNC_ARCHIVE: sync_object: enabling object versioning for archive bucket" << dendl; sync_pipe.dest_bucket_info.flags = (sync_pipe.dest_bucket_info.flags & ~BUCKET_VERSIONS_SUSPENDED) | BUCKET_VERSIONED; - int op_ret = sync_env->store->getRados()->put_bucket_instance_info(sync_pipe.dest_bucket_info, false, real_time(), NULL, sync_env->dpp); + int op_ret = sync_env->driver->getRados()->put_bucket_instance_info(sync_pipe.dest_bucket_info, false, real_time(), NULL, sync_env->dpp); if (op_ret < 0) { ldpp_dout(sync_env->dpp, 0) << "SYNC_ARCHIVE: sync_object: error versioning archive bucket" << dendl; return NULL; @@ -2835,7 +2835,7 @@ RGWCoroutine *RGWArchiveDataSyncModule::sync_object(const DoutPrefixProvider *dp versioned_epoch = 0; dest_key = key; if (key.instance.empty()) { - sync_env->store->getRados()->gen_rand_obj_instance_name(&(*dest_key)); + sync_env->driver->getRados()->gen_rand_obj_instance_name(&(*dest_key)); } } @@ -2855,7 +2855,7 @@ RGWCoroutine *RGWArchiveDataSyncModule::create_delete_marker(const DoutPrefixPro ldout(sc->cct, 0) << "SYNC_ARCHIVE: create_delete_marker: b=" << sync_pipe.info.source_bs.bucket << " k=" << key << " mtime=" << mtime << " versioned=" << versioned << " versioned_epoch=" << versioned_epoch << dendl; auto sync_env = sc->env; - return new RGWRemoveObjCR(sync_env->dpp, sync_env->async_rados, sync_env->store, sc->source_zone, + return new RGWRemoveObjCR(sync_env->dpp, sync_env->async_rados, sync_env->driver, sc->source_zone, sync_pipe.dest_bucket_info, key, versioned, versioned_epoch, &owner.id, &owner.display_name, true, &mtime, zones_trace); } @@ -2932,37 +2932,37 
@@ int RGWRemoteDataLog::run_sync(const DoutPrefixProvider *dpp, int num_shards) CephContext *RGWDataSyncStatusManager::get_cct() const { - return store->ctx(); + return driver->ctx(); } int RGWDataSyncStatusManager::init(const DoutPrefixProvider *dpp) { RGWZone *zone_def; - if (!(zone_def = store->svc()->zone->find_zone(source_zone))) { + if (!(zone_def = driver->svc()->zone->find_zone(source_zone))) { ldpp_dout(this, 0) << "ERROR: failed to find zone config info for zone=" << source_zone << dendl; return -EIO; } - if (!store->svc()->sync_modules->get_manager()->supports_data_export(zone_def->tier_type)) { + if (!driver->svc()->sync_modules->get_manager()->supports_data_export(zone_def->tier_type)) { return -ENOTSUP; } - const RGWZoneParams& zone_params = store->svc()->zone->get_zone_params(); + const RGWZoneParams& zone_params = driver->svc()->zone->get_zone_params(); if (sync_module == nullptr) { - sync_module = store->get_sync_module(); + sync_module = driver->get_sync_module(); } - conn = store->svc()->zone->get_zone_conn(source_zone); + conn = driver->svc()->zone->get_zone_conn(source_zone); if (!conn) { ldpp_dout(this, 0) << "connection object to zone " << source_zone << " does not exist" << dendl; return -EINVAL; } - error_logger = new RGWSyncErrorLogger(store, RGW_SYNC_ERROR_LOG_SHARD_PREFIX, ERROR_LOGGER_SHARDS); + error_logger = new RGWSyncErrorLogger(driver, RGW_SYNC_ERROR_LOG_SHARD_PREFIX, ERROR_LOGGER_SHARDS); - int r = source_log.init(source_zone, conn, error_logger, store->getRados()->get_sync_tracer(), + int r = source_log.init(source_zone, conn, error_logger, driver->getRados()->get_sync_tracer(), sync_module, counters); if (r < 0) { ldpp_dout(this, 0) << "ERROR: failed to init remote log, r=" << r << dendl; @@ -3323,7 +3323,7 @@ public: int operate(const DoutPrefixProvider *dpp) override { reenter(this) { - yield call(new RGWRadosRemoveCR(sync_env->store, obj, &objv)); + yield call(new RGWRadosRemoveCR(sync_env->driver, obj, &objv)); if (retcode 
< 0 && retcode != -ENOENT) { ldout(cct, 20) << "ERROR: failed to remove bucket shard status for: " << sync_pair << ". with error: " << retcode << dendl; @@ -3493,7 +3493,7 @@ public: class RGWReadRecoveringBucketShardsCoroutine : public RGWCoroutine { RGWDataSyncCtx *sc; RGWDataSyncEnv *sync_env; - rgw::sal::RadosStore* store; + rgw::sal::RadosStore* driver; const int shard_id; int max_entries; @@ -3511,7 +3511,7 @@ public: RGWReadRecoveringBucketShardsCoroutine(RGWDataSyncCtx *_sc, const int _shard_id, set& _recovering_buckets, const int _max_entries) : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env), - store(sync_env->store), shard_id(_shard_id), max_entries(_max_entries), + driver(sync_env->driver), shard_id(_shard_id), max_entries(_max_entries), recovering_buckets(_recovering_buckets), max_omap_entries(OMAP_READ_MAX_ENTRIES) { error_oid = RGWDataSyncStatusManager::shard_obj_name(sc->source_zone, shard_id) + ".retry"; @@ -3527,7 +3527,7 @@ int RGWReadRecoveringBucketShardsCoroutine::operate(const DoutPrefixProvider *dp count = 0; do { omapkeys = std::make_shared(); - yield call(new RGWRadosGetOmapKeysCR(store, rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, error_oid), + yield call(new RGWRadosGetOmapKeysCR(driver, rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, error_oid), marker, max_omap_entries, omapkeys)); if (retcode == -ENOENT) { @@ -3560,7 +3560,7 @@ int RGWReadRecoveringBucketShardsCoroutine::operate(const DoutPrefixProvider *dp class RGWReadPendingBucketShardsCoroutine : public RGWCoroutine { RGWDataSyncCtx *sc; RGWDataSyncEnv *sync_env; - rgw::sal::RadosStore* store; + rgw::sal::RadosStore* driver; const int shard_id; int max_entries; @@ -3581,7 +3581,7 @@ public: set& _pending_buckets, rgw_data_sync_marker* _sync_marker, const int _max_entries) : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env), - store(sync_env->store), shard_id(_shard_id), max_entries(_max_entries), + driver(sync_env->driver), shard_id(_shard_id), 
max_entries(_max_entries), pending_buckets(_pending_buckets), sync_marker(_sync_marker) { status_oid = RGWDataSyncStatusManager::shard_obj_name(sc->source_zone, shard_id); @@ -3640,8 +3640,8 @@ int RGWReadPendingBucketShardsCoroutine::operate(const DoutPrefixProvider *dpp) int RGWRemoteDataLog::read_shard_status(const DoutPrefixProvider *dpp, int shard_id, set& pending_buckets, set& recovering_buckets, rgw_data_sync_marker *sync_marker, const int max_entries) { // cannot run concurrently with run_sync(), so run in a separate manager - RGWCoroutinesManager crs(store->ctx(), store->getRados()->get_cr_registry()); - RGWHTTPManager http_manager(store->ctx(), crs.get_completion_mgr()); + RGWCoroutinesManager crs(driver->ctx(), driver->getRados()->get_cr_registry()); + RGWHTTPManager http_manager(driver->ctx(), crs.get_completion_mgr()); int ret = http_manager.start(); if (ret < 0) { ldpp_dout(dpp, 0) << "failed in http_manager.start() ret=" << ret << dendl; @@ -3652,10 +3652,10 @@ int RGWRemoteDataLog::read_shard_status(const DoutPrefixProvider *dpp, int shard RGWDataSyncCtx sc_local = sc; sc_local.env = &sync_env_local; list stacks; - RGWCoroutinesStack* recovering_stack = new RGWCoroutinesStack(store->ctx(), &crs); + RGWCoroutinesStack* recovering_stack = new RGWCoroutinesStack(driver->ctx(), &crs); recovering_stack->call(new RGWReadRecoveringBucketShardsCoroutine(&sc_local, shard_id, recovering_buckets, max_entries)); stacks.push_back(recovering_stack); - RGWCoroutinesStack* pending_stack = new RGWCoroutinesStack(store->ctx(), &crs); + RGWCoroutinesStack* pending_stack = new RGWCoroutinesStack(driver->ctx(), &crs); pending_stack->call(new RGWReadPendingBucketShardsCoroutine(&sc_local, shard_id, pending_buckets, sync_marker, max_entries)); stacks.push_back(pending_stack); ret = crs.run(dpp, stacks); @@ -3665,7 +3665,7 @@ int RGWRemoteDataLog::read_shard_status(const DoutPrefixProvider *dpp, int shard CephContext *RGWBucketPipeSyncStatusManager::get_cct() const { - 
return store->ctx(); + return driver->ctx(); } void rgw_bucket_entry_owner::decode_json(JSONObj *obj) @@ -4102,7 +4102,7 @@ public: if (sc->env->ostr) { RGWZone* z; - if ((z = sc->env->store->svc()->zone->find_zone(sc->source_zone))) { + if ((z = sc->env->driver->svc()->zone->find_zone(sc->source_zone))) { zone_name = z->name; } } @@ -4804,7 +4804,7 @@ int RGWBucketShardIncrementalSyncCR::operate(const DoutPrefixProvider *dpp) if (retcode < 0) { return set_cr_error(retcode); } - call(new RGWRadosRemoveOidCR(sync_env->store, std::move(status_obj))); + call(new RGWRadosRemoveOidCR(sync_env->driver, std::move(status_obj))); if (retcode < 0) { ldpp_dout(dpp, 20) << "failed to remove shard status object: " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); @@ -5064,14 +5064,14 @@ public: int RGWSyncGetBucketInfoCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { - yield call(new RGWGetBucketInstanceInfoCR(sync_env->async_rados, sync_env->store, bucket, pbucket_info, pattrs, dpp)); + yield call(new RGWGetBucketInstanceInfoCR(sync_env->async_rados, sync_env->driver, bucket, pbucket_info, pattrs, dpp)); if (retcode == -ENOENT) { /* bucket instance info has not been synced in yet, fetch it now */ yield { tn->log(10, SSTR("no local info for bucket:" << ": fetching metadata")); string raw_key = string("bucket.instance:") + bucket.get_key(); - meta_sync_env.init(dpp, cct, sync_env->store, sync_env->svc->zone->get_master_conn(), sync_env->async_rados, + meta_sync_env.init(dpp, cct, sync_env->driver, sync_env->svc->zone->get_master_conn(), sync_env->async_rados, sync_env->http_manager, sync_env->error_logger, sync_env->sync_tracer); call(new RGWMetaSyncSingleEntryCR(&meta_sync_env, raw_key, @@ -5085,7 +5085,7 @@ int RGWSyncGetBucketInfoCR::operate(const DoutPrefixProvider *dpp) return set_cr_error(retcode); } - yield call(new RGWGetBucketInstanceInfoCR(sync_env->async_rados, sync_env->store, bucket, pbucket_info, pattrs, dpp)); + yield call(new 
RGWGetBucketInstanceInfoCR(sync_env->async_rados, sync_env->driver, bucket, pbucket_info, pattrs, dpp)); } if (retcode < 0) { tn->log(0, SSTR("ERROR: failed to retrieve bucket info for bucket=" << bucket_str{bucket})); @@ -5180,7 +5180,7 @@ public: reenter(this) { for (i = 0; i < 2; ++i) { yield call(new RGWBucketGetSyncPolicyHandlerCR(sync_env->async_rados, - sync_env->store, + sync_env->driver, get_policy_params, policy, dpp)); @@ -5501,7 +5501,7 @@ int RGWSyncBucketCR::operate(const DoutPrefixProvider *dpp) // remote indicates stopped state tn->log(20, "remote bilog indicates that sync was stopped"); if (!bucket_lease_cr) { - bucket_lease_cr.reset(new RGWContinuousLeaseCR(env->async_rados, env->store, status_obj, + bucket_lease_cr.reset(new RGWContinuousLeaseCR(env->async_rados, env->driver, status_obj, lock_name, lock_duration, this)); yield spawn(bucket_lease_cr.get(), false); while (!bucket_lease_cr->is_locked()) { @@ -5558,7 +5558,7 @@ int RGWSyncBucketCR::operate(const DoutPrefixProvider *dpp) // if the state wasn't Incremental, take a bucket-wide lease to prevent // different shards from duplicating the init and full sync if (!bucket_lease_cr) { - bucket_lease_cr.reset(new RGWContinuousLeaseCR(env->async_rados, env->store, status_obj, + bucket_lease_cr.reset(new RGWContinuousLeaseCR(env->async_rados, env->driver, status_obj, lock_name, lock_duration, this)); yield spawn(bucket_lease_cr.get(), false); while (!bucket_lease_cr->is_locked()) { @@ -5669,11 +5669,11 @@ int RGWBucketPipeSyncStatusManager::do_init(const DoutPrefixProvider *dpp, } sync_module.reset(new RGWDefaultSyncModuleInstance()); - auto async_rados = store->svc()->rados->get_async_processor(); + auto async_rados = driver->svc()->rados->get_async_processor(); - sync_env.init(this, store->ctx(), store, - store->svc(), async_rados, &http_manager, - error_logger.get(), store->getRados()->get_sync_tracer(), + sync_env.init(this, driver->ctx(), driver, + driver->svc(), async_rados, &http_manager, + 
error_logger.get(), driver->getRados()->get_sync_tracer(), sync_module, nullptr); sync_env.ostr = ostr; @@ -5699,14 +5699,14 @@ int RGWBucketPipeSyncStatusManager::do_init(const DoutPrefixProvider *dpp, for (auto& pipe : pipes) { auto& szone = pipe.source.zone; - auto conn = store->svc()->zone->get_zone_conn(szone); + auto conn = driver->svc()->zone->get_zone_conn(szone); if (!conn) { ldpp_dout(this, 0) << "connection object to zone " << szone << " does not exist" << dendl; return -EINVAL; } RGWZone* z; - if (!(z = store->svc()->zone->find_zone(szone))) { + if (!(z = driver->svc()->zone->find_zone(szone))) { ldpp_dout(this, 0) << "zone " << szone << " does not exist" << dendl; return -EINVAL; } @@ -5752,14 +5752,14 @@ int RGWBucketPipeSyncStatusManager::remote_info(const DoutPrefixProvider *dpp, tl::expected, int> RGWBucketPipeSyncStatusManager::construct( const DoutPrefixProvider* dpp, - rgw::sal::RadosStore* store, + rgw::sal::RadosStore* driver, std::optional source_zone, std::optional source_bucket, const rgw_bucket& dest_bucket, std::ostream* ostr) { std::unique_ptr self{ - new RGWBucketPipeSyncStatusManager(store, source_zone, source_bucket, + new RGWBucketPipeSyncStatusManager(driver, source_zone, source_bucket, dest_bucket)}; auto r = self->do_init(dpp, ostr); if (r < 0) { @@ -5775,7 +5775,7 @@ int RGWBucketPipeSyncStatusManager::init_sync_status( // practice we only do one zone at a time. 
for (auto& source : sources) { list stacks; - RGWCoroutinesStack *stack = new RGWCoroutinesStack(store->ctx(), &cr_mgr); + RGWCoroutinesStack *stack = new RGWCoroutinesStack(driver->ctx(), &cr_mgr); pretty_print(source.sc.env, "Initializing sync state of bucket {} with zone {}.\n", source.info.bucket.name, source.zone_name); stack->call(new RGWSimpleRadosWriteCR( @@ -5829,7 +5829,7 @@ RGWBucketPipeSyncStatusManager::read_sync_status( << ret << dendl; return tl::unexpected(ret); } - auto stack = new RGWCoroutinesStack(store->ctx(), &cr_mgr); + auto stack = new RGWCoroutinesStack(driver->ctx(), &cr_mgr); std::vector pairs(num_shards); for (auto shard = 0u; shard < num_shards; ++shard) { auto& pair = pairs[shard]; @@ -6095,7 +6095,7 @@ int RGWBucketPipeSyncStatusManager::run(const DoutPrefixProvider *dpp) { list stacks; for (auto& source : sources) { - auto stack = new RGWCoroutinesStack(store->ctx(), &cr_mgr); + auto stack = new RGWCoroutinesStack(driver->ctx(), &cr_mgr); stack->call(new rgw::bucket_sync_run::SourceCR( source.sc, source.info, source.dest, source.handler, source.zone_name)); @@ -6193,7 +6193,7 @@ int rgw_read_remote_bilog_info(const DoutPrefixProvider *dpp, class RGWCollectBucketSyncStatusCR : public RGWShardCollectCR { static constexpr int max_concurrent_shards = 16; - rgw::sal::RadosStore* const store; + rgw::sal::RadosStore* const driver; RGWDataSyncCtx *const sc; RGWDataSyncEnv *const env; const uint64_t gen; @@ -6213,12 +6213,12 @@ class RGWCollectBucketSyncStatusCR : public RGWShardCollectCR { return r; } public: - RGWCollectBucketSyncStatusCR(rgw::sal::RadosStore* store, RGWDataSyncCtx *sc, + RGWCollectBucketSyncStatusCR(rgw::sal::RadosStore* driver, RGWDataSyncCtx *sc, const rgw_bucket_sync_pair_info& sync_pair, uint64_t gen, Vector *status) : RGWShardCollectCR(sc->cct, max_concurrent_shards), - store(store), sc(sc), env(sc->env), gen(gen), sync_pair(sync_pair), + driver(driver), sc(sc), env(sc->env), gen(gen), sync_pair(sync_pair), 
i(status->begin()), end(status->end()) {} @@ -6234,16 +6234,16 @@ class RGWCollectBucketSyncStatusCR : public RGWShardCollectCR { }; int rgw_read_bucket_full_sync_status(const DoutPrefixProvider *dpp, - rgw::sal::RadosStore *store, + rgw::sal::RadosStore *driver, const rgw_sync_bucket_pipe& pipe, rgw_bucket_sync_status *status, optional_yield y) { auto get_oid = RGWBucketPipeSyncStatusManager::full_status_oid; - const rgw_raw_obj obj{store->svc()->zone->get_zone_params().log_pool, + const rgw_raw_obj obj{driver->svc()->zone->get_zone_params().log_pool, get_oid(*pipe.source.zone, *pipe.source.bucket, *pipe.dest.bucket)}; - auto svc = store->svc()->sysobj; + auto svc = driver->svc()->sysobj; auto sysobj = svc->get_obj(obj); bufferlist bl; int ret = sysobj.rop().read(dpp, &bl, y); @@ -6264,7 +6264,7 @@ int rgw_read_bucket_full_sync_status(const DoutPrefixProvider *dpp, } int rgw_read_bucket_inc_sync_status(const DoutPrefixProvider *dpp, - rgw::sal::RadosStore *store, + rgw::sal::RadosStore *driver, const rgw_sync_bucket_pipe& pipe, uint64_t gen, std::vector *status) @@ -6283,14 +6283,14 @@ int rgw_read_bucket_inc_sync_status(const DoutPrefixProvider *dpp, RGWDataSyncEnv env; RGWSyncModuleInstanceRef module; // null sync module - env.init(dpp, store->ctx(), store, store->svc(), store->svc()->rados->get_async_processor(), + env.init(dpp, driver->ctx(), driver, driver->svc(), driver->svc()->rados->get_async_processor(), nullptr, nullptr, nullptr, module, nullptr); RGWDataSyncCtx sc; sc.init(&env, nullptr, *pipe.source.zone); - RGWCoroutinesManager crs(store->ctx(), store->getRados()->get_cr_registry()); - return crs.run(dpp, new RGWCollectBucketSyncStatusCR(store, &sc, + RGWCoroutinesManager crs(driver->ctx(), driver->getRados()->get_cr_registry()); + return crs.run(dpp, new RGWCollectBucketSyncStatusCR(driver, &sc, sync_pair, gen, status)); diff --git a/src/rgw/store/rados/rgw_data_sync.h b/src/rgw/store/rados/rgw_data_sync.h index b35744206cf61..6cc714dbaf8c7 100644 
--- a/src/rgw/store/rados/rgw_data_sync.h +++ b/src/rgw/store/rados/rgw_data_sync.h @@ -304,7 +304,7 @@ class RGWServices; struct RGWDataSyncEnv { const DoutPrefixProvider *dpp{nullptr}; CephContext *cct{nullptr}; - rgw::sal::RadosStore* store{nullptr}; + rgw::sal::RadosStore* driver{nullptr}; RGWServices *svc{nullptr}; RGWAsyncRadosProcessor *async_rados{nullptr}; RGWHTTPManager *http_manager{nullptr}; @@ -315,14 +315,14 @@ struct RGWDataSyncEnv { RGWDataSyncEnv() {} - void init(const DoutPrefixProvider *_dpp, CephContext *_cct, rgw::sal::RadosStore* _store, RGWServices *_svc, + void init(const DoutPrefixProvider *_dpp, CephContext *_cct, rgw::sal::RadosStore* _driver, RGWServices *_svc, RGWAsyncRadosProcessor *_async_rados, RGWHTTPManager *_http_manager, RGWSyncErrorLogger *_error_logger, RGWSyncTraceManager *_sync_tracer, RGWSyncModuleInstanceRef& _sync_module, PerfCounters* _counters) { dpp = _dpp; cct = _cct; - store = _store; + driver = _driver; svc = _svc; async_rados = _async_rados; http_manager = _http_manager; @@ -375,7 +375,7 @@ class RGWRados; class RGWRemoteDataLog : public RGWCoroutinesManager { const DoutPrefixProvider *dpp; - rgw::sal::RadosStore* store; + rgw::sal::RadosStore* driver; CephContext *cct; RGWCoroutinesManagerRegistry *cr_registry; RGWAsyncRadosProcessor *async_rados; @@ -413,7 +413,7 @@ public: }; class RGWDataSyncStatusManager : public DoutPrefixProvider { - rgw::sal::RadosStore* store; + rgw::sal::RadosStore* driver; rgw_zone_id source_zone; RGWRESTConn *conn; @@ -431,17 +431,17 @@ class RGWDataSyncStatusManager : public DoutPrefixProvider { int num_shards; public: - RGWDataSyncStatusManager(rgw::sal::RadosStore* _store, RGWAsyncRadosProcessor *async_rados, + RGWDataSyncStatusManager(rgw::sal::RadosStore* _driver, RGWAsyncRadosProcessor *async_rados, const rgw_zone_id& _source_zone, PerfCounters* counters) - : store(_store), source_zone(_source_zone), conn(NULL), error_logger(NULL), + : driver(_driver), source_zone(_source_zone), 
conn(NULL), error_logger(NULL), sync_module(nullptr), counters(counters), - source_log(this, store, async_rados), num_shards(0) {} - RGWDataSyncStatusManager(rgw::sal::RadosStore* _store, RGWAsyncRadosProcessor *async_rados, + source_log(this, driver, async_rados), num_shards(0) {} + RGWDataSyncStatusManager(rgw::sal::RadosStore* _driver, RGWAsyncRadosProcessor *async_rados, const rgw_zone_id& _source_zone, PerfCounters* counters, const RGWSyncModuleInstanceRef& _sync_module) - : store(_store), source_zone(_source_zone), conn(NULL), error_logger(NULL), + : driver(_driver), source_zone(_source_zone), conn(NULL), error_logger(NULL), sync_module(_sync_module), counters(counters), - source_log(this, store, async_rados), num_shards(0) {} + source_log(this, driver, async_rados), num_shards(0) {} ~RGWDataSyncStatusManager() { finalize(); } @@ -713,20 +713,20 @@ int rgw_read_remote_bilog_info(const DoutPrefixProvider *dpp, optional_yield y); class RGWBucketPipeSyncStatusManager : public DoutPrefixProvider { - rgw::sal::RadosStore* store; + rgw::sal::RadosStore* driver; RGWDataSyncEnv sync_env; - RGWCoroutinesManager cr_mgr{store->ctx(), - store->getRados()->get_cr_registry()}; + RGWCoroutinesManager cr_mgr{driver->ctx(), + driver->getRados()->get_cr_registry()}; - RGWHTTPManager http_manager{store->ctx(), cr_mgr.get_completion_mgr()}; + RGWHTTPManager http_manager{driver->ctx(), cr_mgr.get_completion_mgr()}; std::optional source_zone; std::optional source_bucket; std::unique_ptr error_logger = - std::make_unique(store, RGW_SYNC_ERROR_LOG_SHARD_PREFIX, + std::make_unique(driver, RGW_SYNC_ERROR_LOG_SHARD_PREFIX, ERROR_LOGGER_SHARDS); RGWSyncModuleInstanceRef sync_module; @@ -749,11 +749,11 @@ class RGWBucketPipeSyncStatusManager : public DoutPrefixProvider { std::vector sources; int do_init(const DoutPrefixProvider *dpp, std::ostream* ostr); - RGWBucketPipeSyncStatusManager(rgw::sal::RadosStore* store, + RGWBucketPipeSyncStatusManager(rgw::sal::RadosStore* driver, 
std::optional source_zone, std::optional source_bucket, const rgw_bucket& dest_bucket) - : store(store), source_zone(source_zone), source_bucket(source_bucket), + : driver(driver), source_zone(source_zone), source_bucket(source_bucket), dest_bucket(dest_bucket) {} int remote_info(const DoutPrefixProvider *dpp, source& s, @@ -761,7 +761,7 @@ class RGWBucketPipeSyncStatusManager : public DoutPrefixProvider { uint64_t* num_shards); public: static tl::expected, int> - construct(const DoutPrefixProvider* dpp, rgw::sal::RadosStore* store, + construct(const DoutPrefixProvider* dpp, rgw::sal::RadosStore* driver, std::optional source_zone, std::optional source_bucket, const rgw_bucket& dest_bucket, std::ostream *ostream); @@ -792,14 +792,14 @@ public: /// read the full sync status with respect to a source bucket int rgw_read_bucket_full_sync_status(const DoutPrefixProvider *dpp, - rgw::sal::RadosStore *store, + rgw::sal::RadosStore *driver, const rgw_sync_bucket_pipe& pipe, rgw_bucket_sync_status *status, optional_yield y); /// read the incremental sync status of all bucket shards from the given source zone int rgw_read_bucket_inc_sync_status(const DoutPrefixProvider *dpp, - rgw::sal::RadosStore *store, + rgw::sal::RadosStore *driver, const rgw_sync_bucket_pipe& pipe, uint64_t gen, std::vector *status); diff --git a/src/rgw/store/rados/rgw_lc_tier.cc b/src/rgw/store/rados/rgw_lc_tier.cc index 2a90c10ca5ffe..0ad2169312330 100644 --- a/src/rgw/store/rados/rgw_lc_tier.cc +++ b/src/rgw/store/rados/rgw_lc_tier.cc @@ -99,11 +99,11 @@ static inline string obj_to_aws_path(const rgw_obj& obj) return path; } -static int read_upload_status(const DoutPrefixProvider *dpp, rgw::sal::Store *store, +static int read_upload_status(const DoutPrefixProvider *dpp, rgw::sal::Driver *driver, const rgw_raw_obj *status_obj, rgw_lc_multipart_upload_info *status) { int ret = 0; - rgw::sal::RadosStore *rados = dynamic_cast(store); + rgw::sal::RadosStore *rados = dynamic_cast(driver); if (!rados) { 
ldpp_dout(dpp, 0) << "ERROR: Not a RadosStore. Cannot be transitioned to cloud." << dendl; @@ -138,11 +138,11 @@ static int read_upload_status(const DoutPrefixProvider *dpp, rgw::sal::Store *st return 0; } -static int put_upload_status(const DoutPrefixProvider *dpp, rgw::sal::Store *store, +static int put_upload_status(const DoutPrefixProvider *dpp, rgw::sal::Driver *driver, const rgw_raw_obj *status_obj, rgw_lc_multipart_upload_info *status) { int ret = 0; - rgw::sal::RadosStore *rados = dynamic_cast(store); + rgw::sal::RadosStore *rados = dynamic_cast(driver); if (!rados) { ldpp_dout(dpp, 0) << "ERROR: Not a RadosStore. Cannot be transitioned to cloud." << dendl; @@ -161,11 +161,11 @@ static int put_upload_status(const DoutPrefixProvider *dpp, rgw::sal::Store *sto return ret; } -static int delete_upload_status(const DoutPrefixProvider *dpp, rgw::sal::Store *store, +static int delete_upload_status(const DoutPrefixProvider *dpp, rgw::sal::Driver *driver, const rgw_raw_obj *status_obj) { int ret = 0; - rgw::sal::RadosStore *rados = dynamic_cast(store); + rgw::sal::RadosStore *rados = dynamic_cast(driver); if (!rados) { ldpp_dout(dpp, 0) << "ERROR: Not a RadosStore. Cannot be transitioned to cloud." 
<< dendl; @@ -255,7 +255,7 @@ static int cloud_tier_get_object(RGWLCCloudTierCtx& tier_ctx, bool head, target_obj_name += get_key_instance(tier_ctx.obj->get_key()); } - ret = tier_ctx.store->get_bucket(nullptr, b, &dest_bucket); + ret = tier_ctx.driver->get_bucket(nullptr, b, &dest_bucket); if (ret < 0) { ldpp_dout(tier_ctx.dpp, 0) << "ERROR: failed to initialize dest_bucket - " << tier_ctx.target_bucket_name << " , reterr = " << ret << dendl; return ret; @@ -772,7 +772,7 @@ static int cloud_tier_plain_transfer(RGWLCCloudTierCtx& tier_ctx) { target_obj_name += get_key_instance(tier_ctx.obj->get_key()); } - ret = tier_ctx.store->get_bucket(nullptr, b, &dest_bucket); + ret = tier_ctx.driver->get_bucket(nullptr, b, &dest_bucket); if (ret < 0) { ldpp_dout(tier_ctx.dpp, 0) << "ERROR: failed to initialize dest_bucket - " << tier_ctx.target_bucket_name << " , ret = " << ret << dendl; return ret; @@ -827,7 +827,7 @@ static int cloud_tier_send_multipart_part(RGWLCCloudTierCtx& tier_ctx, target_obj_name += get_key_instance(tier_ctx.obj->get_key()); } - ret = tier_ctx.store->get_bucket(nullptr, b, &dest_bucket); + ret = tier_ctx.driver->get_bucket(nullptr, b, &dest_bucket); if (ret < 0) { ldpp_dout(tier_ctx.dpp, 0) << "ERROR: failed to initialize dest_bucket - " << tier_ctx.target_bucket_name << " , ret = " << ret << dendl; return ret; @@ -1054,7 +1054,7 @@ static int cloud_tier_abort_multipart_upload(RGWLCCloudTierCtx& tier_ctx, /* ignore error, best effort */ } /* remove status obj */ - ret = delete_upload_status(tier_ctx.dpp, tier_ctx.store, &status_obj); + ret = delete_upload_status(tier_ctx.dpp, tier_ctx.driver, &status_obj); if (ret < 0) { ldpp_dout(tier_ctx.dpp, 0) << "ERROR: failed to remove sync status obj obj=" << status_obj << " ret=" << ret << dendl; // ignore error, best effort @@ -1104,10 +1104,10 @@ static int cloud_tier_multipart_transfer(RGWLCCloudTierCtx& tier_ctx) { } dest_obj.init(target_bucket, target_obj_name); - rgw_pool pool = 
static_cast(tier_ctx.store)->svc()->zone->get_zone_params().log_pool; + rgw_pool pool = static_cast(tier_ctx.driver)->svc()->zone->get_zone_params().log_pool; status_obj = rgw_raw_obj(pool, "lc_multipart_" + tier_ctx.obj->get_oid()); - ret = read_upload_status(tier_ctx.dpp, tier_ctx.store, &status_obj, &status); + ret = read_upload_status(tier_ctx.dpp, tier_ctx.driver, &status_obj, &status); if (ret < 0 && ret != -ENOENT) { ldpp_dout(tier_ctx.dpp, 0) << "ERROR: failed to read sync status of object " << src_obj << " ret=" << ret << dendl; @@ -1141,10 +1141,10 @@ static int cloud_tier_multipart_transfer(RGWLCCloudTierCtx& tier_ctx) { status.mtime = obj_properties.mtime; status.etag = obj_properties.etag; - ret = put_upload_status(tier_ctx.dpp, tier_ctx.store, &status_obj, &status); + ret = put_upload_status(tier_ctx.dpp, tier_ctx.driver, &status_obj, &status); if (ret < 0) { - ldpp_dout(tier_ctx.dpp, 0) << "ERROR: failed to store multipart upload state, ret=" << ret << dendl; + ldpp_dout(tier_ctx.dpp, 0) << "ERROR: failed to store multipart upload state, ret=" << ret << dendl; // continue with upload anyway } @@ -1193,7 +1193,7 @@ static int cloud_tier_multipart_transfer(RGWLCCloudTierCtx& tier_ctx) { } /* remove status obj */ - ret = delete_upload_status(tier_ctx.dpp, tier_ctx.store, &status_obj); + ret = delete_upload_status(tier_ctx.dpp, tier_ctx.driver, &status_obj); if (ret < 0) { ldpp_dout(tier_ctx.dpp, 0) << "ERROR: failed to abort multipart upload obj=" << tier_ctx.obj << " upload_id=" << status.upload_id << " part number " << cur_part << " (" << cpp_strerror(-ret) << ")" << dendl; // ignore error, best effort diff --git a/src/rgw/store/rados/rgw_lc_tier.h b/src/rgw/store/rados/rgw_lc_tier.h index a268dd417185d..1b21f262092af 100644 --- a/src/rgw/store/rados/rgw_lc_tier.h +++ b/src/rgw/store/rados/rgw_lc_tier.h @@ -20,7 +20,7 @@ struct RGWLCCloudTierCtx { /* Source */ rgw_bucket_dir_entry& o; - rgw::sal::Store *store; + rgw::sal::Driver *driver;
RGWBucketInfo& bucket_info; std::string storage_class; @@ -39,11 +39,11 @@ struct RGWLCCloudTierCtx { bool target_bucket_created{true}; RGWLCCloudTierCtx(CephContext* _cct, const DoutPrefixProvider *_dpp, - rgw_bucket_dir_entry& _o, rgw::sal::Store *_store, + rgw_bucket_dir_entry& _o, rgw::sal::Driver *_driver, RGWBucketInfo &_binfo, rgw::sal::Object *_obj, RGWRESTConn& _conn, std::string& _bucket, std::string& _storage_class) : - cct(_cct), dpp(_dpp), o(_o), store(_store), bucket_info(_binfo), + cct(_cct), dpp(_dpp), o(_o), driver(_driver), bucket_info(_binfo), obj(_obj), conn(_conn), target_bucket_name(_bucket), target_storage_class(_storage_class) {} }; diff --git a/src/rgw/store/rados/rgw_object_expirer_core.cc b/src/rgw/store/rados/rgw_object_expirer_core.cc index f8c88c226fcef..ec1bf3fb6dc99 100644 --- a/src/rgw/store/rados/rgw_object_expirer_core.cc +++ b/src/rgw/store/rados/rgw_object_expirer_core.cc @@ -108,7 +108,7 @@ int RGWObjExpStore::objexp_hint_add(const DoutPrefixProvider *dpp, cls_timeindex_add(op, utime_t(delete_at), keyext, hebl); string shard_name = objexp_hint_get_shardname(objexp_key_shard(obj_key, cct->_conf->rgw_objexp_hints_num_shards)); - auto obj = rados_svc->obj(rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, shard_name)); + auto obj = rados_svc->obj(rgw_raw_obj(driver->svc()->zone->get_zone_params().log_pool, shard_name)); int r = obj.open(dpp); if (r < 0) { ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): failed to open obj=" << obj << " (r=" << r << ")" << dendl; @@ -131,7 +131,7 @@ int RGWObjExpStore::objexp_hint_list(const DoutPrefixProvider *dpp, cls_timeindex_list(op, utime_t(start_time), utime_t(end_time), marker, max_entries, entries, out_marker, truncated); - auto obj = rados_svc->obj(rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, oid)); + auto obj = rados_svc->obj(rgw_raw_obj(driver->svc()->zone->get_zone_params().log_pool, oid)); int r = obj.open(dpp); if (r < 0) { ldpp_dout(dpp, 0) << "ERROR: " << 
__func__ << "(): failed to open obj=" << obj << " (r=" << r << ")" << dendl; @@ -180,7 +180,7 @@ int RGWObjExpStore::objexp_hint_trim(const DoutPrefixProvider *dpp, const string& from_marker, const string& to_marker) { - auto obj = rados_svc->obj(rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, oid)); + auto obj = rados_svc->obj(rgw_raw_obj(driver->svc()->zone->get_zone_params().log_pool, oid)); int r = obj.open(dpp); if (r < 0) { ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): failed to open obj=" << obj << " (r=" << r << ")" << dendl; @@ -201,7 +201,7 @@ int RGWObjectExpirer::garbage_single_object(const DoutPrefixProvider *dpp, objex RGWBucketInfo bucket_info; std::unique_ptr bucket; - int ret = store->get_bucket(dpp, nullptr, rgw_bucket(hint.tenant, hint.bucket_name, hint.bucket_id), &bucket, null_yield); + int ret = driver->get_bucket(dpp, nullptr, rgw_bucket(hint.tenant, hint.bucket_name, hint.bucket_id), &bucket, null_yield); if (-ENOENT == ret) { ldpp_dout(dpp, 15) << "NOTICE: cannot find bucket = " \ << hint.bucket_name << ". 
The object must be already removed" << dendl; @@ -238,7 +238,7 @@ void RGWObjectExpirer::garbage_chunk(const DoutPrefixProvider *dpp, ldpp_dout(dpp, 15) << "got removal hint for: " << iter->key_ts.sec() \ << " - " << iter->key_ext << dendl; - int ret = objexp_hint_parse(dpp, store->ctx(), *iter, &hint); + int ret = objexp_hint_parse(dpp, driver->ctx(), *iter, &hint); if (ret < 0) { ldpp_dout(dpp, 1) << "cannot parse removal hint for " << hint.obj_key << dendl; continue; @@ -291,7 +291,7 @@ bool RGWObjectExpirer::process_single_shard(const DoutPrefixProvider *dpp, bool truncated = false; bool done = true; - CephContext *cct = store->ctx(); + CephContext *cct = driver->ctx(); int num_entries = cct->_conf->rgw_objexp_chunk_size; int max_secs = cct->_conf->rgw_objexp_gc_interval; @@ -303,7 +303,7 @@ bool RGWObjectExpirer::process_single_shard(const DoutPrefixProvider *dpp, utime_t time(max_secs, 0); l.set_duration(time); - int ret = l.lock_exclusive(&static_cast(store)->getRados()->objexp_pool_ctx, shard); + int ret = l.lock_exclusive(&static_cast(driver)->getRados()->objexp_pool_ctx, shard); if (ret == -EBUSY) { /* already locked by another processor */ ldpp_dout(dpp, 5) << __func__ << "(): failed to acquire lock on " << shard << dendl; return false; @@ -339,7 +339,7 @@ bool RGWObjectExpirer::process_single_shard(const DoutPrefixProvider *dpp, marker = out_marker; } while (truncated); - l.unlock(&static_cast(store)->getRados()->objexp_pool_ctx, shard); + l.unlock(&static_cast(driver)->getRados()->objexp_pool_ctx, shard); return done; } @@ -348,7 +348,7 @@ bool RGWObjectExpirer::inspect_all_shards(const DoutPrefixProvider *dpp, const utime_t& last_run, const utime_t& round_start) { - CephContext * const cct = store->ctx(); + CephContext * const cct = driver->ctx(); int num_shards = cct->_conf->rgw_objexp_hints_num_shards; bool all_done = true; @@ -373,7 +373,7 @@ bool RGWObjectExpirer::going_down() void RGWObjectExpirer::start_processor() { - worker = new 
OEWorker(store->ctx(), this); + worker = new OEWorker(driver->ctx(), this); worker->create("rgw_obj_expirer"); } diff --git a/src/rgw/store/rados/rgw_object_expirer_core.h b/src/rgw/store/rados/rgw_object_expirer_core.h index 90261cdb223a7..fccd4199e7ea9 100644 --- a/src/rgw/store/rados/rgw_object_expirer_core.h +++ b/src/rgw/store/rados/rgw_object_expirer_core.h @@ -39,11 +39,11 @@ class cls_timeindex_entry; class RGWObjExpStore { CephContext *cct; RGWSI_RADOS *rados_svc; - rgw::sal::RadosStore* store; + rgw::sal::RadosStore* driver; public: - RGWObjExpStore(CephContext *_cct, RGWSI_RADOS *_rados_svc, rgw::sal::RadosStore* _store) : cct(_cct), + RGWObjExpStore(CephContext *_cct, RGWSI_RADOS *_rados_svc, rgw::sal::RadosStore* _driver) : cct(_cct), rados_svc(_rados_svc), - store(_store) {} + driver(_driver) {} int objexp_hint_add(const DoutPrefixProvider *dpp, const ceph::real_time& delete_at, @@ -72,7 +72,7 @@ public: class RGWObjectExpirer { protected: - rgw::sal::Store* store; + rgw::sal::Driver* driver; RGWObjExpStore exp_store; class OEWorker : public Thread, public DoutPrefixProvider { @@ -100,9 +100,9 @@ protected: std::atomic down_flag = { false }; public: - explicit RGWObjectExpirer(rgw::sal::Store* _store) - : store(_store), - exp_store(_store->ctx(), static_cast(store)->svc()->rados, static_cast(store)), + explicit RGWObjectExpirer(rgw::sal::Driver* _driver) + : driver(_driver), + exp_store(_driver->ctx(), static_cast(driver)->svc()->rados, static_cast(driver)), worker(NULL) { } ~RGWObjectExpirer() { diff --git a/src/rgw/store/rados/rgw_period.cc b/src/rgw/store/rados/rgw_period.cc index df4316c1a9b43..61602b354e28e 100644 --- a/src/rgw/store/rados/rgw_period.cc +++ b/src/rgw/store/rados/rgw_period.cc @@ -150,9 +150,9 @@ void RGWPeriod::fork() realm_epoch++; } -static int read_sync_status(const DoutPrefixProvider *dpp, rgw::sal::Store* store, rgw_meta_sync_status *sync_status) +static int read_sync_status(const DoutPrefixProvider *dpp, rgw::sal::Driver* 
driver, rgw_meta_sync_status *sync_status) { - rgw::sal::RadosStore* rados_store = static_cast(store); + rgw::sal::RadosStore* rados_store = static_cast(driver); // initialize a sync status manager to read the status RGWMetaSyncStatusManager mgr(rados_store, rados_store->svc()->rados->get_async_processor()); int r = mgr.init(dpp); @@ -165,13 +165,13 @@ static int read_sync_status(const DoutPrefixProvider *dpp, rgw::sal::Store* stor } int RGWPeriod::update_sync_status(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, /* for now */ + rgw::sal::Driver* driver, /* for now */ const RGWPeriod ¤t_period, std::ostream& error_stream, bool force_if_stale) { rgw_meta_sync_status status; - int r = read_sync_status(dpp, store, &status); + int r = read_sync_status(dpp, driver, &status); if (r < 0) { ldpp_dout(dpp, 0) << "period failed to read sync status: " << cpp_strerror(-r) << dendl; @@ -216,7 +216,7 @@ int RGWPeriod::update_sync_status(const DoutPrefixProvider *dpp, } int RGWPeriod::commit(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, RGWRealm& realm, const RGWPeriod& current_period, std::ostream& error_stream, optional_yield y, bool force_if_stale) @@ -250,7 +250,7 @@ int RGWPeriod::commit(const DoutPrefixProvider *dpp, // did the master zone change? 
if (master_zone != current_period.get_master_zone()) { // store the current metadata sync status in the period - int r = update_sync_status(dpp, store, current_period, error_stream, force_if_stale); + int r = update_sync_status(dpp, driver, current_period, error_stream, force_if_stale); if (r < 0) { ldpp_dout(dpp, 0) << "failed to update metadata sync status: " << cpp_strerror(-r) << dendl; diff --git a/src/rgw/store/rados/rgw_rest_pubsub.cc b/src/rgw/store/rados/rgw_rest_pubsub.cc index 21f48b7e55f78..23d56615ac953 100644 --- a/src/rgw/store/rados/rgw_rest_pubsub.cc +++ b/src/rgw/store/rados/rgw_rest_pubsub.cc @@ -117,7 +117,7 @@ class RGWPSCreateTopicOp : public RGWOp { dest.arn_topic = topic_name; // the topic ARN will be sent in the reply const rgw::ARN arn(rgw::Partition::aws, rgw::Service::sns, - store->get_zone()->get_zonegroup().get_name(), + driver->get_zone()->get_zonegroup().get_name(), s->user->get_tenant(), topic_name); topic_arn = arn.to_string(); return 0; @@ -167,7 +167,7 @@ void RGWPSCreateTopicOp::execute(optional_yield y) { return; } - ps.emplace(static_cast(store), s->owner.get_id().tenant); + ps.emplace(static_cast(driver), s->owner.get_id().tenant); op_ret = ps->create_topic(this, topic_name, dest, topic_arn, opaque_data, y); if (op_ret < 0) { ldpp_dout(this, 1) << "failed to create topic '" << topic_name << "', ret=" << op_ret << dendl; @@ -222,7 +222,7 @@ public: }; void RGWPSListTopicsOp::execute(optional_yield y) { - ps.emplace(static_cast(store), s->owner.get_id().tenant); + ps.emplace(static_cast(driver), s->owner.get_id().tenant); op_ret = ps->get_topics(&result); // if there are no topics it is not considered an error op_ret = op_ret == -ENOENT ? 
0 : op_ret; @@ -301,7 +301,7 @@ void RGWPSGetTopicOp::execute(optional_yield y) { if (op_ret < 0) { return; } - ps.emplace(static_cast(store), s->owner.get_id().tenant); + ps.emplace(static_cast(driver), s->owner.get_id().tenant); op_ret = ps->get_topic(topic_name, &result); if (op_ret < 0) { ldpp_dout(this, 1) << "failed to get topic '" << topic_name << "', ret=" << op_ret << dendl; @@ -378,7 +378,7 @@ void RGWPSGetTopicAttributesOp::execute(optional_yield y) { if (op_ret < 0) { return; } - ps.emplace(static_cast(store), s->owner.get_id().tenant); + ps.emplace(static_cast(driver), s->owner.get_id().tenant); op_ret = ps->get_topic(topic_name, &result); if (op_ret < 0) { ldpp_dout(this, 1) << "failed to get topic '" << topic_name << "', ret=" << op_ret << dendl; @@ -464,7 +464,7 @@ void RGWPSDeleteTopicOp::execute(optional_yield y) { if (op_ret < 0) { return; } - ps.emplace(static_cast(store), s->owner.get_id().tenant); + ps.emplace(static_cast(driver), s->owner.get_id().tenant); op_ret = ps->remove_topic(this, topic_name, y); if (op_ret < 0) { ldpp_dout(this, 1) << "failed to remove topic '" << topic_name << ", ret=" << op_ret << dendl; @@ -606,7 +606,7 @@ RGWOp* RGWHandler_REST_PSTopic_AWS::op_post() { } int RGWHandler_REST_PSTopic_AWS::authorize(const DoutPrefixProvider* dpp, optional_yield y) { - return RGW_Auth_S3::authorize(dpp, store, auth_registry, s, y); + return RGW_Auth_S3::authorize(dpp, driver, auth_registry, s, y); } namespace { @@ -740,7 +740,7 @@ void RGWPSCreateNotifOp::execute(optional_yield y) { return; } - ps.emplace(static_cast(store), s->owner.get_id().tenant); + ps.emplace(static_cast(driver), s->owner.get_id().tenant); auto b = ps->get_bucket(bucket_info.bucket); ceph_assert(b); @@ -829,9 +829,9 @@ int RGWPSCreateNotifOp::verify_permission(optional_yield y) { return ret; } - std::unique_ptr user = store->get_user(s->owner.get_id()); + std::unique_ptr user = driver->get_user(s->owner.get_id()); std::unique_ptr bucket; - ret = 
store->get_bucket(this, user.get(), s->owner.get_id().tenant, bucket_name, &bucket, y); + ret = driver->get_bucket(this, user.get(), s->owner.get_id().tenant, bucket_name, &bucket, y); if (ret < 0) { ldpp_dout(this, 1) << "failed to get bucket info, cannot verify ownership" << dendl; return ret; @@ -888,7 +888,7 @@ void RGWPSDeleteNotifOp::execute(optional_yield y) { return; } - ps.emplace(static_cast(store), s->owner.get_id().tenant); + ps.emplace(static_cast(driver), s->owner.get_id().tenant); auto b = ps->get_bucket(bucket_info.bucket); ceph_assert(b); @@ -922,9 +922,9 @@ int RGWPSDeleteNotifOp::verify_permission(optional_yield y) { return ret; } - std::unique_ptr user = store->get_user(s->owner.get_id()); + std::unique_ptr user = driver->get_user(s->owner.get_id()); std::unique_ptr bucket; - ret = store->get_bucket(this, user.get(), s->owner.get_id().tenant, bucket_name, &bucket, y); + ret = driver->get_bucket(this, user.get(), s->owner.get_id().tenant, bucket_name, &bucket, y); if (ret < 0) { return ret; } @@ -989,7 +989,7 @@ private: }; void RGWPSListNotifsOp::execute(optional_yield y) { - ps.emplace(static_cast(store), s->owner.get_id().tenant); + ps.emplace(static_cast(driver), s->owner.get_id().tenant); auto b = ps->get_bucket(bucket_info.bucket); ceph_assert(b); @@ -1027,9 +1027,9 @@ int RGWPSListNotifsOp::verify_permission(optional_yield y) { return ret; } - std::unique_ptr user = store->get_user(s->owner.get_id()); + std::unique_ptr user = driver->get_user(s->owner.get_id()); std::unique_ptr bucket; - ret = store->get_bucket(this, user.get(), s->owner.get_id().tenant, bucket_name, &bucket, y); + ret = driver->get_bucket(this, user.get(), s->owner.get_id().tenant, bucket_name, &bucket, y); if (ret < 0) { return ret; } diff --git a/src/rgw/store/rados/rgw_rest_realm.cc b/src/rgw/store/rados/rgw_rest_realm.cc index 771355dee1152..79640a2a10812 100644 --- a/src/rgw/store/rados/rgw_rest_realm.cc +++ b/src/rgw/store/rados/rgw_rest_realm.cc @@ -75,7 +75,7 @@ 
void RGWOp_Period_Get::execute(optional_yield y) period.set_id(period_id); period.set_epoch(epoch); - op_ret = period.init(this, store->ctx(), static_cast(store)->svc()->sysobj, realm_id, y, realm_name); + op_ret = period.init(this, driver->ctx(), static_cast(driver)->svc()->sysobj, realm_id, y, realm_name); if (op_ret < 0) ldpp_dout(this, 5) << "failed to read period" << dendl; } @@ -96,10 +96,10 @@ class RGWOp_Period_Post : public RGWOp_Period_Base { void RGWOp_Period_Post::execute(optional_yield y) { - auto cct = store->ctx(); + auto cct = driver->ctx(); // initialize the period without reading from rados - period.init(this, cct, static_cast(store)->svc()->sysobj, y, false); + period.init(this, cct, static_cast(driver)->svc()->sysobj, y, false); // decode the period from input const auto max_size = cct->_conf->rgw_max_put_param_size; @@ -111,9 +111,9 @@ void RGWOp_Period_Post::execute(optional_yield y) } // require period.realm_id to match our realm - if (period.get_realm() != static_cast(store)->svc()->zone->get_realm().get_id()) { + if (period.get_realm() != static_cast(driver)->svc()->zone->get_realm().get_id()) { error_stream << "period with realm id " << period.get_realm() - << " doesn't match current realm " << static_cast(store)->svc()->zone->get_realm().get_id() << std::endl; + << " doesn't match current realm " << static_cast(driver)->svc()->zone->get_realm().get_id() << std::endl; op_ret = -EINVAL; return; } @@ -122,7 +122,7 @@ void RGWOp_Period_Post::execute(optional_yield y) // period that we haven't restarted with yet. 
we also don't want to modify // the objects in use by RGWRados RGWRealm realm(period.get_realm()); - op_ret = realm.init(this, cct, static_cast(store)->svc()->sysobj, y); + op_ret = realm.init(this, cct, static_cast(driver)->svc()->sysobj, y); if (op_ret < 0) { ldpp_dout(this, -1) << "failed to read current realm: " << cpp_strerror(-op_ret) << dendl; @@ -130,7 +130,7 @@ void RGWOp_Period_Post::execute(optional_yield y) } RGWPeriod current_period; - op_ret = current_period.init(this, cct, static_cast(store)->svc()->sysobj, realm.get_id(), y); + op_ret = current_period.init(this, cct, static_cast(driver)->svc()->sysobj, realm.get_id(), y); if (op_ret < 0) { ldpp_dout(this, -1) << "failed to read current period: " << cpp_strerror(-op_ret) << dendl; @@ -139,7 +139,7 @@ void RGWOp_Period_Post::execute(optional_yield y) // if period id is empty, handle as 'period commit' if (period.get_id().empty()) { - op_ret = period.commit(this, store, realm, current_period, error_stream, y); + op_ret = period.commit(this, driver, realm, current_period, error_stream, y); if (op_ret < 0) { ldpp_dout(this, -1) << "master zone failed to commit period" << dendl; } @@ -147,7 +147,7 @@ void RGWOp_Period_Post::execute(optional_yield y) } // if it's not period commit, nobody is allowed to push to the master zone - if (period.get_master_zone() == static_cast(store)->svc()->zone->get_zone_params().get_id()) { + if (period.get_master_zone() == static_cast(driver)->svc()->zone->get_zone_params().get_id()) { ldpp_dout(this, 10) << "master zone rejecting period id=" << period.get_id() << " epoch=" << period.get_epoch() << dendl; op_ret = -EINVAL; // XXX: error code @@ -174,7 +174,7 @@ void RGWOp_Period_Post::execute(optional_yield y) return; } - auto period_history = static_cast(store)->svc()->mdlog->get_period_history(); + auto period_history = static_cast(driver)->svc()->mdlog->get_period_history(); // decide whether we can set_current_period() or set_latest_epoch() if (period.get_id() != 
current_period.get_id()) { @@ -250,7 +250,7 @@ class RGWHandler_Period : public RGWHandler_Auth_S3 { class RGWRESTMgr_Period : public RGWRESTMgr { public: - RGWHandler_REST* get_handler(rgw::sal::Store* store, + RGWHandler_REST* get_handler(rgw::sal::Driver* driver, req_state*, const rgw::auth::StrategyRegistry& auth_registry, const std::string&) override { @@ -283,7 +283,7 @@ void RGWOp_Realm_Get::execute(optional_yield y) // read realm realm.reset(new RGWRealm(id, name)); - op_ret = realm->init(this, g_ceph_context, static_cast(store)->svc()->sysobj, y); + op_ret = realm->init(this, g_ceph_context, static_cast(driver)->svc()->sysobj, y); if (op_ret < 0) ldpp_dout(this, -1) << "failed to read realm id=" << id << " name=" << name << dendl; @@ -324,10 +324,10 @@ void RGWOp_Realm_List::execute(optional_yield y) { { // read default realm - RGWRealm realm(store->ctx(), static_cast(store)->svc()->sysobj); + RGWRealm realm(driver->ctx(), static_cast(driver)->svc()->sysobj); [[maybe_unused]] int ret = realm.read_default_id(this, default_id, y); } - op_ret = static_cast(store)->svc()->zone->list_realms(this, realms); + op_ret = static_cast(driver)->svc()->zone->list_realms(this, realms); if (op_ret < 0) ldpp_dout(this, -1) << "failed to list realms" << dendl; } @@ -367,7 +367,7 @@ RGWRESTMgr_Realm::RGWRESTMgr_Realm() } RGWHandler_REST* -RGWRESTMgr_Realm::get_handler(rgw::sal::Store* store, +RGWRESTMgr_Realm::get_handler(rgw::sal::Driver* driver, req_state*, const rgw::auth::StrategyRegistry& auth_registry, const std::string&) diff --git a/src/rgw/store/rados/rgw_rest_realm.h b/src/rgw/store/rados/rgw_rest_realm.h index aa9aa423fd10d..a0d1dc1c92a6b 100644 --- a/src/rgw/store/rados/rgw_rest_realm.h +++ b/src/rgw/store/rados/rgw_rest_realm.h @@ -9,7 +9,7 @@ class RGWRESTMgr_Realm : public RGWRESTMgr { public: RGWRESTMgr_Realm(); - RGWHandler_REST* get_handler(rgw::sal::Store* store, + RGWHandler_REST* get_handler(rgw::sal::Driver* driver, req_state*, const 
rgw::auth::StrategyRegistry& auth_registry, const std::string&) override; diff --git a/src/rgw/store/rados/rgw_rest_user.cc b/src/rgw/store/rados/rgw_rest_user.cc index 78b06d365ed62..c2aeece247dbe 100644 --- a/src/rgw/store/rados/rgw_rest_user.cc +++ b/src/rgw/store/rados/rgw_rest_user.cc @@ -35,7 +35,7 @@ public: void RGWOp_User_List::execute(optional_yield y) { - RGWUserAdminOpState op_state(store); + RGWUserAdminOpState op_state(driver); uint32_t max_entries; std::string marker; @@ -44,7 +44,7 @@ void RGWOp_User_List::execute(optional_yield y) op_state.max_entries = max_entries; op_state.marker = marker; - op_ret = RGWUserAdminOp_User::list(this, store, op_state, flusher); + op_ret = RGWUserAdminOp_User::list(this, driver, op_state, flusher); } class RGWOp_User_Info : public RGWRESTOp { @@ -63,7 +63,7 @@ public: void RGWOp_User_Info::execute(optional_yield y) { - RGWUserAdminOpState op_state(store); + RGWUserAdminOpState op_state(driver); std::string uid_str, access_key_str; bool fetch_stats; @@ -91,7 +91,7 @@ void RGWOp_User_Info::execute(optional_yield y) op_state.set_fetch_stats(fetch_stats); op_state.set_sync_stats(sync_stats); - op_ret = RGWUserAdminOp_User::info(s, store, op_state, flusher, y); + op_ret = RGWUserAdminOp_User::info(s, driver, op_state, flusher, y); } class RGWOp_User_Create : public RGWRESTOp { @@ -131,7 +131,7 @@ void RGWOp_User_Create::execute(optional_yield y) const int32_t default_max_buckets = s->cct->_conf.get_val("rgw_user_max_buckets"); - RGWUserAdminOpState op_state(store); + RGWUserAdminOpState op_state(driver); RESTArgs::get_string(s, "uid", uid_str, &uid_str); rgw_user uid(uid_str); @@ -212,7 +212,7 @@ void RGWOp_User_Create::execute(optional_yield y) if (!default_placement_str.empty()) { rgw_placement_rule target_rule; target_rule.from_str(default_placement_str); - if (!store->valid_placement(target_rule)) { + if (!driver->valid_placement(target_rule)) { ldpp_dout(this, 0) << "NOTICE: invalid dest placement: " << 
target_rule.to_str() << dendl; op_ret = -EINVAL; return; @@ -227,12 +227,12 @@ void RGWOp_User_Create::execute(optional_yield y) } bufferlist data; - op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; } - op_ret = RGWUserAdminOp_User::create(s, store, op_state, flusher, y); + op_ret = RGWUserAdminOp_User::create(s, driver, op_state, flusher, y); } class RGWOp_User_Modify : public RGWRESTOp { @@ -268,7 +268,7 @@ void RGWOp_User_Modify::execute(optional_yield y) bool quota_set; int32_t max_buckets; - RGWUserAdminOpState op_state(store); + RGWUserAdminOpState op_state(driver); RESTArgs::get_string(s, "uid", uid_str, &uid_str); rgw_user uid(uid_str); @@ -351,7 +351,7 @@ void RGWOp_User_Modify::execute(optional_yield y) if (!default_placement_str.empty()) { rgw_placement_rule target_rule; target_rule.from_str(default_placement_str); - if (!store->valid_placement(target_rule)) { + if (!driver->valid_placement(target_rule)) { ldpp_dout(this, 0) << "NOTICE: invalid dest placement: " << target_rule.to_str() << dendl; op_ret = -EINVAL; return; @@ -366,12 +366,12 @@ void RGWOp_User_Modify::execute(optional_yield y) } bufferlist data; - op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; } - op_ret = RGWUserAdminOp_User::modify(s, store, op_state, flusher, y); + op_ret = RGWUserAdminOp_User::modify(s, driver, op_state, flusher, y); } class RGWOp_User_Remove : public RGWRESTOp { @@ -393,7 +393,7 @@ void RGWOp_User_Remove::execute(optional_yield y) std::string uid_str; bool 
purge_data; - RGWUserAdminOpState op_state(store); + RGWUserAdminOpState op_state(driver); RESTArgs::get_string(s, "uid", uid_str, &uid_str); rgw_user uid(uid_str); @@ -407,12 +407,12 @@ void RGWOp_User_Remove::execute(optional_yield y) op_state.set_purge_data(purge_data); bufferlist data; - op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; } - op_ret = RGWUserAdminOp_User::remove(s, store, op_state, flusher, s->yield); + op_ret = RGWUserAdminOp_User::remove(s, driver, op_state, flusher, s->yield); } class RGWOp_Subuser_Create : public RGWRESTOp { @@ -445,7 +445,7 @@ void RGWOp_Subuser_Create::execute(optional_yield y) uint32_t perm_mask = 0; int32_t key_type = KEY_TYPE_SWIFT; - RGWUserAdminOpState op_state(store); + RGWUserAdminOpState op_state(driver); RESTArgs::get_string(s, "uid", uid_str, &uid_str); rgw_user uid(uid_str); @@ -482,12 +482,12 @@ void RGWOp_Subuser_Create::execute(optional_yield y) op_state.set_key_type(key_type); bufferlist data; - op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; } - op_ret = RGWUserAdminOp_Subuser::create(s, store, op_state, flusher, y); + op_ret = RGWUserAdminOp_Subuser::create(s, driver, op_state, flusher, y); } class RGWOp_Subuser_Modify : public RGWRESTOp { @@ -512,7 +512,7 @@ void RGWOp_Subuser_Modify::execute(optional_yield y) std::string key_type_str; std::string perm_str; - RGWUserAdminOpState op_state(store); + RGWUserAdminOpState op_state(driver); uint32_t perm_mask; int32_t key_type = KEY_TYPE_SWIFT; @@ -549,12 
+549,12 @@ void RGWOp_Subuser_Modify::execute(optional_yield y) op_state.set_key_type(key_type); bufferlist data; - op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; } - op_ret = RGWUserAdminOp_Subuser::modify(s, store, op_state, flusher, y); + op_ret = RGWUserAdminOp_Subuser::modify(s, driver, op_state, flusher, y); } class RGWOp_Subuser_Remove : public RGWRESTOp { @@ -577,7 +577,7 @@ void RGWOp_Subuser_Remove::execute(optional_yield y) std::string subuser; bool purge_keys; - RGWUserAdminOpState op_state(store); + RGWUserAdminOpState op_state(driver); RESTArgs::get_string(s, "uid", uid_str, &uid_str); rgw_user uid(uid_str); @@ -592,12 +592,12 @@ void RGWOp_Subuser_Remove::execute(optional_yield y) op_state.set_purge_keys(); bufferlist data; - op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; } - op_ret = RGWUserAdminOp_Subuser::remove(s, store, op_state, flusher, y); + op_ret = RGWUserAdminOp_Subuser::remove(s, driver, op_state, flusher, y); } class RGWOp_Key_Create : public RGWRESTOp { @@ -624,7 +624,7 @@ void RGWOp_Key_Create::execute(optional_yield y) bool gen_key; - RGWUserAdminOpState op_state(store); + RGWUserAdminOpState op_state(driver); RESTArgs::get_string(s, "uid", uid_str, &uid_str); rgw_user uid(uid_str); @@ -653,7 +653,7 @@ void RGWOp_Key_Create::execute(optional_yield y) op_state.set_key_type(key_type); } - op_ret = RGWUserAdminOp_Key::create(s, store, op_state, flusher, y); + op_ret = RGWUserAdminOp_Key::create(s, driver, op_state, flusher, y); 
} class RGWOp_Key_Remove : public RGWRESTOp { @@ -677,7 +677,7 @@ void RGWOp_Key_Remove::execute(optional_yield y) std::string access_key; std::string key_type_str; - RGWUserAdminOpState op_state(store); + RGWUserAdminOpState op_state(driver); RESTArgs::get_string(s, "uid", uid_str, &uid_str); rgw_user uid(uid_str); @@ -700,7 +700,7 @@ void RGWOp_Key_Remove::execute(optional_yield y) op_state.set_key_type(key_type); } - op_ret = RGWUserAdminOp_Key::remove(s, store, op_state, flusher, y); + op_ret = RGWUserAdminOp_Key::remove(s, driver, op_state, flusher, y); } class RGWOp_Caps_Add : public RGWRESTOp { @@ -722,7 +722,7 @@ void RGWOp_Caps_Add::execute(optional_yield y) std::string uid_str; std::string caps; - RGWUserAdminOpState op_state(store); + RGWUserAdminOpState op_state(driver); RESTArgs::get_string(s, "uid", uid_str, &uid_str); rgw_user uid(uid_str); @@ -733,12 +733,12 @@ void RGWOp_Caps_Add::execute(optional_yield y) op_state.set_caps(caps); bufferlist data; - op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; } - op_ret = RGWUserAdminOp_Caps::add(s, store, op_state, flusher, y); + op_ret = RGWUserAdminOp_Caps::add(s, driver, op_state, flusher, y); } class RGWOp_Caps_Remove : public RGWRESTOp { @@ -760,7 +760,7 @@ void RGWOp_Caps_Remove::execute(optional_yield y) std::string uid_str; std::string caps; - RGWUserAdminOpState op_state(store); + RGWUserAdminOpState op_state(driver); RESTArgs::get_string(s, "uid", uid_str, &uid_str); rgw_user uid(uid_str); @@ -771,12 +771,12 @@ void RGWOp_Caps_Remove::execute(optional_yield y) op_state.set_caps(caps); bufferlist data; - op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master(s, 
s->user.get(), nullptr, data, nullptr, s->info, y); if (op_ret < 0) { ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; } - op_ret = RGWUserAdminOp_Caps::remove(s, store, op_state, flusher, y); + op_ret = RGWUserAdminOp_Caps::remove(s, driver, op_state, flusher, y); } struct UserQuotas { @@ -816,7 +816,7 @@ public: void RGWOp_Quota_Info::execute(optional_yield y) { - RGWUserAdminOpState op_state(store); + RGWUserAdminOpState op_state(driver); std::string uid_str; std::string quota_type; @@ -843,7 +843,7 @@ void RGWOp_Quota_Info::execute(optional_yield y) op_state.set_user_id(uid); RGWUser user; - op_ret = user.init(s, store, op_state, y); + op_ret = user.init(s, driver, op_state, y); if (op_ret < 0) return; @@ -935,7 +935,7 @@ public: void RGWOp_Quota_Set::execute(optional_yield y) { - RGWUserAdminOpState op_state(store); + RGWUserAdminOpState op_state(driver); std::string uid_str; std::string quota_type; @@ -978,7 +978,7 @@ void RGWOp_Quota_Set::execute(optional_yield y) op_state.set_user_id(uid); RGWUser user; - op_ret = user.init(s, store, op_state, y); + op_ret = user.init(s, driver, op_state, y); if (op_ret < 0) { ldpp_dout(this, 20) << "failed initializing user info: " << op_ret << dendl; return; @@ -993,7 +993,7 @@ void RGWOp_Quota_Set::execute(optional_yield y) if (set_all) { UserQuotas quotas; - if ((op_ret = get_json_input(store->ctx(), s, quotas, QUOTA_INPUT_MAX_LEN, NULL)) < 0) { + if ((op_ret = get_json_input(driver->ctx(), s, quotas, QUOTA_INPUT_MAX_LEN, NULL)) < 0) { ldpp_dout(this, 20) << "failed to retrieve input" << dendl; return; } @@ -1005,7 +1005,7 @@ void RGWOp_Quota_Set::execute(optional_yield y) if (!use_http_params) { bool empty; - op_ret = get_json_input(store->ctx(), s, quota, QUOTA_INPUT_MAX_LEN, &empty); + op_ret = get_json_input(driver->ctx(), s, quota, QUOTA_INPUT_MAX_LEN, &empty); if (op_ret < 0) { ldpp_dout(this, 20) << "failed to retrieve input" << dendl; if (!empty) diff --git 
a/src/rgw/store/rados/rgw_rest_user.h b/src/rgw/store/rados/rgw_rest_user.h index d094df745dcfb..ee585be450857 100644 --- a/src/rgw/store/rados/rgw_rest_user.h +++ b/src/rgw/store/rados/rgw_rest_user.h @@ -27,7 +27,7 @@ public: RGWRESTMgr_User() = default; ~RGWRESTMgr_User() override = default; - RGWHandler_REST *get_handler(rgw::sal::Store* store, + RGWHandler_REST *get_handler(rgw::sal::Driver* driver, req_state*, const rgw::auth::StrategyRegistry& auth_registry, const std::string&) override { diff --git a/src/rgw/store/rados/rgw_sal_rados.cc b/src/rgw/store/rados/rgw_sal_rados.cc index 1b44424a9d724..577569dd5d558 100644 --- a/src/rgw/store/rados/rgw_sal_rados.cc +++ b/src/rgw/store/rados/rgw_sal_rados.cc @@ -3610,7 +3610,7 @@ int RadosRole::delete_obj(const DoutPrefixProvider *dpp, optional_yield y) extern "C" { -void* newStore(void) +void* newRadosStore(void) { rgw::sal::RadosStore* store = new rgw::sal::RadosStore(); if (store) { diff --git a/src/rgw/store/rados/rgw_sal_rados.h b/src/rgw/store/rados/rgw_sal_rados.h index c54f3344d13d0..499e099480761 100644 --- a/src/rgw/store/rados/rgw_sal_rados.h +++ b/src/rgw/store/rados/rgw_sal_rados.h @@ -120,7 +120,7 @@ class RadosZone : public StoreZone { virtual RGWBucketSyncPolicyHandlerRef get_sync_policy_handler() override; }; -class RadosStore : public StoreStore { +class RadosStore : public StoreDriver { private: RGWRados* rados; RGWUserCtl* user_ctl; @@ -375,14 +375,14 @@ class RadosObject : public StoreObject { : StoreObject(_k), store(_st), acls(), - rados_ctx(new RGWObjectCtx(dynamic_cast(store))), + rados_ctx(new RGWObjectCtx(dynamic_cast(store))), rados_ctx_owned(true) { } RadosObject(RadosStore *_st, const rgw_obj_key& _k, Bucket* _b) : StoreObject(_k, _b), store(_st), acls(), - rados_ctx(new RGWObjectCtx(dynamic_cast(store))) , + rados_ctx(new RGWObjectCtx(dynamic_cast(store))) , rados_ctx_owned(true) { } RadosObject(RadosObject& _o) : StoreObject(_o) { diff --git a/src/rgw/store/rados/rgw_service.cc 
b/src/rgw/store/rados/rgw_service.cc index 3f097ce26b3a3..4fcb1ebdef7f9 100644 --- a/src/rgw/store/rados/rgw_service.cc +++ b/src/rgw/store/rados/rgw_service.cc @@ -374,7 +374,7 @@ RGWCtlDef::_meta::_meta() {} RGWCtlDef::_meta::~_meta() {} -int RGWCtlDef::init(RGWServices& svc, rgw::sal::Store* store, const DoutPrefixProvider *dpp) +int RGWCtlDef::init(RGWServices& svc, rgw::sal::Driver* driver, const DoutPrefixProvider *dpp) { meta.mgr.reset(new RGWMetadataManager(svc.meta)); @@ -383,14 +383,14 @@ int RGWCtlDef::init(RGWServices& svc, rgw::sal::Store* store, const DoutPrefixPr auto sync_module = svc.sync_modules->get_sync_module(); if (sync_module) { meta.bucket.reset(sync_module->alloc_bucket_meta_handler()); - meta.bucket_instance.reset(sync_module->alloc_bucket_instance_meta_handler(store)); + meta.bucket_instance.reset(sync_module->alloc_bucket_instance_meta_handler(driver)); } else { meta.bucket.reset(RGWBucketMetaHandlerAllocator::alloc()); - meta.bucket_instance.reset(RGWBucketInstanceMetaHandlerAllocator::alloc(store)); + meta.bucket_instance.reset(RGWBucketInstanceMetaHandlerAllocator::alloc(driver)); } meta.otp.reset(RGWOTPMetaHandlerAllocator::alloc()); - meta.role = std::make_unique(store, svc.role); + meta.role = std::make_unique(driver, svc.role); user.reset(new RGWUserCtl(svc.zone, svc.user, (RGWUserMetadataHandler *)meta.user.get())); bucket.reset(new RGWBucketCtl(svc.zone, @@ -420,12 +420,12 @@ int RGWCtlDef::init(RGWServices& svc, rgw::sal::Store* store, const DoutPrefixPr return 0; } -int RGWCtl::init(RGWServices *_svc, rgw::sal::Store* store, const DoutPrefixProvider *dpp) +int RGWCtl::init(RGWServices *_svc, rgw::sal::Driver* driver, const DoutPrefixProvider *dpp) { svc = _svc; cct = svc->cct; - int r = _ctl.init(*svc, store, dpp); + int r = _ctl.init(*svc, driver, dpp); if (r < 0) { ldpp_dout(dpp, 0) << "ERROR: failed to start init ctls (" << cpp_strerror(-r) << dendl; return r; diff --git a/src/rgw/store/rados/rgw_service.h 
b/src/rgw/store/rados/rgw_service.h index 6a34443d39764..dc4991388a981 100644 --- a/src/rgw/store/rados/rgw_service.h +++ b/src/rgw/store/rados/rgw_service.h @@ -190,7 +190,7 @@ struct RGWCtlDef { RGWCtlDef(); ~RGWCtlDef(); - int init(RGWServices& svc, rgw::sal::Store* store, const DoutPrefixProvider *dpp); + int init(RGWServices& svc, rgw::sal::Driver* driver, const DoutPrefixProvider *dpp); }; struct RGWCtl { @@ -213,7 +213,7 @@ struct RGWCtl { RGWBucketCtl *bucket{nullptr}; RGWOTPCtl *otp{nullptr}; - int init(RGWServices *_svc, rgw::sal::Store* store, const DoutPrefixProvider *dpp); + int init(RGWServices *_svc, rgw::sal::Driver* driver, const DoutPrefixProvider *dpp); }; #endif diff --git a/src/rgw/store/rados/rgw_sync_module.cc b/src/rgw/store/rados/rgw_sync_module.cc index ae23bab14b25b..5a1e70be34ebe 100644 --- a/src/rgw/store/rados/rgw_sync_module.cc +++ b/src/rgw/store/rados/rgw_sync_module.cc @@ -21,9 +21,9 @@ RGWMetadataHandler *RGWSyncModuleInstance::alloc_bucket_meta_handler() return RGWBucketMetaHandlerAllocator::alloc(); } -RGWBucketInstanceMetadataHandlerBase* RGWSyncModuleInstance::alloc_bucket_instance_meta_handler(rgw::sal::Store* store) +RGWBucketInstanceMetadataHandlerBase* RGWSyncModuleInstance::alloc_bucket_instance_meta_handler(rgw::sal::Driver* driver) { - return RGWBucketInstanceMetaHandlerAllocator::alloc(store); + return RGWBucketInstanceMetaHandlerAllocator::alloc(driver); } RGWStatRemoteObjCBCR::RGWStatRemoteObjCBCR(RGWDataSyncCtx *_sc, @@ -41,7 +41,7 @@ RGWCallStatRemoteObjCR::RGWCallStatRemoteObjCR(RGWDataSyncCtx *_sc, int RGWCallStatRemoteObjCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { yield { - call(new RGWStatRemoteObjCR(sync_env->async_rados, sync_env->store, + call(new RGWStatRemoteObjCR(sync_env->async_rados, sync_env->driver, sc->source_zone, src_bucket, key, &mtime, &size, &etag, &attrs, &headers)); } diff --git a/src/rgw/store/rados/rgw_sync_module.h b/src/rgw/store/rados/rgw_sync_module.h index 
f07a539523435..6d974c39a274a 100644 --- a/src/rgw/store/rados/rgw_sync_module.h +++ b/src/rgw/store/rados/rgw_sync_module.h @@ -53,7 +53,7 @@ public: return false; } virtual RGWMetadataHandler *alloc_bucket_meta_handler(); - virtual RGWBucketInstanceMetadataHandlerBase *alloc_bucket_instance_meta_handler(rgw::sal::Store* store); + virtual RGWBucketInstanceMetadataHandlerBase *alloc_bucket_instance_meta_handler(rgw::sal::Driver* driver); // indication whether the sync module start with full sync (default behavior) // incremental sync would follow anyway diff --git a/src/rgw/store/rados/rgw_sync_module_aws.cc b/src/rgw/store/rados/rgw_sync_module_aws.cc index 18aab89fe8aea..6827f7f3a1a80 100644 --- a/src/rgw/store/rados/rgw_sync_module_aws.cc +++ b/src/rgw/store/rados/rgw_sync_module_aws.cc @@ -1388,7 +1388,7 @@ public: ldpp_dout(dpp, 0) << "ERROR: failed to abort multipart upload dest obj=" << dest_obj << " upload_id=" << upload_id << " retcode=" << retcode << dendl; /* ignore error, best effort */ } - yield call(new RGWRadosRemoveCR(sc->env->store, status_obj)); + yield call(new RGWRadosRemoveCR(sc->env->driver, status_obj)); if (retcode < 0) { ldpp_dout(dpp, 0) << "ERROR: failed to remove sync status obj obj=" << status_obj << " retcode=" << retcode << dendl; /* ignore error, best effort */ @@ -1532,7 +1532,7 @@ public: } /* remove status obj */ - yield call(new RGWRadosRemoveCR(sync_env->store, status_obj)); + yield call(new RGWRadosRemoveCR(sync_env->driver, status_obj)); if (retcode < 0) { ldpp_dout(dpp, 0) << "ERROR: failed to abort multipart upload obj=" << src_obj << " upload_id=" << status.upload_id << " part number " << status.cur_part << " (" << cpp_strerror(-retcode) << ")" << dendl; /* ignore error, best effort */ @@ -1679,14 +1679,14 @@ public: } yield { - bucket.reset(new rgw::sal::RadosBucket(sync_env->store, src_bucket)); - src_obj.reset(new rgw::sal::RadosObject(sync_env->store, key, bucket.get())); + bucket.reset(new 
rgw::sal::RadosBucket(sync_env->driver, src_bucket)); + src_obj.reset(new rgw::sal::RadosObject(sync_env->driver, key, bucket.get())); /* init output */ target_bucket.name = target_bucket_name; /* this is only possible because we only use bucket name for uri resolution */ - dest_bucket.reset(new rgw::sal::RadosBucket(sync_env->store, target_bucket)); - dest_obj.reset(new rgw::sal::RadosObject(sync_env->store, rgw_obj_key(target_obj_name), dest_bucket.get())); + dest_bucket.reset(new rgw::sal::RadosBucket(sync_env->driver, target_bucket)); + dest_obj.reset(new rgw::sal::RadosObject(sync_env->driver, rgw_obj_key(target_obj_name), dest_bucket.get())); rgw_sync_aws_src_obj_properties src_properties; src_properties.mtime = mtime; diff --git a/src/rgw/store/rados/rgw_sync_module_es.cc b/src/rgw/store/rados/rgw_sync_module_es.cc index bba54660894b5..3c294bbbc193e 100644 --- a/src/rgw/store/rados/rgw_sync_module_es.cc +++ b/src/rgw/store/rados/rgw_sync_module_es.cc @@ -178,7 +178,7 @@ struct ElasticConfig { void init(CephContext *cct, const JSONFormattable& config) { string elastic_endpoint = config["endpoint"]; id = string("elastic:") + elastic_endpoint; - conn.reset(new RGWRESTConn(cct, (rgw::sal::Store*)nullptr, id, { elastic_endpoint }, nullopt /* region */ )); + conn.reset(new RGWRESTConn(cct, (rgw::sal::Driver*)nullptr, id, { elastic_endpoint }, nullopt /* region */ )); explicit_custom_meta = config["explicit_custom_meta"](true); index_buckets.init(config["index_buckets_list"], true); /* approve all buckets by default */ allow_owners.init(config["approved_owners_list"], true); /* approve all bucket owners by default */ diff --git a/src/rgw/store/rados/rgw_sync_module_es_rest.cc b/src/rgw/store/rados/rgw_sync_module_es_rest.cc index e0f5d19e77a37..db9d48adb366f 100644 --- a/src/rgw/store/rados/rgw_sync_module_es_rest.cc +++ b/src/rgw/store/rados/rgw_sync_module_es_rest.cc @@ -383,7 +383,7 @@ class RGWHandler_REST_MDSearch_S3 : public RGWHandler_REST_S3 { protected: 
RGWOp *op_get() override { if (s->info.args.exists("query")) { - return new RGWMetadataSearch_ObjStore_S3(store->get_sync_module()); + return new RGWMetadataSearch_ObjStore_S3(driver->get_sync_module()); } if (!s->init_state.url_bucket.empty() && s->info.args.exists("mdsearch")) { @@ -403,13 +403,13 @@ public: }; -RGWHandler_REST* RGWRESTMgr_MDSearch_S3::get_handler(rgw::sal::Store* store, +RGWHandler_REST* RGWRESTMgr_MDSearch_S3::get_handler(rgw::sal::Driver* driver, req_state* const s, const rgw::auth::StrategyRegistry& auth_registry, const std::string& frontend_prefix) { int ret = - RGWHandler_REST_S3::init_from_header(store, s, + RGWHandler_REST_S3::init_from_header(driver, s, RGWFormat::XML, true); if (ret < 0) { return nullptr; diff --git a/src/rgw/store/rados/rgw_sync_module_es_rest.h b/src/rgw/store/rados/rgw_sync_module_es_rest.h index f9fba879f0a36..b18271a69cde1 100644 --- a/src/rgw/store/rados/rgw_sync_module_es_rest.h +++ b/src/rgw/store/rados/rgw_sync_module_es_rest.h @@ -11,7 +11,7 @@ class RGWRESTMgr_MDSearch_S3 : public RGWRESTMgr { public: explicit RGWRESTMgr_MDSearch_S3() {} - RGWHandler_REST *get_handler(rgw::sal::Store* store, + RGWHandler_REST *get_handler(rgw::sal::Driver* driver, req_state* s, const rgw::auth::StrategyRegistry& auth_registry, const std::string& frontend_prefix) override; diff --git a/src/rgw/store/rados/rgw_tools.cc b/src/rgw/store/rados/rgw_tools.cc index a990b2064c9f2..5a8aefaac3d31 100644 --- a/src/rgw/store/rados/rgw_tools.cc +++ b/src/rgw/store/rados/rgw_tools.cc @@ -237,7 +237,7 @@ void rgw_filter_attrset(map& unfiltered_attrset, const strin } } -RGWDataAccess::RGWDataAccess(rgw::sal::Store* _store) : store(_store) +RGWDataAccess::RGWDataAccess(rgw::sal::Driver* _driver) : driver(_driver) { } @@ -262,7 +262,7 @@ int RGWDataAccess::Bucket::finish_init() int RGWDataAccess::Bucket::init(const DoutPrefixProvider *dpp, optional_yield y) { std::unique_ptr bucket; - int ret = sd->store->get_bucket(dpp, nullptr, tenant, name, 
&bucket, y); + int ret = sd->driver->get_bucket(dpp, nullptr, tenant, name, &bucket, y); if (ret < 0) { return ret; } @@ -294,26 +294,26 @@ int RGWDataAccess::Object::put(bufferlist& data, const DoutPrefixProvider *dpp, optional_yield y) { - rgw::sal::Store* store = sd->store; - CephContext *cct = store->ctx(); + rgw::sal::Driver* driver = sd->driver; + CephContext *cct = driver->ctx(); string tag; append_rand_alpha(cct, tag, tag, 32); RGWBucketInfo& bucket_info = bucket->bucket_info; - rgw::BlockingAioThrottle aio(store->ctx()->_conf->rgw_put_obj_min_window_size); + rgw::BlockingAioThrottle aio(driver->ctx()->_conf->rgw_put_obj_min_window_size); std::unique_ptr b; - store->get_bucket(NULL, bucket_info, &b); + driver->get_bucket(NULL, bucket_info, &b); std::unique_ptr obj = b->get_object(key); auto& owner = bucket->policy.get_owner(); - string req_id = store->zone_unique_id(store->get_new_req_id()); + string req_id = driver->zone_unique_id(driver->get_new_req_id()); std::unique_ptr processor; - processor = store->get_atomic_writer(dpp, y, std::move(obj), + processor = driver->get_atomic_writer(dpp, y, std::move(obj), owner.get_id(), nullptr, olh_epoch, req_id); @@ -326,14 +326,14 @@ int RGWDataAccess::Object::put(bufferlist& data, CompressorRef plugin; boost::optional compressor; - const auto& compression_type = store->get_compression_type(bucket_info.placement_rule); + const auto& compression_type = driver->get_compression_type(bucket_info.placement_rule); if (compression_type != "none") { - plugin = Compressor::create(store->ctx(), compression_type); + plugin = Compressor::create(driver->ctx(), compression_type); if (!plugin) { ldpp_dout(dpp, 1) << "Cannot load plugin for compression type " << compression_type << dendl; } else { - compressor.emplace(store->ctx(), plugin, filter); + compressor.emplace(driver->ctx(), plugin, filter); filter = &*compressor; } } diff --git a/src/rgw/store/rados/rgw_tools.h b/src/rgw/store/rados/rgw_tools.h index 
7ce74f6209bbe..6aeb9b8910058 100644 --- a/src/rgw/store/rados/rgw_tools.h +++ b/src/rgw/store/rados/rgw_tools.h @@ -151,10 +151,10 @@ using RGWMD5Etag = RGWEtag; class RGWDataAccess { - rgw::sal::Store* store; + rgw::sal::Driver* driver; public: - RGWDataAccess(rgw::sal::Store* _store); + RGWDataAccess(rgw::sal::Driver* _driver); class Object; class Bucket; diff --git a/src/rgw/store/rados/rgw_user.cc b/src/rgw/store/rados/rgw_user.cc index 27a0dfd8fa29f..7c36a52e31d55 100644 --- a/src/rgw/store/rados/rgw_user.cc +++ b/src/rgw/store/rados/rgw_user.cc @@ -225,7 +225,7 @@ RGWAccessKeyPool::RGWAccessKeyPool(RGWUser* usr) user = usr; - store = user->get_store(); + driver = user->get_driver(); } int RGWAccessKeyPool::init(RGWUserAdminOpState& op_state) @@ -249,9 +249,9 @@ int RGWAccessKeyPool::init(RGWUserAdminOpState& op_state) return 0; } -RGWUserAdminOpState::RGWUserAdminOpState(rgw::sal::Store* store) +RGWUserAdminOpState::RGWUserAdminOpState(rgw::sal::Driver* driver) { - user = store->get_user(rgw_user(RGW_USER_ANON_ID)); + user = driver->get_user(rgw_user(RGW_USER_ANON_ID)); } void RGWUserAdminOpState::set_user_id(const rgw_user& id) @@ -504,13 +504,13 @@ int RGWAccessKeyPool::generate_key(const DoutPrefixProvider *dpp, RGWUserAdminOp if (!id.empty()) { switch (key_type) { case KEY_TYPE_SWIFT: - if (store->get_user_by_swift(dpp, id, y, &duplicate_check) >= 0) { + if (driver->get_user_by_swift(dpp, id, y, &duplicate_check) >= 0) { set_err_msg(err_msg, "existing swift key in RGW system:" + id); return -ERR_KEY_EXIST; } break; case KEY_TYPE_S3: - if (store->get_user_by_access_key(dpp, id, y, &duplicate_check) >= 0) { + if (driver->get_user_by_access_key(dpp, id, y, &duplicate_check) >= 0) { set_err_msg(err_msg, "existing S3 key in RGW system:" + id); return -ERR_KEY_EXIST; } @@ -550,7 +550,7 @@ int RGWAccessKeyPool::generate_key(const DoutPrefixProvider *dpp, RGWUserAdminOp if (!validate_access_key(id)) continue; - } while (!store->get_user_by_access_key(dpp, id, y, 
&duplicate_check)); + } while (!driver->get_user_by_access_key(dpp, id, y, &duplicate_check)); } if (key_type == KEY_TYPE_SWIFT) { @@ -561,7 +561,7 @@ int RGWAccessKeyPool::generate_key(const DoutPrefixProvider *dpp, RGWUserAdminOp } // check that the access key doesn't exist - if (store->get_user_by_swift(dpp, id, y, &duplicate_check) >= 0) { + if (driver->get_user_by_swift(dpp, id, y, &duplicate_check) >= 0) { set_err_msg(err_msg, "cannot create existing swift key"); return -ERR_KEY_EXIST; } @@ -869,7 +869,7 @@ RGWSubUserPool::RGWSubUserPool(RGWUser *usr) user = usr; subusers_allowed = true; - store = user->get_store(); + driver = user->get_driver(); } int RGWSubUserPool::init(RGWUserAdminOpState& op_state) @@ -1294,11 +1294,11 @@ RGWUser::RGWUser() : caps(this), keys(this), subusers(this) init_default(); } -int RGWUser::init(const DoutPrefixProvider *dpp, rgw::sal::Store* storage, +int RGWUser::init(const DoutPrefixProvider *dpp, rgw::sal::Driver* _driver, RGWUserAdminOpState& op_state, optional_yield y) { init_default(); - int ret = init_storage(storage); + int ret = init_storage(_driver); if (ret < 0) return ret; @@ -1318,13 +1318,13 @@ void RGWUser::init_default() clear_populated(); } -int RGWUser::init_storage(rgw::sal::Store* storage) +int RGWUser::init_storage(rgw::sal::Driver* _driver) { - if (!storage) { + if (!_driver) { return -EINVAL; } - store = storage; + driver = _driver; clear_populated(); @@ -1364,22 +1364,22 @@ int RGWUser::init(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, } if (!user_id.empty() && (user_id.compare(RGW_USER_ANON_ID) != 0)) { - user = store->get_user(user_id); + user = driver->get_user(user_id); found = (user->load_user(dpp, y) >= 0); op_state.found_by_uid = found; } - if (store->ctx()->_conf.get_val("rgw_user_unique_email")) { + if (driver->ctx()->_conf.get_val("rgw_user_unique_email")) { if (!user_email.empty() && !found) { - found = (store->get_user_by_email(dpp, user_email, y, &user) >= 0); + found = 
(driver->get_user_by_email(dpp, user_email, y, &user) >= 0); op_state.found_by_email = found; } } if (!swift_user.empty() && !found) { - found = (store->get_user_by_swift(dpp, swift_user, y, &user) >= 0); + found = (driver->get_user_by_swift(dpp, swift_user, y, &user) >= 0); op_state.found_by_key = found; } if (!access_key.empty() && !found) { - found = (store->get_user_by_access_key(dpp, access_key, y, &user) >= 0); + found = (driver->get_user_by_access_key(dpp, access_key, y, &user) >= 0); op_state.found_by_key = found; } @@ -1433,7 +1433,7 @@ int RGWUser::update(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state std::string subprocess_msg; rgw::sal::User* user = op_state.get_user(); - if (!store) { + if (!driver) { set_err_msg(err_msg, "couldn't initialize storage"); return -EINVAL; } @@ -1521,8 +1521,8 @@ int RGWUser::execute_rename(const DoutPrefixProvider *dpp, RGWUserAdminOpState& } } - std::unique_ptr old_user = store->get_user(op_state.get_user_info().user_id); - std::unique_ptr new_user = store->get_user(op_state.get_new_uid()); + std::unique_ptr old_user = driver->get_user(op_state.get_user_info().user_id); + std::unique_ptr new_user = driver->get_user(op_state.get_new_uid()); if (old_user->get_tenant() != new_user->get_tenant()) { set_err_msg(err_msg, "users have to be under the same tenant namespace " + old_user->get_tenant() + " != " + new_user->get_tenant()); @@ -1531,7 +1531,7 @@ int RGWUser::execute_rename(const DoutPrefixProvider *dpp, RGWUserAdminOpState& // create a stub user and write only the uid index and buckets object std::unique_ptr user; - user = store->get_user(new_user->get_id()); + user = driver->get_user(new_user->get_id()); const bool exclusive = !op_state.get_overwrite_new_user(); // overwrite if requested @@ -1550,7 +1550,7 @@ int RGWUser::execute_rename(const DoutPrefixProvider *dpp, RGWUserAdminOpState& //unlink and link buckets to new user string marker; - CephContext *cct = store->ctx(); + CephContext *cct = 
driver->ctx(); size_t max_buckets = cct->_conf->rgw_list_buckets_max_chunk; rgw::sal::BucketList buckets; @@ -1617,7 +1617,7 @@ int RGWUser::execute_add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_ if (!user_email.empty()) user_info.user_email = user_email; - CephContext *cct = store->ctx(); + CephContext *cct = driver->ctx(); if (op_state.max_buckets_specified) { user_info.max_buckets = op_state.get_max_buckets(); } else { @@ -1756,7 +1756,7 @@ int RGWUser::execute_remove(const DoutPrefixProvider *dpp, RGWUserAdminOpState& rgw::sal::BucketList buckets; string marker; - CephContext *cct = store->ctx(); + CephContext *cct = driver->ctx(); size_t max_buckets = cct->_conf->rgw_list_buckets_max_chunk; do { ret = user->list_buckets(dpp, marker, string(), max_buckets, false, buckets, y); @@ -1853,7 +1853,7 @@ int RGWUser::execute_modify(const DoutPrefixProvider *dpp, RGWUserAdminOpState& if (!op_email.empty()) { // make sure we are not adding a duplicate email if (old_email != op_email) { - ret = store->get_user_by_email(dpp, op_email, y, &duplicate_check); + ret = driver->get_user_by_email(dpp, op_email, y, &duplicate_check); if (ret >= 0 && duplicate_check->get_id().compare(user_id) != 0) { set_err_msg(err_msg, "cannot add duplicate email"); return -ERR_EMAIL_EXIST; @@ -1908,9 +1908,9 @@ int RGWUser::execute_modify(const DoutPrefixProvider *dpp, RGWUserAdminOpState& } string marker; - CephContext *cct = store->ctx(); + CephContext *cct = driver->ctx(); size_t max_buckets = cct->_conf->rgw_list_buckets_max_chunk; - std::unique_ptr user = store->get_user(user_id); + std::unique_ptr user = driver->get_user(user_id); do { ret = user->list_buckets(dpp, marker, string(), max_buckets, false, buckets, y); if (ret < 0) { @@ -1928,7 +1928,7 @@ int RGWUser::execute_modify(const DoutPrefixProvider *dpp, RGWUserAdminOpState& marker = iter->first; } - ret = store->set_buckets_enabled(dpp, bucket_names, !suspended); + ret = driver->set_buckets_enabled(dpp, bucket_names, 
!suspended); if (ret < 0) { set_err_msg(err_msg, "failed to modify bucket"); return ret; @@ -2022,7 +2022,7 @@ int RGWUser::list(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, op_state.max_entries = 1000; } - int ret = store->meta_list_keys_init(dpp, metadata_key, op_state.marker, &handle); + int ret = driver->meta_list_keys_init(dpp, metadata_key, op_state.marker, &handle); if (ret < 0) { return ret; } @@ -2040,7 +2040,7 @@ int RGWUser::list(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, do { std::list keys; left = op_state.max_entries - count; - ret = store->meta_list_keys_next(dpp, handle, left, keys, &truncated); + ret = driver->meta_list_keys_next(dpp, handle, left, keys, &truncated); if (ret < 0 && ret != -ENOENT) { return ret; } if (ret != -ENOENT) { @@ -2056,24 +2056,24 @@ int RGWUser::list(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, formatter->dump_bool("truncated", truncated); formatter->dump_int("count", count); if (truncated) { - formatter->dump_string("marker", store->meta_get_marker(handle)); + formatter->dump_string("marker", driver->meta_get_marker(handle)); } // close result object section formatter->close_section(); - store->meta_list_keys_complete(handle); + driver->meta_list_keys_complete(handle); flusher.flush(); return 0; } -int RGWUserAdminOp_User::list(const DoutPrefixProvider *dpp, rgw::sal::Store* store, RGWUserAdminOpState& op_state, +int RGWUserAdminOp_User::list(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher) { RGWUser user; - int ret = user.init_storage(store); + int ret = user.init_storage(driver); if (ret < 0) return ret; @@ -2085,7 +2085,7 @@ int RGWUserAdminOp_User::list(const DoutPrefixProvider *dpp, rgw::sal::Store* st } int RGWUserAdminOp_User::info(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, RGWUserAdminOpState& op_state, + rgw::sal::Driver* driver, RGWUserAdminOpState& op_state, 
RGWFormatterFlusher& flusher, optional_yield y) { @@ -2093,7 +2093,7 @@ int RGWUserAdminOp_User::info(const DoutPrefixProvider *dpp, RGWUser user; std::unique_ptr ruser; - int ret = user.init(dpp, store, op_state, y); + int ret = user.init(dpp, driver, op_state, y); if (ret < 0) return ret; @@ -2106,10 +2106,10 @@ int RGWUserAdminOp_User::info(const DoutPrefixProvider *dpp, if (ret < 0) return ret; - ruser = store->get_user(info.user_id); + ruser = driver->get_user(info.user_id); if (op_state.sync_stats) { - ret = rgw_user_sync_all_stats(dpp, store, ruser.get(), y); + ret = rgw_user_sync_all_stats(dpp, driver, ruser.get(), y); if (ret < 0) { return ret; } @@ -2137,13 +2137,13 @@ int RGWUserAdminOp_User::info(const DoutPrefixProvider *dpp, } int RGWUserAdminOp_User::create(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y) { RGWUserInfo info; RGWUser user; - int ret = user.init(dpp, store, op_state, y); + int ret = user.init(dpp, driver, op_state, y); if (ret < 0) return ret; @@ -2171,13 +2171,13 @@ int RGWUserAdminOp_User::create(const DoutPrefixProvider *dpp, } int RGWUserAdminOp_User::modify(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y) { RGWUserInfo info; RGWUser user; - int ret = user.init(dpp, store, op_state, y); + int ret = user.init(dpp, driver, op_state, y); if (ret < 0) return ret; Formatter *formatter = flusher.get_formatter(); @@ -2204,12 +2204,12 @@ int RGWUserAdminOp_User::modify(const DoutPrefixProvider *dpp, } int RGWUserAdminOp_User::remove(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, RGWUserAdminOpState& op_state, + rgw::sal::Driver* driver, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y) { RGWUserInfo info; RGWUser user; - int ret = user.init(dpp, store, op_state, y); + int ret 
= user.init(dpp, driver, op_state, y); if (ret < 0) return ret; @@ -2222,14 +2222,14 @@ int RGWUserAdminOp_User::remove(const DoutPrefixProvider *dpp, } int RGWUserAdminOp_Subuser::create(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y) { RGWUserInfo info; RGWUser user; - int ret = user.init(dpp, store, op_state, y); + int ret = user.init(dpp, driver, op_state, y); if (ret < 0) return ret; @@ -2257,12 +2257,12 @@ int RGWUserAdminOp_Subuser::create(const DoutPrefixProvider *dpp, } int RGWUserAdminOp_Subuser::modify(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, RGWUserAdminOpState& op_state, + rgw::sal::Driver* driver, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y) { RGWUserInfo info; RGWUser user; - int ret = user.init(dpp, store, op_state, y); + int ret = user.init(dpp, driver, op_state, y); if (ret < 0) return ret; @@ -2290,14 +2290,14 @@ int RGWUserAdminOp_Subuser::modify(const DoutPrefixProvider *dpp, } int RGWUserAdminOp_Subuser::remove(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y) { RGWUserInfo info; RGWUser user; - int ret = user.init(dpp, store, op_state, y); + int ret = user.init(dpp, driver, op_state, y); if (ret < 0) return ret; @@ -2313,13 +2313,13 @@ int RGWUserAdminOp_Subuser::remove(const DoutPrefixProvider *dpp, } int RGWUserAdminOp_Key::create(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, RGWUserAdminOpState& op_state, + rgw::sal::Driver* driver, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y) { RGWUserInfo info; RGWUser user; - int ret = user.init(dpp, store, op_state, y); + int ret = user.init(dpp, driver, op_state, y); if (ret < 0) return ret; @@ -2354,14 +2354,14 @@ int RGWUserAdminOp_Key::create(const DoutPrefixProvider *dpp, } int 
RGWUserAdminOp_Key::remove(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y) { RGWUserInfo info; RGWUser user; - int ret = user.init(dpp, store, op_state, y); + int ret = user.init(dpp, driver, op_state, y); if (ret < 0) return ret; @@ -2377,13 +2377,13 @@ int RGWUserAdminOp_Key::remove(const DoutPrefixProvider *dpp, } int RGWUserAdminOp_Caps::add(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y) { RGWUserInfo info; RGWUser user; - int ret = user.init(dpp, store, op_state, y); + int ret = user.init(dpp, driver, op_state, y); if (ret < 0) return ret; @@ -2412,13 +2412,13 @@ int RGWUserAdminOp_Caps::add(const DoutPrefixProvider *dpp, int RGWUserAdminOp_Caps::remove(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y) { RGWUserInfo info; RGWUser user; - int ret = user.init(dpp, store, op_state, y); + int ret = user.init(dpp, driver, op_state, y); if (ret < 0) return ret; diff --git a/src/rgw/store/rados/rgw_user.h b/src/rgw/store/rados/rgw_user.h index 3f43209b26e1f..110124cdbc78d 100644 --- a/src/rgw/store/rados/rgw_user.h +++ b/src/rgw/store/rados/rgw_user.h @@ -63,9 +63,9 @@ struct bucket_meta_entry { uint64_t count; }; -extern int rgw_user_sync_all_stats(const DoutPrefixProvider *dpp, rgw::sal::Store* store, rgw::sal::User* user, optional_yield y); +extern int rgw_user_sync_all_stats(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, rgw::sal::User* user, optional_yield y); extern int rgw_user_get_all_buckets_stats(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, rgw::sal::User* user, + rgw::sal::Driver* driver, rgw::sal::User* user, std::map& buckets_usage_map, optional_yield y); /** @@ -430,7 +430,7 @@ struct 
RGWUserAdminOpState { std::string generate_subuser(); - RGWUserAdminOpState(rgw::sal::Store* store); + RGWUserAdminOpState(rgw::sal::Driver* driver); }; class RGWUser; @@ -441,7 +441,7 @@ class RGWAccessKeyPool std::map key_type_map; rgw_user user_id; - rgw::sal::Store* store{nullptr}; + rgw::sal::Driver* driver{nullptr}; std::map *swift_keys{nullptr}; std::map *access_keys{nullptr}; @@ -494,7 +494,7 @@ class RGWSubUserPool RGWUser *user{nullptr}; rgw_user user_id; - rgw::sal::Store* store{nullptr}; + rgw::sal::Driver* driver{nullptr}; bool subusers_allowed{false}; std::map *subuser_map{nullptr}; @@ -556,7 +556,7 @@ class RGWUser private: RGWUserInfo old_info; - rgw::sal::Store* store{nullptr}; + rgw::sal::Driver* driver{nullptr}; rgw_user user_id; bool info_stored{false}; @@ -582,14 +582,14 @@ private: public: RGWUser(); - int init(const DoutPrefixProvider *dpp, rgw::sal::Store* storage, RGWUserAdminOpState& op_state, + int init(const DoutPrefixProvider *dpp, rgw::sal::Driver* storage, RGWUserAdminOpState& op_state, optional_yield y); - int init_storage(rgw::sal::Store* storage); + int init_storage(rgw::sal::Driver* storage); int init(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y); int init_members(RGWUserAdminOpState& op_state); - rgw::sal::Store* get_store() { return store; } + rgw::sal::Driver* get_driver() { return driver; } /* API Contracted Members */ RGWUserCapPool caps; @@ -628,24 +628,24 @@ public: class RGWUserAdminOp_User { public: - static int list(const DoutPrefixProvider *dpp, rgw::sal::Store* store, + static int list(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher); static int info(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y); static int create(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, 
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y); static int modify(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y); - static int remove(const DoutPrefixProvider *dpp, rgw::sal::Store* store, + static int remove(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y); }; @@ -653,17 +653,17 @@ class RGWUserAdminOp_Subuser { public: static int create(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y); static int modify(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y); static int remove(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y); }; @@ -671,12 +671,12 @@ public: class RGWUserAdminOp_Key { public: - static int create(const DoutPrefixProvider *dpp, rgw::sal::Store* store, + static int create(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y); static int remove(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y); }; @@ -685,12 +685,12 @@ class RGWUserAdminOp_Caps { public: static int add(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y); static int remove(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, 
optional_yield y); }; diff --git a/src/rgw/store/rados/rgw_zone.cc b/src/rgw/store/rados/rgw_zone.cc index 4fa6a52c91241..d9e750541d7c2 100644 --- a/src/rgw/store/rados/rgw_zone.cc +++ b/src/rgw/store/rados/rgw_zone.cc @@ -705,13 +705,13 @@ int update_period(const DoutPrefixProvider* dpp, optional_yield y, } int commit_period(const DoutPrefixProvider* dpp, optional_yield y, - sal::ConfigStore* cfgstore, sal::Store* store, + sal::ConfigStore* cfgstore, sal::Driver* driver, RGWRealm& realm, sal::RealmWriter& realm_writer, const RGWPeriod& current_period, RGWPeriod& info, std::ostream& error_stream, bool force_if_stale) { - auto zone_svc = static_cast(store)->svc()->zone; // XXX + auto zone_svc = static_cast(driver)->svc()->zone; // XXX ldpp_dout(dpp, 20) << __func__ << " realm " << realm.id << " period " << current_period.id << dendl; @@ -742,7 +742,7 @@ int commit_period(const DoutPrefixProvider* dpp, optional_yield y, // did the master zone change? if (info.master_zone != current_period.master_zone) { // store the current metadata sync status in the period - int r = info.update_sync_status(dpp, store, current_period, + int r = info.update_sync_status(dpp, driver, current_period, error_stream, force_if_stale); if (r < 0) { ldpp_dout(dpp, 0) << "failed to update metadata sync status: " diff --git a/src/rgw/store/rados/rgw_zone.h b/src/rgw/store/rados/rgw_zone.h index da1400c5432b1..e1792a40ccef1 100644 --- a/src/rgw/store/rados/rgw_zone.h +++ b/src/rgw/store/rados/rgw_zone.h @@ -1241,7 +1241,7 @@ public: // gather the metadata sync status for each shard; only for use on master zone int update_sync_status(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, const RGWPeriod ¤t_period, std::ostream& error_stream, bool force_if_stale); @@ -1343,7 +1343,7 @@ public: // commit a staging period; only for use on master zone int commit(const DoutPrefixProvider *dpp, - rgw::sal::Store* store, + rgw::sal::Driver* driver, RGWRealm& realm, const 
RGWPeriod ¤t_period, std::ostream& error_stream, optional_yield y, bool force_if_stale = false); @@ -1438,7 +1438,7 @@ int update_period(const DoutPrefixProvider* dpp, optional_yield y, /// Validates the given 'staging' period and tries to commit it as the /// realm's new current period. int commit_period(const DoutPrefixProvider* dpp, optional_yield y, - sal::ConfigStore* cfgstore, sal::Store* store, + sal::ConfigStore* cfgstore, sal::Driver* driver, RGWRealm& realm, sal::RealmWriter& realm_writer, const RGWPeriod& current_period, RGWPeriod& info, std::ostream& error_stream, diff --git a/src/test/librgw_file_nfsns.cc b/src/test/librgw_file_nfsns.cc index 911401f61e6ca..af6e723216d91 100644 --- a/src/test/librgw_file_nfsns.cc +++ b/src/test/librgw_file_nfsns.cc @@ -261,7 +261,7 @@ TEST(LibRGW, SETUP_HIER1) << std::endl; } RGWPutObjRequest req(cct, - g_rgwlib->get_store()->get_user(fs_private->get_user()->user_id), + g_rgwlib->get_driver()->get_user(fs_private->get_user()->user_id), bucket_name, obj_name, bl); int rc = g_rgwlib->get_fe()->execute_req(&req); int rc2 = req.get_ret(); -- 2.39.5