return env->get(header, NULL);
}
-static int parse_grantee_str(const DoutPrefixProvider *dpp, rgw::sal::Store* store, string& grantee_str,
+static int parse_grantee_str(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, string& grantee_str,
const struct s3_acl_header *perm, ACLGrant& grant)
{
string id_type, id_val_quoted;
if (strcasecmp(id_type.c_str(), "emailAddress") == 0) {
std::unique_ptr<rgw::sal::User> user;
- ret = store->get_user_by_email(dpp, id_val, null_yield, &user);
+ ret = driver->get_user_by_email(dpp, id_val, null_yield, &user);
if (ret < 0)
return ret;
grant.set_canon(user->get_id(), user->get_display_name(), rgw_perm);
} else if (strcasecmp(id_type.c_str(), "id") == 0) {
- std::unique_ptr<rgw::sal::User> user = store->get_user(rgw_user(id_val));
+ std::unique_ptr<rgw::sal::User> user = driver->get_user(rgw_user(id_val));
ret = user->load_user(dpp, null_yield);
if (ret < 0)
return ret;
return 0;
}
-static int parse_acl_header(const DoutPrefixProvider *dpp, rgw::sal::Store* store,
+static int parse_acl_header(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver,
const RGWEnv *env, const struct s3_acl_header *perm,
std::list<ACLGrant>& _grants)
{
for (list<string>::iterator it = grantees.begin(); it != grantees.end(); ++it) {
ACLGrant grant;
- int ret = parse_grantee_str(dpp, store, *it, perm, grant);
+ int ret = parse_grantee_str(dpp, driver, *it, perm, grant);
if (ret < 0)
return ret;
};
int RGWAccessControlPolicy_S3::create_from_headers(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
const RGWEnv *env, ACLOwner& _owner)
{
std::list<ACLGrant> grants;
int r = 0;
for (const struct s3_acl_header *p = acl_header_perms; p->rgw_perm; p++) {
- r = parse_acl_header(dpp, store, env, p, grants);
+ r = parse_acl_header(dpp, driver, env, p, grants);
if (r < 0) {
return r;
}
can only be called on object that was parsed
*/
int RGWAccessControlPolicy_S3::rebuild(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store, ACLOwner *owner,
+ rgw::sal::Driver* driver, ACLOwner *owner,
RGWAccessControlPolicy& dest, std::string &err_msg)
{
if (!owner)
return -EPERM;
}
- std::unique_ptr<rgw::sal::User> user = store->get_user(owner->get_id());
+ std::unique_ptr<rgw::sal::User> user = driver->get_user(owner->get_id());
if (user->load_user(dpp, null_yield) < 0) {
ldpp_dout(dpp, 10) << "owner info does not exist" << dendl;
err_msg = "Invalid id";
}
email = u.id;
ldpp_dout(dpp, 10) << "grant user email=" << email << dendl;
- if (store->get_user_by_email(dpp, email, null_yield, &user) < 0) {
+ if (driver->get_user_by_email(dpp, email, null_yield, &user) < 0) {
ldpp_dout(dpp, 10) << "grant user email not found or other error" << dendl;
err_msg = "The e-mail address you provided does not match any account on record.";
return -ERR_UNRESOLVABLE_EMAIL;
}
if (grant_user.user_id.empty()) {
- user = store->get_user(uid);
+ user = driver->get_user(uid);
if (user->load_user(dpp, null_yield) < 0) {
ldpp_dout(dpp, 10) << "grant user does not exist:" << uid << dendl;
err_msg = "Invalid id";
bool xml_end(const char *el) override;
void to_xml(std::ostream& out);
- int rebuild(const DoutPrefixProvider *dpp, rgw::sal::Store* store, ACLOwner *owner,
+ int rebuild(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, ACLOwner *owner,
RGWAccessControlPolicy& dest, std::string &err_msg);
bool compare_group_name(std::string& id, ACLGroupTypeEnum group) override;
int ret = _acl.create_canned(owner, bucket_owner, canned_acl);
return ret;
}
- int create_from_headers(const DoutPrefixProvider *dpp, rgw::sal::Store* store,
+ int create_from_headers(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver,
const RGWEnv *env, ACLOwner& _owner);
};
static ACLGrant user_to_grant(const DoutPrefixProvider *dpp,
CephContext* const cct,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
const std::string& uid,
const uint32_t perm)
{
ACLGrant grant;
std::unique_ptr<rgw::sal::User> user;
- user = store->get_user(rgw_user(uid));
+ user = driver->get_user(rgw_user(uid));
if (user->load_user(dpp, null_yield) < 0) {
ldpp_dout(dpp, 10) << "grant user does not exist: " << uid << dendl;
/* skipping silently */
}
int RGWAccessControlPolicy_SWIFT::add_grants(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
const std::vector<std::string>& uids,
const uint32_t perm)
{
const size_t pos = uid.find(':');
if (std::string::npos == pos) {
/* No, it don't have -- we've got just a regular user identifier. */
- grant = user_to_grant(dpp, cct, store, uid, perm);
+ grant = user_to_grant(dpp, cct, driver, uid, perm);
} else {
/* Yes, *potentially* an HTTP referral. */
auto designator = uid.substr(0, pos);
boost::algorithm::trim(designatee);
if (! boost::algorithm::starts_with(designator, ".")) {
- grant = user_to_grant(dpp, cct, store, uid, perm);
+ grant = user_to_grant(dpp, cct, driver, uid, perm);
} else if ((perm & SWIFT_PERM_WRITE) == 0 && is_referrer(designator)) {
/* HTTP referrer-based ACLs aren't acceptable for writes. */
grant = referrer_to_grant(designatee, perm);
int RGWAccessControlPolicy_SWIFT::create(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
const rgw_user& id,
const std::string& name,
const char* read_list,
return r;
}
- r = add_grants(dpp, store, uids, SWIFT_PERM_READ);
+ r = add_grants(dpp, driver, uids, SWIFT_PERM_READ);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: add_grants for read returned r="
<< r << dendl;
return r;
}
- r = add_grants(dpp, store, uids, SWIFT_PERM_WRITE);
+ r = add_grants(dpp, driver, uids, SWIFT_PERM_WRITE);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: add_grants for write returned r="
<< r << dendl;
}
void RGWAccessControlPolicy_SWIFTAcct::add_grants(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
const std::vector<std::string>& uids,
const uint32_t perm)
{
grant.set_group(ACL_GROUP_ALL_USERS, perm);
acl.add_grant(&grant);
} else {
- std::unique_ptr<rgw::sal::User> user = store->get_user(rgw_user(uid));
+ std::unique_ptr<rgw::sal::User> user = driver->get_user(rgw_user(uid));
if (user->load_user(dpp, null_yield) < 0) {
ldpp_dout(dpp, 10) << "grant user does not exist:" << uid << dendl;
}
bool RGWAccessControlPolicy_SWIFTAcct::create(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
const rgw_user& id,
const std::string& name,
const std::string& acl_str)
decode_json_obj(admin, *iter);
ldpp_dout(dpp, 0) << "admins: " << admin << dendl;
- add_grants(dpp, store, admin, SWIFT_PERM_ADMIN);
+ add_grants(dpp, driver, admin, SWIFT_PERM_ADMIN);
}
iter = parser.find_first("read-write");
decode_json_obj(readwrite, *iter);
ldpp_dout(dpp, 0) << "read-write: " << readwrite << dendl;
- add_grants(dpp, store, readwrite, SWIFT_PERM_RWRT);
+ add_grants(dpp, driver, readwrite, SWIFT_PERM_RWRT);
}
iter = parser.find_first("read-only");
decode_json_obj(readonly, *iter);
ldpp_dout(dpp, 0) << "read-only: " << readonly << dendl;
- add_grants(dpp, store, readonly, SWIFT_PERM_READ);
+ add_grants(dpp, driver, readonly, SWIFT_PERM_READ);
}
return true;
class RGWAccessControlPolicy_SWIFT : public RGWAccessControlPolicy
{
- int add_grants(const DoutPrefixProvider *dpp, rgw::sal::Store* store,
+ int add_grants(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver,
const std::vector<std::string>& uids,
uint32_t perm);
~RGWAccessControlPolicy_SWIFT() override = default;
int create(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
const rgw_user& id,
const std::string& name,
const char* read_list,
~RGWAccessControlPolicy_SWIFTAcct() override {}
void add_grants(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
const std::vector<std::string>& uids,
uint32_t perm);
bool create(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
const rgw_user& id,
const std::string& name,
const std::string& acl_str);
using namespace std;
-static rgw::sal::Store* store = NULL;
+static rgw::sal::Driver* driver = NULL;
static constexpr auto dout_subsys = ceph_subsys_rgw;
}
class StoreDestructor {
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
public:
- explicit StoreDestructor(rgw::sal::Store* _s) : store(_s) {}
+ explicit StoreDestructor(rgw::sal::Driver* _s) : driver(_s) {}
~StoreDestructor() {
- StoreManager::close_storage(store);
+ DriverManager::close_storage(driver);
rgw_http_client_cleanup();
}
};
static int init_bucket(rgw::sal::User* user, const rgw_bucket& b,
std::unique_ptr<rgw::sal::Bucket>* bucket)
{
- return store->get_bucket(dpp(), user, b, bucket, null_yield);
+ return driver->get_bucket(dpp(), user, b, bucket, null_yield);
}
static int init_bucket(rgw::sal::User* user,
}
}
-int set_bucket_quota(rgw::sal::Store* store, OPT opt_cmd,
+int set_bucket_quota(rgw::sal::Driver* driver, OPT opt_cmd,
const string& tenant_name, const string& bucket_name,
int64_t max_size, int64_t max_objects,
bool have_max_size, bool have_max_objects)
{
std::unique_ptr<rgw::sal::Bucket> bucket;
- int r = store->get_bucket(dpp(), nullptr, tenant_name, bucket_name, &bucket, null_yield);
+ int r = driver->get_bucket(dpp(), nullptr, tenant_name, bucket_name, &bucket, null_yield);
if (r < 0) {
cerr << "could not get bucket info for bucket=" << bucket_name << ": " << cpp_strerror(-r) << std::endl;
return -r;
return 0;
}
-int set_bucket_ratelimit(rgw::sal::Store* store, OPT opt_cmd,
+int set_bucket_ratelimit(rgw::sal::Driver* driver, OPT opt_cmd,
const string& tenant_name, const string& bucket_name,
int64_t max_read_ops, int64_t max_write_ops,
int64_t max_read_bytes, int64_t max_write_bytes,
bool have_max_read_bytes, bool have_max_write_bytes)
{
std::unique_ptr<rgw::sal::Bucket> bucket;
- int r = store->get_bucket(dpp(), nullptr, tenant_name, bucket_name, &bucket, null_yield);
+ int r = driver->get_bucket(dpp(), nullptr, tenant_name, bucket_name, &bucket, null_yield);
if (r < 0) {
cerr << "could not get bucket info for bucket=" << bucket_name << ": " << cpp_strerror(-r) << std::endl;
return -r;
return 0;
}
-int show_bucket_ratelimit(rgw::sal::Store* store, const string& tenant_name,
+int show_bucket_ratelimit(rgw::sal::Driver* driver, const string& tenant_name,
const string& bucket_name, Formatter *formatter)
{
std::unique_ptr<rgw::sal::Bucket> bucket;
- int r = store->get_bucket(dpp(), nullptr, tenant_name, bucket_name, &bucket, null_yield);
+ int r = driver->get_bucket(dpp(), nullptr, tenant_name, bucket_name, &bucket, null_yield);
if (r < 0) {
cerr << "could not get bucket info for bucket=" << bucket_name << ": " << cpp_strerror(-r) << std::endl;
return -r;
return 0;
}
-int check_min_obj_stripe_size(rgw::sal::Store* store, rgw::sal::Object* obj, uint64_t min_stripe_size, bool *need_rewrite)
+int check_min_obj_stripe_size(rgw::sal::Driver* driver, rgw::sal::Object* obj, uint64_t min_stripe_size, bool *need_rewrite)
{
int ret = obj->get_obj_attrs(null_yield, dpp());
if (ret < 0) {
string status = (needs_fixing ? "needs_fixing" : "ok");
if ((needs_fixing || remove_bad) && fix) {
- ret = static_cast<rgw::sal::RadosStore*>(store)->getRados()->fix_head_obj_locator(dpp(), obj->get_bucket()->get_info(), needs_fixing, remove_bad, obj->get_key());
+ ret = static_cast<rgw::sal::RadosStore*>(driver)->getRados()->fix_head_obj_locator(dpp(), obj->get_bucket()->get_info(), needs_fixing, remove_bad, obj->get_key());
if (ret < 0) {
cerr << "ERROR: fix_head_object_locator() returned ret=" << ret << std::endl;
goto done;
bool needs_fixing;
string status;
- int ret = static_cast<rgw::sal::RadosStore*>(store)->getRados()->fix_tail_obj_locator(dpp(), bucket_info, key, fix, &needs_fixing, null_yield);
+ int ret = static_cast<rgw::sal::RadosStore*>(driver)->getRados()->fix_tail_obj_locator(dpp(), bucket_info, key, fix, &needs_fixing, null_yield);
if (ret < 0) {
cerr << "ERROR: fix_tail_object_locator_underscore() returned ret=" << ret << std::endl;
status = "failed";
do {
ret = bucket->list(dpp(), params, max_entries - count, results, null_yield);
if (ret < 0) {
- cerr << "ERROR: store->list_objects(): " << cpp_strerror(-ret) << std::endl;
+ cerr << "ERROR: driver->list_objects(): " << cpp_strerror(-ret) << std::endl;
return -ret;
}
}
/// search for a matching zone/zonegroup id and return a connection if found
-static boost::optional<RGWRESTConn> get_remote_conn(rgw::sal::RadosStore* store,
+static boost::optional<RGWRESTConn> get_remote_conn(rgw::sal::RadosStore* driver,
const RGWZoneGroup& zonegroup,
const std::string& remote)
{
boost::optional<RGWRESTConn> conn;
if (remote == zonegroup.get_id()) {
- conn.emplace(store->ctx(), store, remote, zonegroup.endpoints, zonegroup.api_name);
+ conn.emplace(driver->ctx(), driver, remote, zonegroup.endpoints, zonegroup.api_name);
} else {
for (const auto& z : zonegroup.zones) {
const auto& zone = z.second;
if (remote == zone.id) {
- conn.emplace(store->ctx(), store, remote, zone.endpoints, zonegroup.api_name);
+ conn.emplace(driver->ctx(), driver, remote, zone.endpoints, zonegroup.api_name);
break;
}
}
}
/// search each zonegroup for a connection
-static boost::optional<RGWRESTConn> get_remote_conn(rgw::sal::RadosStore* store,
+static boost::optional<RGWRESTConn> get_remote_conn(rgw::sal::RadosStore* driver,
const RGWPeriodMap& period_map,
const std::string& remote)
{
boost::optional<RGWRESTConn> conn;
for (const auto& zg : period_map.zonegroups) {
- conn = get_remote_conn(store, zg.second, remote);
+ conn = get_remote_conn(driver, zg.second, remote);
if (conn) {
break;
}
return -EINVAL;
}
// are we the period's master zone?
- if (store->get_zone()->get_id() == master_zone) {
+ if (driver->get_zone()->get_id() == master_zone) {
// read the current period
RGWPeriod current_period;
int ret = cfgstore->read_period(dpp(), null_yield, realm.current_period,
return ret;
}
// the master zone can commit locally
- ret = rgw::commit_period(dpp(), null_yield, cfgstore, store,
+ ret = rgw::commit_period(dpp(), null_yield, cfgstore, driver,
realm, realm_writer, current_period,
period, cerr, force);
if (ret < 0) {
boost::optional<RGWRESTConn> conn;
RGWRESTConn *remote_conn = nullptr;
if (!remote.empty()) {
- conn = get_remote_conn(static_cast<rgw::sal::RadosStore*>(store), period.get_map(), remote);
+ conn = get_remote_conn(static_cast<rgw::sal::RadosStore*>(driver), period.get_map(), remote);
if (!conn) {
cerr << "failed to find a zone or zonegroup for remote "
<< remote << std::endl;
return ret;
}
// decode the response and store it back
try {
decode_json_obj(period, &p);
} catch (const JSONDecoder::err& e) {
constexpr bool exclusive = false;
ret = cfgstore->create_period(dpp(), null_yield, exclusive, period);
if (ret < 0) {
cerr << "failed to store period: " << cpp_strerror(-ret) << std::endl;
return ret;
}
if (commit) {
static void get_md_sync_status(list<string>& status)
{
- RGWMetaSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(store), static_cast<rgw::sal::RadosStore*>(store)->svc()->rados->get_async_processor());
+ RGWMetaSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->rados->get_async_processor());
int ret = sync.init(dpp());
if (ret < 0) {
push_ss(ss, status) << "incremental sync: " << num_inc << "/" << total_shards << " shards";
map<int, RGWMetadataLogInfo> master_shards_info;
- string master_period = static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->get_current_period_id();
+ string master_period = static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->get_current_period_id();
ret = sync.read_master_log_shards_info(dpp(), master_period, &master_shards_info);
if (ret < 0) {
RGWZone *sz;
- if (!(sz = static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->find_zone(source_zone))) {
+ if (!(sz = static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->find_zone(source_zone))) {
push_ss(ss, status, tab) << string("zone not found");
flush_ss(ss, status);
return;
}
- if (!static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->zone_syncs_from(static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->get_zone(), *sz)) {
+ if (!static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->zone_syncs_from(static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->get_zone(), *sz)) {
push_ss(ss, status, tab) << string("not syncing from zone");
flush_ss(ss, status);
return;
}
- RGWDataSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(store), static_cast<rgw::sal::RadosStore*>(store)->svc()->rados->get_async_processor(), source_zone, nullptr);
+ RGWDataSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->rados->get_async_processor(), source_zone, nullptr);
int ret = sync.init(dpp());
if (ret < 0) {
static void sync_status(Formatter *formatter)
{
- const rgw::sal::ZoneGroup& zonegroup = store->get_zone()->get_zonegroup();
- rgw::sal::Zone* zone = store->get_zone();
+ const rgw::sal::ZoneGroup& zonegroup = driver->get_zone()->get_zonegroup();
+ rgw::sal::Zone* zone = driver->get_zone();
int width = 15;
list<string> md_status;
- if (store->is_meta_master()) {
+ if (driver->is_meta_master()) {
md_status.push_back("no sync (zone is master)");
} else {
get_md_sync_status(md_status);
list<string> data_status;
- auto& zone_conn_map = static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->get_zone_conn_map();
+ auto& zone_conn_map = static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->get_zone_conn_map();
for (auto iter : zone_conn_map) {
const rgw_zone_id& source_id = iter.first;
string source_str = "source: ";
string s = source_str + source_id.id;
std::unique_ptr<rgw::sal::Zone> sz;
- if (store->get_zone()->get_zonegroup().get_zone_by_id(source_id.id, &sz) == 0) {
+ if (driver->get_zone()->get_zonegroup().get_zone_by_id(source_id.id, &sz) == 0) {
s += string(" (") + sz->get_name() + ")";
}
data_status.push_back(s);
return out << std::setw(h.w) << h.header << std::setw(1) << ' ';
}
-static int bucket_source_sync_status(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, const RGWZone& zone,
+static int bucket_source_sync_status(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* driver, const RGWZone& zone,
const RGWZone& source, RGWRESTConn *conn,
const RGWBucketInfo& bucket_info,
rgw_sync_bucket_pipe pipe,
// check for full sync status
rgw_bucket_sync_status full_status;
- r = rgw_read_bucket_full_sync_status(dpp, store, pipe, &full_status, null_yield);
+ r = rgw_read_bucket_full_sync_status(dpp, driver, pipe, &full_status, null_yield);
if (r >= 0) {
if (full_status.state == BucketSyncState::Init) {
out << indented{width} << "init: bucket sync has not started\n";
// use shard count from our log gen=0
shard_status.resize(rgw::num_shards(log.layout.in_index));
} else {
- lderr(store->ctx()) << "failed to read bucket full sync status: " << cpp_strerror(r) << dendl;
+ lderr(driver->ctx()) << "failed to read bucket full sync status: " << cpp_strerror(r) << dendl;
return r;
}
- r = rgw_read_bucket_inc_sync_status(dpp, store, pipe, gen, &shard_status);
+ r = rgw_read_bucket_inc_sync_status(dpp, driver, pipe, gen, &shard_status);
if (r < 0) {
- lderr(store->ctx()) << "failed to read bucket incremental sync status: " << cpp_strerror(r) << dendl;
+ lderr(driver->ctx()) << "failed to read bucket incremental sync status: " << cpp_strerror(r) << dendl;
return r;
}
static rgw_zone_id resolve_zone_id(const string& s)
{
std::unique_ptr<rgw::sal::Zone> zone;
- int ret = store->get_zone()->get_zonegroup().get_zone_by_id(s, &zone);
+ int ret = driver->get_zone()->get_zonegroup().get_zone_by_id(s, &zone);
if (ret < 0)
- ret = store->get_zone()->get_zonegroup().get_zone_by_name(s, &zone);
+ ret = driver->get_zone()->get_zonegroup().get_zone_by_name(s, &zone);
if (ret < 0)
return rgw_zone_id(s);
static int sync_info(std::optional<rgw_zone_id> opt_target_zone, std::optional<rgw_bucket> opt_bucket, Formatter *formatter)
{
- rgw_zone_id zone_id = opt_target_zone.value_or(store->get_zone()->get_id());
+ rgw_zone_id zone_id = opt_target_zone.value_or(driver->get_zone()->get_id());
- auto zone_policy_handler = store->get_zone()->get_sync_policy_handler();
+ auto zone_policy_handler = driver->get_zone()->get_sync_policy_handler();
RGWBucketSyncPolicyHandlerRef bucket_handler;
auto& hint_bucket = *hint_entity.bucket;
RGWBucketSyncPolicyHandlerRef hint_bucket_handler;
- int r = store->get_sync_policy_handler(dpp(), zid, hint_bucket, &hint_bucket_handler, null_yield);
+ int r = driver->get_sync_policy_handler(dpp(), zid, hint_bucket, &hint_bucket_handler, null_yield);
if (r < 0) {
ldpp_dout(dpp(), 20) << "could not get bucket sync policy handler for hint bucket=" << hint_bucket << " ... skipping" << dendl;
continue;
return 0;
}
-static int bucket_sync_info(rgw::sal::Store* store, const RGWBucketInfo& info,
+static int bucket_sync_info(rgw::sal::Driver* driver, const RGWBucketInfo& info,
std::ostream& out)
{
- const rgw::sal::ZoneGroup& zonegroup = store->get_zone()->get_zonegroup();
- rgw::sal::Zone* zone = store->get_zone();
+ const rgw::sal::ZoneGroup& zonegroup = driver->get_zone()->get_zonegroup();
+ rgw::sal::Zone* zone = driver->get_zone();
constexpr int width = 15;
out << indented{width, "realm"} << zone->get_realm_id() << " (" << zone->get_realm_name() << ")\n";
out << indented{width, "zone"} << zone->get_id() << " (" << zone->get_name() << ")\n";
out << indented{width, "bucket"} << info.bucket << "\n\n";
- if (!static_cast<rgw::sal::RadosStore*>(store)->ctl()->bucket->bucket_imports_data(info.bucket, null_yield, dpp())) {
+ if (!static_cast<rgw::sal::RadosStore*>(driver)->ctl()->bucket->bucket_imports_data(info.bucket, null_yield, dpp())) {
out << "Sync is disabled for bucket " << info.bucket.name << '\n';
return 0;
}
RGWBucketSyncPolicyHandlerRef handler;
- int r = store->get_sync_policy_handler(dpp(), std::nullopt, info.bucket, &handler, null_yield);
+ int r = driver->get_sync_policy_handler(dpp(), std::nullopt, info.bucket, &handler, null_yield);
if (r < 0) {
ldpp_dout(dpp(), -1) << "ERROR: failed to get policy handler for bucket (" << info.bucket << "): r=" << r << ": " << cpp_strerror(-r) << dendl;
return r;
return 0;
}
-static int bucket_sync_status(rgw::sal::Store* store, const RGWBucketInfo& info,
+static int bucket_sync_status(rgw::sal::Driver* driver, const RGWBucketInfo& info,
const rgw_zone_id& source_zone_id,
std::optional<rgw_bucket>& opt_source_bucket,
std::ostream& out)
{
- const rgw::sal::ZoneGroup& zonegroup = store->get_zone()->get_zonegroup();
- rgw::sal::Zone* zone = store->get_zone();
+ const rgw::sal::ZoneGroup& zonegroup = driver->get_zone()->get_zonegroup();
+ rgw::sal::Zone* zone = driver->get_zone();
constexpr int width = 15;
out << indented{width, "realm"} << zone->get_realm_id() << " (" << zone->get_realm_name() << ")\n";
<< to_iso_8601(ceph::real_clock::now(), iso_8601_format::YMDhms) << "\n\n";
- if (!static_cast<rgw::sal::RadosStore*>(store)->ctl()->bucket->bucket_imports_data(info.bucket, null_yield, dpp())) {
+ if (!static_cast<rgw::sal::RadosStore*>(driver)->ctl()->bucket->bucket_imports_data(info.bucket, null_yield, dpp())) {
out << "Sync is disabled for bucket " << info.bucket.name << " or bucket has no sync sources" << std::endl;
return 0;
}
RGWBucketSyncPolicyHandlerRef handler;
- int r = store->get_sync_policy_handler(dpp(), std::nullopt, info.bucket, &handler, null_yield);
+ int r = driver->get_sync_policy_handler(dpp(), std::nullopt, info.bucket, &handler, null_yield);
if (r < 0) {
ldpp_dout(dpp(), -1) << "ERROR: failed to get policy handler for bucket (" << info.bucket << "): r=" << r << ": " << cpp_strerror(-r) << dendl;
return r;
auto sources = handler->get_all_sources();
- auto& zone_conn_map = static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->get_zone_conn_map();
+ auto& zone_conn_map = static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->get_zone_conn_map();
set<rgw_zone_id> zone_ids;
if (!source_zone_id.empty()) {
std::unique_ptr<rgw::sal::Zone> zone;
- int ret = store->get_zone()->get_zonegroup().get_zone_by_id(source_zone_id.id, &zone);
+ int ret = driver->get_zone()->get_zonegroup().get_zone_by_id(source_zone_id.id, &zone);
if (ret < 0) {
ldpp_dout(dpp(), -1) << "Source zone not found in zonegroup "
<< zonegroup.get_name() << dendl;
zone_ids.insert(source_zone_id);
} else {
std::list<std::string> ids;
- int ret = store->get_zone()->get_zonegroup().list_zones(ids);
+ int ret = driver->get_zone()->get_zonegroup().list_zones(ids);
if (ret == 0) {
for (const auto& entry : ids) {
zone_ids.insert(entry);
}
for (auto& zone_id : zone_ids) {
- auto z = static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->get_zonegroup().zones.find(zone_id.id);
- if (z == static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->get_zonegroup().zones.end()) { /* should't happen */
+ auto z = static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->get_zonegroup().zones.find(zone_id.id);
+ if (z == static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->get_zonegroup().zones.end()) { /* should't happen */
continue;
}
auto c = zone_conn_map.find(zone_id.id);
continue;
}
if (pipe.source.zone.value_or(rgw_zone_id()) == z->second.id) {
- bucket_source_sync_status(dpp(), static_cast<rgw::sal::RadosStore*>(store), static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->get_zone(), z->second,
+ bucket_source_sync_status(dpp(), static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->get_zone(), z->second,
c->second,
info, pipe,
width, out);
static int check_pool_support_omap(const rgw_pool& pool)
{
librados::IoCtx io_ctx;
- int ret = static_cast<rgw::sal::RadosStore*>(store)->getRados()->get_rados_handle()->ioctx_create(pool.to_str().c_str(), io_ctx);
+ int ret = static_cast<rgw::sal::RadosStore*>(driver)->getRados()->get_rados_handle()->ioctx_create(pool.to_str().c_str(), io_ctx);
if (ret < 0) {
// the pool may not exist at this moment, we have no way to check if it supports omap.
return 0;
return 0;
}
-int check_reshard_bucket_params(rgw::sal::Store* store,
+int check_reshard_bucket_params(rgw::sal::Driver* driver,
const string& bucket_name,
const string& tenant,
const string& bucket_id,
return -EINVAL;
}
- if (num_shards > (int)static_cast<rgw::sal::RadosStore*>(store)->getRados()->get_max_bucket_shards()) {
- cerr << "ERROR: num_shards too high, max value: " << static_cast<rgw::sal::RadosStore*>(store)->getRados()->get_max_bucket_shards() << std::endl;
+ if (num_shards > (int)static_cast<rgw::sal::RadosStore*>(driver)->getRados()->get_max_bucket_shards()) {
+ cerr << "ERROR: num_shards too high, max value: " << static_cast<rgw::sal::RadosStore*>(driver)->getRados()->get_max_bucket_shards() << std::endl;
return -EINVAL;
}
shard_id);
// call cls_log_trim() until it returns -ENODATA
for (;;) {
- int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->cls->timelog.trim(dpp(), oid, {}, {}, {}, marker, nullptr,
+ int ret = static_cast<rgw::sal::RadosStore*>(driver)->svc()->cls->timelog.trim(dpp(), oid, {}, {}, {}, marker, nullptr,
null_yield);
if (ret == -ENODATA) {
return 0;
int ret = bucket->put_info(dpp(), false, real_time());
if (ret < 0) {
cerr << "failed to store bucket info: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
}
zone_id.emplace();
std::unique_ptr<rgw::sal::Zone> zone;
- int ret = store->get_zone()->get_zonegroup().get_zone_by_name(*zone_name, &zone);
+ int ret = driver->get_zone()->get_zonegroup().get_zone_by_name(*zone_name, &zone);
if (ret < 0) {
cerr << "WARNING: cannot find source zone id for name=" << *zone_name << std::endl;
zone_id = rgw_zone_id(*zone_name);
for (auto& name : *names) {
rgw_zone_id zid;
std::unique_ptr<rgw::sal::Zone> zone;
- int ret = store->get_zone()->get_zonegroup().get_zone_by_name(name, &zone);
+ int ret = driver->get_zone()->get_zonegroup().get_zone_by_name(name, &zone);
if (ret < 0) {
cerr << "WARNING: cannot find source zone id for name=" << name << std::endl;
zid = rgw_zone_id(name);
auto zone_id = *(static_cast<const rgw_zone_id *>(pval));
string zone_name;
std::unique_ptr<rgw::sal::Zone> zone;
- if (store->get_zone()->get_zonegroup().get_zone_by_id(zone_id.id, &zone) == 0) {
+ if (driver->get_zone()->get_zonegroup().get_zone_by_id(zone_id.id, &zone) == 0) {
zone_name = zone->get_name();
} else {
cerr << "WARNING: cannot find zone name for id=" << zone_id << std::endl;
bool need_cache = readonly_ops_list.find(opt_cmd) == readonly_ops_list.end();
bool need_gc = (gc_ops_list.find(opt_cmd) != gc_ops_list.end()) && !bypass_gc;
- StoreManager::Config cfg = StoreManager::get_config(true, g_ceph_context);
+ DriverManager::Config cfg = DriverManager::get_config(true, g_ceph_context);
auto config_store_type = g_conf().get_val<std::string>("rgw_config_store");
- cfgstore = StoreManager::create_config_store(dpp(), config_store_type);
+ cfgstore = DriverManager::create_config_store(dpp(), config_store_type);
if (!cfgstore) {
cerr << "couldn't init config storage provider" << std::endl;
return EIO;
}
if (raw_storage_op) {
- store = StoreManager::get_raw_storage(dpp(),
+ driver = DriverManager::get_raw_storage(dpp(),
g_ceph_context,
cfg);
} else {
- store = StoreManager::get_storage(dpp(),
+ driver = DriverManager::get_storage(dpp(),
g_ceph_context,
cfg,
false,
need_cache && g_conf()->rgw_cache_enabled,
need_gc);
}
- if (!store) {
+ if (!driver) {
cerr << "couldn't init storage provider" << std::endl;
return EIO;
}
- /* Needs to be after the store is initialized. Note, user could be empty here. */
- user = store->get_user(user_id_arg);
+ /* Needs to be after the driver is initialized. Note, user could be empty here. */
+ user = driver->get_user(user_id_arg);
init_optional_bucket(opt_bucket, opt_tenant,
opt_bucket_name, opt_bucket_id);
RGWStreamFlusher stream_flusher(formatter.get(), cout);
- RGWUserAdminOpState user_op(store);
+ RGWUserAdminOpState user_op(driver);
if (!user_email.empty()) {
user_op.user_email_specified=true;
}
if (!source_zone_name.empty()) {
std::unique_ptr<rgw::sal::Zone> zone;
- if (store->get_zone()->get_zonegroup().get_zone_by_name(source_zone_name, &zone) < 0) {
+ if (driver->get_zone()->get_zonegroup().get_zone_by_name(source_zone_name, &zone) < 0) {
cerr << "WARNING: cannot find source zone id for name=" << source_zone_name << std::endl;
source_zone = source_zone_name;
} else {
oath_init();
- StoreDestructor store_destructor(store);
+ StoreDestructor store_destructor(driver);
if (raw_storage_op) {
try_to_resolve_local_entities(cfgstore.get(), realm_id, realm_name,
// use realm master zone as remote
remote = current_period.get_master_zone().id;
}
- conn = get_remote_conn(static_cast<rgw::sal::RadosStore*>(store), current_period.get_map(), remote);
+ conn = get_remote_conn(static_cast<rgw::sal::RadosStore*>(driver), current_period.get_map(), remote);
if (!conn) {
cerr << "failed to find a zone or zonegroup for remote "
<< remote << std::endl;
} else {
ret = writer->write(dpp(), null_yield, realm);
if (ret < 0) {
cerr << "ERROR: couldn't store realm info: " << cpp_strerror(-ret) << std::endl;
return 1;
}
}
// validate --tier-type if specified
const string *ptier_type = (tier_type_specified ? &tier_type : nullptr);
if (ptier_type) {
- auto sync_mgr = static_cast<rgw::sal::RadosStore*>(store)->svc()->sync_modules->get_manager();
+ auto sync_mgr = static_cast<rgw::sal::RadosStore*>(driver)->svc()->sync_modules->get_manager();
if (!sync_mgr->get_module(*ptier_type, nullptr)) {
ldpp_dout(dpp(), -1) << "ERROR: could not find sync module: "
<< *ptier_type << ", valid sync modules: "
// validate --tier-type if specified
const string *ptier_type = (tier_type_specified ? &tier_type : nullptr);
if (ptier_type) {
- auto sync_mgr = static_cast<rgw::sal::RadosStore*>(store)->svc()->sync_modules->get_manager();
+ auto sync_mgr = static_cast<rgw::sal::RadosStore*>(driver)->svc()->sync_modules->get_manager();
if (!sync_mgr->get_module(*ptier_type, nullptr)) {
ldpp_dout(dpp(), -1) << "ERROR: could not find sync module: "
<< *ptier_type << ", valid sync modules: "
// validate --tier-type if specified
const string *ptier_type = (tier_type_specified ? &tier_type : nullptr);
if (ptier_type) {
- auto sync_mgr = static_cast<rgw::sal::RadosStore*>(store)->svc()->sync_modules->get_manager();
+ auto sync_mgr = static_cast<rgw::sal::RadosStore*>(driver)->svc()->sync_modules->get_manager();
if (!sync_mgr->get_module(*ptier_type, nullptr)) {
ldpp_dout(dpp(), -1) << "ERROR: could not find sync module: "
<< *ptier_type << ", valid sync modules: "
resolve_zone_ids_opt(opt_source_zone_names, opt_source_zone_ids);
resolve_zone_ids_opt(opt_dest_zone_names, opt_dest_zone_ids);
- bool non_master_cmd = (!store->is_meta_master() && !yes_i_really_mean_it);
+ bool non_master_cmd = (!driver->is_meta_master() && !yes_i_really_mean_it);
std::set<OPT> non_master_ops_list = {OPT::USER_CREATE, OPT::USER_RM,
OPT::USER_MODIFY, OPT::USER_ENABLE,
OPT::USER_SUSPEND, OPT::SUBUSER_CREATE,
rgw_placement_rule target_rule;
target_rule.name = placement_id;
target_rule.storage_class = opt_storage_class.value_or("");
- if (!store->valid_placement(target_rule)) {
+ if (!driver->valid_placement(target_rule)) {
cerr << "NOTICE: invalid dest placement: " << target_rule.to_str() << std::endl;
return EINVAL;
}
RGWUser ruser;
int ret = 0;
if (!(rgw::sal::User::empty(user) && access_key.empty()) || !subuser.empty()) {
- ret = ruser.init(dpp(), store, user_op, null_yield);
+ ret = ruser.init(dpp(), driver, user_op, null_yield);
if (ret < 0) {
cerr << "user.init failed: " << cpp_strerror(-ret) << std::endl;
return -ret;
cerr << "failed to parse policy: " << e.what() << std::endl;
return -EINVAL;
}
- std::unique_ptr<rgw::sal::RGWRole> role = store->get_role(role_name, tenant, path, assume_role_doc);
+ std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name, tenant, path, assume_role_doc);
ret = role->create(dpp(), true, "", null_yield);
if (ret < 0) {
return -ret;
cerr << "ERROR: empty role name" << std::endl;
return -EINVAL;
}
- std::unique_ptr<rgw::sal::RGWRole> role = store->get_role(role_name, tenant);
+ std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name, tenant);
ret = role->delete_obj(dpp(), null_yield);
if (ret < 0) {
return -ret;
cerr << "ERROR: empty role name" << std::endl;
return -EINVAL;
}
- std::unique_ptr<rgw::sal::RGWRole> role = store->get_role(role_name, tenant);
+ std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name, tenant);
ret = role->get(dpp(), null_yield);
if (ret < 0) {
return -ret;
return -EINVAL;
}
- std::unique_ptr<rgw::sal::RGWRole> role = store->get_role(role_name, tenant);
+ std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name, tenant);
ret = role->get(dpp(), null_yield);
if (ret < 0) {
return -ret;
case OPT::ROLE_LIST:
{
vector<std::unique_ptr<rgw::sal::RGWRole>> result;
- ret = store->get_roles(dpp(), null_yield, path_prefix, tenant, result);
+ ret = driver->get_roles(dpp(), null_yield, path_prefix, tenant, result);
if (ret < 0) {
return -ret;
}
return -EINVAL;
}
- std::unique_ptr<rgw::sal::RGWRole> role = store->get_role(role_name, tenant);
+ std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name, tenant);
ret = role->get(dpp(), null_yield);
if (ret < 0) {
return -ret;
cerr << "ERROR: Role name is empty" << std::endl;
return -EINVAL;
}
- std::unique_ptr<rgw::sal::RGWRole> role = store->get_role(role_name, tenant);
+ std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name, tenant);
ret = role->get(dpp(), null_yield);
if (ret < 0) {
return -ret;
cerr << "ERROR: policy name is empty" << std::endl;
return -EINVAL;
}
- std::unique_ptr<rgw::sal::RGWRole> role = store->get_role(role_name, tenant);
+ std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name, tenant);
int ret = role->get(dpp(), null_yield);
if (ret < 0) {
return -ret;
cerr << "ERROR: policy name is empty" << std::endl;
return -EINVAL;
}
- std::unique_ptr<rgw::sal::RGWRole> role = store->get_role(role_name, tenant);
+ std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name, tenant);
ret = role->get(dpp(), null_yield);
if (ret < 0) {
return -ret;
return -EINVAL;
}
- std::unique_ptr<rgw::sal::RGWRole> role = store->get_role(role_name, tenant);
+ std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name, tenant);
ret = role->get(dpp(), null_yield);
if (ret < 0) {
return -ret;
if (opt_cmd == OPT::POLICY) {
if (format == "xml") {
- int ret = RGWBucketAdminOp::dump_s3_policy(store, bucket_op, cout, dpp());
+ int ret = RGWBucketAdminOp::dump_s3_policy(driver, bucket_op, cout, dpp());
if (ret < 0) {
cerr << "ERROR: failed to get policy: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
} else {
- int ret = RGWBucketAdminOp::get_policy(store, bucket_op, stream_flusher, dpp());
+ int ret = RGWBucketAdminOp::get_policy(driver, bucket_op, stream_flusher, dpp());
if (ret < 0) {
cerr << "ERROR: failed to get policy: " << cpp_strerror(-ret) << std::endl;
return -ret;
if (!rgw::sal::User::empty(user)) {
user_ids.push_back(user->get_id().id);
ret =
- RGWBucketAdminOp::limit_check(store, bucket_op, user_ids, stream_flusher,
+ RGWBucketAdminOp::limit_check(driver, bucket_op, user_ids, stream_flusher,
null_yield, dpp(), warnings_only);
} else {
/* list users in groups of max-keys, then perform user-bucket
* limit-check on each group */
- ret = store->meta_list_keys_init(dpp(), metadata_key, string(), &handle);
+ ret = driver->meta_list_keys_init(dpp(), metadata_key, string(), &handle);
if (ret < 0) {
cerr << "ERROR: buckets limit check can't get user metadata_key: "
<< cpp_strerror(-ret) << std::endl;
}
do {
- ret = store->meta_list_keys_next(dpp(), handle, max, user_ids,
+ ret = driver->meta_list_keys_next(dpp(), handle, max, user_ids,
&truncated);
if (ret < 0 && ret != -ENOENT) {
cerr << "ERROR: buckets limit check lists_keys_next(): "
} else {
/* ok, do the limit checks for this group */
ret =
- RGWBucketAdminOp::limit_check(store, bucket_op, user_ids, stream_flusher,
+ RGWBucketAdminOp::limit_check(driver, bucket_op, user_ids, stream_flusher,
null_yield, dpp(), warnings_only);
if (ret < 0)
break;
}
user_ids.clear();
} while (truncated);
- store->meta_list_keys_complete(handle);
+ driver->meta_list_keys_complete(handle);
}
return -ret;
} /* OPT::BUCKET_LIMIT_CHECK */
return -ENOENT;
}
}
- RGWBucketAdminOp::info(store, bucket_op, stream_flusher, null_yield, dpp());
+ RGWBucketAdminOp::info(driver, bucket_op, stream_flusher, null_yield, dpp());
} else {
int ret = init_bucket(user.get(), tenant, bucket_name, bucket_id, &bucket);
if (ret < 0) {
ret = bucket->list(dpp(), params, std::min(remaining, paginate_size), results,
null_yield);
if (ret < 0) {
- cerr << "ERROR: store->list_objects(): " << cpp_strerror(-ret) << std::endl;
+ cerr << "ERROR: driver->list_objects(): " << cpp_strerror(-ret) << std::endl;
return -ret;
}
} /* OPT::BUCKETS_LIST */
if (opt_cmd == OPT::BUCKET_RADOS_LIST) {
- RGWRadosList lister(static_cast<rgw::sal::RadosStore*>(store),
+ RGWRadosList lister(static_cast<rgw::sal::RadosStore*>(driver),
max_concurrent_ios, orphan_stale_secs, tenant);
if (rgw_obj_fs) {
lister.set_field_separator(*rgw_obj_fs);
if (opt_cmd == OPT::BUCKET_STATS) {
if (bucket_name.empty() && !bucket_id.empty()) {
rgw_bucket bucket;
- if (!rgw_find_bucket_by_id(dpp(), store->ctx(), store, marker, bucket_id, &bucket)) {
+ if (!rgw_find_bucket_by_id(dpp(), driver->ctx(), driver, marker, bucket_id, &bucket)) {
cerr << "failure: no such bucket id" << std::endl;
return -ENOENT;
}
}
bucket_op.set_fetch_stats(true);
- int r = RGWBucketAdminOp::info(store, bucket_op, stream_flusher, null_yield, dpp());
+ int r = RGWBucketAdminOp::info(driver, bucket_op, stream_flusher, null_yield, dpp());
if (r < 0) {
cerr << "failure: " << cpp_strerror(-r) << ": " << err << std::endl;
return posix_errortrans(-r);
bucket_op.set_bucket_id(bucket_id);
bucket_op.set_new_bucket_name(new_bucket_name);
string err;
- int r = RGWBucketAdminOp::link(store, bucket_op, dpp(), &err);
+ int r = RGWBucketAdminOp::link(driver, bucket_op, dpp(), &err);
if (r < 0) {
cerr << "failure: " << cpp_strerror(-r) << ": " << err << std::endl;
return -r;
}
if (opt_cmd == OPT::BUCKET_UNLINK) {
- int r = RGWBucketAdminOp::unlink(store, bucket_op, dpp());
+ int r = RGWBucketAdminOp::unlink(driver, bucket_op, dpp());
if (r < 0) {
cerr << "failure: " << cpp_strerror(-r) << std::endl;
return -r;
bucket_op.set_new_bucket_name(new_bucket_name);
string err;
- int r = RGWBucketAdminOp::chown(store, bucket_op, marker, dpp(), &err);
+ int r = RGWBucketAdminOp::chown(driver, bucket_op, marker, dpp(), &err);
if (r < 0) {
cerr << "failure: " << cpp_strerror(-r) << ": " << err << std::endl;
return -r;
formatter->reset();
formatter->open_array_section("logs");
RGWAccessHandle h;
- int r = static_cast<rgw::sal::RadosStore*>(store)->getRados()->log_list_init(dpp(), date, &h);
+ int r = static_cast<rgw::sal::RadosStore*>(driver)->getRados()->log_list_init(dpp(), date, &h);
if (r == -ENOENT) {
// no logs.
} else {
}
while (true) {
string name;
- int r = static_cast<rgw::sal::RadosStore*>(store)->getRados()->log_list_next(h, &name);
+ int r = static_cast<rgw::sal::RadosStore*>(driver)->getRados()->log_list_next(h, &name);
if (r == -ENOENT)
break;
if (r < 0) {
if (opt_cmd == OPT::LOG_SHOW) {
RGWAccessHandle h;
- int r = static_cast<rgw::sal::RadosStore*>(store)->getRados()->log_show_init(dpp(), oid, &h);
+ int r = static_cast<rgw::sal::RadosStore*>(driver)->getRados()->log_show_init(dpp(), oid, &h);
if (r < 0) {
cerr << "error opening log " << oid << ": " << cpp_strerror(-r) << std::endl;
return -r;
struct rgw_log_entry entry;
// peek at first entry to get bucket metadata
- r = static_cast<rgw::sal::RadosStore*>(store)->getRados()->log_show_next(dpp(), h, &entry);
+ r = static_cast<rgw::sal::RadosStore*>(driver)->getRados()->log_show_next(dpp(), h, &entry);
if (r < 0) {
cerr << "error reading log " << oid << ": " << cpp_strerror(-r) << std::endl;
return -r;
formatter->flush(cout);
}
next:
- r = static_cast<rgw::sal::RadosStore*>(store)->getRados()->log_show_next(dpp(), h, &entry);
+ r = static_cast<rgw::sal::RadosStore*>(driver)->getRados()->log_show_next(dpp(), h, &entry);
} while (r > 0);
if (r < 0) {
cout << std::endl;
}
if (opt_cmd == OPT::LOG_RM) {
- int r = static_cast<rgw::sal::RadosStore*>(store)->getRados()->log_remove(dpp(), oid);
+ int r = static_cast<rgw::sal::RadosStore*>(driver)->getRados()->log_remove(dpp(), oid);
if (r < 0) {
cerr << "error removing log " << oid << ": " << cpp_strerror(-r) << std::endl;
return -r;
exit(1);
}
- int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->add_bucket_placement(dpp(), pool, null_yield);
+ int ret = static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->add_bucket_placement(dpp(), pool, null_yield);
if (ret < 0)
cerr << "failed to add bucket placement: " << cpp_strerror(-ret) << std::endl;
}
exit(1);
}
- int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->remove_bucket_placement(dpp(), pool, null_yield);
+ int ret = static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->remove_bucket_placement(dpp(), pool, null_yield);
if (ret < 0)
cerr << "failed to remove bucket placement: " << cpp_strerror(-ret) << std::endl;
}
if (opt_cmd == OPT::POOLS_LIST) {
set<rgw_pool> pools;
- int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->list_placement_set(dpp(), pools, null_yield);
+ int ret = static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->list_placement_set(dpp(), pools, null_yield);
if (ret < 0) {
cerr << "could not list placement set: " << cpp_strerror(-ret) << std::endl;
return -ret;
return -ret;
}
}
- ret = RGWUsage::show(dpp(), store, user.get(), bucket.get(), start_epoch,
+ ret = RGWUsage::show(dpp(), driver, user.get(), bucket.get(), start_epoch,
end_epoch, show_log_entries, show_log_sum, &categories,
stream_flusher);
if (ret < 0) {
return -ret;
}
}
- ret = RGWUsage::trim(dpp(), store, user.get(), bucket.get(), start_epoch, end_epoch);
+ ret = RGWUsage::trim(dpp(), driver, user.get(), bucket.get(), start_epoch, end_epoch);
if (ret < 0) {
cerr << "ERROR: read_usage() returned ret=" << ret << std::endl;
return 1;
return 1;
}
- ret = RGWUsage::clear(dpp(), store);
+ ret = RGWUsage::clear(dpp(), driver);
if (ret < 0) {
return ret;
}
}
RGWOLHInfo olh;
rgw_obj obj(bucket->get_key(), object);
- ret = static_cast<rgw::sal::RadosStore*>(store)->getRados()->get_olh(dpp(), bucket->get_info(), obj, &olh);
+ ret = static_cast<rgw::sal::RadosStore*>(driver)->getRados()->get_olh(dpp(), bucket->get_info(), obj, &olh);
if (ret < 0) {
cerr << "ERROR: failed reading olh: " << cpp_strerror(-ret) << std::endl;
return -ret;
return -ret;
}
- ret = static_cast<rgw::sal::RadosStore*>(store)->getRados()->bucket_index_read_olh_log(dpp(), bucket->get_info(), *state, obj->get_obj(), 0, &log, &is_truncated);
+ ret = static_cast<rgw::sal::RadosStore*>(driver)->getRados()->bucket_index_read_olh_log(dpp(), bucket->get_info(), *state, obj->get_obj(), 0, &log, &is_truncated);
if (ret < 0) {
cerr << "ERROR: failed reading olh: " << cpp_strerror(-ret) << std::endl;
return -ret;
rgw_cls_bi_entry entry;
- ret = static_cast<rgw::sal::RadosStore*>(store)->getRados()->bi_get(dpp(), bucket->get_info(), obj, bi_index_type, &entry);
+ ret = static_cast<rgw::sal::RadosStore*>(driver)->getRados()->bi_get(dpp(), bucket->get_info(), obj, bi_index_type, &entry);
if (ret < 0) {
cerr << "ERROR: bi_get(): " << cpp_strerror(-ret) << std::endl;
return -ret;
rgw_obj obj(bucket->get_key(), key);
- ret = static_cast<rgw::sal::RadosStore*>(store)->getRados()->bi_put(dpp(), bucket->get_key(), obj, entry);
+ ret = static_cast<rgw::sal::RadosStore*>(driver)->getRados()->bi_put(dpp(), bucket->get_key(), obj, entry);
if (ret < 0) {
cerr << "ERROR: bi_put(): " << cpp_strerror(-ret) << std::endl;
return -ret;
int i = (specified_shard_id ? shard_id : 0);
for (; i < max_shards; i++) {
- RGWRados::BucketShard bs(static_cast<rgw::sal::RadosStore*>(store)->getRados());
+ RGWRados::BucketShard bs(static_cast<rgw::sal::RadosStore*>(driver)->getRados());
int ret = bs.init(dpp(), bucket->get_info(), index, i);
marker.clear();
do {
entries.clear();
// if object is specified, we use that as a filter to only retrieve some some entries
- ret = static_cast<rgw::sal::RadosStore*>(store)->getRados()->bi_list(bs, object, marker, max_entries, &entries, &is_truncated);
+ ret = static_cast<rgw::sal::RadosStore*>(driver)->getRados()->bi_list(bs, object, marker, max_entries, &entries, &is_truncated);
if (ret < 0) {
cerr << "ERROR: bi_list(): " << cpp_strerror(-ret) << std::endl;
return -ret;
const int max_shards = rgw::num_shards(index);
for (int i = 0; i < max_shards; i++) {
- RGWRados::BucketShard bs(static_cast<rgw::sal::RadosStore*>(store)->getRados());
+ RGWRados::BucketShard bs(static_cast<rgw::sal::RadosStore*>(driver)->getRados());
int ret = bs.init(dpp(), bucket->get_info(), index, i);
if (ret < 0) {
cerr << "ERROR: bs.init(bucket=" << bucket << ", shard=" << i << "): " << cpp_strerror(-ret) << std::endl;
return -ret;
}
- ret = static_cast<rgw::sal::RadosStore*>(store)->getRados()->bi_remove(dpp(), bs);
+ ret = static_cast<rgw::sal::RadosStore*>(driver)->getRados()->bi_remove(dpp(), bs);
if (ret < 0) {
cerr << "ERROR: failed to remove bucket index object: " << cpp_strerror(-ret) << std::endl;
return -ret;
return EINVAL;
}
- RGWDataAccess data_access(store);
+ RGWDataAccess data_access(driver);
rgw_obj_key key(object, object_version);
RGWDataAccess::BucketRef b;
return -ret;
}
rgw_obj_key key(object, object_version);
- ret = rgw_remove_object(dpp(), store, bucket.get(), key);
+ ret = rgw_remove_object(dpp(), driver, bucket.get(), key);
if (ret < 0) {
cerr << "ERROR: object remove returned: " << cpp_strerror(-ret) << std::endl;
obj->set_instance(object_version);
bool need_rewrite = true;
if (min_rewrite_stripe_size > 0) {
- ret = check_min_obj_stripe_size(store, obj.get(), min_rewrite_stripe_size, &need_rewrite);
+ ret = check_min_obj_stripe_size(driver, obj.get(), min_rewrite_stripe_size, &need_rewrite);
if (ret < 0) {
ldpp_dout(dpp(), 0) << "WARNING: check_min_obj_stripe_size failed, r=" << ret << dendl;
}
}
if (need_rewrite) {
- ret = static_cast<rgw::sal::RadosStore*>(store)->getRados()->rewrite_obj(obj.get(), dpp(), null_yield);
+ ret = static_cast<rgw::sal::RadosStore*>(driver)->getRados()->rewrite_obj(obj.get(), dpp(), null_yield);
if (ret < 0) {
cerr << "ERROR: object rewrite returned: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
if (opt_cmd == OPT::OBJECTS_EXPIRE) {
- if (!static_cast<rgw::sal::RadosStore*>(store)->getRados()->process_expire_objects(dpp())) {
+ if (!static_cast<rgw::sal::RadosStore*>(driver)->getRados()->process_expire_objects(dpp())) {
cerr << "ERROR: process_expire_objects() processing returned error." << std::endl;
return 1;
}
}
if (opt_cmd == OPT::OBJECTS_EXPIRE_STALE_LIST) {
- ret = RGWBucketAdminOp::fix_obj_expiry(store, bucket_op, stream_flusher, dpp(), true);
+ ret = RGWBucketAdminOp::fix_obj_expiry(driver, bucket_op, stream_flusher, dpp(), true);
if (ret < 0) {
cerr << "ERROR: listing returned " << cpp_strerror(-ret) << std::endl;
return -ret;
}
if (opt_cmd == OPT::OBJECTS_EXPIRE_STALE_RM) {
- ret = RGWBucketAdminOp::fix_obj_expiry(store, bucket_op, stream_flusher, dpp(), false);
+ ret = RGWBucketAdminOp::fix_obj_expiry(driver, bucket_op, stream_flusher, dpp(), false);
if (ret < 0) {
cerr << "ERROR: removing returned " << cpp_strerror(-ret) << std::endl;
return -ret;
result.reserve(NUM_ENTRIES);
const auto& current_index = bucket->get_info().layout.current_index;
- int r = static_cast<rgw::sal::RadosStore*>(store)->getRados()->cls_bucket_list_ordered(
+ int r = static_cast<rgw::sal::RadosStore*>(driver)->getRados()->cls_bucket_list_ordered(
dpp(), bucket->get_info(), current_index, RGW_NO_SHARD,
marker, empty_prefix, empty_delimiter,
NUM_ENTRIES, true, expansion_factor,
bool need_rewrite = true;
if (min_rewrite_stripe_size > 0) {
- r = check_min_obj_stripe_size(store, obj.get(), min_rewrite_stripe_size, &need_rewrite);
+ r = check_min_obj_stripe_size(driver, obj.get(), min_rewrite_stripe_size, &need_rewrite);
if (r < 0) {
ldpp_dout(dpp(), 0) << "WARNING: check_min_obj_stripe_size failed, r=" << r << dendl;
}
if (!need_rewrite) {
formatter->dump_string("status", "Skipped");
} else {
- r = static_cast<rgw::sal::RadosStore*>(store)->getRados()->rewrite_obj(obj.get(), dpp(), null_yield);
+ r = static_cast<rgw::sal::RadosStore*>(driver)->getRados()->rewrite_obj(obj.get(), dpp(), null_yield);
if (r == 0) {
formatter->dump_string("status", "Success");
} else {
}
if (opt_cmd == OPT::BUCKET_RESHARD) {
- int ret = check_reshard_bucket_params(store,
+ int ret = check_reshard_bucket_params(driver,
bucket_name,
tenant,
bucket_id,
return ret;
}
- auto zone_svc = static_cast<rgw::sal::RadosStore*>(store)->svc()->zone;
+ auto zone_svc = static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone;
if (!zone_svc->can_reshard()) {
const auto& zonegroup = zone_svc->get_zonegroup();
std::cerr << "The zonegroup '" << zonegroup.get_name() << "' does not "
return EINVAL;
}
- RGWBucketReshard br(static_cast<rgw::sal::RadosStore*>(store),
+ RGWBucketReshard br(static_cast<rgw::sal::RadosStore*>(driver),
bucket->get_info(), bucket->get_attrs(),
nullptr /* no callback */);
}
if (opt_cmd == OPT::RESHARD_ADD) {
- int ret = check_reshard_bucket_params(store,
+ int ret = check_reshard_bucket_params(driver,
bucket_name,
tenant,
bucket_id,
int num_source_shards = rgw::current_num_shards(bucket->get_info().layout);
- RGWReshard reshard(static_cast<rgw::sal::RadosStore*>(store), dpp());
+ RGWReshard reshard(static_cast<rgw::sal::RadosStore*>(driver), dpp());
cls_rgw_reshard_entry entry;
entry.time = real_clock::now();
entry.tenant = tenant;
}
int num_logshards =
- store->ctx()->_conf.get_val<uint64_t>("rgw_reshard_num_logs");
+ driver->ctx()->_conf.get_val<uint64_t>("rgw_reshard_num_logs");
- RGWReshard reshard(static_cast<rgw::sal::RadosStore*>(store), dpp());
+ RGWReshard reshard(static_cast<rgw::sal::RadosStore*>(driver), dpp());
formatter->open_array_section("reshard");
for (int i = 0; i < num_logshards; i++) {
return -ret;
}
- RGWBucketReshard br(static_cast<rgw::sal::RadosStore*>(store),
+ RGWBucketReshard br(static_cast<rgw::sal::RadosStore*>(driver),
bucket->get_info(), bucket->get_attrs(),
nullptr /* no callback */);
list<cls_rgw_bucket_instance_entry> status;
}
if (opt_cmd == OPT::RESHARD_PROCESS) {
- RGWReshard reshard(static_cast<rgw::sal::RadosStore*>(store), true, &cout);
+ RGWReshard reshard(static_cast<rgw::sal::RadosStore*>(driver), true, &cout);
int ret = reshard.process_all_logshards(dpp());
if (ret < 0) {
if (bucket_initable) {
// we did not encounter an error, so let's work with the bucket
- RGWBucketReshard br(static_cast<rgw::sal::RadosStore*>(store),
+ RGWBucketReshard br(static_cast<rgw::sal::RadosStore*>(driver),
bucket->get_info(), bucket->get_attrs(),
nullptr /* no callback */);
int ret = br.cancel(dpp());
if (ret < 0) {
if (ret == -EBUSY) {
cerr << "There is ongoing resharding, please retry after " <<
- store->ctx()->_conf.get_val<uint64_t>("rgw_reshard_bucket_lock_duration") <<
+ driver->ctx()->_conf.get_val<uint64_t>("rgw_reshard_bucket_lock_duration") <<
" seconds." << std::endl;
return -ret;
} else if (ret == -EINVAL) {
}
}
- RGWReshard reshard(static_cast<rgw::sal::RadosStore*>(store), dpp());
+ RGWReshard reshard(static_cast<rgw::sal::RadosStore*>(driver), dpp());
cls_rgw_reshard_entry entry;
entry.tenant = tenant;
}
do_check_object_locator(tenant, bucket_name, fix, remove_bad, formatter.get());
} else {
- RGWBucketAdminOp::check_index(store, bucket_op, stream_flusher, null_yield, dpp());
+ RGWBucketAdminOp::check_index(driver, bucket_op, stream_flusher, null_yield, dpp());
}
}
if (opt_cmd == OPT::BUCKET_RM) {
if (!inconsistent_index) {
- RGWBucketAdminOp::remove_bucket(store, bucket_op, null_yield, dpp(), bypass_gc, true);
+ RGWBucketAdminOp::remove_bucket(driver, bucket_op, null_yield, dpp(), bypass_gc, true);
} else {
if (!yes_i_really_mean_it) {
cerr << "using --inconsistent_index can corrupt the bucket index " << std::endl
<< "do you really mean it? (requires --yes-i-really-mean-it)" << std::endl;
return 1;
}
- RGWBucketAdminOp::remove_bucket(store, bucket_op, null_yield, dpp(), bypass_gc, false);
+ RGWBucketAdminOp::remove_bucket(driver, bucket_op, null_yield, dpp(), bypass_gc, false);
}
}
do {
list<cls_rgw_gc_obj_info> result;
- int ret = static_cast<rgw::sal::RadosStore*>(store)->getRados()->list_gc_objs(&index, marker, 1000, !include_all, result, &truncated, processing_queue);
+ int ret = static_cast<rgw::sal::RadosStore*>(driver)->getRados()->list_gc_objs(&index, marker, 1000, !include_all, result, &truncated, processing_queue);
if (ret < 0) {
cerr << "ERROR: failed to list objs: " << cpp_strerror(-ret) << std::endl;
return 1;
}
if (opt_cmd == OPT::GC_PROCESS) {
- int ret = static_cast<rgw::sal::RadosStore*>(store)->getRados()->process_gc(!include_all);
+ int ret = static_cast<rgw::sal::RadosStore*>(driver)->getRados()->process_gc(!include_all);
if (ret < 0) {
cerr << "ERROR: gc processing returned error: " << cpp_strerror(-ret) << std::endl;
return 1;
max_entries = MAX_LC_LIST_ENTRIES;
}
do {
- int ret = static_cast<rgw::sal::RadosStore*>(store)->getRados()->list_lc_progress(marker, max_entries,
+ int ret = static_cast<rgw::sal::RadosStore*>(driver)->getRados()->list_lc_progress(marker, max_entries,
bucket_lc_map, index);
if (ret < 0) {
cerr << "ERROR: failed to list objs: " << cpp_strerror(-ret)
}
int ret =
- static_cast<rgw::sal::RadosStore*>(store)->getRados()->process_lc(bucket);
+ static_cast<rgw::sal::RadosStore*>(driver)->getRados()->process_lc(bucket);
if (ret < 0) {
cerr << "ERROR: lc processing returned error: " << cpp_strerror(-ret) << std::endl;
return 1;
}
if (opt_cmd == OPT::LC_RESHARD_FIX) {
- ret = RGWBucketAdminOp::fix_lc_shards(store, bucket_op, stream_flusher, dpp());
+ ret = RGWBucketAdminOp::fix_lc_shards(driver, bucket_op, stream_flusher, dpp());
if (ret < 0) {
cerr << "ERROR: fixing lc shards: " << cpp_strerror(-ret) << std::endl;
}
<< std::endl;
}
- RGWOrphanSearch search(static_cast<rgw::sal::RadosStore*>(store), max_concurrent_ios, orphan_stale_secs);
+ RGWOrphanSearch search(static_cast<rgw::sal::RadosStore*>(driver), max_concurrent_ios, orphan_stale_secs);
if (job_id.empty()) {
cerr << "ERROR: --job-id not specified" << std::endl;
<< std::endl;
}
- RGWOrphanSearch search(static_cast<rgw::sal::RadosStore*>(store), max_concurrent_ios, orphan_stale_secs);
+ RGWOrphanSearch search(static_cast<rgw::sal::RadosStore*>(driver), max_concurrent_ios, orphan_stale_secs);
if (job_id.empty()) {
cerr << "ERROR: --job-id not specified" << std::endl;
<< std::endl;
}
- RGWOrphanStore orphan_store(static_cast<rgw::sal::RadosStore*>(store));
+ RGWOrphanStore orphan_store(static_cast<rgw::sal::RadosStore*>(driver));
int ret = orphan_store.init(dpp());
if (ret < 0){
cerr << "connection to cluster failed!" << std::endl;
}
if (opt_cmd == OPT::USER_CHECK) {
- check_bad_user_bucket_mapping(store, user.get(), fix, null_yield, dpp());
+ check_bad_user_bucket_mapping(driver, user.get(), fix, null_yield, dpp());
}
if (opt_cmd == OPT::USER_STATS) {
"so at most one of the two should be specified" << std::endl;
return EINVAL;
}
- ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->user->reset_bucket_stats(dpp(), user->get_id(), null_yield);
+ ret = static_cast<rgw::sal::RadosStore*>(driver)->svc()->user->reset_bucket_stats(dpp(), user->get_id(), null_yield);
if (ret < 0) {
cerr << "ERROR: could not reset user stats: " << cpp_strerror(-ret) <<
std::endl;
return -ret;
}
} else {
- int ret = rgw_user_sync_all_stats(dpp(), store, user.get(), null_yield);
+ int ret = rgw_user_sync_all_stats(dpp(), driver, user.get(), null_yield);
if (ret < 0) {
cerr << "ERROR: could not sync user stats: " <<
cpp_strerror(-ret) << std::endl;
}
if (opt_cmd == OPT::METADATA_GET) {
- int ret = static_cast<rgw::sal::RadosStore*>(store)->ctl()->meta.mgr->get(metadata_key, formatter.get(), null_yield, dpp());
+ int ret = static_cast<rgw::sal::RadosStore*>(driver)->ctl()->meta.mgr->get(metadata_key, formatter.get(), null_yield, dpp());
if (ret < 0) {
cerr << "ERROR: can't get key: " << cpp_strerror(-ret) << std::endl;
return -ret;
cerr << "ERROR: failed to read input: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
- ret = static_cast<rgw::sal::RadosStore*>(store)->ctl()->meta.mgr->put(metadata_key, bl, null_yield, dpp(), RGWMDLogSyncType::APPLY_ALWAYS, false);
+ ret = static_cast<rgw::sal::RadosStore*>(driver)->ctl()->meta.mgr->put(metadata_key, bl, null_yield, dpp(), RGWMDLogSyncType::APPLY_ALWAYS, false);
if (ret < 0) {
cerr << "ERROR: can't put key: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
if (opt_cmd == OPT::METADATA_RM) {
- int ret = static_cast<rgw::sal::RadosStore*>(store)->ctl()->meta.mgr->remove(metadata_key, null_yield, dpp());
+ int ret = static_cast<rgw::sal::RadosStore*>(driver)->ctl()->meta.mgr->remove(metadata_key, null_yield, dpp());
if (ret < 0) {
cerr << "ERROR: can't remove key: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
void *handle;
int max = 1000;
- int ret = store->meta_list_keys_init(dpp(), metadata_key, marker, &handle);
+ int ret = driver->meta_list_keys_init(dpp(), metadata_key, marker, &handle);
if (ret < 0) {
cerr << "ERROR: can't get key: " << cpp_strerror(-ret) << std::endl;
return -ret;
do {
list<string> keys;
left = (max_entries_specified ? max_entries - count : max);
- ret = store->meta_list_keys_next(dpp(), handle, left, keys, &truncated);
+ ret = driver->meta_list_keys_next(dpp(), handle, left, keys, &truncated);
if (ret < 0 && ret != -ENOENT) {
cerr << "ERROR: lists_keys_next(): " << cpp_strerror(-ret) << std::endl;
return -ret;
encode_json("truncated", truncated, formatter.get());
encode_json("count", count, formatter.get());
if (truncated) {
- encode_json("marker", store->meta_get_marker(handle), formatter.get());
+ encode_json("marker", driver->meta_get_marker(handle), formatter.get());
}
formatter->close_section();
}
formatter->flush(cout);
- store->meta_list_keys_complete(handle);
+ driver->meta_list_keys_complete(handle);
}
if (opt_cmd == OPT::MDLOG_LIST) {
std::cerr << "No --period given, using current period="
<< period_id << std::endl;
}
- RGWMetadataLog *meta_log = static_cast<rgw::sal::RadosStore*>(store)->svc()->mdlog->get_log(period_id);
+ RGWMetadataLog *meta_log = static_cast<rgw::sal::RadosStore*>(driver)->svc()->mdlog->get_log(period_id);
formatter->open_array_section("entries");
for (; i < g_ceph_context->_conf->rgw_md_log_max_shards; i++) {
for (list<cls_log_entry>::iterator iter = entries.begin(); iter != entries.end(); ++iter) {
cls_log_entry& entry = *iter;
- static_cast<rgw::sal::RadosStore*>(store)->ctl()->meta.mgr->dump_log_entry(entry, formatter.get());
+ static_cast<rgw::sal::RadosStore*>(driver)->ctl()->meta.mgr->dump_log_entry(entry, formatter.get());
}
formatter->flush(cout);
} while (truncated);
std::cerr << "No --period given, using current period="
<< period_id << std::endl;
}
- RGWMetadataLog *meta_log = static_cast<rgw::sal::RadosStore*>(store)->svc()->mdlog->get_log(period_id);
+ RGWMetadataLog *meta_log = static_cast<rgw::sal::RadosStore*>(driver)->svc()->mdlog->get_log(period_id);
formatter->open_array_section("entries");
if (opt_cmd == OPT::MDLOG_AUTOTRIM) {
// need a full history for purging old mdlog periods
- static_cast<rgw::sal::RadosStore*>(store)->svc()->mdlog->init_oldest_log_period(null_yield, dpp());
+ static_cast<rgw::sal::RadosStore*>(driver)->svc()->mdlog->init_oldest_log_period(null_yield, dpp());
- RGWCoroutinesManager crs(store->ctx(), store->get_cr_registry());
- RGWHTTPManager http(store->ctx(), crs.get_completion_mgr());
+ RGWCoroutinesManager crs(driver->ctx(), driver->get_cr_registry());
+ RGWHTTPManager http(driver->ctx(), crs.get_completion_mgr());
int ret = http.start();
if (ret < 0) {
cerr << "failed to initialize http client with " << cpp_strerror(ret) << std::endl;
auto num_shards = g_conf()->rgw_md_log_max_shards;
auto mltcr = create_admin_meta_log_trim_cr(
- dpp(), static_cast<rgw::sal::RadosStore*>(store), &http, num_shards);
+ dpp(), static_cast<rgw::sal::RadosStore*>(driver), &http, num_shards);
if (!mltcr) {
cerr << "Cluster misconfigured! Unable to trim." << std::endl;
return -EIO;
std::cerr << "missing --period argument" << std::endl;
return EINVAL;
}
- RGWMetadataLog *meta_log = static_cast<rgw::sal::RadosStore*>(store)->svc()->mdlog->get_log(period_id);
+ RGWMetadataLog *meta_log = static_cast<rgw::sal::RadosStore*>(driver)->svc()->mdlog->get_log(period_id);
// trim until -ENODATA
do {
}
if (opt_cmd == OPT::METADATA_SYNC_STATUS) {
- RGWMetaSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(store), static_cast<rgw::sal::RadosStore*>(store)->svc()->rados->get_async_processor());
+ RGWMetaSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->rados->get_async_processor());
int ret = sync.init(dpp());
if (ret < 0) {
}
if (opt_cmd == OPT::METADATA_SYNC_INIT) {
- RGWMetaSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(store), static_cast<rgw::sal::RadosStore*>(store)->svc()->rados->get_async_processor());
+ RGWMetaSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->rados->get_async_processor());
int ret = sync.init(dpp());
if (ret < 0) {
if (opt_cmd == OPT::METADATA_SYNC_RUN) {
- RGWMetaSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(store), static_cast<rgw::sal::RadosStore*>(store)->svc()->rados->get_async_processor());
+ RGWMetaSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->rados->get_async_processor());
int ret = sync.init(dpp());
if (ret < 0) {
cerr << "ERROR: source zone not specified" << std::endl;
return EINVAL;
}
- RGWDataSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(store), static_cast<rgw::sal::RadosStore*>(store)->svc()->rados->get_async_processor(), source_zone, nullptr);
+ RGWDataSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->rados->get_async_processor(), source_zone, nullptr);
int ret = sync.init(dpp());
if (ret < 0) {
return EINVAL;
}
- RGWDataSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(store), static_cast<rgw::sal::RadosStore*>(store)->svc()->rados->get_async_processor(), source_zone, nullptr);
+ RGWDataSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->rados->get_async_processor(), source_zone, nullptr);
int ret = sync.init(dpp());
if (ret < 0) {
}
RGWSyncModuleInstanceRef sync_module;
- int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->sync_modules->get_manager()->create_instance(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->get_zone().tier_type,
- static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->get_zone_params().tier_config, &sync_module);
+ int ret = static_cast<rgw::sal::RadosStore*>(driver)->svc()->sync_modules->get_manager()->create_instance(dpp(), g_ceph_context, static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->get_zone().tier_type,
+ static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->get_zone_params().tier_config, &sync_module);
if (ret < 0) {
ldpp_dout(dpp(), -1) << "ERROR: failed to init sync module instance, ret=" << ret << dendl;
return ret;
}
- RGWDataSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(store), static_cast<rgw::sal::RadosStore*>(store)->svc()->rados->get_async_processor(), source_zone, nullptr, sync_module);
+ RGWDataSyncStatusManager sync(static_cast<rgw::sal::RadosStore*>(driver), static_cast<rgw::sal::RadosStore*>(driver)->svc()->rados->get_async_processor(), source_zone, nullptr, sync_module);
ret = sync.init(dpp());
if (ret < 0) {
}
auto sync = RGWBucketPipeSyncStatusManager::construct(
- dpp(), static_cast<rgw::sal::RadosStore*>(store), source_zone, opt_sb,
+ dpp(), static_cast<rgw::sal::RadosStore*>(driver), source_zone, opt_sb,
bucket->get_key(), extra_info ? &std::cout : nullptr);
if (!sync) {
return -ret;
}
- if (!static_cast<rgw::sal::RadosStore*>(store)->ctl()->bucket->bucket_imports_data(bucket->get_key(), null_yield, dpp())) {
+ if (!static_cast<rgw::sal::RadosStore*>(driver)->ctl()->bucket->bucket_imports_data(bucket->get_key(), null_yield, dpp())) {
std::cout << "Sync is disabled for bucket " << bucket_name << std::endl;
return 0;
}
RGWBucketSyncPolicyHandlerRef handler;
- ret = store->get_sync_policy_handler(dpp(), std::nullopt, bucket->get_key(), &handler, null_yield);
+ ret = driver->get_sync_policy_handler(dpp(), std::nullopt, bucket->get_key(), &handler, null_yield);
if (ret < 0) {
std::cerr << "ERROR: failed to get policy handler for bucket ("
<< bucket << "): r=" << ret << ": " << cpp_strerror(-ret) << std::endl;
}
auto timeout_at = ceph::coarse_mono_clock::now() + opt_timeout_sec;
- ret = rgw_bucket_sync_checkpoint(dpp(), static_cast<rgw::sal::RadosStore*>(store), *handler, bucket->get_info(),
+ ret = rgw_bucket_sync_checkpoint(dpp(), static_cast<rgw::sal::RadosStore*>(driver), *handler, bucket->get_info(),
opt_source_zone, opt_source_bucket,
opt_retry_delay_ms, timeout_at);
if (ret < 0) {
}
bucket_op.set_tenant(tenant);
string err_msg;
- ret = RGWBucketAdminOp::sync_bucket(store, bucket_op, dpp(), &err_msg);
+ ret = RGWBucketAdminOp::sync_bucket(driver, bucket_op, dpp(), &err_msg);
if (ret < 0) {
cerr << err_msg << std::endl;
return -ret;
if (ret < 0) {
return -ret;
}
- bucket_sync_info(store, bucket->get_info(), std::cout);
+ bucket_sync_info(driver, bucket->get_info(), std::cout);
}
if (opt_cmd == OPT::BUCKET_SYNC_STATUS) {
if (ret < 0) {
return -ret;
}
- bucket_sync_status(store, bucket->get_info(), source_zone, opt_source_bucket, std::cout);
+ bucket_sync_status(driver, bucket->get_info(), source_zone, opt_source_bucket, std::cout);
}
if (opt_cmd == OPT::BUCKET_SYNC_MARKERS) {
return -ret;
}
auto sync = RGWBucketPipeSyncStatusManager::construct(
- dpp(), static_cast<rgw::sal::RadosStore*>(store), source_zone,
+ dpp(), static_cast<rgw::sal::RadosStore*>(driver), source_zone,
opt_source_bucket, bucket->get_key(), nullptr);
if (!sync) {
return -ret;
}
auto sync = RGWBucketPipeSyncStatusManager::construct(
- dpp(), static_cast<rgw::sal::RadosStore*>(store), source_zone,
+ dpp(), static_cast<rgw::sal::RadosStore*>(driver), source_zone,
opt_source_bucket, bucket->get_key(), extra_info ? &std::cout : nullptr);
if (!sync) {
do {
list<rgw_bi_log_entry> entries;
- ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->bilog_rados->log_list(dpp(), bucket->get_info(), log_layout, shard_id, marker, max_entries - count, entries, &truncated);
+ ret = static_cast<rgw::sal::RadosStore*>(driver)->svc()->bilog_rados->log_list(dpp(), bucket->get_info(), log_layout, shard_id, marker, max_entries - count, entries, &truncated);
if (ret < 0) {
cerr << "ERROR: list_bi_log_entries(): " << cpp_strerror(-ret) << std::endl;
return -ret;
do {
list<cls_log_entry> entries;
- ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->cls->timelog.list(dpp(), oid, {}, {}, max_entries - count, entries, marker, &marker, &truncated,
+ ret = static_cast<rgw::sal::RadosStore*>(driver)->svc()->cls->timelog.list(dpp(), oid, {}, {}, max_entries - count, entries, marker, &marker, &truncated,
null_yield);
if (ret == -ENOENT) {
break;
if (!gen) {
gen = 0;
}
- ret = bilog_trim(dpp(), static_cast<rgw::sal::RadosStore*>(store),
+ ret = bilog_trim(dpp(), static_cast<rgw::sal::RadosStore*>(driver),
bucket->get_info(), *gen,
shard_id, start_marker, end_marker);
if (ret < 0) {
log_layout = *i;
}
- ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->bilog_rados->get_log_status(dpp(), bucket->get_info(), log_layout, shard_id,
+ ret = static_cast<rgw::sal::RadosStore*>(driver)->svc()->bilog_rados->get_log_status(dpp(), bucket->get_info(), log_layout, shard_id,
&markers, null_yield);
if (ret < 0) {
cerr << "ERROR: get_bi_log_status(): " << cpp_strerror(-ret) << std::endl;
}
if (opt_cmd == OPT::BILOG_AUTOTRIM) {
- RGWCoroutinesManager crs(store->ctx(), store->get_cr_registry());
- RGWHTTPManager http(store->ctx(), crs.get_completion_mgr());
+ RGWCoroutinesManager crs(driver->ctx(), driver->get_cr_registry());
+ RGWHTTPManager http(driver->ctx(), crs.get_completion_mgr());
int ret = http.start();
if (ret < 0) {
cerr << "failed to initialize http client with " << cpp_strerror(ret) << std::endl;
}
rgw::BucketTrimConfig config;
- configure_bucket_trim(store->ctx(), config);
+ configure_bucket_trim(driver->ctx(), config);
- rgw::BucketTrimManager trim(static_cast<rgw::sal::RadosStore*>(store), config);
+ rgw::BucketTrimManager trim(static_cast<rgw::sal::RadosStore*>(driver), config);
ret = trim.init();
if (ret < 0) {
cerr << "trim manager init failed with " << cpp_strerror(ret) << std::endl;
}
}
- auto datalog_svc = static_cast<rgw::sal::RadosStore*>(store)->svc()->datalog_rados;
+ auto datalog_svc = static_cast<rgw::sal::RadosStore*>(driver)->svc()->datalog_rados;
RGWDataChangesLog::LogMarker log_marker;
do {
list<cls_log_entry> entries;
RGWDataChangesLogInfo info;
- static_cast<rgw::sal::RadosStore*>(store)->svc()->datalog_rados->get_info(dpp(), i, &info);
+ static_cast<rgw::sal::RadosStore*>(driver)->svc()->datalog_rados->get_info(dpp(), i, &info);
::encode_json("info", info, formatter.get());
}
if (opt_cmd == OPT::DATALOG_AUTOTRIM) {
- RGWCoroutinesManager crs(store->ctx(), store->get_cr_registry());
- RGWHTTPManager http(store->ctx(), crs.get_completion_mgr());
+ RGWCoroutinesManager crs(driver->ctx(), driver->get_cr_registry());
+ RGWHTTPManager http(driver->ctx(), crs.get_completion_mgr());
int ret = http.start();
if (ret < 0) {
cerr << "failed to initialize http client with " << cpp_strerror(ret) << std::endl;
auto num_shards = g_conf()->rgw_data_log_num_shards;
std::vector<std::string> markers(num_shards);
- ret = crs.run(dpp(), create_admin_data_log_trim_cr(dpp(), static_cast<rgw::sal::RadosStore*>(store), &http, num_shards, markers));
+ ret = crs.run(dpp(), create_admin_data_log_trim_cr(dpp(), static_cast<rgw::sal::RadosStore*>(driver), &http, num_shards, markers));
if (ret < 0) {
cerr << "automated datalog trim failed with " << cpp_strerror(ret) << std::endl;
return -ret;
return EINVAL;
}
- auto datalog = static_cast<rgw::sal::RadosStore*>(store)->svc()->datalog_rados;
+ auto datalog = static_cast<rgw::sal::RadosStore*>(driver)->svc()->datalog_rados;
ret = datalog->trim_entries(dpp(), shard_id, marker);
if (ret < 0 && ret != -ENODATA) {
std::cerr << "log-type not specified." << std::endl;
return -EINVAL;
}
- auto datalog = static_cast<rgw::sal::RadosStore*>(store)->svc()->datalog_rados;
+ auto datalog = static_cast<rgw::sal::RadosStore*>(driver)->svc()->datalog_rados;
ret = datalog->change_format(dpp(), *opt_log_type, null_yield);
if (ret < 0) {
cerr << "ERROR: change_format(): " << cpp_strerror(-ret) << std::endl;
}
if (opt_cmd == OPT::DATALOG_PRUNE) {
- auto datalog = static_cast<rgw::sal::RadosStore*>(store)->svc()->datalog_rados;
+ auto datalog = static_cast<rgw::sal::RadosStore*>(driver)->svc()->datalog_rados;
std::optional<uint64_t> through;
ret = datalog->trim_generations(dpp(), through);
cerr << "ERROR: invalid quota scope specification." << std::endl;
return EINVAL;
}
- set_bucket_quota(store, opt_cmd, tenant, bucket_name,
+ set_bucket_quota(driver, opt_cmd, tenant, bucket_name,
max_size, max_objects, have_max_size, have_max_objects);
} else if (!rgw::sal::User::empty(user)) {
if (quota_scope == "bucket") {
cerr << "ERROR: invalid ratelimit scope specification. (bucket scope is not bucket but bucket has been specified)" << std::endl;
return EINVAL;
}
- return set_bucket_ratelimit(store, opt_cmd, tenant, bucket_name,
+ return set_bucket_ratelimit(driver, opt_cmd, tenant, bucket_name,
max_read_ops, max_write_ops,
max_read_bytes, max_write_bytes,
have_max_read_ops, have_max_write_ops,
cerr << "ERROR: invalid ratelimit scope specification. (bucket scope is not bucket but bucket has been specified)" << std::endl;
return EINVAL;
}
- return show_bucket_ratelimit(store, tenant, bucket_name, formatter.get());
+ return show_bucket_ratelimit(driver, tenant, bucket_name, formatter.get());
} else if (!rgw::sal::User::empty(user)) {
} if (ratelimit_scope == "user") {
return show_user_ratelimit(user, formatter.get());
}
real_time mtime = real_clock::now();
- string oid = static_cast<rgw::sal::RadosStore*>(store)->svc()->cls->mfa.get_mfa_oid(user->get_id());
+ string oid = static_cast<rgw::sal::RadosStore*>(driver)->svc()->cls->mfa.get_mfa_oid(user->get_id());
- int ret = static_cast<rgw::sal::RadosStore*>(store)->ctl()->meta.mgr->mutate(RGWSI_MetaBackend_OTP::get_meta_key(user->get_id()),
+ int ret = static_cast<rgw::sal::RadosStore*>(driver)->ctl()->meta.mgr->mutate(RGWSI_MetaBackend_OTP::get_meta_key(user->get_id()),
mtime, &objv_tracker,
null_yield, dpp(),
MDLOG_STATUS_WRITE,
[&] {
- return static_cast<rgw::sal::RadosStore*>(store)->svc()->cls->mfa.create_mfa(dpp(), user->get_id(), config, &objv_tracker, mtime, null_yield);
+ return static_cast<rgw::sal::RadosStore*>(driver)->svc()->cls->mfa.create_mfa(dpp(), user->get_id(), config, &objv_tracker, mtime, null_yield);
});
if (ret < 0) {
cerr << "MFA creation failed, error: " << cpp_strerror(-ret) << std::endl;
real_time mtime = real_clock::now();
- int ret = static_cast<rgw::sal::RadosStore*>(store)->ctl()->meta.mgr->mutate(RGWSI_MetaBackend_OTP::get_meta_key(user->get_id()),
+ int ret = static_cast<rgw::sal::RadosStore*>(driver)->ctl()->meta.mgr->mutate(RGWSI_MetaBackend_OTP::get_meta_key(user->get_id()),
mtime, &objv_tracker,
null_yield, dpp(),
MDLOG_STATUS_WRITE,
[&] {
- return static_cast<rgw::sal::RadosStore*>(store)->svc()->cls->mfa.remove_mfa(dpp(), user->get_id(), totp_serial, &objv_tracker, mtime, null_yield);
+ return static_cast<rgw::sal::RadosStore*>(driver)->svc()->cls->mfa.remove_mfa(dpp(), user->get_id(), totp_serial, &objv_tracker, mtime, null_yield);
});
if (ret < 0) {
cerr << "MFA removal failed, error: " << cpp_strerror(-ret) << std::endl;
}
rados::cls::otp::otp_info_t result;
- int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->cls->mfa.get_mfa(dpp(), user->get_id(), totp_serial, &result, null_yield);
+ int ret = static_cast<rgw::sal::RadosStore*>(driver)->svc()->cls->mfa.get_mfa(dpp(), user->get_id(), totp_serial, &result, null_yield);
if (ret < 0) {
if (ret == -ENOENT || ret == -ENODATA) {
cerr << "MFA serial id not found" << std::endl;
}
list<rados::cls::otp::otp_info_t> result;
- int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->cls->mfa.list_mfa(dpp(), user->get_id(), &result, null_yield);
+ int ret = static_cast<rgw::sal::RadosStore*>(driver)->svc()->cls->mfa.list_mfa(dpp(), user->get_id(), &result, null_yield);
if (ret < 0) {
cerr << "MFA listing failed, error: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
list<rados::cls::otp::otp_info_t> result;
- int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->cls->mfa.check_mfa(dpp(), user->get_id(), totp_serial, totp_pin.front(), null_yield);
+ int ret = static_cast<rgw::sal::RadosStore*>(driver)->svc()->cls->mfa.check_mfa(dpp(), user->get_id(), totp_serial, totp_pin.front(), null_yield);
if (ret < 0) {
cerr << "MFA check failed, error: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
rados::cls::otp::otp_info_t config;
- int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->cls->mfa.get_mfa(dpp(), user->get_id(), totp_serial, &config, null_yield);
+ int ret = static_cast<rgw::sal::RadosStore*>(driver)->svc()->cls->mfa.get_mfa(dpp(), user->get_id(), totp_serial, &config, null_yield);
if (ret < 0) {
if (ret == -ENOENT || ret == -ENODATA) {
cerr << "MFA serial id not found" << std::endl;
ceph::real_time now;
- ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->cls->mfa.otp_get_current_time(dpp(), user->get_id(), &now, null_yield);
+ ret = static_cast<rgw::sal::RadosStore*>(driver)->svc()->cls->mfa.otp_get_current_time(dpp(), user->get_id(), &now, null_yield);
if (ret < 0) {
cerr << "ERROR: failed to fetch current time from osd: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
time_t time_ofs;
- ret = scan_totp(store->ctx(), now, config, totp_pin, &time_ofs);
+ ret = scan_totp(driver->ctx(), now, config, totp_pin, &time_ofs);
if (ret < 0) {
if (ret == -ENOENT) {
cerr << "failed to resync, TOTP values not found in range" << std::endl;
/* now update the backend */
real_time mtime = real_clock::now();
- ret = static_cast<rgw::sal::RadosStore*>(store)->ctl()->meta.mgr->mutate(RGWSI_MetaBackend_OTP::get_meta_key(user->get_id()),
+ ret = static_cast<rgw::sal::RadosStore*>(driver)->ctl()->meta.mgr->mutate(RGWSI_MetaBackend_OTP::get_meta_key(user->get_id()),
mtime, &objv_tracker,
null_yield, dpp(),
MDLOG_STATUS_WRITE,
[&] {
- return static_cast<rgw::sal::RadosStore*>(store)->svc()->cls->mfa.create_mfa(dpp(), user->get_id(), config, &objv_tracker, mtime, null_yield);
+ return static_cast<rgw::sal::RadosStore*>(driver)->svc()->cls->mfa.create_mfa(dpp(), user->get_id(), config, &objv_tracker, mtime, null_yield);
});
if (ret < 0) {
cerr << "MFA update failed, error: " << cpp_strerror(-ret) << std::endl;
}
if (opt_cmd == OPT::RESHARD_STALE_INSTANCES_LIST) {
- if (!static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->can_reshard() && !yes_i_really_mean_it) {
+ if (!static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->can_reshard() && !yes_i_really_mean_it) {
cerr << "Resharding disabled in a multisite env, stale instances unlikely from resharding" << std::endl;
cerr << "These instances may not be safe to delete." << std::endl;
cerr << "Use --yes-i-really-mean-it to force displaying these instances." << std::endl;
return EINVAL;
}
- ret = RGWBucketAdminOp::list_stale_instances(store, bucket_op, stream_flusher, dpp());
+ ret = RGWBucketAdminOp::list_stale_instances(driver, bucket_op, stream_flusher, dpp());
if (ret < 0) {
cerr << "ERROR: listing stale instances" << cpp_strerror(-ret) << std::endl;
}
}
if (opt_cmd == OPT::RESHARD_STALE_INSTANCES_DELETE) {
- if (!static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->can_reshard()) {
+ if (!static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->can_reshard()) {
cerr << "Resharding disabled in a multisite env. Stale instances are not safe to be deleted." << std::endl;
return EINVAL;
}
- ret = RGWBucketAdminOp::clear_stale_instances(store, bucket_op, stream_flusher, dpp());
+ ret = RGWBucketAdminOp::clear_stale_instances(driver, bucket_op, stream_flusher, dpp());
if (ret < 0) {
cerr << "ERROR: deleting stale instances" << cpp_strerror(-ret) << std::endl;
}
if (opt_cmd == OPT::PUBSUB_TOPICS_LIST) {
- RGWPubSub ps(static_cast<rgw::sal::RadosStore*>(store), tenant);
+ RGWPubSub ps(static_cast<rgw::sal::RadosStore*>(driver), tenant);
if (!bucket_name.empty()) {
rgw_pubsub_bucket_topics result;
return EINVAL;
}
- RGWPubSub ps(static_cast<rgw::sal::RadosStore*>(store), tenant);
+ RGWPubSub ps(static_cast<rgw::sal::RadosStore*>(driver), tenant);
rgw_pubsub_topic_subs topic;
ret = ps.get_topic(topic_name, &topic);
return EINVAL;
}
- RGWPubSub ps(static_cast<rgw::sal::RadosStore*>(store), tenant);
+ RGWPubSub ps(static_cast<rgw::sal::RadosStore*>(driver), tenant);
ret = ps.remove_topic(dpp(), topic_name, null_yield);
if (ret < 0) {
cerr << "ERROR: cannot specify tenant in background context" << std::endl;
return EINVAL;
}
- auto lua_manager = store->get_lua_manager();
+ auto lua_manager = driver->get_lua_manager();
rc = rgw::lua::write_script(dpp(), lua_manager.get(), tenant, null_yield, script_ctx, script);
if (rc < 0) {
cerr << "ERROR: failed to put script. error: " << rc << std::endl;
cerr << "ERROR: invalid script context: " << *str_script_ctx << ". must be one of: " << LUA_CONTEXT_LIST << std::endl;
return EINVAL;
}
- auto lua_manager = store->get_lua_manager();
+ auto lua_manager = driver->get_lua_manager();
std::string script;
const auto rc = rgw::lua::read_script(dpp(), lua_manager.get(), tenant, null_yield, script_ctx, script);
if (rc == -ENOENT) {
cerr << "ERROR: invalid script context: " << *str_script_ctx << ". must be one of: " << LUA_CONTEXT_LIST << std::endl;
return EINVAL;
}
- auto lua_manager = store->get_lua_manager();
+ auto lua_manager = driver->get_lua_manager();
const auto rc = rgw::lua::delete_script(dpp(), lua_manager.get(), tenant, null_yield, script_ctx);
if (rc < 0) {
cerr << "ERROR: failed to remove script. error: " << rc << std::endl;
cerr << "ERROR: lua package name was not provided (via --package)" << std::endl;
return EINVAL;
}
- const auto rc = rgw::lua::add_package(dpp(), store, null_yield, *script_package, bool(allow_compilation));
+ const auto rc = rgw::lua::add_package(dpp(), driver, null_yield, *script_package, bool(allow_compilation));
if (rc < 0) {
cerr << "ERROR: failed to add lua package: " << script_package << " .error: " << rc << std::endl;
return -rc;
cerr << "ERROR: lua package name was not provided (via --package)" << std::endl;
return EINVAL;
}
- const auto rc = rgw::lua::remove_package(dpp(), store, null_yield, *script_package);
+ const auto rc = rgw::lua::remove_package(dpp(), driver, null_yield, *script_package);
if (rc == -ENOENT) {
cerr << "WARNING: package " << script_package << " did not exists or already removed" << std::endl;
return 0;
if (opt_cmd == OPT::SCRIPT_PACKAGE_LIST) {
#ifdef WITH_RADOSGW_LUA_PACKAGES
rgw::lua::packages_t packages;
- const auto rc = rgw::lua::list_packages(dpp(), store, null_yield, packages);
+ const auto rc = rgw::lua::list_packages(dpp(), driver, null_yield, packages);
if (rc == -ENOENT) {
std::cout << "no lua packages in allowlist" << std::endl;
} else if (rc < 0) {
(g_conf()->rgw_run_sync_thread &&
((!nfs) || (nfs && g_conf()->rgw_nfs_run_sync_thread)));
- StoreManager::Config cfg = StoreManager::get_config(false, g_ceph_context);
- store = StoreManager::get_storage(dpp, dpp->get_cct(),
+ DriverManager::Config cfg = DriverManager::get_config(false, g_ceph_context);
+ driver = DriverManager::get_storage(dpp, dpp->get_cct(),
cfg,
run_gc,
run_lc,
void rgw::AppMain::cond_init_apis()
{
- rgw_rest_init(g_ceph_context, store->get_zone()->get_zonegroup());
+ rgw_rest_init(g_ceph_context, driver->get_zone()->get_zonegroup());
if (have_http_frontend) {
std::vector<std::string> apis;
if (apis_map.count("s3") > 0 || s3website_enabled) {
if (!swift_at_root) {
rest.register_default_mgr(set_logging(
- rest_filter(store, RGW_REST_S3,
+ rest_filter(driver, RGW_REST_S3,
new RGWRESTMgr_S3(s3website_enabled, sts_enabled,
iam_enabled, pubsub_enabled))));
} else {
if (! swift_at_root) {
rest.register_resource(g_conf()->rgw_swift_url_prefix,
- set_logging(rest_filter(store, RGW_REST_SWIFT,
+ set_logging(rest_filter(driver, RGW_REST_SWIFT,
swift_resource)));
} else {
- if (store->get_zone()->get_zonegroup().get_zone_count() > 1) {
+ if (driver->get_zone()->get_zonegroup().get_zone_count() > 1) {
derr << "Placing Swift API in the root of URL hierarchy while running"
<< " multi-site configuration requires another instance of RadosGW"
<< " with S3 API enabled!" << dendl;
RGWRESTMgr_Admin *admin_resource = new RGWRESTMgr_Admin;
admin_resource->register_resource("info", new RGWRESTMgr_Info);
admin_resource->register_resource("usage", new RGWRESTMgr_Usage);
- /* Register store-specific admin APIs */
- store->register_admin_apis(admin_resource);
+ /* Register driver-specific admin APIs */
+ driver->register_admin_apis(admin_resource);
rest.register_resource(g_conf()->rgw_admin_entry, admin_resource);
}
} /* have_http_frontend */
void rgw::AppMain::init_ldap()
{
- const string &ldap_uri = store->ctx()->_conf->rgw_ldap_uri;
- const string &ldap_binddn = store->ctx()->_conf->rgw_ldap_binddn;
- const string &ldap_searchdn = store->ctx()->_conf->rgw_ldap_searchdn;
- const string &ldap_searchfilter = store->ctx()->_conf->rgw_ldap_searchfilter;
- const string &ldap_dnattr = store->ctx()->_conf->rgw_ldap_dnattr;
- std::string ldap_bindpw = parse_rgw_ldap_bindpw(store->ctx());
+ const string &ldap_uri = driver->ctx()->_conf->rgw_ldap_uri;
+ const string &ldap_binddn = driver->ctx()->_conf->rgw_ldap_binddn;
+ const string &ldap_searchdn = driver->ctx()->_conf->rgw_ldap_searchdn;
+ const string &ldap_searchfilter = driver->ctx()->_conf->rgw_ldap_searchfilter;
+ const string &ldap_dnattr = driver->ctx()->_conf->rgw_ldap_dnattr;
+ std::string ldap_bindpw = parse_rgw_ldap_bindpw(driver->ctx());
ldh.reset(new rgw::LDAPHelper(ldap_uri, ldap_binddn,
ldap_bindpw.c_str(), ldap_searchdn, ldap_searchfilter, ldap_dnattr));
void rgw::AppMain::init_opslog()
{
- rgw_log_usage_init(dpp->get_cct(), store);
+ rgw_log_usage_init(dpp->get_cct(), driver);
OpsLogManifold *olog_manifold = new OpsLogManifold();
if (!g_conf()->rgw_ops_log_socket_path.empty()) {
ops_log_file->start();
olog_manifold->add_sink(ops_log_file);
}
- olog_manifold->add_sink(new OpsLogRados(store));
+ olog_manifold->add_sink(new OpsLogRados(driver));
olog = olog_manifold;
} /* init_opslog */
implicit_tenant_context.reset(new rgw::auth::ImplicitTenants{g_conf()});
g_conf().add_observer(implicit_tenant_context.get());
auto auth_registry =
- rgw::auth::StrategyRegistry::create(dpp->get_cct(), *(implicit_tenant_context.get()), store);
+ rgw::auth::StrategyRegistry::create(dpp->get_cct(), *(implicit_tenant_context.get()), driver);
/* allocate a mime table (you'd never guess that from the name) */
rgw_tools_init(dpp, dpp->get_cct());
std::string uri_prefix;
config->get_val("prefix", "", &uri_prefix);
- RGWProcessEnv env = {store, &rest, olog, port, uri_prefix,
+ RGWProcessEnv env = {driver, &rest, olog, port, uri_prefix,
auth_registry, ratelimiter.get(), lua_background.get()};
fe = new RGWLoadGenFrontend(env, config);
config->get_val("port", 80, &port);
std::string uri_prefix;
config->get_val("prefix", "", &uri_prefix);
- RGWProcessEnv env{store, &rest, olog, port, uri_prefix,
+ RGWProcessEnv env{driver, &rest, olog, port, uri_prefix,
auth_registry, ratelimiter.get(), lua_background.get()};
fe = new RGWAsioFrontend(env, config, *(sched_ctx.get()));
}
else if (framework == "rgw-nfs") {
int port = 80;
- RGWProcessEnv env = { store, &rest, olog, port };
+ RGWProcessEnv env = { driver, &rest, olog, port };
fe = new RGWLibFrontend(env, config);
if (rgwlib) {
rgwlib->set_fe(static_cast<RGWLibFrontend*>(fe));
}
std::string daemon_type = (nfs) ? "rgw-nfs" : "rgw";
- r = store->register_to_service_map(dpp, daemon_type, service_map_meta);
+ r = driver->register_to_service_map(dpp, daemon_type, service_map_meta);
if (r < 0) {
derr << "ERROR: failed to register to service map: " << cpp_strerror(-r) << dendl;
/* ignore error */
}
- if (store->get_name() == "rados") {
+ if (driver->get_name() == "rados") {
// add a watcher to respond to realm configuration changes
- pusher = std::make_unique<RGWPeriodPusher>(dpp, store, null_yield);
+ pusher = std::make_unique<RGWPeriodPusher>(dpp, driver, null_yield);
fe_pauser = std::make_unique<RGWFrontendPauser>(fes, *(implicit_tenant_context.get()), pusher.get());
rgw_pauser = std::make_unique<RGWPauser>();
rgw_pauser->add_pauser(fe_pauser.get());
if (lua_background) {
rgw_pauser->add_pauser(lua_background.get());
}
- reloader = std::make_unique<RGWRealmReloader>(store, service_map_meta, rgw_pauser.get());
+ reloader = std::make_unique<RGWRealmReloader>(driver, service_map_meta, rgw_pauser.get());
realm_watcher = std::make_unique<RGWRealmWatcher>(dpp, g_ceph_context,
- static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->get_realm());
+ static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->get_realm());
realm_watcher->add_watcher(RGWRealmNotify::Reload, *reloader);
realm_watcher->add_watcher(RGWRealmNotify::ZonesNeedPeriod, *pusher.get());
}
const auto &luarocks_path =
g_conf().get_val<std::string>("rgw_luarocks_location");
if (luarocks_path.empty()) {
- store->set_luarocks_path("");
+ driver->set_luarocks_path("");
} else {
- store->set_luarocks_path(luarocks_path + "/" + g_conf()->name.to_str());
+ driver->set_luarocks_path(luarocks_path + "/" + g_conf()->name.to_str());
}
#ifdef WITH_RADOSGW_LUA_PACKAGES
rgw::lua::packages_t failed_packages;
std::string output;
- r = rgw::lua::install_packages(dpp, store, null_yield, failed_packages,
+ r = rgw::lua::install_packages(dpp, driver, null_yield, failed_packages,
output);
if (r < 0) {
dout(1) << "WARNING: failed to install lua packages from allowlist"
}
#endif
- if (store->get_name() == "rados") { /* Supported for only RadosStore */
+ if (driver->get_name() == "rados") { /* Supported for only RadosStore */
lua_background = std::make_unique<
- rgw::lua::Background>(store, dpp->get_cct(), store->get_luarocks_path());
+ rgw::lua::Background>(driver, dpp->get_cct(), driver->get_luarocks_path());
lua_background->start();
}
} /* init_lua */
void rgw::AppMain::shutdown(std::function<void(void)> finalize_async_signals)
{
- if (store->get_name() == "rados") {
+ if (driver->get_name() == "rados") {
reloader.reset(); // stop the realm reloader
}
lua_background->shutdown();
}
- StoreManager::close_storage(store);
+ DriverManager::close_storage(driver);
rgw_tools_cleanup();
rgw_shutdown_resolver();
// don't impose a limit on the body, since we read it in pieces
static constexpr size_t body_limit = std::numeric_limits<size_t>::max();
- auto cct = env.store->ctx();
+ auto cct = env.driver->ctx();
// read messages from the stream until eof
for (;;) {
}
// process the request
- RGWRequest req{env.store->get_new_req_id()};
+ RGWRequest req{env.driver->get_new_req_id()};
auto& socket = stream.lowest_layer();
const auto& remote_endpoint = socket.remote_endpoint(ec);
string user = "-";
const auto started = ceph::coarse_real_clock::now();
ceph::coarse_real_clock::duration latency{};
- process_request(env.store, env.rest, &req, env.uri_prefix,
+ process_request(env.driver, env.rest, &req, env.uri_prefix,
*env.auth_registry, &client, env.olog, y,
scheduler, &user, &latency,
env.ratelimiting->get_active(),
std::vector<std::thread> threads;
std::atomic<bool> going_down{false};
- CephContext* ctx() const { return env.store->ctx(); }
+ CephContext* ctx() const { return env.driver->ctx(); }
std::optional<dmc::ClientCounters> client_counters;
std::unique_ptr<dmc::ClientConfig> client_config;
void accept(Listener& listener, boost::system::error_code ec);
AsioFrontend(const RGWProcessEnv& env, RGWFrontendConfig* conf,
dmc::SchedulerCtx& sched_ctx)
: env(env), conf(conf), pause_mutex(context.get_executor()),
- lua_manager(env.store->get_lua_manager())
+ lua_manager(env.driver->get_lua_manager())
{
auto sched_t = dmc::get_scheduler_t(ctx());
switch(sched_t){
void stop();
void join();
void pause();
- void unpause(rgw::sal::Store* store, rgw_auth_registry_ptr_t);
+ void unpause(rgw::sal::Driver* driver, rgw_auth_registry_ptr_t);
};
unsigned short parse_port(const char *input, boost::system::error_code& ec)
return -EINVAL;
}
- int r = env.store->get_config_key_val(name, pbl);
+ int r = env.driver->get_config_key_val(name, pbl);
if (r < 0) {
lderr(ctx()) << type << " was not found: " << name << dendl;
return r;
key_is_cert = true;
}
- ExpandMetaVar emv(env.store->get_zone());
+ ExpandMetaVar emv(env.driver->get_zone());
cert = emv.process_str(*cert);
key = emv.process_str(*key);
}
}
-void AsioFrontend::unpause(rgw::sal::Store* const store,
+void AsioFrontend::unpause(rgw::sal::Driver* const driver,
rgw_auth_registry_ptr_t auth_registry)
{
- env.store = store;
+ env.driver = driver;
env.auth_registry = std::move(auth_registry);
- lua_manager = store->get_lua_manager();
+ lua_manager = driver->get_lua_manager();
// unpause to unblock connections
pause_mutex.unlock();
}
void RGWAsioFrontend::unpause_with_new_config(
- rgw::sal::Store* const store,
+ rgw::sal::Driver* const driver,
rgw_auth_registry_ptr_t auth_registry
) {
- impl->unpause(store, std::move(auth_registry));
+ impl->unpause(driver, std::move(auth_registry));
}
void join() override;
void pause_for_new_config() override;
- void unpause_with_new_config(rgw::sal::Store* store,
+ void unpause_with_new_config(rgw::sal::Driver* driver,
rgw_auth_registry_ptr_t auth_registry) override;
};
const string& display_name,
RGWUserInfo& user_info) const /* out */
{
- std::unique_ptr<rgw::sal::User> user = store->get_user(acct_user);
+ std::unique_ptr<rgw::sal::User> user = driver->get_user(acct_user);
user->get_info().display_name = display_name;
user->get_info().type = TYPE_WEB;
user->get_info().max_buckets =
federated_user.tenant = role_tenant;
federated_user.ns = "oidc";
- std::unique_ptr<rgw::sal::User> user = store->get_user(federated_user);
+ std::unique_ptr<rgw::sal::User> user = driver->get_user(federated_user);
//Check in oidc namespace
if (user->load_user(dpp, null_yield) >= 0) {
new_acct_user.tenant = new_acct_user.id;
}
- std::unique_ptr<rgw::sal::User> user = store->get_user(new_acct_user);
+ std::unique_ptr<rgw::sal::User> user = driver->get_user(new_acct_user);
user->get_info().display_name = info.acct_name;
if (info.acct_type) {
//ldap/keystone for s3 users
; /* suppress lookup for id used by "other" protocol */
else if (acct_user.tenant.empty()) {
const rgw_user tenanted_uid(acct_user.id, acct_user.id);
- user = store->get_user(tenanted_uid);
+ user = driver->get_user(tenanted_uid);
if (user->load_user(dpp, null_yield) >= 0) {
/* Succeeded. */
}
}
- user = store->get_user(acct_user);
+ user = driver->get_user(acct_user);
if (split_mode && implicit_tenant)
; /* suppress lookup for id used by "other" protocol */
std::string user_name;
protected:
CephContext* const cct;
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
std::string role_session;
std::string role_tenant;
std::unordered_multimap<std::string, std::string> token_claims;
RGWUserInfo& user_info) const; /* out */
public:
WebIdentityApplier( CephContext* const cct,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
const std::string& role_session,
const std::string& role_tenant,
const std::unordered_multimap<std::string, std::string>& token_claims,
boost::optional<std::multimap<std::string,std::string>> role_tags,
boost::optional<std::set<std::pair<std::string, std::string>>> principal_tags)
: cct(cct),
- store(store),
+ driver(driver),
role_session(role_session),
role_tenant(role_tenant),
token_claims(token_claims),
CephContext* const cct;
/* Read-write is intensional here due to RGWUserInfo creation process. */
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
/* Supplemental strategy for extracting permissions from ACLs. Its results
* will be combined (ORed) with a default strategy that is responsible for
public:
RemoteApplier(CephContext* const cct,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
acl_strategy_t&& extra_acl_strategy,
const AuthInfo& info,
rgw::auth::ImplicitTenants& implicit_tenant_context,
rgw::auth::ImplicitTenants::implicit_tenant_flag_bits implicit_tenant_bit)
: cct(cct),
- store(store),
+ driver(driver),
extra_acl_strategy(std::move(extra_acl_strategy)),
info(info),
implicit_tenant_context(implicit_tenant_context),
template <typename T>
class ThirdPartyAccountApplier : public DecoratedApplier<T> {
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
const rgw_user acct_user_override;
public:
static const rgw_user UNKNOWN_ACCT;
template <typename U>
- ThirdPartyAccountApplier(rgw::sal::Store* store,
+ ThirdPartyAccountApplier(rgw::sal::Driver* driver,
const rgw_user &acct_user_override,
U&& decoratee)
: DecoratedApplier<T>(std::move(decoratee)),
- store(store),
+ driver(driver),
acct_user_override(acct_user_override) {
}
if (acct_user_override.tenant.empty()) {
const rgw_user tenanted_uid(acct_user_override.id, acct_user_override.id);
- user = store->get_user(tenanted_uid);
+ user = driver->get_user(tenanted_uid);
if (user->load_user(dpp, null_yield) >= 0) {
user_info = user->get_info();
}
}
- user = store->get_user(acct_user_override);
+ user = driver->get_user(acct_user_override);
const int ret = user->load_user(dpp, null_yield);
if (ret < 0) {
/* We aren't trying to recover from ENOENT here. It's supposed that creating
}
template <typename T> static inline
-ThirdPartyAccountApplier<T> add_3rdparty(rgw::sal::Store* store,
+ThirdPartyAccountApplier<T> add_3rdparty(rgw::sal::Driver* driver,
const rgw_user &acct_user_override,
T&& t) {
- return ThirdPartyAccountApplier<T>(store, acct_user_override,
+ return ThirdPartyAccountApplier<T>(driver, acct_user_override,
std::forward<T>(t));
}
template <typename T>
class SysReqApplier : public DecoratedApplier<T> {
CephContext* const cct;
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
const RGWHTTPArgs& args;
mutable boost::tribool is_system;
public:
template <typename U>
SysReqApplier(CephContext* const cct,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
const req_state* const s,
U&& decoratee)
: DecoratedApplier<T>(std::forward<T>(decoratee)),
cct(cct),
- store(store),
+ driver(driver),
args(s->info.args),
is_system(boost::logic::indeterminate) {
}
/* We aren't writing directly to user_info for consistency and security
* reasons. rgw_get_user_info_by_uid doesn't trigger the operator=() but
* calls ::decode instead. */
- std::unique_ptr<rgw::sal::User> user = store->get_user(effective_uid);
+ std::unique_ptr<rgw::sal::User> user = driver->get_user(effective_uid);
if (user->load_user(dpp, null_yield) < 0) {
//ldpp_dout(dpp, 0) << "User lookup failed!" << dendl;
throw -EACCES;
template <typename T> static inline
SysReqApplier<T> add_sysreq(CephContext* const cct,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
const req_state* const s,
T&& t) {
- return SysReqApplier<T>(cct, store, s, std::forward<T>(t));
+ return SysReqApplier<T>(cct, driver, s, std::forward<T>(t));
}
} /* namespace auth */
s3_main_strategy_t(CephContext* const cct,
ImplicitTenants& implicit_tenant_context,
- rgw::sal::Store* store)
- : s3_main_strategy_plain(cct, implicit_tenant_context, store),
- s3_main_strategy_boto2(cct, implicit_tenant_context, store) {
+ rgw::sal::Driver* driver)
+ : s3_main_strategy_plain(cct, implicit_tenant_context, driver),
+ s3_main_strategy_boto2(cct, implicit_tenant_context, driver) {
add_engine(Strategy::Control::SUFFICIENT, s3_main_strategy_plain);
add_engine(Strategy::Control::FALLBACK, s3_main_strategy_boto2);
}
public:
StrategyRegistry(CephContext* const cct,
ImplicitTenants& implicit_tenant_context,
- rgw::sal::Store* store)
- : s3_main_strategy(cct, implicit_tenant_context, store),
- s3_post_strategy(cct, implicit_tenant_context, store),
- swift_strategy(cct, implicit_tenant_context, store),
- sts_strategy(cct, implicit_tenant_context, store) {
+ rgw::sal::Driver* driver)
+ : s3_main_strategy(cct, implicit_tenant_context, driver),
+ s3_post_strategy(cct, implicit_tenant_context, driver),
+ swift_strategy(cct, implicit_tenant_context, driver),
+ sts_strategy(cct, implicit_tenant_context, driver) {
}
const s3_main_strategy_t& get_s3_main() const {
static std::shared_ptr<StrategyRegistry>
create(CephContext* const cct,
ImplicitTenants& implicit_tenant_context,
- rgw::sal::Store* store) {
- return std::make_shared<StrategyRegistry>(cct, implicit_tenant_context, store);
+ rgw::sal::Driver* driver) {
+ return std::make_shared<StrategyRegistry>(cct, implicit_tenant_context, driver);
}
};
public rgw::auth::LocalApplier::Factory,
public rgw::auth::RoleApplier::Factory {
typedef rgw::auth::IdentityApplier::aplptr_t aplptr_t;
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
rgw::auth::ImplicitTenants& implicit_tenant_context;
STSEngine sts_engine;
const req_state* const s,
rgw::auth::RemoteApplier::acl_strategy_t&& acl_alg,
const rgw::auth::RemoteApplier::AuthInfo &info) const override {
- auto apl = rgw::auth::add_sysreq(cct, store, s,
- rgw::auth::RemoteApplier(cct, store, std::move(acl_alg), info,
+ auto apl = rgw::auth::add_sysreq(cct, driver, s,
+ rgw::auth::RemoteApplier(cct, driver, std::move(acl_alg), info,
implicit_tenant_context,
rgw::auth::ImplicitTenants::IMPLICIT_TENANTS_S3));
return aplptr_t(new decltype(apl)(std::move(apl)));
const std::string& subuser,
const std::optional<uint32_t>& perm_mask,
const std::string& access_key_id) const override {
- auto apl = rgw::auth::add_sysreq(cct, store, s,
+ auto apl = rgw::auth::add_sysreq(cct, driver, s,
rgw::auth::LocalApplier(cct, user_info, subuser, perm_mask, access_key_id));
return aplptr_t(new decltype(apl)(std::move(apl)));
}
const req_state* const s,
const rgw::auth::RoleApplier::Role& role,
const rgw::auth::RoleApplier::TokenAttrs& token_attrs) const override {
- auto apl = rgw::auth::add_sysreq(cct, store, s,
+ auto apl = rgw::auth::add_sysreq(cct, driver, s,
rgw::auth::RoleApplier(cct, role, token_attrs));
return aplptr_t(new decltype(apl)(std::move(apl)));
}
public:
STSAuthStrategy(CephContext* const cct,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
rgw::auth::ImplicitTenants& implicit_tenant_context,
AWSEngine::VersionAbstractor* const ver_abstractor)
- : store(store),
+ : driver(driver),
implicit_tenant_context(implicit_tenant_context),
- sts_engine(cct, store, *ver_abstractor,
+ sts_engine(cct, driver, *ver_abstractor,
static_cast<rgw::auth::LocalApplier::Factory*>(this),
static_cast<rgw::auth::RemoteApplier::Factory*>(this),
static_cast<rgw::auth::RoleApplier::Factory*>(this)) {
class ExternalAuthStrategy : public rgw::auth::Strategy,
public rgw::auth::RemoteApplier::Factory {
typedef rgw::auth::IdentityApplier::aplptr_t aplptr_t;
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
rgw::auth::ImplicitTenants& implicit_tenant_context;
using keystone_config_t = rgw::keystone::CephCtxConfig;
const req_state* const s,
rgw::auth::RemoteApplier::acl_strategy_t&& acl_alg,
const rgw::auth::RemoteApplier::AuthInfo &info) const override {
- auto apl = rgw::auth::add_sysreq(cct, store, s,
- rgw::auth::RemoteApplier(cct, store, std::move(acl_alg), info,
+ auto apl = rgw::auth::add_sysreq(cct, driver, s,
+ rgw::auth::RemoteApplier(cct, driver, std::move(acl_alg), info,
implicit_tenant_context,
rgw::auth::ImplicitTenants::IMPLICIT_TENANTS_S3));
/* TODO(rzarzynski): replace with static_ptr. */
public:
ExternalAuthStrategy(CephContext* const cct,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
rgw::auth::ImplicitTenants& implicit_tenant_context,
AWSEngine::VersionAbstractor* const ver_abstractor)
- : store(store),
+ : driver(driver),
implicit_tenant_context(implicit_tenant_context),
- ldap_engine(cct, store, *ver_abstractor,
+ ldap_engine(cct, driver, *ver_abstractor,
static_cast<rgw::auth::RemoteApplier::Factory*>(this)) {
if (cct->_conf->rgw_s3_auth_use_keystone &&
AbstractorT>::value,
"AbstractorT must be a subclass of rgw::auth::s3::VersionAbstractor");
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
AbstractorT ver_abstractor;
S3AnonymousEngine anonymous_engine;
const std::string& subuser,
const std::optional<uint32_t>& perm_mask,
const std::string& access_key_id) const override {
- auto apl = rgw::auth::add_sysreq(cct, store, s,
+ auto apl = rgw::auth::add_sysreq(cct, driver, s,
rgw::auth::LocalApplier(cct, user_info, subuser, perm_mask, access_key_id));
/* TODO(rzarzynski): replace with static_ptr. */
return aplptr_t(new decltype(apl)(std::move(apl)));
AWSAuthStrategy(CephContext* const cct,
rgw::auth::ImplicitTenants& implicit_tenant_context,
- rgw::sal::Store* store)
- : store(store),
+ rgw::sal::Driver* driver)
+ : driver(driver),
ver_abstractor(cct),
anonymous_engine(cct,
static_cast<rgw::auth::LocalApplier::Factory*>(this)),
- external_engines(cct, store, implicit_tenant_context, &ver_abstractor),
- sts_engine(cct, store, implicit_tenant_context, &ver_abstractor),
- local_engine(cct, store, ver_abstractor,
+ external_engines(cct, driver, implicit_tenant_context, &ver_abstractor),
+ sts_engine(cct, driver, implicit_tenant_context, &ver_abstractor),
+ local_engine(cct, driver, ver_abstractor,
static_cast<rgw::auth::LocalApplier::Factory*>(this)) {
/* The anonymous auth. */
if (AllowAnonAccessT) {
return false;
RGWRMdirCheck req(fs->get_context(),
- g_rgwlib->get_store()->get_user(fs->get_user()->user_id),
+ g_rgwlib->get_driver()->get_user(fs->get_user()->user_id),
this);
int rc = g_rgwlib->get_fe()->execute_req(&req);
if (! rc) {
}
if (is_root()) {
- RGWListBucketsRequest req(cct, g_rgwlib->get_store()->get_user(fs->get_user()->user_id),
+ RGWListBucketsRequest req(cct, g_rgwlib->get_driver()->get_user(fs->get_user()->user_id),
this, rcb, cb_arg, offset);
rc = g_rgwlib->get_fe()->execute_req(&req);
if (! rc) {
*eof = req.eof();
}
} else {
- RGWReaddirRequest req(cct, g_rgwlib->get_store()->get_user(fs->get_user()->user_id),
+ RGWReaddirRequest req(cct, g_rgwlib->get_driver()->get_user(fs->get_user()->user_id),
this, rcb, cb_arg, offset);
rc = g_rgwlib->get_fe()->execute_req(&req);
if (! rc) {
/* start */
std::string object_name = relative_object_name();
f->write_req =
- new RGWWriteRequest(g_rgwlib->get_store(),
- g_rgwlib->get_store()->get_user(fs->get_user()->user_id),
+ new RGWWriteRequest(g_rgwlib->get_driver(),
+ g_rgwlib->get_driver()->get_user(fs->get_user()->user_id),
this, bucket_name(), object_name);
rc = g_rgwlib->get_fe()->start_req(f->write_req);
if (rc < 0) {
state->object->set_bucket(state->bucket.get());
auto compression_type =
- get_store()->get_compression_type(state->bucket->get_placement_rule());
+ get_driver()->get_compression_type(state->bucket->get_placement_rule());
/* not obviously supportable */
ceph_assert(! dlo_manifest);
version_id = state->object->get_instance();
}
}
- processor = get_store()->get_atomic_writer(this, state->yield, state->object->clone(),
+ processor = get_driver()->get_atomic_writer(this, state->yield, state->object->clone(),
state->bucket_owner.get_id(),
&state->dest_placement, 0, state->req_id);
sec_key, "/");
ceph_assert(new_fs);
- const DoutPrefix dp(g_rgwlib->get_store()->ctx(), dout_subsys, "rgw mount: ");
- rc = new_fs->authorize(&dp, g_rgwlib->get_store());
+ const DoutPrefix dp(g_rgwlib->get_driver()->ctx(), dout_subsys, "rgw mount: ");
+ rc = new_fs->authorize(&dp, g_rgwlib->get_driver());
if (rc != 0) {
delete new_fs;
return -EINVAL;
ceph_assert(new_fs); /* should we be using ceph_assert? */
- const DoutPrefix dp(g_rgwlib->get_store()->ctx(), dout_subsys, "rgw mount2: ");
- rc = new_fs->authorize(&dp, g_rgwlib->get_store());
+ const DoutPrefix dp(g_rgwlib->get_driver()->ctx(), dout_subsys, "rgw mount2: ");
+ rc = new_fs->authorize(&dp, g_rgwlib->get_driver());
if (rc != 0) {
delete new_fs;
return -EINVAL;
struct rados_cluster_stat_t stats;
RGWGetClusterStatReq req(fs->get_context(),
- g_rgwlib->get_store()->get_user(fs->get_user()->user_id),
+ g_rgwlib->get_driver()->get_user(fs->get_user()->user_id),
stats);
int rc = g_rgwlib->get_fe()->execute_req(&req);
if (rc < 0) {
}
std::string oname = rgw_fh->relative_object_name();
- RGWPutObjRequest req(cct, g_rgwlib->get_store()->get_user(fs->get_user()->user_id),
+ RGWPutObjRequest req(cct, g_rgwlib->get_driver()->get_user(fs->get_user()->user_id),
rgw_fh->bucket_name(), oname, bl);
int rc = g_rgwlib->get_fe()->execute_req(&req);
(void) fh_lru.unref(fh, cohort::lru::FLAG_NONE);
}
- int authorize(const DoutPrefixProvider *dpp, rgw::sal::Store* store) {
- int ret = store->get_user_by_access_key(dpp, key.id, null_yield, &user);
+ int authorize(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver) {
+ int ret = driver->get_user_by_access_key(dpp, key.id, null_yield, &user);
if (ret == 0) {
RGWAccessKey* k = user->get_info().get_key(key.id);
if (!k || (k->key != key.key))
token = std::string("");
}
if (token.valid() && (ldh->auth(token.id, token.key) == 0)) {
    /* try to store user if it doesn't already exist */
if (user->load_user(dpp, null_yield) < 0) {
int ret = user->store_user(dpp, null_yield, true);
if (ret < 0) {
lsubdout(get_context(), rgw, 10)
        << "NOTICE: failed to store new user's info: ret=" << ret
<< dendl;
}
}
RGWUserInfo* get_user() { return &user->get_info(); }
void update_user(const DoutPrefixProvider *dpp) {
- (void) g_rgwlib->get_store()->get_user_by_access_key(dpp, key.id, null_yield, &user);
+ (void) g_rgwlib->get_driver()->get_user_by_access_key(dpp, key.id, null_yield, &user);
}
void close();
bool only_bucket() override { return false; }
int op_init() override {
- // assign store, s, and dialect_handler
+ // assign driver, s, and dialect_handler
// framework promises to call op_init after parent init
- RGWOp::init(RGWHandler::store, get_state(), this);
+ RGWOp::init(RGWHandler::driver, get_state(), this);
op = this; // assign self as op: REQUIRED
return 0;
}
bool only_bucket() override { return true; }
int op_init() override {
- // assign store, s, and dialect_handler
+ // assign driver, s, and dialect_handler
// framework promises to call op_init after parent init
- RGWOp::init(RGWHandler::store, get_state(), this);
+ RGWOp::init(RGWHandler::driver, get_state(), this);
op = this; // assign self as op: REQUIRED
return 0;
}
bool only_bucket() override { return true; }
int op_init() override {
- // assign store, s, and dialect_handler
+ // assign driver, s, and dialect_handler
// framework promises to call op_init after parent init
- RGWOp::init(RGWHandler::store, get_state(), this);
+ RGWOp::init(RGWHandler::driver, get_state(), this);
op = this; // assign self as op: REQUIRED
return 0;
}
}
int op_init() override {
- // assign store, s, and dialect_handler
+ // assign driver, s, and dialect_handler
// framework promises to call op_init after parent init
- RGWOp::init(RGWHandler::store, get_state(), this);
+ RGWOp::init(RGWHandler::driver, get_state(), this);
op = this; // assign self as op: REQUIRED
return 0;
}
bool only_bucket() override { return true; }
int op_init() override {
- // assign store, s, and dialect_handler
+ // assign driver, s, and dialect_handler
// framework promises to call op_init after parent init
- RGWOp::init(RGWHandler::store, get_state(), this);
+ RGWOp::init(RGWHandler::driver, get_state(), this);
op = this; // assign self as op: REQUIRED
return 0;
}
bool only_bucket() override { return true; }
int op_init() override {
- // assign store, s, and dialect_handler
+ // assign driver, s, and dialect_handler
// framework promises to call op_init after parent init
- RGWOp::init(RGWHandler::store, get_state(), this);
+ RGWOp::init(RGWHandler::driver, get_state(), this);
op = this; // assign self as op: REQUIRED
int rc = valid_s3_object_name(obj_name);
bool only_bucket() override { return false; }
int op_init() override {
- // assign store, s, and dialect_handler
+ // assign driver, s, and dialect_handler
// framework promises to call op_init after parent init
- RGWOp::init(RGWHandler::store, get_state(), this);
+ RGWOp::init(RGWHandler::driver, get_state(), this);
op = this; // assign self as op: REQUIRED
return 0;
}
bool only_bucket() override { return true; }
int op_init() override {
- // assign store, s, and dialect_handler
+ // assign driver, s, and dialect_handler
// framework promises to call op_init after parent init
- RGWOp::init(RGWHandler::store, get_state(), this);
+ RGWOp::init(RGWHandler::driver, get_state(), this);
op = this; // assign self as op: REQUIRED
return 0;
}
bool only_bucket() override { return false; }
int op_init() override {
- // assign store, s, and dialect_handler
+ // assign driver, s, and dialect_handler
// framework promises to call op_init after parent init
- RGWOp::init(RGWHandler::store, get_state(), this);
+ RGWOp::init(RGWHandler::driver, get_state(), this);
op = this; // assign self as op: REQUIRED
return 0;
}
bool only_bucket() override { return false; }
int op_init() override {
- // assign store, s, and dialect_handler
+ // assign driver, s, and dialect_handler
// framework promises to call op_init after parent init
- RGWOp::init(RGWHandler::store, get_state(), this);
+ RGWOp::init(RGWHandler::driver, get_state(), this);
op = this; // assign self as op: REQUIRED
return 0;
}
bool only_bucket() override { return true; }
int op_init() override {
- // assign store, s, and dialect_handler
+ // assign driver, s, and dialect_handler
// framework promises to call op_init after parent init
- RGWOp::init(RGWHandler::store, get_state(), this);
+ RGWOp::init(RGWHandler::driver, get_state(), this);
op = this; // assign self as op: REQUIRED
return 0;
}
size_t bytes_written;
bool eio;
- RGWWriteRequest(rgw::sal::Store* store,
+ RGWWriteRequest(rgw::sal::Driver* driver,
std::unique_ptr<rgw::sal::User> _user,
RGWFileHandle* _fh, const std::string& _bname,
const std::string& _oname)
- : RGWLibContinuedReq(store->ctx(), std::move(_user)),
+ : RGWLibContinuedReq(driver->ctx(), std::move(_user)),
bucket_name(_bname), obj_name(_oname),
rgw_fh(_fh), filter(nullptr), timer_id(0), real_ofs(0),
bytes_written(0), eio(false) {
bool only_bucket() override { return true; }
int op_init() override {
- // assign store, s, and dialect_handler
+ // assign driver, s, and dialect_handler
// framework promises to call op_init after parent init
- RGWOp::init(RGWHandler::store, get_state(), this);
+ RGWOp::init(RGWHandler::driver, get_state(), this);
op = this; // assign self as op: REQUIRED
return 0;
}
bool only_bucket() override { return true; }
int op_init() override {
- // assign store, s, and dialect_handler
+ // assign driver, s, and dialect_handler
// framework promises to call op_init after parent init
- RGWOp::init(RGWHandler::store, get_state(), this);
+ RGWOp::init(RGWHandler::driver, get_state(), this);
op = this; // assign self as op: REQUIRED
return 0;
virtual bool only_bucket() { return false; }
virtual int op_init() {
- // assign store, s, and dialect_handler
+ // assign driver, s, and dialect_handler
// framework promises to call op_init after parent init
- RGWOp::init(RGWHandler::store, get_state(), this);
+ RGWOp::init(RGWHandler::driver, get_state(), this);
op = this; // assign self as op: REQUIRED
return 0;
}
bool only_bucket() override { return false; }
int op_init() override {
- // assign store, s, and dialect_handler
+ // assign driver, s, and dialect_handler
// framework promises to call op_init after parent init
- RGWOp::init(RGWHandler::store, get_state(), this);
+ RGWOp::init(RGWHandler::driver, get_state(), this);
op = this; // assign self as op: REQUIRED
return 0;
}
virtual bool only_bucket() { return false; }
virtual int op_init() {
- // assign store, s, and dialect_handler
+ // assign driver, s, and dialect_handler
// framework promises to call op_init after parent init
- RGWOp::init(RGWHandler::store, get_state(), this);
+ RGWOp::init(RGWHandler::driver, get_state(), this);
op = this; // assign self as op: REQUIRED
return 0;
}
}
int op_init() override {
- // assign store, s, and dialect_handler
+ // assign driver, s, and dialect_handler
// framework promises to call op_init after parent init
- RGWOp::init(RGWHandler::store, get_state(), this);
+ RGWOp::init(RGWHandler::driver, get_state(), this);
op = this; // assign self as op: REQUIRED
return 0;
}
virtual void join() = 0;
virtual void pause_for_new_config() = 0;
- virtual void unpause_with_new_config(rgw::sal::Store* store,
+ virtual void unpause_with_new_config(rgw::sal::Driver* driver,
rgw_auth_registry_ptr_t auth_registry) = 0;
};
pprocess->pause();
}
- void unpause_with_new_config(rgw::sal::Store* const store,
+ void unpause_with_new_config(rgw::sal::Driver* const driver,
rgw_auth_registry_ptr_t auth_registry) override {
- env.store = store;
+ env.driver = driver;
env.auth_registry = auth_registry;
- pprocess->unpause_with_new_config(store, std::move(auth_registry));
+ pprocess->unpause_with_new_config(driver, std::move(auth_registry));
}
}; /* RGWProcessFrontend */
: RGWProcessFrontend(pe, _conf) {}
CephContext *get_cct() const {
- return env.store->ctx();
+ return env.driver->ctx();
}
unsigned get_subsys() const
}
rgw_user uid(uid_str);
- std::unique_ptr<rgw::sal::User> user = env.store->get_user(uid);
+ std::unique_ptr<rgw::sal::User> user = env.driver->get_user(uid);
int ret = user->load_user(this, null_yield);
if (ret < 0) {
if (pauser)
pauser->pause();
}
- void resume(rgw::sal::Store* store) override {
+ void resume(rgw::sal::Driver* driver) override {
/* Initialize the registry of auth strategies which will coordinate
* the dynamic reconfiguration. */
auto auth_registry = \
- rgw::auth::StrategyRegistry::create(g_ceph_context, implicit_tenants, store);
+ rgw::auth::StrategyRegistry::create(g_ceph_context, implicit_tenants, driver);
for (auto frontend : frontends)
- frontend->unpause_with_new_config(store, auth_registry);
+ frontend->unpause_with_new_config(driver, auth_registry);
if (pauser)
- pauser->resume(store);
+ pauser->resume(driver);
}
};
return NULL;
}
-void RGWLC::initialize(CephContext *_cct, rgw::sal::Store* _store) {
+void RGWLC::initialize(CephContext *_cct, rgw::sal::Driver* _driver) {
cct = _cct;
- store = _store;
- sal_lc = store->get_lifecycle();
+ driver = _driver;
+ sal_lc = driver->get_lifecycle();
max_objs = cct->_conf->rgw_lc_max_objs;
if (max_objs > HASH_PRIME)
max_objs = HASH_PRIME;
return (timediff >= cmp);
}
-static bool pass_object_lock_check(rgw::sal::Store* store, rgw::sal::Object* obj, const DoutPrefixProvider *dpp)
+static bool pass_object_lock_check(rgw::sal::Driver* driver, rgw::sal::Object* obj, const DoutPrefixProvider *dpp)
{
if (!obj->get_bucket()->get_info().obj_lock_enabled()) {
return true;
}
class LCObjsLister {
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
rgw::sal::Bucket* bucket;
rgw::sal::Bucket::ListParams list_params;
rgw::sal::Bucket::ListResults list_results;
int64_t delay_ms;
public:
- LCObjsLister(rgw::sal::Store* _store, rgw::sal::Bucket* _bucket) :
- store(_store), bucket(_bucket) {
+ LCObjsLister(rgw::sal::Driver* _driver, rgw::sal::Bucket* _bucket) :
+ driver(_driver), bucket(_bucket) {
list_params.list_versions = bucket->versioned();
list_params.allow_unordered = true;
- delay_ms = store->ctx()->_conf.get_val<int64_t>("rgw_lc_thread_delay");
+ delay_ms = driver->ctx()->_conf.get_val<int64_t>("rgw_lc_thread_delay");
}
void set_prefix(const string& p) {
using LCWorker = RGWLC::LCWorker;
lc_op op;
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
LCWorker* worker;
rgw::sal::Bucket* bucket;
LCObjsLister& ol;
- op_env(lc_op& _op, rgw::sal::Store* _store, LCWorker* _worker,
+ op_env(lc_op& _op, rgw::sal::Driver* _driver, LCWorker* _worker,
rgw::sal::Bucket* _bucket, LCObjsLister& _ol)
- : op(_op), store(_store), worker(_worker), bucket(_bucket),
+ : op(_op), driver(_driver), worker(_worker), bucket(_bucket),
ol(_ol) {}
}; /* op_env */
boost::optional<std::string> next_key_name;
ceph::real_time effective_mtime;
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
rgw::sal::Bucket* bucket;
lc_op& op; // ok--refers to expanded env.op
LCObjsLister& ol;
boost::optional<std::string> next_key_name,
ceph::real_time effective_mtime,
const DoutPrefixProvider *dpp, WorkQ* wq)
- : cct(env.store->ctx()), env(env), o(o), next_key_name(next_key_name),
+ : cct(env.driver->ctx()), env(env), o(o), next_key_name(next_key_name),
effective_mtime(effective_mtime),
- store(env.store), bucket(env.bucket), op(env.op), ol(env.ol),
- rctx(env.store), dpp(dpp), wq(wq)
+ driver(env.driver), bucket(env.bucket), op(env.op), ol(env.ol),
+ rctx(env.driver), dpp(dpp), wq(wq)
{
obj = bucket->get_object(o.key);
}
const DoutPrefixProvider *dpp, lc_op_ctx& oc, bool remove_indeed,
rgw::notify::EventType event_type)
{
- auto& store = oc.store;
+ auto& driver = oc.driver;
auto& bucket_info = oc.bucket->get_info();
auto& o = oc.o;
auto obj_key = o.key;
std::unique_ptr<rgw::sal::Bucket> bucket;
std::unique_ptr<rgw::sal::Object> obj;
- ret = store->get_bucket(nullptr, bucket_info, &bucket);
+ ret = driver->get_bucket(nullptr, bucket_info, &bucket);
if (ret < 0) {
return ret;
}
std::unique_ptr<rgw::sal::User> user;
if (! bucket->get_owner()) {
auto& bucket_info = bucket->get_info();
- user = store->get_user(bucket_info.owner);
+ user = driver->get_user(bucket_info.owner);
// forgive me, lord
if (user) {
bucket->set_owner(user.get());
del_op->params.unmod_since = meta.mtime;
del_op->params.marker_version_id = version_id;
- // notification supported only for RADOS store for now
- notify = store->get_notification(dpp, obj.get(), nullptr, event_type,
+ // notification supported only for RADOS driver for now
+ notify = driver->get_notification(dpp, obj.get(), nullptr, event_type,
bucket.get(), lc_id,
const_cast<std::string&>(oc.bucket->get_tenant()),
lc_req_id, null_yield);
if (ret < 0) {
if (ret == (-ENOENT))
return 0;
- ldpp_dout(this, 0) << "ERROR: store->list_objects():" <<dendl;
+ ldpp_dout(this, 0) << "ERROR: driver->list_objects():" <<dendl;
return ret;
}
<< oc.wq->thr_name() << dendl;
return is_expired &&
- pass_object_lock_check(oc.store, oc.obj.get(), dpp);
+ pass_object_lock_check(oc.driver, oc.obj.get(), dpp);
}
int process(lc_op_ctx& oc) {
}
std::string tier_type = "";
- rgw::sal::ZoneGroup& zonegroup = oc.store->get_zone()->get_zonegroup();
+ rgw::sal::ZoneGroup& zonegroup = oc.driver->get_zone()->get_zonegroup();
rgw_placement_rule target_placement;
target_placement.inherit_from(oc.bucket->get_placement_rule());
if (!r && oc.tier->get_tier_type() == "cloud-s3") {
ldpp_dout(oc.dpp, 30) << "Found cloud s3 tier: " << target_placement.storage_class << dendl;
if (!oc.o.is_current() &&
- !pass_object_lock_check(oc.store, oc.obj.get(), oc.dpp)) {
+ !pass_object_lock_check(oc.driver, oc.obj.get(), oc.dpp)) {
/* Skip objects which has object lock enabled. */
ldpp_dout(oc.dpp, 10) << "Object(key:" << oc.o.key << ") is locked. Skipping transition to cloud-s3 tier: " << target_placement.storage_class << dendl;
return 0;
return r;
}
} else {
- if (!oc.store->valid_placement(target_placement)) {
+ if (!oc.driver->valid_placement(target_placement)) {
ldpp_dout(oc.dpp, 0) << "ERROR: non existent dest placement: "
<< target_placement
<< " bucket="<< oc.bucket
return 0;
}
- int ret = store->get_bucket(this, nullptr, bucket_tenant, bucket_name, &bucket, null_yield);
+ int ret = driver->get_bucket(this, nullptr, bucket_tenant, bucket_name, &bucket, null_yield);
if (ret < 0) {
ldpp_dout(this, 0) << "LC:get_bucket for " << bucket_name
<< " failed" << dendl;
}
/* fetch information for zone checks */
- rgw::sal::Zone* zone = store->get_zone();
+ rgw::sal::Zone* zone = driver->get_zone();
auto pf = [](RGWLC::LCWorker* wk, WorkQ* wq, WorkItem& wi) {
auto wt =
pre_marker = next_marker;
}
- LCObjsLister ol(store, bucket.get());
+ LCObjsLister ol(driver, bucket.get());
ol.set_prefix(prefix_iter->first);
if (! zone_check(op, zone)) {
if (ret < 0) {
if (ret == (-ENOENT))
return 0;
- ldpp_dout(this, 0) << "ERROR: store->list_objects():" << dendl;
+ ldpp_dout(this, 0) << "ERROR: driver->list_objects():" << dendl;
return ret;
}
- op_env oenv(op, store, worker, bucket.get(), ol);
+ op_env oenv(op, driver, worker, bucket.get(), ol);
LCOpRule orule(oenv);
orule.build(); // why can't ctor do it?
rgw_bucket_dir_entry* o{nullptr};
* do need the entry {pro,epi}logue which update the state entry
* for this bucket) */
auto bucket_lc_key = get_bucket_lc_key(optional_bucket->get_key());
- auto index = get_lc_index(store->ctx(), bucket_lc_key);
+ auto index = get_lc_index(driver->ctx(), bucket_lc_key);
ret = process_bucket(index, max_secs, worker, bucket_lc_key, once);
return ret;
} else {
template<typename F>
static int guard_lc_modify(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
rgw::sal::Lifecycle* sal_lc,
const rgw_bucket& bucket, const string& cookie,
const F& f) {
- CephContext *cct = store->ctx();
+ CephContext *cct = driver->ctx();
auto bucket_lc_key = get_bucket_lc_key(bucket);
string oid;
rgw_bucket& b = bucket->get_key();
- ret = guard_lc_modify(this, store, sal_lc.get(), b, cookie,
+ ret = guard_lc_modify(this, driver, sal_lc.get(), b, cookie,
[&](rgw::sal::Lifecycle* sal_lc, const string& oid,
rgw::sal::Lifecycle::LCEntry& entry) {
return sal_lc->set_entry(oid, entry);
}
}
- ret = guard_lc_modify(this, store, sal_lc.get(), b, cookie,
+ ret = guard_lc_modify(this, driver, sal_lc.get(), b, cookie,
[&](rgw::sal::Lifecycle* sal_lc, const string& oid,
rgw::sal::Lifecycle::LCEntry& entry) {
return sal_lc->rm_entry(oid, entry);
namespace rgw::lc {
int fix_lc_shard_entry(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
rgw::sal::Lifecycle* sal_lc,
rgw::sal::Bucket* bucket)
{
auto bucket_lc_key = get_bucket_lc_key(bucket->get_key());
std::string lc_oid;
- get_lc_oid(store->ctx(), bucket_lc_key, &lc_oid);
+ get_lc_oid(driver->ctx(), bucket_lc_key, &lc_oid);
std::unique_ptr<rgw::sal::Lifecycle::LCEntry> entry;
// There are multiple cases we need to encounter here
<< " creating " << dendl;
// TODO: we have too many ppl making cookies like this!
char cookie_buf[COOKIE_LEN + 1];
- gen_rand_alphanumeric(store->ctx(), cookie_buf, sizeof(cookie_buf) - 1);
+ gen_rand_alphanumeric(driver->ctx(), cookie_buf, sizeof(cookie_buf) - 1);
std::string cookie = cookie_buf;
ret = guard_lc_modify(dpp,
- store, sal_lc, bucket->get_key(), cookie,
+ driver, sal_lc, bucket->get_key(), cookie,
[&lc_oid](rgw::sal::Lifecycle* slc,
const string& oid,
rgw::sal::Lifecycle::LCEntry& entry) {
class RGWLC : public DoutPrefixProvider {
CephContext *cct;
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
std::unique_ptr<rgw::sal::Lifecycle> sal_lc;
int max_objs{0};
std::string *obj_names{nullptr};
std::vector<std::unique_ptr<RGWLC::LCWorker>> workers;
- RGWLC() : cct(nullptr), store(nullptr) {}
+ RGWLC() : cct(nullptr), driver(nullptr) {}
virtual ~RGWLC() override;
- void initialize(CephContext *_cct, rgw::sal::Store* _store);
+ void initialize(CephContext *_cct, rgw::sal::Driver* _driver);
void finalize();
int process(LCWorker* worker,
namespace rgw::lc {
int fix_lc_shard_entry(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
rgw::sal::Lifecycle* sal_lc,
rgw::sal::Bucket* bucket);
s->cio = io;
/* XXX and -then- stash req_state pointers everywhere they are needed */
- ret = req->init(rgw_env, store, io, s);
+ ret = req->init(rgw_env, driver, io, s);
if (ret < 0) {
ldpp_dout(op, 10) << "failed to initialize request" << dendl;
abort_req(s, op, ret);
rgw_env.set("HTTP_HOST", "");
- int ret = req->init(rgw_env, store, &io_ctx, s);
+ int ret = req->init(rgw_env, driver, &io_ctx, s);
if (ret < 0) {
ldpp_dout(op, 10) << "failed to initialize request" << dendl;
abort_req(s, op, ret);
main.init_http_clients();
main.init_storage();
- if (! main.get_store()) {
+ if (! main.get_driver()) {
mutex.lock();
init_timer.cancel_all_events();
init_timer.shutdown();
return 0;
} /* RGWLib::stop() */
- int RGWLibIO::set_uid(rgw::sal::Store* store, const rgw_user& uid)
+ int RGWLibIO::set_uid(rgw::sal::Driver* driver, const rgw_user& uid)
{
- const DoutPrefix dp(store->ctx(), dout_subsys, "librgw: ");
- std::unique_ptr<rgw::sal::User> user = store->get_user(uid);
+ const DoutPrefix dp(driver->ctx(), dout_subsys, "librgw: ");
+ std::unique_ptr<rgw::sal::User> user = driver->get_user(uid);
/* object exists, but policy is broken */
int ret = user->load_user(&dp, null_yield);
if (ret < 0) {
int RGWLibRequest::read_permissions(RGWOp* op, optional_yield y) {
/* bucket and object ops */
int ret =
- rgw_build_bucket_policies(op, g_rgwlib->get_store(), get_state(), y);
+ rgw_build_bucket_policies(op, g_rgwlib->get_driver(), get_state(), y);
if (ret < 0) {
ldpp_dout(op, 10) << "read_permissions (bucket policy) on "
<< get_state()->bucket << ":"
ret = -EACCES;
} else if (! only_bucket()) {
/* object ops */
- ret = rgw_build_object_policies(op, g_rgwlib->get_store(), get_state(),
+ ret = rgw_build_object_policies(op, g_rgwlib->get_driver(), get_state(),
op->prefetch_data(), y);
if (ret < 0) {
ldpp_dout(op, 10) << "read_permissions (object policy) on"
{}
~RGWLib() {}
- rgw::sal::Store* get_store() { return main.get_store(); }
+ rgw::sal::Driver* get_driver() { return main.get_driver(); }
RGWLibFrontend* get_fe() { return fe; }
return user_info;
}
- int set_uid(rgw::sal::Store* store, const rgw_user& uid);
+ int set_uid(rgw::sal::Driver* driver, const rgw_user& uid);
int write_data(const char *buf, int len);
int read_data(char *buf, int len);
RGWHandler_Lib() {}
~RGWHandler_Lib() override {}
- static int init_from_header(rgw::sal::Store* store,
+ static int init_from_header(rgw::sal::Driver* driver,
req_state *s);
}; /* RGWHandler_Lib */
inline req_state* get_state() { return this->RGWRequest::s; }
RGWLibRequest(CephContext* _cct, std::unique_ptr<rgw::sal::User> _user)
- : RGWRequest(g_rgwlib->get_store()->get_new_req_id()),
+ : RGWRequest(g_rgwlib->get_driver()->get_new_req_id()),
tuser(std::move(_user)), cct(_cct)
{}
using RGWHandler::init;
- int init(const RGWEnv& rgw_env, rgw::sal::Store* _store,
+ int init(const RGWEnv& rgw_env, rgw::sal::Driver* _driver,
RGWLibIO* io, req_state* _s) {
RGWRequest::init_state(_s);
- RGWHandler::init(_store, _s, io);
+ RGWHandler::init(_driver, _s, io);
- get_state()->req_id = store->zone_unique_id(id);
- get_state()->trans_id = store->zone_unique_trans_id(id);
+ get_state()->req_id = driver->zone_unique_id(id);
+ get_state()->trans_id = driver->zone_unique_trans_id(id);
get_state()->bucket_tenant = tuser->get_tenant();
get_state()->set_user(tuser);
int ret = header_init();
if (ret == 0) {
- ret = init_from_header(store, _s);
+ ret = init_from_header(driver, _s);
}
return ret;
}
io_ctx.init(_cct);
RGWRequest::init_state(&rstate);
- RGWHandler::init(g_rgwlib->get_store(), &rstate, &io_ctx);
+ RGWHandler::init(g_rgwlib->get_driver(), &rstate, &io_ctx);
- get_state()->req_id = store->zone_unique_id(id);
- get_state()->trans_id = store->zone_unique_trans_id(id);
+ get_state()->req_id = driver->zone_unique_id(id);
+ get_state()->trans_id = driver->zone_unique_trans_id(id);
ldpp_dout(get_state(), 2) << "initializing for trans_id = "
<< get_state()->trans_id.c_str() << dendl;
}
- inline rgw::sal::Store* get_store() { return store; }
+ inline rgw::sal::Driver* get_driver() { return driver; }
inline RGWLibIO& get_io() { return io_ctx; }
virtual int execute() final { ceph_abort(); }
int content_length, std::atomic<bool>* fail_flag)
{
RGWLoadGenRequest* req =
- new RGWLoadGenRequest(store->get_new_req_id(), method, resource,
+ new RGWLoadGenRequest(driver->get_new_req_id(), method, resource,
content_length, fail_flag);
dout(10) << "allocated request req=" << hex << req << dec << dendl;
req_throttle.get(1);
RGWLoadGenIO real_client_io(&env);
RGWRestfulIO client_io(cct, &real_client_io);
ActiveRateLimiter ratelimit(cct);
- int ret = process_request(store, rest, req, uri_prefix,
+ int ret = process_request(driver, rest, req, uri_prefix,
*auth_registry, &client_io, olog,
null_yield, nullptr, nullptr, nullptr,
ratelimit.get_active(),
/* usage logger */
class UsageLogger : public DoutPrefixProvider {
CephContext *cct;
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
map<rgw_user_bucket, RGWUsageBatch> usage_map;
ceph::mutex lock = ceph::make_mutex("UsageLogger");
int32_t num_entries;
}
public:
- UsageLogger(CephContext *_cct, rgw::sal::Store* _store) : cct(_cct), store(_store), num_entries(0), timer(cct, timer_lock) {
+ UsageLogger(CephContext *_cct, rgw::sal::Driver* _driver) : cct(_cct), driver(_driver), num_entries(0), timer(cct, timer_lock) {
timer.init();
std::lock_guard l{timer_lock};
set_timer();
num_entries = 0;
lock.unlock();
- store->log_usage(this, old_map);
+ driver->log_usage(this, old_map);
}
CephContext *get_cct() const override { return cct; }
static UsageLogger *usage_logger = NULL;
-void rgw_log_usage_init(CephContext *cct, rgw::sal::Store* store)
+void rgw_log_usage_init(CephContext *cct, rgw::sal::Driver* driver)
{
- usage_logger = new UsageLogger(cct, store);
+ usage_logger = new UsageLogger(cct, driver);
}
void rgw_log_usage_finalize()
return 0;
}
-OpsLogRados::OpsLogRados(rgw::sal::Store* const& store): store(store)
+OpsLogRados::OpsLogRados(rgw::sal::Driver* const& driver): driver(driver)
{
}
localtime_r(&t, &bdt);
string oid = render_log_object_name(s->cct->_conf->rgw_log_object_name, &bdt,
entry.bucket_id, entry.bucket);
- if (store->log_op(s, oid, bl) < 0) {
+ if (driver->log_op(s, oid, bl) < 0) {
ldpp_dout(s, 0) << "ERROR: failed to log RADOS RGW ops log entry for txn: " << s->trans_id << dendl;
return -1;
}
};
class OpsLogRados : public OpsLogSink {
- // main()'s Store pointer as a reference, possibly modified by RGWRealmReloader
- rgw::sal::Store* const& store;
+ // main()'s driver pointer as a reference, possibly modified by RGWRealmReloader
+ rgw::sal::Driver* const& driver;
public:
- OpsLogRados(rgw::sal::Store* const& store);
+ OpsLogRados(rgw::sal::Driver* const& driver);
int log(req_state* s, struct rgw_log_entry& entry) override;
};
int rgw_log_op(RGWREST* const rest, struct req_state* s,
const RGWOp* op, OpsLogSink* olog);
-void rgw_log_usage_init(CephContext* cct, rgw::sal::Store* store);
+void rgw_log_usage_init(CephContext* cct, rgw::sal::Driver* driver);
void rgw_log_usage_finalize();
void rgw_format_ops_log_entry(struct rgw_log_entry& entry,
ceph::Formatter *formatter);
namespace bp = boost::process;
-int add_package(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y, const std::string& package_name, bool allow_compilation)
+int add_package(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, optional_yield y, const std::string& package_name, bool allow_compilation)
{
// verify that luarocks can load this package
const auto p = bp::search_path("luarocks");
//replace previous versions of the package
const std::string package_name_no_version = package_name.substr(0, package_name.find(" "));
- ret = remove_package(dpp, store, y, package_name_no_version);
+ ret = remove_package(dpp, driver, y, package_name_no_version);
if (ret < 0) {
return ret;
}
- auto lua_mgr = store->get_lua_manager();
+ auto lua_mgr = driver->get_lua_manager();
return lua_mgr->add_package(dpp, y, package_name);
}
-int remove_package(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y, const std::string& package_name)
+int remove_package(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, optional_yield y, const std::string& package_name)
{
- auto lua_mgr = store->get_lua_manager();
+ auto lua_mgr = driver->get_lua_manager();
return lua_mgr->remove_package(dpp, y, package_name);
}
namespace bp = boost::process;
-int list_packages(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y, packages_t& packages)
+int list_packages(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, optional_yield y, packages_t& packages)
{
- auto lua_mgr = store->get_lua_manager();
+ auto lua_mgr = driver->get_lua_manager();
return lua_mgr->list_packages(dpp, y, packages);
}
-int install_packages(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y, packages_t& failed_packages, std::string& output) {
+int install_packages(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, optional_yield y, packages_t& failed_packages, std::string& output) {
// luarocks directory cleanup
std::error_code ec;
- const auto& luarocks_path = store->get_luarocks_path();
+ const auto& luarocks_path = driver->get_luarocks_path();
if (std::filesystem::remove_all(luarocks_path, ec)
== static_cast<std::uintmax_t>(-1) &&
ec != std::errc::no_such_file_or_directory) {
}
packages_t packages;
- auto ret = list_packages(dpp, store, y, packages);
+ auto ret = list_packages(dpp, driver, y, packages);
if (ret == -ENOENT) {
// allowlist is empty
return 0;
// verify a lua script
bool verify(const std::string& script, std::string& err_msg);
-// store a lua script in a context
+// store a lua script in a context
int write_script(const DoutPrefixProvider *dpp, rgw::sal::LuaManager* manager, const std::string& tenant, optional_yield y, context ctx, const std::string& script);
// read the stored lua script from a context
#ifdef WITH_RADOSGW_LUA_PACKAGES
// add a lua package to the allowlist
-int add_package(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y, const std::string& package_name, bool allow_compilation);
+int add_package(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, optional_yield y, const std::string& package_name, bool allow_compilation);
// remove a lua package from the allowlist
-int remove_package(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y, const std::string& package_name);
+int remove_package(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, optional_yield y, const std::string& package_name);
// list lua packages in the allowlist
-int list_packages(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y, packages_t& packages);
+int list_packages(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, optional_yield y, packages_t& packages);
// install all packages from the allowlist
// return the list of packages that failed to install and the output of the install command
-int install_packages(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y, packages_t& failed_packages, std::string& output);
+int install_packages(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, optional_yield y, packages_t& failed_packages, std::string& output);
#endif
}
return 0;
}
-Background::Background(rgw::sal::Store* store,
+Background::Background(rgw::sal::Driver* driver,
CephContext* cct,
const std::string& luarocks_path,
int execute_interval) :
execute_interval(execute_interval),
dp(cct, dout_subsys, "lua background: "),
- lua_manager(store->get_lua_manager()),
+ lua_manager(driver->get_lua_manager()),
cct(cct),
luarocks_path(luarocks_path) {}
cond.notify_all();
}
-void Background::resume(rgw::sal::Store* store) {
- lua_manager = store->get_lua_manager();
+void Background::resume(rgw::sal::Driver* driver) {
+ lua_manager = driver->get_lua_manager();
paused = false;
cond.notify_all();
}
virtual int read_script();
public:
- Background(rgw::sal::Store* store,
+ Background(rgw::sal::Driver* driver,
CephContext* cct,
const std::string& luarocks_path,
int execute_interval = INIT_EXECUTE_INTERVAL);
}
void pause() override;
- void resume(rgw::sal::Store* _store) override;
+ void resume(rgw::sal::Driver* _driver) override;
};
} //namepsace rgw::lua
}
int execute(
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
RGWREST* rest,
OpsLogSink* olog,
req_state* s,
lua_state_guard lguard(L);
open_standard_libs(L);
- set_package_path(L, store ?
- store->get_luarocks_path() :
+ set_package_path(L, driver ?
+ driver->get_luarocks_path() :
"");
create_debug_action(L, s->cct);
// execute a lua script in the Request context
int execute(
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
RGWREST* rest,
OpsLogSink* olog,
req_state *s,
main.init_http_clients();
main.init_storage();
- if (! main.get_store()) {
+ if (! main.get_driver()) {
mutex.lock();
init_timer.cancel_all_events();
init_timer.shutdown();
void pause() override {
std::for_each(pausers.begin(), pausers.end(), [](Pauser* p){p->pause();});
}
- void resume(rgw::sal::Store* store) override {
- std::for_each(pausers.begin(), pausers.end(), [store](Pauser* p){p->resume(store);});
+ void resume(rgw::sal::Driver* driver) override {
+ std::for_each(pausers.begin(), pausers.end(), [driver](Pauser* p){p->resume(driver);});
}
};
std::unique_ptr<RGWFrontendPauser> fe_pauser;
std::unique_ptr<RGWRealmWatcher> realm_watcher;
std::unique_ptr<RGWPauser> rgw_pauser;
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
DoutPrefixProvider* dpp;
public:
void shutdown(std::function<void(void)> finalize_async_signals
= []() { /* nada */});
- rgw::sal::Store* get_store() {
- return store;
+ rgw::sal::Driver* get_driver() {
+ return driver;
}
rgw::LDAPHelper* get_ldh() {
return mgr;
}
-static inline RGWRESTMgr *rest_filter(rgw::sal::Store* store, int dialect, RGWRESTMgr* orig)
+static inline RGWRESTMgr *rest_filter(rgw::sal::Driver* driver, int dialect, RGWRESTMgr* orig)
{
- RGWSyncModuleInstanceRef sync_module = store->get_sync_module();
+ RGWSyncModuleInstanceRef sync_module = driver->get_sync_module();
if (sync_module) {
return sync_module->get_rest_filter(dialect, orig);
} else {
#define dout_subsys ceph_subsys_rgw
-static rgw::sal::Store* store = NULL;
+static rgw::sal::Driver* driver = NULL;
class StoreDestructor {
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
public:
- explicit StoreDestructor(rgw::sal::Store* _s) : store(_s) {}
+ explicit StoreDestructor(rgw::sal::Driver* _s) : driver(_s) {}
~StoreDestructor() {
- if (store) {
- StoreManager::close_storage(store);
+ if (driver) {
+ DriverManager::close_storage(driver);
}
}
};
common_init_finish(g_ceph_context);
const DoutPrefix dp(cct.get(), dout_subsys, "rgw object expirer: ");
- StoreManager::Config cfg;
+ DriverManager::Config cfg;
cfg.store_name = "rados";
cfg.filter_name = "none";
- store = StoreManager::get_storage(&dp, g_ceph_context, cfg, false, false, false, false, false);
- if (!store) {
+ driver = DriverManager::get_storage(&dp, g_ceph_context, cfg, false, false, false, false, false);
+ if (!driver) {
std::cerr << "couldn't init storage provider" << std::endl;
return EIO;
}
- /* Guard to not forget about closing the rados store. */
- StoreDestructor store_dtor(store);
+ /* Guard to not forget about closing the rados driver. */
+ StoreDestructor store_dtor(driver);
- RGWObjectExpirer objexp(store);
+ RGWObjectExpirer objexp(driver);
objexp.start_processor();
const utime_t interval(g_ceph_context->_conf->rgw_objexp_gc_interval, 0);
*/
int rgw_op_get_bucket_policy_from_attr(const DoutPrefixProvider *dpp,
CephContext *cct,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
RGWBucketInfo& bucket_info,
map<string, bufferlist>& bucket_attrs,
RGWAccessControlPolicy *policy,
return ret;
} else {
ldpp_dout(dpp, 0) << "WARNING: couldn't find acl header for bucket, generating default" << dendl;
- std::unique_ptr<rgw::sal::User> user = store->get_user(bucket_info.owner);
+ std::unique_ptr<rgw::sal::User> user = driver->get_user(bucket_info.owner);
/* object exists, but policy is broken */
int r = user->load_user(dpp, y);
if (r < 0)
static int get_obj_policy_from_attr(const DoutPrefixProvider *dpp,
CephContext *cct,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
RGWBucketInfo& bucket_info,
map<string, bufferlist>& bucket_attrs,
RGWAccessControlPolicy *policy,
} else if (ret == -ENODATA) {
/* object exists, but policy is broken */
ldpp_dout(dpp, 0) << "WARNING: couldn't find acl header for object, generating default" << dendl;
- std::unique_ptr<rgw::sal::User> user = store->get_user(bucket_info.owner);
+ std::unique_ptr<rgw::sal::User> user = driver->get_user(bucket_info.owner);
ret = user->load_user(dpp, y);
if (ret < 0)
return ret;
}
static int read_bucket_policy(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
req_state *s,
RGWBucketInfo& bucket_info,
map<string, bufferlist>& bucket_attrs,
return 0;
}
- int ret = rgw_op_get_bucket_policy_from_attr(dpp, s->cct, store, bucket_info, bucket_attrs, policy, y);
+ int ret = rgw_op_get_bucket_policy_from_attr(dpp, s->cct, driver, bucket_info, bucket_attrs, policy, y);
if (ret == -ENOENT) {
ret = -ERR_NO_SUCH_BUCKET;
}
}
static int read_obj_policy(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
req_state *s,
RGWBucketInfo& bucket_info,
map<string, bufferlist>& bucket_attrs,
}
policy = get_iam_policy_from_attr(s->cct, bucket_attrs, bucket->get_tenant());
- int ret = get_obj_policy_from_attr(dpp, s->cct, store, bucket_info,
+ int ret = get_obj_policy_from_attr(dpp, s->cct, driver, bucket_info,
bucket_attrs, acl, storage_class, object,
s->yield);
if (ret == -ENOENT) {
/* object does not exist checking the bucket's ACL to make sure
that we send a proper error code */
RGWAccessControlPolicy bucket_policy(s->cct);
- ret = rgw_op_get_bucket_policy_from_attr(dpp, s->cct, store, bucket_info, bucket_attrs, &bucket_policy, y);
+ ret = rgw_op_get_bucket_policy_from_attr(dpp, s->cct, driver, bucket_info, bucket_attrs, &bucket_policy, y);
if (ret < 0) {
return ret;
}
* only_bucket: If true, reads the user and bucket ACLs rather than the object ACL.
* Returns: 0 on success, -ERR# otherwise.
*/
-int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::Store* store, req_state* s, optional_yield y)
+int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, req_state* s, optional_yield y)
{
int ret = 0;
/* check if copy source is within the current domain */
if (!s->src_bucket_name.empty()) {
std::unique_ptr<rgw::sal::Bucket> src_bucket;
- ret = store->get_bucket(dpp, nullptr,
+ ret = driver->get_bucket(dpp, nullptr,
rgw_bucket(s->src_tenant_name,
s->src_bucket_name,
s->bucket_instance_id),
&src_bucket, y);
if (ret == 0) {
string& zonegroup = src_bucket->get_info().zonegroup;
- s->local_source = store->get_zone()->get_zonegroup().equals(zonegroup);
+ s->local_source = driver->get_zone()->get_zonegroup().equals(zonegroup);
}
}
/* This is the only place that s->bucket is created. It should never be
* overwritten. */
- ret = store->get_bucket(dpp, s->user.get(), rgw_bucket(rgw_bucket_key(s->bucket_tenant, s->bucket_name, s->bucket_instance_id)), &s->bucket, y);
+ ret = driver->get_bucket(dpp, s->user.get(), rgw_bucket(rgw_bucket_key(s->bucket_tenant, s->bucket_name, s->bucket_instance_id)), &s->bucket, y);
if (ret < 0) {
if (ret != -ENOENT) {
string bucket_log;
s->bucket_mtime = s->bucket->get_modification_time();
s->bucket_attrs = s->bucket->get_attrs();
- ret = read_bucket_policy(dpp, store, s, s->bucket->get_info(),
+ ret = read_bucket_policy(dpp, driver, s, s->bucket->get_info(),
s->bucket->get_attrs(),
s->bucket_acl.get(), s->bucket->get_key(), y);
acct_acl_user = {
s->bucket_owner = s->bucket_acl->get_owner();
std::unique_ptr<rgw::sal::ZoneGroup> zonegroup;
- int r = store->get_zonegroup(s->bucket->get_info().zonegroup, &zonegroup);
+ int r = driver->get_zonegroup(s->bucket->get_info().zonegroup, &zonegroup);
if (!r) {
s->zonegroup_endpoint = zonegroup->get_endpoint();
s->zonegroup_name = zonegroup->get_name();
ret = r;
}
- if (!store->get_zone()->get_zonegroup().equals(s->bucket->get_info().zonegroup)) {
+ if (!driver->get_zone()->get_zonegroup().equals(s->bucket->get_info().zonegroup)) {
ldpp_dout(dpp, 0) << "NOTICE: request for data in a different zonegroup ("
<< s->bucket->get_info().zonegroup << " != "
- << store->get_zone()->get_zonegroup().get_id() << ")" << dendl;
+ << driver->get_zone()->get_zonegroup().get_id() << ")" << dendl;
/* we now need to make sure that the operation actually requires copy source, that is
* it's a copy operation
*/
- if (store->get_zone()->get_zonegroup().is_master_zonegroup() && s->system_request) {
+ if (driver->get_zone()->get_zonegroup().is_master_zonegroup() && s->system_request) {
/*If this is the master, don't redirect*/
} else if (s->op_type == RGW_OP_GET_BUCKET_LOCATION ) {
/* If op is get bucket location, don't redirect */
s->dest_placement.storage_class = s->info.storage_class;
s->dest_placement.inherit_from(s->bucket->get_placement_rule());
- if (!store->valid_placement(s->dest_placement)) {
+ if (!driver->valid_placement(s->dest_placement)) {
ldpp_dout(dpp, 0) << "NOTICE: invalid dest placement: " << s->dest_placement.to_str() << dendl;
return -EINVAL;
}
/* handle user ACL only for those APIs which support it */
if (s->user_acl) {
- std::unique_ptr<rgw::sal::User> acl_user = store->get_user(acct_acl_user.uid);
+ std::unique_ptr<rgw::sal::User> acl_user = driver->get_user(acct_acl_user.uid);
ret = acl_user->read_attrs(dpp, y);
if (!ret) {
ret = -EACCES;
}
- bool success = store->get_zone()->get_redirect_endpoint(&s->redirect_zone_endpoint);
+ bool success = driver->get_zone()->get_redirect_endpoint(&s->redirect_zone_endpoint);
if (success) {
ldpp_dout(dpp, 20) << "redirect_zone_endpoint=" << s->redirect_zone_endpoint << dendl;
}
* only_bucket: If true, reads the bucket ACL rather than the object ACL.
* Returns: 0 on success, -ERR# otherwise.
*/
-int rgw_build_object_policies(const DoutPrefixProvider *dpp, rgw::sal::Store* store,
+int rgw_build_object_policies(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver,
req_state *s, bool prefetch_data, optional_yield y)
{
int ret = 0;
if (prefetch_data) {
s->object->set_prefetch_data();
}
- ret = read_obj_policy(dpp, store, s, s->bucket->get_info(), s->bucket_attrs,
+ ret = read_obj_policy(dpp, driver, s, s->bucket->get_info(), s->bucket_attrs,
s->object_acl.get(), nullptr, s->iam_policy, s->bucket.get(),
s->object.get(), y);
}
}
}
-void rgw_build_iam_environment(rgw::sal::Store* store,
+void rgw_build_iam_environment(rgw::sal::Driver* driver,
req_state* s)
{
const auto& m = s->info.env->get_map();
return -EPERM;
}
- if (!s->system_request && (required_mask & RGW_OP_TYPE_MODIFY) && !store->get_zone()->is_writeable()) {
+ if (!s->system_request && (required_mask & RGW_OP_TYPE_MODIFY) && !driver->get_zone()->is_writeable()) {
ldpp_dout(this, 5) << "NOTICE: modify request to a read-only zone by a "
"non-system user, permission denied" << dendl;
return -EPERM;
if (op_ret < 0)
return;
- op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
+ op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
}
void RGWDeleteBucketTags::execute(optional_yield y)
{
bufferlist in_data;
- op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
+ op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
if (op_ret < 0)
return;
- op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
+ op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
void RGWDeleteBucketReplication::execute(optional_yield y)
{
bufferlist in_data;
- op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
+ op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
}
std::unique_ptr<rgw::sal::User> owner_user =
- store->get_user(s->bucket->get_info().owner);
+ driver->get_user(s->bucket->get_info().owner);
rgw::sal::User* user;
if (s->user->get_id() == s->bucket_owner.get_id()) {
}
- store->get_quota(quota);
+ driver->get_quota(quota);
if (s->bucket->get_info().quota.enabled) {
quota.bucket_quota = s->bucket->get_info().quota;
static int iterate_user_manifest_parts(const DoutPrefixProvider *dpp,
CephContext * const cct,
- rgw::sal::Store* const store,
+ rgw::sal::Driver* const driver,
const off_t ofs,
const off_t end,
rgw::sal::Bucket* bucket,
static int iterate_slo_parts(const DoutPrefixProvider *dpp,
CephContext *cct,
- rgw::sal::Store*store,
+ rgw::sal::Driver* driver,
off_t ofs,
off_t end,
map<uint64_t, rgw_slo_part>& slo_parts,
if (bucket_name.compare(s->bucket->get_name()) != 0) {
map<string, bufferlist> bucket_attrs;
- r = store->get_bucket(this, s->user.get(), s->user->get_tenant(), bucket_name, &ubucket, y);
+ r = driver->get_bucket(this, s->user.get(), s->user->get_tenant(), bucket_name, &ubucket, y);
if (r < 0) {
ldpp_dout(this, 0) << "could not get bucket info for bucket="
<< bucket_name << dendl;
return r;
}
bucket_acl = &_bucket_acl;
- r = read_bucket_policy(this, store, s, ubucket->get_info(), bucket_attrs, bucket_acl, ubucket->get_key(), y);
+ r = read_bucket_policy(this, driver, s, ubucket->get_info(), bucket_attrs, bucket_acl, ubucket->get_key(), y);
if (r < 0) {
ldpp_dout(this, 0) << "failed to read bucket policy" << dendl;
return r;
* - total length (of the parts we are going to send to client),
* - overall DLO's content size,
* - md5 sum of overall DLO's content (for etag of Swift API). */
- r = iterate_user_manifest_parts(this, s->cct, store, ofs, end,
+ r = iterate_user_manifest_parts(this, s->cct, driver, ofs, end,
pbucket, obj_prefix, bucket_acl, *bucket_policy,
nullptr, &s->obj_size, &lo_etag,
nullptr /* cb */, nullptr /* cb arg */, y);
return r;
}
- r = iterate_user_manifest_parts(this, s->cct, store, ofs, end,
+ r = iterate_user_manifest_parts(this, s->cct, driver, ofs, end,
pbucket, obj_prefix, bucket_acl, *bucket_policy,
&total_len, nullptr, nullptr,
nullptr, nullptr, y);
return 0;
}
- r = iterate_user_manifest_parts(this, s->cct, store, ofs, end,
+ r = iterate_user_manifest_parts(this, s->cct, driver, ofs, end,
pbucket, obj_prefix, bucket_acl, *bucket_policy,
nullptr, nullptr, nullptr,
get_obj_user_manifest_iterate_cb, (void *)this, y);
RGWAccessControlPolicy& _bucket_acl = allocated_acls.back();
std::unique_ptr<rgw::sal::Bucket> tmp_bucket;
- int r = store->get_bucket(this, s->user.get(), s->user->get_tenant(), bucket_name, &tmp_bucket, y);
+ int r = driver->get_bucket(this, s->user.get(), s->user->get_tenant(), bucket_name, &tmp_bucket, y);
if (r < 0) {
ldpp_dout(this, 0) << "could not get bucket info for bucket="
<< bucket_name << dendl;
}
bucket = tmp_bucket.get();
bucket_acl = &_bucket_acl;
- r = read_bucket_policy(this, store, s, tmp_bucket->get_info(), tmp_bucket->get_attrs(), bucket_acl,
+ r = read_bucket_policy(this, driver, s, tmp_bucket->get_info(), tmp_bucket->get_attrs(), bucket_acl,
tmp_bucket->get_key(), y);
if (r < 0) {
ldpp_dout(this, 0) << "failed to read bucket ACL for bucket "
<< " total=" << total_len
<< dendl;
- r = iterate_slo_parts(this, s->cct, store, ofs, end, slo_parts,
+ r = iterate_slo_parts(this, s->cct, driver, ofs, end, slo_parts,
get_obj_user_manifest_iterate_cb, (void *)this);
if (r < 0) {
return r;
op_ret = -EINVAL;
goto done_err;
}
- torrent.init(s, store);
+ torrent.init(s, driver);
rgw_obj obj = s->object->get_obj();
op_ret = torrent.get_torrent_file(s->object.get(), total_len, bl, obj);
if (op_ret < 0)
* isn't actually used in a given account. In such situation its usage
* stats would be simply full of zeros. */
std::set<std::string> targets;
- if (store->get_zone()->get_zonegroup().get_placement_target_names(targets)) {
+ if (driver->get_zone()->get_zonegroup().get_placement_target_names(targets)) {
for (const auto& policy : targets) {
policies_stats.emplace(policy, decltype(policies_stats)::mapped_type());
}
}
}
- op_ret = rgw_user_sync_all_stats(this, store, s->user.get(), y);
+ op_ret = rgw_user_sync_all_stats(this, driver, s->user.get(), y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "ERROR: failed to sync user stats" << dendl;
return;
}
- op_ret = rgw_user_get_all_buckets_stats(this, store, s->user.get(), buckets_usage, y);
+ op_ret = rgw_user_get_all_buckets_stats(this, driver, s->user.get(), buckets_usage, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "ERROR: failed to get user's buckets stats" << dendl;
return;
* isn't actually used in a given account. In such situation its usage
* stats would be simply full of zeros. */
std::set<std::string> names;
- store->get_zone()->get_zonegroup().get_placement_target_names(names);
+ driver->get_zone()->get_zonegroup().get_placement_target_names(names);
for (const auto& policy : names) {
policies_stats.emplace(policy, decltype(policies_stats)::mapped_type());
}
}
}
- op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
+ op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
return;
}
- op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
+ op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << " forward_request_to_master returned ret=" << op_ret << dendl;
return;
bufferlist in_data;
- op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
+ op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "NOTICE: forward_to_master failed on bucket=" << s->bucket->get_name()
<< "returned err=" << op_ret << dendl;
return;
}
- op_ret = store->get_bucket(this, s->user.get(), s->bucket->get_key(), &bucket, y);
+ op_ret = driver->get_bucket(this, s->user.get(), s->bucket->get_key(), &bucket, y);
if (op_ret) {
return;
}
if (!relaxed_region_enforcement &&
!location_constraint.empty() &&
- !store->get_zone()->has_zonegroup_api(location_constraint)) {
+ !driver->get_zone()->has_zonegroup_api(location_constraint)) {
ldpp_dout(this, 0) << "location constraint (" << location_constraint << ")"
<< " can't be found." << dendl;
op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
return;
}
- if (!relaxed_region_enforcement && !store->get_zone()->get_zonegroup().is_master_zonegroup() && !location_constraint.empty() &&
- store->get_zone()->get_zonegroup().get_api_name() != location_constraint) {
+ if (!relaxed_region_enforcement && !driver->get_zone()->get_zonegroup().is_master_zonegroup() && !location_constraint.empty() &&
+ driver->get_zone()->get_zonegroup().get_api_name() != location_constraint) {
ldpp_dout(this, 0) << "location constraint (" << location_constraint << ")"
- << " doesn't match zonegroup" << " (" << store->get_zone()->get_zonegroup().get_api_name() << ")"
+ << " doesn't match zonegroup" << " (" << driver->get_zone()->get_zonegroup().get_api_name() << ")"
<< dendl;
op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
s->err.message = "The specified location-constraint is not valid";
}
std::set<std::string> names;
- store->get_zone()->get_zonegroup().get_placement_target_names(names);
+ driver->get_zone()->get_zonegroup().get_placement_target_names(names);
if (!placement_rule.name.empty() &&
!names.count(placement_rule.name)) {
ldpp_dout(this, 0) << "placement target (" << placement_rule.name << ")"
<< " doesn't exist in the placement targets of zonegroup"
- << " (" << store->get_zone()->get_zonegroup().get_api_name() << ")" << dendl;
+ << " (" << driver->get_zone()->get_zonegroup().get_api_name() << ")" << dendl;
op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
s->err.message = "The specified placement target does not exist";
return;
* specific request */
{
std::unique_ptr<rgw::sal::Bucket> tmp_bucket;
- op_ret = store->get_bucket(this, s->user.get(), s->bucket_tenant,
+ op_ret = driver->get_bucket(this, s->user.get(), s->bucket_tenant,
s->bucket_name, &tmp_bucket, y);
if (op_ret < 0 && op_ret != -ENOENT)
return;
if (s->bucket_exists) {
if (!s->system_request &&
- store->get_zone()->get_zonegroup().get_id() !=
+ driver->get_zone()->get_zonegroup().get_id() !=
tmp_bucket->get_info().zonegroup) {
op_ret = -EEXIST;
return;
if (s->system_request) {
zonegroup_id = s->info.args.get(RGW_SYS_PARAM_PREFIX "zonegroup");
if (zonegroup_id.empty()) {
- zonegroup_id = store->get_zone()->get_zonegroup().get_id();
+ zonegroup_id = driver->get_zone()->get_zonegroup().get_id();
}
} else {
- zonegroup_id = store->get_zone()->get_zonegroup().get_id();
+ zonegroup_id = driver->get_zone()->get_zonegroup().get_id();
}
/* Encode special metadata first as we're using std::map::emplace under
}
bufferlist in_data;
- op_ret = store->forward_request_to_master(this, s->user.get(), &ot.read_version, in_data, nullptr, s->info, y);
+ op_ret = driver->forward_request_to_master(this, s->user.get(), &ot.read_version, in_data, nullptr, s->info, y);
if (op_ret < 0) {
if (op_ret == -ENOENT) {
/* adjust error, we want to return with NoSuchBucket and not
}
}
std::unique_ptr<rgw::sal::Bucket> bucket;
- ret = store->get_bucket(this, s->user.get(), copy_source_tenant_name, copy_source_bucket_name,
+ ret = driver->get_bucket(this, s->user.get(), copy_source_tenant_name, copy_source_bucket_name,
&bucket, y);
if (ret < 0) {
ldpp_dout(this, 5) << __func__ << "(): get_bucket() returned ret=" << ret << dendl;
boost::optional<Policy> policy;
map<string, bufferlist> cs_attrs;
std::unique_ptr<rgw::sal::Bucket> cs_bucket;
- int ret = store->get_bucket(NULL, copy_source_bucket_info, &cs_bucket);
+ int ret = driver->get_bucket(NULL, copy_source_bucket_info, &cs_bucket);
if (ret < 0)
return ret;
cs_object->set_prefetch_data();
/* check source object permissions */
- if (ret = read_obj_policy(this, store, s, copy_source_bucket_info, cs_attrs, &cs_acl, nullptr,
+ if (ret = read_obj_policy(this, driver, s, copy_source_bucket_info, cs_attrs, &cs_acl, nullptr,
policy, cs_bucket.get(), cs_object.get(), y, true); ret < 0) {
return ret;
}
new_end = lst;
std::unique_ptr<rgw::sal::Bucket> bucket;
- ret = store->get_bucket(nullptr, copy_source_bucket_info, &bucket);
+ ret = driver->get_bucket(nullptr, copy_source_bucket_info, &bucket);
if (ret < 0)
return ret;
// make reservation for notification if needed
std::unique_ptr<rgw::sal::Notification> res
- = store->get_notification(
+ = driver->get_notification(
s->object.get(), s->src_object.get(), s,
rgw::notify::ObjectCreatedPut);
if(!multipart) {
op_ret = -ERR_INVALID_BUCKET_STATE;
return;
}
- processor = store->get_append_writer(this, s->yield, s->object->clone(),
+ processor = driver->get_append_writer(this, s->yield, s->object->clone(),
s->bucket_owner.get_id(),
pdest_placement, s->req_id, position,
&cur_accounted_size);
version_id = s->object->get_instance();
}
}
- processor = store->get_atomic_writer(this, s->yield, s->object->clone(),
+ processor = driver->get_atomic_writer(this, s->yield, s->object->clone(),
s->bucket_owner.get_id(),
pdest_placement, olh_epoch, s->req_id);
}
}
if ((! copy_source.empty()) && !copy_source_range) {
std::unique_ptr<rgw::sal::Bucket> bucket;
- op_ret = store->get_bucket(nullptr, copy_source_bucket_info, &bucket);
+ op_ret = driver->get_bucket(nullptr, copy_source_bucket_info, &bucket);
if (op_ret < 0) {
ldpp_dout(this, 0) << "ERROR: failed to get bucket with error" << op_ret << dendl;
return;
// no filters by default
rgw::sal::DataProcessor *filter = processor.get();
- const auto& compression_type = store->get_compression_type(*pdest_placement);
+ const auto& compression_type = driver->get_compression_type(*pdest_placement);
CompressorRef plugin;
boost::optional<RGWPutObj_Compress> compressor;
/* produce torrent */
if (s->cct->_conf->rgw_torrent_flag && (ofs == torrent.get_data_len()))
{
- torrent.init(s, store);
+ torrent.init(s, driver);
torrent.set_create_date(mtime);
op_ret = torrent.complete(y);
if (0 != op_ret)
// make reservation for notification if needed
std::unique_ptr<rgw::sal::Notification> res
- = store->get_notification(s->object.get(), s->src_object.get(), s, rgw::notify::ObjectCreatedPost);
+ = driver->get_notification(s->object.get(), s->src_object.get(), s, rgw::notify::ObjectCreatedPost);
op_ret = res->publish_reserve(this);
if (op_ret < 0) {
return;
}
std::unique_ptr<rgw::sal::Writer> processor;
- processor = store->get_atomic_writer(this, s->yield, std::move(obj),
+ processor = driver->get_atomic_writer(this, s->yield, std::move(obj),
s->bucket_owner.get_id(),
&s->dest_placement, 0, s->req_id);
op_ret = processor->prepare(s->yield);
if (encrypt != nullptr) {
filter = encrypt.get();
} else {
- const auto& compression_type = store->get_compression_type(s->dest_placement);
+ const auto& compression_type = driver->get_compression_type(s->dest_placement);
if (compression_type != "none") {
plugin = Compressor::create(s->cct, compression_type);
if (!plugin) {
try {
deleter = std::unique_ptr<RGWBulkDelete::Deleter>(\
- new RGWBulkDelete::Deleter(this, store, s));
+ new RGWBulkDelete::Deleter(this, driver, s));
} catch (const std::bad_alloc&) {
return -ENOMEM;
}
rgw::notify::ObjectRemovedDeleteMarkerCreated :
rgw::notify::ObjectRemovedDelete;
std::unique_ptr<rgw::sal::Notification> res
- = store->get_notification(s->object.get(), s->src_object.get(), s,
+ = driver->get_notification(s->object.get(), s->src_object.get(), s,
event_type);
op_ret = res->publish_reserve(this);
if (op_ret < 0) {
return op_ret;
}
- op_ret = store->get_bucket(this, s->user.get(),
+ op_ret = driver->get_bucket(this, s->user.get(),
rgw_bucket(src_tenant_name,
src_bucket_name,
s->bucket_instance_id),
rgw_placement_rule src_placement;
/* check source object permissions */
- op_ret = read_obj_policy(this, store, s, src_bucket->get_info(), src_bucket->get_attrs(), &src_acl, &src_placement.storage_class,
+ op_ret = read_obj_policy(this, driver, s, src_bucket->get_info(), src_bucket->get_attrs(), &src_acl, &src_placement.storage_class,
src_policy, src_bucket.get(), s->src_object.get(), y);
if (op_ret < 0) {
return op_ret;
or intra region sync */
dest_bucket = src_bucket->clone();
} else {
- op_ret = store->get_bucket(this, s->user.get(), dest_tenant_name, dest_bucket_name, &dest_bucket, y);
+ op_ret = driver->get_bucket(this, s->user.get(), dest_tenant_name, dest_bucket_name, &dest_bucket, y);
if (op_ret < 0) {
if (op_ret == -ENOENT) {
ldpp_dout(this, 0) << "ERROR: Destination Bucket not found for user: " << s->user->get_id().to_str() << dendl;
dest_object->set_atomic();
/* check dest bucket permissions */
- op_ret = read_bucket_policy(this, store, s, dest_bucket->get_info(),
+ op_ret = read_bucket_policy(this, driver, s, dest_bucket->get_info(),
dest_bucket->get_attrs(),
&dest_bucket_policy, dest_bucket->get_key(), y);
if (op_ret < 0) {
// make reservation for notification if needed
std::unique_ptr<rgw::sal::Notification> res
- = store->get_notification(
+ = driver->get_notification(
s->object.get(), s->src_object.get(),
s, rgw::notify::ObjectCreatedCopy);
op_ret = res->publish_reserve(this);
}
if (!s->canned_acl.empty() || s->has_acl_header) {
- op_ret = get_policy_from_state(store, s, ss);
+ op_ret = get_policy_from_state(driver, s, ss);
if (op_ret < 0)
return;
if (s->canned_acl.empty()) {
in_data.append(data);
}
- op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
+ op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
*_dout << dendl;
}
- op_ret = policy->rebuild(this, store, &owner, new_policy, s->err.message);
+ op_ret = policy->rebuild(this, driver, &owner, new_policy, s->err.message);
if (op_ret < 0)
return;
ldpp_dout(this, 15) << "New LifecycleConfiguration:" << ss.str() << dendl;
}
- op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
}
- op_ret = store->get_rgwlc()->set_bucket_config(s->bucket.get(), s->bucket_attrs, &new_config);
+ op_ret = driver->get_rgwlc()->set_bucket_config(s->bucket.get(), s->bucket_attrs, &new_config);
if (op_ret < 0) {
return;
}
void RGWDeleteLC::execute(optional_yield y)
{
bufferlist data;
- op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
}
- op_ret = store->get_rgwlc()->remove_bucket_config(s->bucket.get(), s->bucket_attrs);
+ op_ret = driver->get_rgwlc()->remove_bucket_config(s->bucket.get(), s->bucket_attrs);
if (op_ret < 0) {
return;
}
if (op_ret < 0)
return;
- op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
+ op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
void RGWDeleteCORS::execute(optional_yield y)
{
bufferlist data;
- op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
if (op_ret < 0)
return;
- op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
+ op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
// make reservation for notification if needed
std::unique_ptr<rgw::sal::Notification> res
- = store->get_notification(meta_obj.get(), nullptr, s, rgw::notify::ObjectCreatedCompleteMultipartUpload, &s->object->get_name());
+ = driver->get_notification(meta_obj.get(), nullptr, s, rgw::notify::ObjectCreatedCompleteMultipartUpload, &s->object->get_name());
op_ret = res->publish_reserve(this);
if (op_ret < 0) {
return;
rgw::notify::ObjectRemovedDeleteMarkerCreated :
rgw::notify::ObjectRemovedDelete;
std::unique_ptr<rgw::sal::Notification> res
- = store->get_notification(obj.get(), s->src_object.get(), s, event_type);
+ = driver->get_notification(obj.get(), s->src_object.get(), s, event_type);
op_ret = res->publish_reserve(this);
if (op_ret < 0) {
send_partial_response(o, false, "", op_ret, formatter_flush_cond);
ACLOwner& bucket_owner /* out */,
optional_yield y)
{
- RGWAccessControlPolicy bacl(store->ctx());
- int ret = read_bucket_policy(dpp, store, s, binfo, battrs, &bacl, binfo.bucket, y);
+ RGWAccessControlPolicy bacl(driver->ctx());
+ int ret = read_bucket_policy(dpp, driver, s, binfo, battrs, &bacl, binfo.bucket, y);
if (ret < 0) {
return false;
}
ACLOwner bowner;
RGWObjVersionTracker ot;
- int ret = store->get_bucket(dpp, s->user.get(), s->user->get_tenant(), path.bucket_name, &bucket, y);
+ int ret = driver->get_bucket(dpp, s->user.get(), s->user->get_tenant(), path.bucket_name, &bucket, y);
if (ret < 0) {
goto binfo_fail;
}
void RGWBulkDelete::execute(optional_yield y)
{
- deleter = std::unique_ptr<Deleter>(new Deleter(this, store, s));
+ deleter = std::unique_ptr<Deleter>(new Deleter(this, driver, s));
bool is_truncated = false;
do {
info.effective_uri = "/" + bucket_name;
}
-void RGWBulkUploadOp::init(rgw::sal::Store* const store,
+void RGWBulkUploadOp::init(rgw::sal::Driver* const driver,
req_state* const s,
RGWHandler* const h)
{
- RGWOp::init(store, s, h);
+ RGWOp::init(driver, s, h);
}
int RGWBulkUploadOp::handle_dir(const std::string_view path, optional_yield y)
forward_req_info(this, s->cct, info, bucket_name);
op_ret = s->user->create_bucket(this, new_bucket,
- store->get_zone()->get_zonegroup().get_id(),
+ driver->get_zone()->get_zonegroup().get_id(),
placement_rule, swift_ver_location,
pquota_info, policy, attrs,
out_info, ep_objv,
ACLOwner& bucket_owner /* out */,
optional_yield y)
{
- RGWAccessControlPolicy bacl(store->ctx());
- op_ret = read_bucket_policy(this, store, s, binfo, battrs, &bacl, binfo.bucket, y);
+ RGWAccessControlPolicy bacl(driver->ctx());
+ op_ret = read_bucket_policy(this, driver, s, binfo, battrs, &bacl, binfo.bucket, y);
if (op_ret < 0) {
ldpp_dout(this, 20) << "cannot read_policy() for bucket" << dendl;
return false;
std::unique_ptr<rgw::sal::Bucket> bucket;
ACLOwner bowner;
- op_ret = store->get_bucket(this, s->user.get(), rgw_bucket(rgw_bucket_key(s->user->get_tenant(), bucket_name)), &bucket, y);
+ op_ret = driver->get_bucket(this, s->user.get(), rgw_bucket(rgw_bucket_key(s->user->get_tenant(), bucket_name)), &bucket, y);
if (op_ret < 0) {
if (op_ret == -ENOENT) {
ldpp_dout(this, 20) << "non existent directory=" << bucket_name << dendl;
dest_placement.inherit_from(bucket->get_placement_rule());
std::unique_ptr<rgw::sal::Writer> processor;
- processor = store->get_atomic_writer(this, s->yield, std::move(obj),
+ processor = driver->get_atomic_writer(this, s->yield, std::move(obj),
bowner.get_id(),
&s->dest_placement, 0, s->req_id);
op_ret = processor->prepare(s->yield);
/* No filters by default. */
rgw::sal::DataProcessor *filter = processor.get();
- const auto& compression_type = store->get_compression_type(dest_placement);
+ const auto& compression_type = driver->get_compression_type(dest_placement);
CompressorRef plugin;
boost::optional<RGWPutObj_Compress> compressor;
if (compression_type != "none") {
{
}
-int RGWHandler::init(rgw::sal::Store* _store,
+int RGWHandler::init(rgw::sal::Driver* _driver,
req_state *_s,
rgw::io::BasicClient *cio)
{
- store = _store;
+ driver = _driver;
s = _s;
return 0;
int RGWHandler::do_init_permissions(const DoutPrefixProvider *dpp, optional_yield y)
{
- int ret = rgw_build_bucket_policies(dpp, store, s, y);
+ int ret = rgw_build_bucket_policies(dpp, driver, s, y);
if (ret < 0) {
ldpp_dout(dpp, 10) << "init_permissions on " << s->bucket
<< " failed, ret=" << ret << dendl;
return ret==-ENODATA ? -EACCES : ret;
}
- rgw_build_iam_environment(store, s);
+ rgw_build_iam_environment(driver, s);
return ret;
}
/* already read bucket info */
return 0;
}
- int ret = rgw_build_object_policies(op, store, s, op->prefetch_data(), y);
+ int ret = rgw_build_object_policies(op, driver, s, op->prefetch_data(), y);
if (ret < 0) {
ldpp_dout(op, 10) << "read_permissions on " << s->bucket << ":"
return;
}
- op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 20) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
void RGWDeleteBucketPolicy::execute(optional_yield y)
{
bufferlist data;
- op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
return;
}
- op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
void RGWGetClusterStat::execute(optional_yield y)
{
- op_ret = store->cluster_stat(stats_op);
+ op_ret = driver->cluster_stat(stats_op);
}
int RGWGetBucketPolicyStatus::verify_permission(optional_yield y)
return;
}
- op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
void RGWDeleteBucketPublicAccessBlock::execute(optional_yield y)
{
bufferlist data;
- op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
return;
}
- op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 20) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
void RGWDeleteBucketEncryption::execute(optional_yield y)
{
bufferlist data;
- op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
int rgw_op_get_bucket_policy_from_attr(const DoutPrefixProvider *dpp,
CephContext *cct,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
RGWBucketInfo& bucket_info,
std::map<std::string, bufferlist>& bucket_attrs,
RGWAccessControlPolicy *policy,
class RGWHandler {
protected:
- rgw::sal::Store* store{nullptr};
+ rgw::sal::Driver* driver{nullptr};
req_state *s{nullptr};
int do_init_permissions(const DoutPrefixProvider *dpp, optional_yield y);
RGWHandler() {}
virtual ~RGWHandler();
- virtual int init(rgw::sal::Store* store,
+ virtual int init(rgw::sal::Driver* driver,
req_state* _s,
rgw::io::BasicClient* cio);
protected:
req_state *s;
RGWHandler *dialect_handler;
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
RGWCORSConfiguration bucket_cors;
bool cors_exist;
RGWQuota quota;
RGWOp()
: s(nullptr),
dialect_handler(nullptr),
- store(nullptr),
+ driver(nullptr),
cors_exist(false),
op_ret(0) {
}
return 0;
}
- virtual void init(rgw::sal::Store* store, req_state *s, RGWHandler *dialect_handler) {
- this->store = store;
+ virtual void init(rgw::sal::Driver* driver, req_state *s, RGWHandler *dialect_handler) {
+ this->driver = driver;
this->s = s;
this->dialect_handler = dialect_handler;
}
unsigned int num_unfound;
std::list<fail_desc_t> failures;
- rgw::sal::Store* const store;
+ rgw::sal::Driver* const driver;
req_state * const s;
public:
- Deleter(const DoutPrefixProvider* dpp, rgw::sal::Store* const str, req_state * const s)
+ Deleter(const DoutPrefixProvider* dpp, rgw::sal::Driver* const str, req_state * const s)
: dpp(dpp),
num_deleted(0),
num_unfound(0),
- store(str),
+ driver(str),
s(s) {
}
: num_created(0) {
}
- void init(rgw::sal::Store* const store,
+ void init(rgw::sal::Driver* const driver,
req_state* const s,
RGWHandler* const h) override;
void pre_exec() override;
void execute(optional_yield y) override;
- void init(rgw::sal::Store* store, req_state *s, RGWHandler *h) override {
- RGWOp::init(store, s, h);
+ void init(rgw::sal::Driver* driver, req_state *s, RGWHandler *h) override {
+ RGWOp::init(driver, s, h);
}
virtual int get_params(optional_yield y) = 0;
void send_response() override = 0;
int verify_permission(optional_yield y) override;
void pre_exec() override;
void execute(optional_yield y) override;
- void init(rgw::sal::Store* store, req_state *s, RGWHandler *h) override {
- RGWOp::init(store, s, h);
+ void init(rgw::sal::Driver* driver, req_state *s, RGWHandler *h) override {
+ RGWOp::init(driver, s, h);
policy.set_ctx(s->cct);
relaxed_region_enforcement =
s->cct->_conf.get_val<bool>("rgw_relaxed_region_enforcement");
delete obj_legal_hold;
}
- void init(rgw::sal::Store* store, req_state *s, RGWHandler *h) override {
- RGWOp::init(store, s, h);
+ void init(rgw::sal::Driver* driver, req_state *s, RGWHandler *h) override {
+ RGWOp::init(driver, s, h);
policy.set_ctx(s->cct);
}
attrs.emplace(std::move(key), std::move(bl)); /* key and bl are r-value refs */
}
- void init(rgw::sal::Store* store, req_state *s, RGWHandler *h) override {
- RGWOp::init(store, s, h);
+ void init(rgw::sal::Driver* driver, req_state *s, RGWHandler *h) override {
+ RGWOp::init(driver, s, h);
policy.set_ctx(s->cct);
}
has_policy(false) {
}
- void init(rgw::sal::Store* store, req_state *s, RGWHandler *h) override {
- RGWOp::init(store, s, h);
+ void init(rgw::sal::Driver* driver, req_state *s, RGWHandler *h) override {
+ RGWOp::init(driver, s, h);
policy.set_ctx(s->cct);
}
int init_processing(optional_yield y) override;
attrs.emplace(std::move(key), std::move(bl)); /* key and bl are r-value refs */
}
- void init(rgw::sal::Store* store, req_state *s, RGWHandler *h) override {
- RGWOp::init(store, s, h);
+ void init(rgw::sal::Driver* driver, req_state *s, RGWHandler *h) override {
+ RGWOp::init(driver, s, h);
policy.set_ctx(s->cct);
}
: dlo_manifest(NULL)
{}
- void init(rgw::sal::Store* store, req_state *s, RGWHandler *h) override {
- RGWOp::init(store, s, h);
+ void init(rgw::sal::Driver* driver, req_state *s, RGWHandler *h) override {
+ RGWOp::init(driver, s, h);
policy.set_ctx(s->cct);
}
int verify_permission(optional_yield y) override;
attrs.emplace(std::move(key), std::move(bl));
}
- void init(rgw::sal::Store* store, req_state *s, RGWHandler *h) override {
- RGWOp::init(store, s, h);
+ void init(rgw::sal::Driver* driver, req_state *s, RGWHandler *h) override {
+ RGWOp::init(driver, s, h);
dest_policy.set_ctx(s->cct);
}
int verify_permission(optional_yield y) override;
void pre_exec() override;
void execute(optional_yield y) override;
- virtual int get_policy_from_state(rgw::sal::Store* store, req_state *s, std::stringstream& ss) { return 0; }
+ virtual int get_policy_from_state(rgw::sal::Driver* driver, req_state *s, std::stringstream& ss) { return 0; }
virtual int get_params(optional_yield y) = 0;
void send_response() override = 0;
const char* name() const override { return "put_acls"; }
}
~RGWPutLC() override {}
- void init(rgw::sal::Store* store, req_state *s, RGWHandler *dialect_handler) override {
+ void init(rgw::sal::Driver* driver, req_state *s, RGWHandler *dialect_handler) override {
#define COOKIE_LEN 16
char buf[COOKIE_LEN + 1];
- RGWOp::init(store, s, dialect_handler);
+ RGWOp::init(driver, s, dialect_handler);
gen_rand_alphanumeric(s->cct, buf, sizeof(buf) - 1);
cookie = buf;
}
void pre_exec() override;
void execute(optional_yield y) override;
-// virtual int get_policy_from_state(RGWRados* store, req_state *s, std::stringstream& ss) { return 0; }
+// virtual int get_policy_from_state(RGWRados* driver, req_state *s, std::stringstream& ss) { return 0; }
virtual int get_params(optional_yield y) = 0;
void send_response() override = 0;
const char* name() const override { return "put_lifecycle"; }
public:
RGWInitMultipart() {}
- void init(rgw::sal::Store* store, req_state *s, RGWHandler *h) override {
- RGWOp::init(store, s, h);
+ void init(rgw::sal::Driver* driver, req_state *s, RGWHandler *h) override {
+ RGWOp::init(driver, s, h);
policy.set_ctx(s->cct);
}
int verify_permission(optional_yield y) override;
truncated = false;
}
- void init(rgw::sal::Store* store, req_state *s, RGWHandler *h) override {
- RGWOp::init(store, s, h);
+ void init(rgw::sal::Driver* driver, req_state *s, RGWHandler *h) override {
+ RGWOp::init(driver, s, h);
policy = RGWAccessControlPolicy(s->cct);
}
int verify_permission(optional_yield y) override;
default_max = 0;
}
- void init(rgw::sal::Store* store, req_state *s, RGWHandler *h) override {
- RGWOp::init(store, s, h);
+ void init(rgw::sal::Driver* driver, req_state *s, RGWHandler *h) override {
+ RGWOp::init(driver, s, h);
max_uploads = default_max;
}
uint32_t op_mask() override { return RGW_OP_TYPE_READ; }
};
-extern int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::Store* store,
+extern int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver,
req_state* s, optional_yield y);
-extern int rgw_build_object_policies(const DoutPrefixProvider *dpp, rgw::sal::Store* store,
+extern int rgw_build_object_policies(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver,
req_state *s, bool prefetch_data, optional_yield y);
-extern void rgw_build_iam_environment(rgw::sal::Store* store,
+extern void rgw_build_iam_environment(rgw::sal::Driver* driver,
req_state* s);
extern std::vector<rgw::IAM::Policy> get_iam_user_policy_from_attr(CephContext* cct,
std::map<std::string, bufferlist>& attrs,
public:
RGWGetClusterStat() {}
- void init(rgw::sal::Store* store, req_state *s, RGWHandler *h) override {
- RGWOp::init(store, s, h);
+ void init(rgw::sal::Driver* driver, req_state *s, RGWHandler *h) override {
+ RGWOp::init(driver, s, h);
}
int verify_permission(optional_yield) override {return 0;}
virtual void send_response() override = 0;
namespace rgw {
/* static */
- int RGWHandler_Lib::init_from_header(rgw::sal::Store* store,
+ int RGWHandler_Lib::init_from_header(rgw::sal::Driver* driver,
req_state *s)
{
string req;
if (pos >= 0) {
// XXX ugh, another copy
string encoded_obj_str = req.substr(pos+1);
- s->object = store->get_object(rgw_obj_key(encoded_obj_str, s->info.args.get("versionId")));
+ s->object = driver->get_object(rgw_obj_key(encoded_obj_str, s->info.args.get("versionId")));
}
} else {
- s->object = store->get_object(rgw_obj_key(req_name, s->info.args.get("versionId")));
+ s->object = driver->get_object(rgw_obj_key(req_name, s->info.args.get("versionId")));
}
return 0;
} /* init_from_header */
};
-RGWPeriodPusher::RGWPeriodPusher(const DoutPrefixProvider *dpp, rgw::sal::Store* store,
+RGWPeriodPusher::RGWPeriodPusher(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver,
optional_yield y)
- : cct(store->ctx()), store(store)
+ : cct(driver->ctx()), driver(driver)
{
- rgw::sal::Zone* zone = store->get_zone();
+ rgw::sal::Zone* zone = driver->get_zone();
auto& realm_id = zone->get_realm_id();
if (realm_id.empty()) // no realm configuration
return;
// always send out the current period on startup
RGWPeriod period;
// XXX dang
- int r = period.init(dpp, cct, static_cast<rgw::sal::RadosStore* >(store)->svc()->sysobj, realm_id, y, zone->get_realm_name());
+ int r = period.init(dpp, cct, static_cast<rgw::sal::RadosStore* >(driver)->svc()->sysobj, realm_id, y, zone->get_realm_name());
if (r < 0) {
ldpp_dout(dpp, -1) << "failed to load period for realm " << realm_id << dendl;
return;
// we can't process this notification without access to our current realm
// configuration. queue it until resume()
- if (store == nullptr) {
+ if (driver == nullptr) {
pending_periods.emplace_back(std::move(info));
return;
}
// find our zonegroup in the new period
auto& zonegroups = period.get_map().zonegroups;
- auto i = zonegroups.find(store->get_zone()->get_zonegroup().get_id());
+ auto i = zonegroups.find(driver->get_zone()->get_zonegroup().get_id());
if (i == zonegroups.end()) {
lderr(cct) << "The new period does not contain my zonegroup!" << dendl;
return;
auto& my_zonegroup = i->second;
// if we're not a master zone, we're not responsible for pushing any updates
- if (my_zonegroup.master_zone != store->get_zone()->get_id())
+ if (my_zonegroup.master_zone != driver->get_zone()->get_id())
return;
// construct a map of the zones that need this period. the map uses the same
auto hint = conns.end();
// are we the master zonegroup in this period?
- if (period.get_map().master_zonegroup == store->get_zone()->get_zonegroup().get_id()) {
+ if (period.get_map().master_zonegroup == driver->get_zone()->get_zonegroup().get_id()) {
// update other zonegroup endpoints
for (auto& zg : zonegroups) {
auto& zonegroup = zg.second;
- if (zonegroup.get_id() == store->get_zone()->get_zonegroup().get_id())
+ if (zonegroup.get_id() == driver->get_zone()->get_zonegroup().get_id())
continue;
if (zonegroup.endpoints.empty())
continue;
hint = conns.emplace_hint(
hint, std::piecewise_construct,
std::forward_as_tuple(zonegroup.get_id()),
- std::forward_as_tuple(cct, store, zonegroup.get_id(), zonegroup.endpoints, zonegroup.api_name));
+ std::forward_as_tuple(cct, driver, zonegroup.get_id(), zonegroup.endpoints, zonegroup.api_name));
}
}
// update other zone endpoints
for (auto& z : my_zonegroup.zones) {
auto& zone = z.second;
- if (zone.id == store->get_zone()->get_id())
+ if (zone.id == driver->get_zone()->get_id())
continue;
if (zone.endpoints.empty())
continue;
hint = conns.emplace_hint(
hint, std::piecewise_construct,
std::forward_as_tuple(zone.id),
- std::forward_as_tuple(cct, store, zone.id, zone.endpoints, my_zonegroup.api_name));
+ std::forward_as_tuple(cct, driver, zone.id, zone.endpoints, my_zonegroup.api_name));
}
if (conns.empty()) {
{
ldout(cct, 4) << "paused for realm update" << dendl;
std::lock_guard<std::mutex> lock(mutex);
- store = nullptr;
+ driver = nullptr;
}
-void RGWPeriodPusher::resume(rgw::sal::Store* store)
+void RGWPeriodPusher::resume(rgw::sal::Driver* driver)
{
std::lock_guard<std::mutex> lock(mutex);
- this->store = store;
+ this->driver = driver;
ldout(cct, 4) << "resume with " << pending_periods.size()
<< " periods pending" << dendl;
class RGWPeriodPusher final : public RGWRealmWatcher::Watcher,
public RGWRealmReloader::Pauser {
public:
- explicit RGWPeriodPusher(const DoutPrefixProvider *dpp, rgw::sal::Store* store, optional_yield y);
+ explicit RGWPeriodPusher(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, optional_yield y);
~RGWPeriodPusher() override;
/// respond to realm notifications by pushing new periods to other zones
void pause() override;
/// continue processing notifications with a new RGWRados instance
- void resume(rgw::sal::Store* store) override;
+ void resume(rgw::sal::Driver* driver) override;
private:
void handle_notify(RGWZonesNeedPeriod&& period);
CephContext *const cct;
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
std::mutex mutex;
epoch_t realm_epoch{0}; //< the current realm epoch being sent
process->req_throttle.put(1);
perfcounter->inc(l_rgw_qactive, -1);
}
-bool rate_limit(rgw::sal::Store* store, req_state* s) {
+bool rate_limit(rgw::sal::Driver* driver, req_state* s) {
// we dont want to limit health check or system or admin requests
const auto& is_admin_or_system = s->user->get_info();
if ((s->op_type == RGW_OP_GET_HEALTH_CHECK) || is_admin_or_system.admin || is_admin_or_system.system)
RGWRateLimitInfo global_anon;
RGWRateLimitInfo* bucket_ratelimit;
RGWRateLimitInfo* user_ratelimit;
- store->get_ratelimit(global_bucket, global_user, global_anon);
+ driver->get_ratelimit(global_bucket, global_user, global_anon);
bucket_ratelimit = &global_bucket;
user_ratelimit = &global_user;
s->user->get_id().to_str(userfind);
RGWRequest * const req,
req_state * const s,
optional_yield y,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
const bool skip_retarget)
{
ldpp_dout(op, 2) << "init permissions" << dendl;
op->pre_exec();
ldpp_dout(op, 2) << "check rate limiting" << dendl;
- if (rate_limit(store, s)) {
+ if (rate_limit(driver, s)) {
return -ERR_RATE_LIMITED;
}
ldpp_dout(op, 2) << "executing" << dendl;
return 0;
}
-int process_request(rgw::sal::Store* const store,
+int process_request(rgw::sal::Driver* const driver,
RGWREST* const rest,
RGWRequest* const req,
const std::string& frontend_prefix,
req_state *s = &rstate;
s->ratelimit_data = ratelimit;
- std::unique_ptr<rgw::sal::User> u = store->get_user(rgw_user());
+ std::unique_ptr<rgw::sal::User> u = driver->get_user(rgw_user());
s->set_user(u);
if (ret < 0) {
return ret;
}
- s->req_id = store->zone_unique_id(req->id);
- s->trans_id = store->zone_unique_trans_id(req->id);
- s->host_id = store->get_host_id();
+ s->req_id = driver->zone_unique_id(req->id);
+ s->trans_id = driver->zone_unique_trans_id(req->id);
+ s->host_id = driver->get_host_id();
s->yield = yield;
ldpp_dout(s, 2) << "initializing for trans_id = " << s->trans_id << dendl;
int init_error = 0;
bool should_log = false;
RGWRESTMgr *mgr;
- RGWHandler_REST *handler = rest->get_handler(store, s,
+ RGWHandler_REST *handler = rest->get_handler(driver, s,
auth_registry,
frontend_prefix,
client_io, &mgr, &init_error);
} else if (rc < 0) {
ldpp_dout(op, 5) << "WARNING: failed to read pre request script. error: " << rc << dendl;
} else {
- rc = rgw::lua::request::execute(store, rest, olog, s, op, script);
+ rc = rgw::lua::request::execute(driver, rest, olog, s, op, script);
if (rc < 0) {
ldpp_dout(op, 5) << "WARNING: failed to execute pre request script. error: " << rc << dendl;
}
s->trace->SetAttribute(tracing::rgw::OP, op->name());
s->trace->SetAttribute(tracing::rgw::TYPE, tracing::rgw::REQUEST);
- ret = rgw_process_authenticated(handler, op, req, s, yield, store);
+ ret = rgw_process_authenticated(handler, op, req, s, yield, driver);
if (ret < 0) {
abort_early(s, op, ret, handler, yield);
goto done;
} else if (rc < 0) {
ldpp_dout(op, 5) << "WARNING: failed to read post request script. error: " << rc << dendl;
} else {
- rc = rgw::lua::request::execute(store, rest, olog, s, op, script);
+ rc = rgw::lua::request::execute(driver, rest, olog, s, op, script);
if (rc < 0) {
ldpp_dout(op, 5) << "WARNING: failed to execute post request script. error: " << rc << dendl;
}
}
struct RGWProcessEnv {
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
RGWREST *rest;
OpsLogSink *olog;
int port;
std::string uri_prefix;
std::shared_ptr<rgw::auth::StrategyRegistry> auth_registry;
- //maybe there is a better place to store the rate limit data structure
+ //maybe there is a better place to store the rate limit data structure
ActiveRateLimiter* ratelimiting;
rgw::lua::Background* lua_background;
};
std::deque<RGWRequest*> m_req_queue;
protected:
CephContext *cct;
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
rgw_auth_registry_ptr_t auth_registry;
OpsLogSink* olog;
ThreadPool m_tp;
const int num_threads,
RGWFrontendConfig* const conf)
: cct(cct),
- store(pe->store),
+ driver(pe->driver),
auth_registry(pe->auth_registry),
olog(pe->olog),
m_tp(cct, "RGWProcess::m_tp", "tp_rgw_process", num_threads),
sock_fd(-1),
uri_prefix(pe->uri_prefix),
lua_background(pe->lua_background),
- lua_manager(store->get_lua_manager()),
+ lua_manager(driver->get_lua_manager()),
req_wq(this,
ceph::make_timespan(g_conf()->rgw_op_thread_timeout),
ceph::make_timespan(g_conf()->rgw_op_thread_suicide_timeout),
m_tp.pause();
}
- void unpause_with_new_config(rgw::sal::Store* const store,
+ void unpause_with_new_config(rgw::sal::Driver* const driver,
rgw_auth_registry_ptr_t auth_registry) {
- this->store = store;
+ this->driver = driver;
this->auth_registry = std::move(auth_registry);
- lua_manager = store->get_lua_manager();
+ lua_manager = driver->get_lua_manager();
m_tp.unpause();
}
void set_access_key(RGWAccessKey& key) { access_key = key; }
};
/* process stream request */
-extern int process_request(rgw::sal::Store* store,
+extern int process_request(rgw::sal::Driver* driver,
RGWREST* rest,
RGWRequest* req,
const std::string& frontend_prefix,
RGWRequest* req,
req_state* s,
optional_yield y,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
bool skip_retarget = false);
#undef dout_context
template<class T>
class RGWQuotaCache {
protected:
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
lru_map<T, RGWQuotaCacheStats> stats_map;
RefCountedWaitObject *async_refcount;
virtual void data_modified(const rgw_user& user, rgw_bucket& bucket) {}
public:
- RGWQuotaCache(rgw::sal::Store* _store, int size) : store(_store), stats_map(size) {
+ RGWQuotaCache(rgw::sal::Driver* _driver, int size) : driver(_driver), stats_map(size) {
async_refcount = new RefCountedWaitObject;
}
virtual ~RGWQuotaCache() {
class AsyncRefreshHandler {
protected:
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
RGWQuotaCache<T> *cache;
public:
- AsyncRefreshHandler(rgw::sal::Store* _store, RGWQuotaCache<T> *_cache) : store(_store), cache(_cache) {}
+ AsyncRefreshHandler(rgw::sal::Driver* _driver, RGWQuotaCache<T> *_cache) : driver(_driver), cache(_cache) {}
virtual ~AsyncRefreshHandler() {}
virtual int init_fetch() = 0;
template<class T>
void RGWQuotaCache<T>::async_refresh_fail(const rgw_user& user, rgw_bucket& bucket)
{
- ldout(store->ctx(), 20) << "async stats refresh response for bucket=" << bucket << dendl;
+ ldout(driver->ctx(), 20) << "async stats refresh response for bucket=" << bucket << dendl;
async_refcount->put();
}
template<class T>
void RGWQuotaCache<T>::async_refresh_response(const rgw_user& user, rgw_bucket& bucket, RGWStorageStats& stats)
{
- ldout(store->ctx(), 20) << "async stats refresh response for bucket=" << bucket << dendl;
+ ldout(driver->ctx(), 20) << "async stats refresh response for bucket=" << bucket << dendl;
RGWQuotaCacheStats qs;
qs.stats = stats;
qs.expiration = ceph_clock_now();
qs.async_refresh_time = qs.expiration;
- qs.expiration += store->ctx()->_conf->rgw_bucket_quota_ttl;
- qs.async_refresh_time += store->ctx()->_conf->rgw_bucket_quota_ttl / 2;
+ qs.expiration += driver->ctx()->_conf->rgw_bucket_quota_ttl;
+ qs.async_refresh_time += driver->ctx()->_conf->rgw_bucket_quota_ttl / 2;
map_add(user, bucket, qs);
}
public RGWGetBucketStats_CB {
rgw_user user;
public:
- BucketAsyncRefreshHandler(rgw::sal::Store* _store, RGWQuotaCache<rgw_bucket> *_cache,
+ BucketAsyncRefreshHandler(rgw::sal::Driver* _driver, RGWQuotaCache<rgw_bucket> *_cache,
const rgw_user& _user, const rgw_bucket& _bucket) :
- RGWQuotaCache<rgw_bucket>::AsyncRefreshHandler(_store, _cache),
+ RGWQuotaCache<rgw_bucket>::AsyncRefreshHandler(_driver, _cache),
RGWGetBucketStats_CB(_bucket), user(_user) {}
void drop_reference() override { put(); }
{
std::unique_ptr<rgw::sal::Bucket> rbucket;
- const DoutPrefix dp(store->ctx(), dout_subsys, "rgw bucket async refresh handler: ");
- int r = store->get_bucket(&dp, nullptr, bucket, &rbucket, null_yield);
+ const DoutPrefix dp(driver->ctx(), dout_subsys, "rgw bucket async refresh handler: ");
+ int r = driver->get_bucket(&dp, nullptr, bucket, &rbucket, null_yield);
if (r < 0) {
ldpp_dout(&dp, 0) << "could not get bucket info for bucket=" << bucket << " r=" << r << dendl;
return r;
void BucketAsyncRefreshHandler::handle_response(const int r)
{
if (r < 0) {
- ldout(store->ctx(), 20) << "AsyncRefreshHandler::handle_response() r=" << r << dendl;
+ ldout(driver->ctx(), 20) << "AsyncRefreshHandler::handle_response() r=" << r << dendl;
cache->async_refresh_fail(user, bucket);
return;
}
int fetch_stats_from_storage(const rgw_user& user, const rgw_bucket& bucket, RGWStorageStats& stats, optional_yield y, const DoutPrefixProvider *dpp) override;
public:
- explicit RGWBucketStatsCache(rgw::sal::Store* _store) : RGWQuotaCache<rgw_bucket>(_store, _store->ctx()->_conf->rgw_bucket_quota_cache_size) {
+ explicit RGWBucketStatsCache(rgw::sal::Driver* _driver) : RGWQuotaCache<rgw_bucket>(_driver, _driver->ctx()->_conf->rgw_bucket_quota_cache_size) {
}
AsyncRefreshHandler *allocate_refresh_handler(const rgw_user& user, const rgw_bucket& bucket) override {
- return new BucketAsyncRefreshHandler(store, this, user, bucket);
+ return new BucketAsyncRefreshHandler(driver, this, user, bucket);
}
};
int RGWBucketStatsCache::fetch_stats_from_storage(const rgw_user& _u, const rgw_bucket& _b, RGWStorageStats& stats, optional_yield y, const DoutPrefixProvider *dpp)
{
- std::unique_ptr<rgw::sal::User> user = store->get_user(_u);
+ std::unique_ptr<rgw::sal::User> user = driver->get_user(_u);
std::unique_ptr<rgw::sal::Bucket> bucket;
- int r = store->get_bucket(dpp, user.get(), _b, &bucket, y);
+ int r = driver->get_bucket(dpp, user.get(), _b, &bucket, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "could not get bucket info for bucket=" << _b << " r=" << r << dendl;
return r;
const DoutPrefixProvider *dpp;
rgw_bucket bucket;
public:
- UserAsyncRefreshHandler(const DoutPrefixProvider *_dpp, rgw::sal::Store* _store, RGWQuotaCache<rgw_user> *_cache,
+ UserAsyncRefreshHandler(const DoutPrefixProvider *_dpp, rgw::sal::Driver* _driver, RGWQuotaCache<rgw_user> *_cache,
const rgw_user& _user, const rgw_bucket& _bucket) :
- RGWQuotaCache<rgw_user>::AsyncRefreshHandler(_store, _cache),
+ RGWQuotaCache<rgw_user>::AsyncRefreshHandler(_driver, _cache),
RGWGetUserStats_CB(_user),
dpp(_dpp),
bucket(_bucket) {}
int UserAsyncRefreshHandler::init_fetch()
{
- std::unique_ptr<rgw::sal::User> ruser = store->get_user(user);
+ std::unique_ptr<rgw::sal::User> ruser = driver->get_user(user);
ldpp_dout(dpp, 20) << "initiating async quota refresh for user=" << user << dendl;
int r = ruser->read_stats_async(dpp, this);
void UserAsyncRefreshHandler::handle_response(int r)
{
if (r < 0) {
- ldout(store->ctx(), 20) << "AsyncRefreshHandler::handle_response() r=" << r << dendl;
+ ldout(driver->ctx(), 20) << "AsyncRefreshHandler::handle_response() r=" << r << dendl;
cache->async_refresh_fail(user, bucket);
return;
}
}
public:
- RGWUserStatsCache(const DoutPrefixProvider *dpp, rgw::sal::Store* _store, bool quota_threads)
- : RGWQuotaCache<rgw_user>(_store, _store->ctx()->_conf->rgw_bucket_quota_cache_size), dpp(dpp)
+ RGWUserStatsCache(const DoutPrefixProvider *dpp, rgw::sal::Driver* _driver, bool quota_threads)
+ : RGWQuotaCache<rgw_user>(_driver, _driver->ctx()->_conf->rgw_bucket_quota_cache_size), dpp(dpp)
{
if (quota_threads) {
- buckets_sync_thread = new BucketsSyncThread(store->ctx(), this);
+ buckets_sync_thread = new BucketsSyncThread(driver->ctx(), this);
buckets_sync_thread->create("rgw_buck_st_syn");
- user_sync_thread = new UserSyncThread(store->ctx(), this);
+ user_sync_thread = new UserSyncThread(driver->ctx(), this);
user_sync_thread->create("rgw_user_st_syn");
} else {
buckets_sync_thread = NULL;
}
AsyncRefreshHandler *allocate_refresh_handler(const rgw_user& user, const rgw_bucket& bucket) override {
- return new UserAsyncRefreshHandler(dpp, store, this, user, bucket);
+ return new UserAsyncRefreshHandler(dpp, driver, this, user, bucket);
}
bool going_down() {
optional_yield y,
const DoutPrefixProvider *dpp)
{
- std::unique_ptr<rgw::sal::User> user = store->get_user(_u);
+ std::unique_ptr<rgw::sal::User> user = driver->get_user(_u);
int r = user->read_stats(dpp, y, &stats);
if (r < 0) {
ldpp_dout(dpp, 0) << "could not get user stats for user=" << user << dendl;
int RGWUserStatsCache::sync_bucket(const rgw_user& _u, rgw_bucket& _b, optional_yield y, const DoutPrefixProvider *dpp)
{
- std::unique_ptr<rgw::sal::User> user = store->get_user(_u);
+ std::unique_ptr<rgw::sal::User> user = driver->get_user(_u);
std::unique_ptr<rgw::sal::Bucket> bucket;
- int r = store->get_bucket(dpp, user.get(), _b, &bucket, y);
+ int r = driver->get_bucket(dpp, user.get(), _b, &bucket, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "could not get bucket info for bucket=" << _b << " r=" << r << dendl;
return r;
RGWStorageStats stats;
ceph::real_time last_stats_sync;
ceph::real_time last_stats_update;
- std::unique_ptr<rgw::sal::User> user = store->get_user(rgw_user(_u.to_str()));
+ std::unique_ptr<rgw::sal::User> user = driver->get_user(rgw_user(_u.to_str()));
int ret = user->read_stats(dpp, y, &stats, &last_stats_sync, &last_stats_update);
if (ret < 0) {
return ret;
}
- if (!store->ctx()->_conf->rgw_user_quota_sync_idle_users &&
+ if (!driver->ctx()->_conf->rgw_user_quota_sync_idle_users &&
last_stats_update < last_stats_sync) {
ldpp_dout(dpp, 20) << "user is idle, not doing a full sync (user=" << user << ")" << dendl;
return 0;
}
real_time when_need_full_sync = last_stats_sync;
- when_need_full_sync += make_timespan(store->ctx()->_conf->rgw_user_quota_sync_wait_time);
+ when_need_full_sync += make_timespan(driver->ctx()->_conf->rgw_user_quota_sync_wait_time);
// check if enough time passed since last full sync
/* FIXME: missing check? */
- ret = rgw_user_sync_all_stats(dpp, store, user.get(), y);
+ ret = rgw_user_sync_all_stats(dpp, driver, user.get(), y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed user stats sync, ret=" << ret << dendl;
return ret;
string key = "user";
void *handle;
- int ret = store->meta_list_keys_init(dpp, key, string(), &handle);
+ int ret = driver->meta_list_keys_init(dpp, key, string(), &handle);
if (ret < 0) {
ldpp_dout(dpp, 10) << "ERROR: can't get key: ret=" << ret << dendl;
return ret;
do {
list<string> keys;
- ret = store->meta_list_keys_next(dpp, handle, max, keys, &truncated);
+ ret = driver->meta_list_keys_next(dpp, handle, max, keys, &truncated);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: lists_keys_next(): ret=" << ret << dendl;
goto done;
ret = 0;
done:
- store->meta_list_keys_complete(handle);
+ driver->meta_list_keys_complete(handle);
return ret;
}
class RGWQuotaHandlerImpl : public RGWQuotaHandler {
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
RGWBucketStatsCache bucket_stats_cache;
RGWUserStatsCache user_stats_cache;
return 0;
}
public:
- RGWQuotaHandlerImpl(const DoutPrefixProvider *dpp, rgw::sal::Store* _store, bool quota_threads) : store(_store),
- bucket_stats_cache(_store),
- user_stats_cache(dpp, _store, quota_threads) {}
+ RGWQuotaHandlerImpl(const DoutPrefixProvider *dpp, rgw::sal::Driver* _driver, bool quota_threads) : driver(_driver),
+ bucket_stats_cache(_driver),
+ user_stats_cache(dpp, _driver, quota_threads) {}
int check_quota(const DoutPrefixProvider *dpp,
const rgw_user& user,
* fetch that info and not rely on cached data
*/
- const DoutPrefix dp(store->ctx(), dout_subsys, "rgw quota handler: ");
+ const DoutPrefix dp(driver->ctx(), dout_subsys, "rgw quota handler: ");
if (quota.bucket_quota.enabled) {
RGWStorageStats bucket_stats;
int ret = bucket_stats_cache.get_stats(user, bucket, bucket_stats, y, &dp);
};
-RGWQuotaHandler *RGWQuotaHandler::generate_handler(const DoutPrefixProvider *dpp, rgw::sal::Store* store, bool quota_threads)
+RGWQuotaHandler *RGWQuotaHandler::generate_handler(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, bool quota_threads)
{
- return new RGWQuotaHandlerImpl(dpp, store, quota_threads);
+ return new RGWQuotaHandlerImpl(dpp, driver, quota_threads);
}
void RGWQuotaHandler::free_handler(RGWQuotaHandler *handler)
virtual void update_stats(const rgw_user& bucket_owner, rgw_bucket& bucket, int obj_delta, uint64_t added_bytes, uint64_t removed_bytes) = 0;
- static RGWQuotaHandler *generate_handler(const DoutPrefixProvider *dpp, rgw::sal::Store* store, bool quota_threads);
+ static RGWQuotaHandler *generate_handler(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, bool quota_threads);
static void free_handler(RGWQuotaHandler *handler);
};
static RGWObjCategory main_category = RGWObjCategory::Main;
#define RGW_USAGE_OBJ_PREFIX "usage."
-rgw_raw_obj rgw_obj_select::get_raw_obj(rgw::sal::RadosStore* store) const
+rgw_raw_obj rgw_obj_select::get_raw_obj(rgw::sal::RadosStore* driver) const
{
if (!is_raw) {
rgw_raw_obj r;
- store->get_raw_obj(placement_rule, obj, &r);
+ driver->get_raw_obj(placement_rule, obj, &r);
return r;
}
return raw_obj;
RGWHTTPManager http_manager;
public:
- RGWMetaNotifierManager(RGWRados *_store) : RGWCoroutinesManager(_store->ctx(), _store->get_cr_registry()), store(_store),
+ RGWMetaNotifierManager(RGWRados *_driver) : RGWCoroutinesManager(_driver->ctx(), _driver->get_cr_registry()), store(_driver),
http_manager(store->ctx(), completion_mgr) {
http_manager.start();
}
RGWHTTPManager http_manager;
public:
- RGWDataNotifierManager(RGWRados *_store) : RGWCoroutinesManager(_store->ctx(), _store->get_cr_registry()), store(_store),
+ RGWDataNotifierManager(RGWRados *_driver) : RGWCoroutinesManager(_driver->ctx(), _driver->get_cr_registry()), store(_driver),
http_manager(store->ctx(), completion_mgr) {
http_manager.start();
}
notify_mgr.stop();
}
public:
- RGWMetaNotifier(RGWRados *_store, RGWMetadataLog* log)
- : RGWRadosThread(_store, "meta-notifier"), notify_mgr(_store), log(log) {}
+ RGWMetaNotifier(RGWRados *_driver, RGWMetadataLog* log)
+ : RGWRadosThread(_driver, "meta-notifier"), notify_mgr(_driver), log(log) {}
int process(const DoutPrefixProvider *dpp) override;
};
notify_mgr.stop();
}
public:
- RGWDataNotifier(RGWRados *_store) : RGWRadosThread(_store, "data-notifier"), notify_mgr(_store) {}
+ RGWDataNotifier(RGWRados *_driver) : RGWRadosThread(_driver, "data-notifier"), notify_mgr(_driver) {}
int process(const DoutPrefixProvider *dpp) override;
};
class RGWSyncProcessorThread : public RGWRadosThread {
public:
- RGWSyncProcessorThread(RGWRados *_store, const string& thread_name = "radosgw") : RGWRadosThread(_store, thread_name) {}
- RGWSyncProcessorThread(RGWRados *_store) : RGWRadosThread(_store) {}
+ RGWSyncProcessorThread(RGWRados *_driver, const string& thread_name = "radosgw") : RGWRadosThread(_driver, thread_name) {}
+ RGWSyncProcessorThread(RGWRados *_driver) : RGWRadosThread(_driver) {}
~RGWSyncProcessorThread() override {}
int init(const DoutPrefixProvider *dpp) override = 0 ;
int process(const DoutPrefixProvider *dpp) override = 0;
sync.stop();
}
public:
- RGWMetaSyncProcessorThread(rgw::sal::RadosStore* _store, RGWAsyncRadosProcessor *async_rados)
- : RGWSyncProcessorThread(_store->getRados(), "meta-sync"), sync(_store, async_rados) {}
+ RGWMetaSyncProcessorThread(rgw::sal::RadosStore* _driver, RGWAsyncRadosProcessor *async_rados)
+ : RGWSyncProcessorThread(_driver->getRados(), "meta-sync"), sync(_driver, async_rados) {}
void wakeup_sync_shards(set<int>& shard_ids) {
for (set<int>::iterator iter = shard_ids.begin(); iter != shard_ids.end(); ++iter) {
sync.stop();
}
public:
- RGWDataSyncProcessorThread(rgw::sal::RadosStore* _store, RGWAsyncRadosProcessor *async_rados,
+ RGWDataSyncProcessorThread(rgw::sal::RadosStore* _driver, RGWAsyncRadosProcessor *async_rados,
const RGWZone* source_zone)
- : RGWSyncProcessorThread(_store->getRados(), "data-sync"),
+ : RGWSyncProcessorThread(_driver->getRados(), "data-sync"),
counters(sync_counters::build(store->ctx(), std::string("data-sync-from-") + source_zone->name)),
- sync(_store, async_rados, source_zone->id, counters.get()),
+ sync(_driver, async_rados, source_zone->id, counters.get()),
initialized(false) {}
void wakeup_sync_shards(bc::flat_map<int, bc::flat_set<rgw_data_notify_entry> >& entries) {
}
public:
- RGWIndexCompletionManager(RGWRados *_store) :
- store(_store),
+ RGWIndexCompletionManager(RGWRados *_driver) :
+ store(_driver),
num_shards(store->ctx()->_conf->rgw_thread_pool_size),
locks{ceph::make_lock_container<ceph::mutex>(
num_shards,
ldpp_dout(dpp, 5) << "note: GC not initialized" << dendl;
}
- obj_expirer = new RGWObjectExpirer(this->store);
+ obj_expirer = new RGWObjectExpirer(this->driver);
if (use_gc_thread && use_gc) {
gc->start_processor();
}
auto async_processor = svc.rados->get_async_processor();
std::lock_guard l{meta_sync_thread_lock};
- meta_sync_processor_thread = new RGWMetaSyncProcessorThread(this->store, async_processor);
+ meta_sync_processor_thread = new RGWMetaSyncProcessorThread(this->driver, async_processor);
ret = meta_sync_processor_thread->init(dpp);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to initialize meta sync thread" << dendl;
rgw::BucketTrimConfig config;
rgw::configure_bucket_trim(cct, config);
- bucket_trim.emplace(this->store, config);
+ bucket_trim.emplace(this->driver, config);
ret = bucket_trim->init();
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to start bucket trim manager" << dendl;
std::lock_guard dl{data_sync_thread_lock};
for (auto source_zone : svc.zone->get_data_sync_source_zones()) {
ldpp_dout(dpp, 5) << "starting data sync thread for zone " << source_zone->name << dendl;
- auto *thread = new RGWDataSyncProcessorThread(this->store, svc.rados->get_async_processor(), source_zone);
+ auto *thread = new RGWDataSyncProcessorThread(this->driver, svc.rados->get_async_processor(), source_zone);
ret = thread->init(dpp);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to initialize data sync thread" << dendl;
}
auto interval = cct->_conf->rgw_sync_log_trim_interval;
if (interval > 0) {
- sync_log_trimmer = new RGWSyncLogTrimThread(this->store, &*bucket_trim, interval);
+ sync_log_trimmer = new RGWSyncLogTrimThread(this->driver, &*bucket_trim, interval);
ret = sync_log_trimmer->init(dpp);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to initialize sync log trim thread" << dendl;
binfo_cache->init(svc.cache);
lc = new RGWLC();
- lc->initialize(cct, this->store);
+ lc->initialize(cct, this->driver);
if (use_lc_thread)
lc->start_processor();
- quota_handler = RGWQuotaHandler::generate_handler(dpp, this->store, quota_threads);
+ quota_handler = RGWQuotaHandler::generate_handler(dpp, this->driver, quota_threads);
bucket_index_max_shards = (cct->_conf->rgw_override_bucket_index_max_shards ? cct->_conf->rgw_override_bucket_index_max_shards :
zone.bucket_index_max_shards);
reshard_wait = std::make_shared<RGWReshardWait>();
- reshard = new RGWReshard(this->store);
+ reshard = new RGWReshard(this->driver);
// disable reshard thread based on zone/zonegroup support
run_reshard_thread = run_reshard_thread && svc.zone->can_reshard();
}
index_completion_manager = new RGWIndexCompletionManager(this);
- ret = rgw::notify::init(cct, store, dpp);
+ ret = rgw::notify::init(cct, driver, dpp);
if (ret < 0 ) {
ldpp_dout(dpp, 1) << "ERROR: failed to initialize notification manager" << dendl;
}
int RGWRados::init_ctl(const DoutPrefixProvider *dpp)
{
- return ctl.init(&svc, store, dpp);
+ return ctl.init(&svc, driver, dpp);
}
/**
bool fix, bool *need_fix, optional_yield y)
{
std::unique_ptr<rgw::sal::Bucket> bucket;
- store->get_bucket(nullptr, bucket_info, &bucket);
+ driver->get_bucket(nullptr, bucket_info, &bucket);
std::unique_ptr<rgw::sal::Object> obj = bucket->get_object(key);
if (need_fix) {
RGWObjState *astate = nullptr;
RGWObjManifest* manifest = nullptr;
- RGWObjectCtx rctx(this->store);
+ RGWObjectCtx rctx(this->driver);
r = get_obj_state(dpp, &rctx, bucket_info, obj.get(), &astate, &manifest, false, y);
if (r < 0)
return r;
if (manifest) {
RGWObjManifest::obj_iterator miter;
for (miter = manifest->obj_begin(dpp); miter != manifest->obj_end(dpp); ++miter) {
- rgw_raw_obj raw_loc = miter.get_location().get_raw_obj(store);
+ rgw_raw_obj raw_loc = miter.get_location().get_raw_obj(driver);
rgw_obj loc;
string oid;
string locator;
return -ERR_PRECONDITION_FAILED;
}
- rgw::sal::RadosBucket dest_bucket(store, dest_bucket_info);
- rgw::sal::RadosObject dest_obj(store, rgw_obj_key(buf), &dest_bucket);
+ rgw::sal::RadosBucket dest_bucket(driver, dest_bucket_info);
+ rgw::sal::RadosObject dest_obj(driver, rgw_obj_key(buf), &dest_bucket);
if (dest_bucket_info.versioning_enabled()){
dest_obj.gen_rand_obj_instance_name();
* irrelevant and may be safely skipped. */
std::map<std::string, ceph::bufferlist> no_attrs;
- rgw::sal::RadosBucket archive_bucket(store, archive_binfo);
- rgw::sal::RadosObject archive_obj(store, entry.key, &archive_bucket);
+ rgw::sal::RadosBucket archive_bucket(driver, archive_binfo);
+ rgw::sal::RadosObject archive_obj(driver, entry.key, &archive_bucket);
if (bucket->versioning_enabled()){
obj->gen_rand_obj_instance_name();
int RGWRados::rewrite_obj(rgw::sal::Object* obj, const DoutPrefixProvider *dpp, optional_yield y)
{
- RGWObjectCtx rctx(this->store);
+ RGWObjectCtx rctx(this->driver);
rgw::sal::Attrs attrset;
uint64_t obj_size;
ceph::real_time mtime;
attrset.erase(RGW_ATTR_TAIL_TAG);
attrset.erase(RGW_ATTR_STORAGE_CLASS);
- return store->getRados()->copy_obj_data(rctx, obj->get_bucket(),
- obj->get_bucket()->get_info().placement_rule,
- read_op, obj_size - 1, obj, NULL, mtime,
- attrset, 0, real_time(), NULL, dpp, y);
+ return this->copy_obj_data(rctx, obj->get_bucket(),
+ obj->get_bucket()->get_info().placement_rule,
+ read_op, obj_size - 1, obj, NULL, mtime,
+ attrset, 0, real_time(), NULL, dpp, y);
}
struct obj_time_weight {
rgw::BlockingAioThrottle aio(cct->_conf->rgw_put_obj_min_window_size);
using namespace rgw::putobj;
- AtomicObjectProcessor processor(&aio, this->store, nullptr, user_id,
+ AtomicObjectProcessor processor(&aio, this->driver, nullptr, user_id,
obj_ctx, dest_obj->clone(), olh_epoch,
tag, dpp, null_yield);
RGWRESTConn *conn;
ref_tag = tag + '\0';
cls_refcount_get(op, ref_tag, true);
- auto obj = svc.rados->obj(miter.get_location().get_raw_obj(store));
+ auto obj = svc.rados->obj(miter.get_location().get_raw_obj(driver));
ret = obj.open(dpp);
if (ret < 0) {
ldpp_dout(dpp, 0) << "failed to open rados context for " << obj << dendl;
using namespace rgw::putobj;
// do not change the null_yield in the initialization of this AtomicObjectProcessor
// it causes crashes in the ragweed tests
- AtomicObjectProcessor processor(&aio, this->store, &dest_placement,
+ AtomicObjectProcessor processor(&aio, this->driver, &dest_placement,
bucket->get_info().owner, obj_ctx,
dest_obj->clone(), olh_epoch, tag,
dpp, null_yield);
rgw_raw_obj raw_head;
obj_to_raw(manifest.get_head_placement_rule(), head_obj, &raw_head);
for (iter = manifest.obj_begin(dpp); iter != manifest.obj_end(dpp); ++iter) {
- const rgw_raw_obj& mobj = iter.get_location().get_raw_obj(store);
+ const rgw_raw_obj& mobj = iter.get_location().get_raw_obj(driver);
if (mobj == raw_head)
continue;
cls_rgw_obj_key key(mobj.oid);
return 0;
}
-int RGWRados::delete_obj(rgw::sal::Store* store,
+int RGWRados::delete_obj(rgw::sal::Driver* store,
const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
const rgw_obj& obj,
return index_op.complete_del(dpp, -1 /* pool */, 0, mtime, NULL);
}
-static void generate_fake_tag(const DoutPrefixProvider *dpp, rgw::sal::Store* store, map<string, bufferlist>& attrset, RGWObjManifest& manifest, bufferlist& manifest_bl, bufferlist& tag_bl)
+static void generate_fake_tag(const DoutPrefixProvider *dpp, rgw::sal::Driver* store, map<string, bufferlist>& attrset, RGWObjManifest& manifest, bufferlist& manifest_bl, bufferlist& tag_bl)
{
string tag;
}
std::unique_ptr<rgw::sal::Bucket> bucket;
- store->get_bucket(nullptr, bucket_info, &bucket);
+ driver->get_bucket(nullptr, bucket_info, &bucket);
std::unique_ptr<rgw::sal::Object> target_obj = bucket->get_object(target.key);
r = get_obj_state(dpp, &obj_ctx, bucket_info, target_obj.get(), target_state,
sm->manifest->has_explicit_objs()) {
RGWObjManifest::obj_iterator mi;
for (mi = sm->manifest->obj_begin(dpp); mi != sm->manifest->obj_end(dpp); ++mi) {
- ldpp_dout(dpp, 20) << "manifest: ofs=" << mi.get_ofs() << " loc=" << mi.get_location().get_raw_obj(store) << dendl;
+ ldpp_dout(dpp, 20) << "manifest: ofs=" << mi.get_ofs() << " loc=" << mi.get_location().get_raw_obj(driver) << dendl;
}
}
* Uh oh, something's wrong, object with manifest should have tag. Let's
* create one out of the manifest, would be unique
*/
- generate_fake_tag(dpp, store, s->attrset, *sm->manifest, manifest_bl, s->obj_tag);
+ generate_fake_tag(dpp, driver, s->attrset, *sm->manifest, manifest_bl, s->obj_tag);
s->fake_tag = true;
}
}
RGWObjManifest::obj_iterator iter = manifest->obj_find(dpp, ofs);
uint64_t stripe_ofs = iter.get_stripe_ofs();
- read_obj = iter.get_location().get_raw_obj(store->store);
+ read_obj = iter.get_location().get_raw_obj(store->driver);
len = std::min(len, iter.get_stripe_size() - (ofs - stripe_ofs));
read_ofs = iter.location_ofs() + (ofs - stripe_ofs);
reading_from_head = (read_obj == state.head_obj);
off_t next_stripe_ofs = stripe_ofs + iter.get_stripe_size();
while (ofs < next_stripe_ofs && ofs <= end) {
- read_obj = iter.get_location().get_raw_obj(store);
+ read_obj = iter.get_location().get_raw_obj(driver);
uint64_t read_len = std::min(len, iter.get_stripe_size() - (ofs - stripe_ofs));
read_ofs = iter.location_ofs() + (ofs - stripe_ofs);
// since we expect to do this rarely, we'll do our work in a
// block and erase our work after each try
- RGWObjectCtx obj_ctx(this->store);
+ RGWObjectCtx obj_ctx(this->driver);
const rgw_bucket& b = bs->bucket;
std::string bucket_id = b.get_key();
- RGWBucketReshardLock reshard_lock(this->store, bucket_info, true);
+ RGWBucketReshardLock reshard_lock(this->driver, bucket_info, true);
ret = reshard_lock.lock(dpp);
if (ret == -ENOENT) {
continue;
continue; // try again
}
- ret = RGWBucketReshard::clear_resharding(this->store, bucket_info, bucket_attrs, dpp);
+ ret = RGWBucketReshard::clear_resharding(this->driver, bucket_info, bucket_attrs, dpp);
reshard_lock.unlock();
if (ret == -ENOENT) {
ldpp_dout(dpp, 5) << __func__ <<
int RGWRados::process_lc(const std::unique_ptr<rgw::sal::Bucket>& optional_bucket)
{
RGWLC lc;
- lc.initialize(cct, this->store);
+ lc.initialize(cct, this->driver);
RGWLC::LCWorker worker(&lc, cct, &lc, 0);
auto ret = lc.process(&worker, optional_bucket, true /* once */);
lc.stop_processor(); // sets down_flag, but returns immediately
bucket_info.bucket << " dir_entry=" << list_state.key << dendl_bitx;
std::unique_ptr<rgw::sal::Bucket> bucket;
- store->get_bucket(nullptr, bucket_info, &bucket);
+ driver->get_bucket(nullptr, bucket_info, &bucket);
uint8_t suggest_flag = (svc.zone->get_zone().log_data ? CEPH_RGW_DIR_SUGGEST_LOG_OP : 0);
std::string loc;
RGWObjState *astate = NULL;
RGWObjManifest *manifest = nullptr;
- RGWObjectCtx rctx(this->store);
+ RGWObjectCtx rctx(this->driver);
int r = get_obj_state(dpp, &rctx, bucket_info, obj.get(), &astate, &manifest, false, y);
if (r < 0)
return r;
if (manifest) {
RGWObjManifest::obj_iterator miter;
for (miter = manifest->obj_begin(dpp); miter != manifest->obj_end(dpp); ++miter) {
- const rgw_raw_obj& raw_loc = miter.get_location().get_raw_obj(store);
+ const rgw_raw_obj& raw_loc = miter.get_location().get_raw_obj(driver);
rgw_obj loc;
RGWSI_Tier_RADOS::raw_obj_to_obj(manifest->get_obj().bucket, raw_loc, &loc);
int RGWRados::add_bucket_to_reshard(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, uint32_t new_num_shards)
{
- RGWReshard reshard(this->store, dpp);
+ RGWReshard reshard(this->driver, dpp);
uint32_t num_source_shards = rgw::current_num_shards(bucket_info.layout);
};
class RGWObjectCtx {
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
ceph::shared_mutex lock = ceph::make_shared_mutex("RGWObjectCtx");
std::map<rgw_obj, RGWObjStateManifest> objs_state;
public:
- explicit RGWObjectCtx(rgw::sal::Store* _store) : store(_store) {}
+ explicit RGWObjectCtx(rgw::sal::Driver* _driver) : driver(_driver) {}
RGWObjectCtx(RGWObjectCtx& _o) {
std::unique_lock wl{lock};
- this->store = _o.store;
+ this->driver = _o.driver;
this->objs_state = _o.objs_state;
}
- rgw::sal::Store* get_store() {
- return store;
+ rgw::sal::Driver* get_driver() {
+ return driver;
}
RGWObjStateManifest *get_state(const rgw_obj& obj);
ceph::mutex lock = ceph::make_mutex("rados_timer_lock");
SafeTimer *timer;
- rgw::sal::RadosStore* store = nullptr;
+ rgw::sal::RadosStore* driver = nullptr;
RGWGC *gc = nullptr;
RGWLC *lc;
RGWObjectExpirer *obj_expirer;
void set_context(CephContext *_cct) {
cct = _cct;
}
- void set_store(rgw::sal::RadosStore* _store) {
- store = _store;
+ void set_store(rgw::sal::RadosStore* _driver) {
+ driver = _driver;
}
RGWServices svc;
int bucket_suspended(const DoutPrefixProvider *dpp, rgw_bucket& bucket, bool *suspended);
/** Delete an object.*/
- int delete_obj(rgw::sal::Store* store,
+ int delete_obj(rgw::sal::Driver* driver,
const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_owner,
const rgw_obj& src_obj,
static constexpr bool USE_SAFE_TIMER_CALLBACKS = false;
-RGWRealmReloader::RGWRealmReloader(rgw::sal::Store*& store, std::map<std::string, std::string>& service_map_meta,
+RGWRealmReloader::RGWRealmReloader(rgw::sal::Driver*& driver, std::map<std::string, std::string>& service_map_meta,
Pauser* frontends)
- : store(store),
+ : driver(driver),
service_map_meta(service_map_meta),
frontends(frontends),
- timer(store->ctx(), mutex, USE_SAFE_TIMER_CALLBACKS),
+ timer(driver->ctx(), mutex, USE_SAFE_TIMER_CALLBACKS),
mutex(ceph::make_mutex("RGWRealmReloader")),
reload_scheduled(nullptr)
{
void RGWRealmReloader::handle_notify(RGWRealmNotify type,
bufferlist::const_iterator& p)
{
- if (!store) {
+ if (!driver) {
/* we're in the middle of reload */
return;
}
- CephContext *const cct = store->ctx();
+ CephContext *const cct = driver->ctx();
std::lock_guard lock{mutex};
if (reload_scheduled) {
void RGWRealmReloader::reload()
{
- CephContext *const cct = store->ctx();
+ CephContext *const cct = driver->ctx();
const DoutPrefix dp(cct, dout_subsys, "rgw realm reloader: ");
ldpp_dout(&dp, 1) << "Pausing frontends for realm update..." << dendl;
// TODO: make RGWRados responsible for rgw_log_usage lifetime
rgw_log_usage_finalize();
- // destroy the existing store
- StoreManager::close_storage(store);
- store = nullptr;
+ // destroy the existing driver
+ DriverManager::close_storage(driver);
+ driver = nullptr;
- ldpp_dout(&dp, 1) << "Store closed" << dendl;
+ ldpp_dout(&dp, 1) << "driver closed" << dendl;
{
// allow a new notify to reschedule us. it's important that we do this
// before we start loading the new realm, or we could miss some updates
}
- while (!store) {
- // recreate and initialize a new store
- StoreManager::Config cfg;
+ while (!driver) {
+ // recreate and initialize a new driver
+ DriverManager::Config cfg;
cfg.store_name = "rados";
cfg.filter_name = "none";
- store =
- StoreManager::get_storage(&dp, cct,
+ driver =
+ DriverManager::get_storage(&dp, cct,
cfg,
cct->_conf->rgw_enable_gc_threads,
cct->_conf->rgw_enable_lc_threads,
cct->_conf.get_val<bool>("rgw_dynamic_resharding"),
cct->_conf->rgw_cache_enabled);
- ldpp_dout(&dp, 1) << "Creating new store" << dendl;
+ ldpp_dout(&dp, 1) << "Creating new driver" << dendl;
- rgw::sal::Store* store_cleanup = nullptr;
+ rgw::sal::Driver* store_cleanup = nullptr;
{
std::unique_lock lock{mutex};
// don't want to assert or abort the entire cluster. instead, just
// sleep until we get another notification, and retry until we get
// a working configuration
- if (store == nullptr) {
+ if (driver == nullptr) {
ldpp_dout(&dp, -1) << "Failed to reinitialize RGWRados after a realm "
"configuration update. Waiting for a new update." << dendl;
timer.cancel_event(reload_scheduled);
reload_scheduled = nullptr;
- // if we successfully created a store, clean it up outside of the lock,
+ // if we successfully created a driver, clean it up outside of the lock,
// then continue to loop and recreate another
- std::swap(store, store_cleanup);
+ std::swap(driver, store_cleanup);
}
}
ldpp_dout(&dp, 4) << "Got another notification, restarting RGWRados "
"initialization." << dendl;
- StoreManager::close_storage(store_cleanup);
+ DriverManager::close_storage(store_cleanup);
}
}
- int r = store->register_to_service_map(&dp, "rgw", service_map_meta);
+ int r = driver->register_to_service_map(&dp, "rgw", service_map_meta);
if (r < 0) {
ldpp_dout(&dp, -1) << "ERROR: failed to register to service map: " << cpp_strerror(-r) << dendl;
/* ignore error */
}
- ldpp_dout(&dp, 1) << "Finishing initialization of new store" << dendl;
- // finish initializing the new store
+ ldpp_dout(&dp, 1) << "Finishing initialization of new driver" << dendl;
+ // finish initializing the new driver
ldpp_dout(&dp, 1) << " - REST subsystem init" << dendl;
- rgw_rest_init(cct, store->get_zone()->get_zonegroup());
+ rgw_rest_init(cct, driver->get_zone()->get_zonegroup());
ldpp_dout(&dp, 1) << " - usage subsystem init" << dendl;
- rgw_log_usage_init(cct, store);
+ rgw_log_usage_init(cct, driver);
ldpp_dout(&dp, 1) << "Resuming frontends with new realm configuration." << dendl;
- frontends->resume(store);
+ frontends->resume(driver);
}
/// pause all frontends while realm reconfiguration is in progress
virtual void pause() = 0;
/// resume all frontends with the given RGWRados instance
- virtual void resume(rgw::sal::Store* store) = 0;
+ virtual void resume(rgw::sal::Driver* driver) = 0;
};
- RGWRealmReloader(rgw::sal::Store*& store, std::map<std::string, std::string>& service_map_meta,
+ RGWRealmReloader(rgw::sal::Driver*& driver, std::map<std::string, std::string>& service_map_meta,
Pauser* frontends);
~RGWRealmReloader() override;
class C_Reload; //< Context that calls reload()
- /// main()'s Store pointer as a reference, modified by reload()
- rgw::sal::Store*& store;
+ /// main()'s driver pointer as a reference, modified by reload()
+ rgw::sal::Driver*& driver;
std::map<std::string, std::string>& service_map_meta;
Pauser *const frontends;
}
if (op) {
- op->init(store, s, this);
+ op->init(driver, s, this);
}
return op;
} /* get_op */
ldpp_dout(op, -1) << "Error reading IAM User Policy: " << e.what() << dendl;
}
}
- rgw_build_iam_environment(store, s);
+ rgw_build_iam_environment(driver, s);
return 0;
}
}
RGWHandler_REST* RGWREST::get_handler(
- rgw::sal::Store* const store,
+ rgw::sal::Driver* const driver,
req_state* const s,
const rgw::auth::StrategyRegistry& auth_registry,
const std::string& frontend_prefix,
*pmgr = m;
}
- RGWHandler_REST* handler = m->get_handler(store, s, auth_registry, frontend_prefix);
+ RGWHandler_REST* handler = m->get_handler(driver, s, auth_registry, frontend_prefix);
if (! handler) {
*init_error = -ERR_METHOD_NOT_ALLOWED;
return NULL;
}
- *init_error = handler->init(store, s, rio);
+ *init_error = handler->init(driver, s, rio);
if (*init_error < 0) {
m->put_handler(handler);
return nullptr;
public:
RGWGetObj_ObjStore() : sent_header(false) {}
- void init(rgw::sal::Store* store, req_state *s, RGWHandler *h) override {
- RGWGetObj::init(store, s, h);
+ void init(rgw::sal::Driver* driver, req_state *s, RGWHandler *h) override {
+ RGWGetObj::init(driver, s, h);
sent_header = false;
}
RGWRESTFlusher flusher;
public:
- void init(rgw::sal::Store* store, req_state *s,
+ void init(rgw::sal::Driver* driver, req_state *s,
RGWHandler *dialect_handler) override {
- RGWOp::init(store, s, dialect_handler);
+ RGWOp::init(driver, s, dialect_handler);
flusher.init(s, this);
}
void send_response() override;
}
virtual RGWHandler_REST* get_handler(
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
req_state* const s,
const rgw::auth::StrategyRegistry& auth_registry,
const std::string& frontend_prefix
static int preprocess(req_state *s, rgw::io::BasicClient* rio);
public:
RGWREST() {}
- RGWHandler_REST *get_handler(rgw::sal::Store* store,
+ RGWHandler_REST *get_handler(rgw::sal::Driver* driver,
req_state *s,
const rgw::auth::StrategyRegistry& auth_registry,
const std::string& frontend_prefix,
RGWRESTMgr **pmgr,
int *init_error);
#if 0
- RGWHandler *get_handler(RGWRados *store, req_state *s,
+ RGWHandler *get_handler(RGWRados *driver, req_state *s,
RGWLibIO *io, RGWRESTMgr **pmgr,
int *init_error);
#endif
op_state.set_bucket_name(bucket);
op_state.set_fetch_stats(fetch_stats);
- op_ret = RGWBucketAdminOp::info(store, op_state, flusher, y, this);
+ op_ret = RGWBucketAdminOp::info(driver, op_state, flusher, y, this);
}
class RGWOp_Get_Policy : public RGWRESTOp {
op_state.set_bucket_name(bucket);
op_state.set_object(object);
- op_ret = RGWBucketAdminOp::get_policy(store, op_state, flusher, this);
+ op_ret = RGWBucketAdminOp::get_policy(driver, op_state, flusher, this);
}
class RGWOp_Check_Bucket_Index : public RGWRESTOp {
op_state.set_fix_index(fix_index);
op_state.set_check_objects(check_objects);
- op_ret = RGWBucketAdminOp::check_index(store, op_state, flusher, s->yield, s);
+ op_ret = RGWBucketAdminOp::check_index(driver, op_state, flusher, s->yield, s);
}
class RGWOp_Bucket_Link : public RGWRESTOp {
op_state.set_new_bucket_name(new_bucket_name);
bufferlist data;
- op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = driver->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
}
- op_ret = RGWBucketAdminOp::link(store, op_state, s);
+ op_ret = RGWBucketAdminOp::link(driver, op_state, s);
}
class RGWOp_Bucket_Unlink : public RGWRESTOp {
op_state.set_bucket_name(bucket);
bufferlist data;
- op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = driver->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
}
- op_ret = RGWBucketAdminOp::unlink(store, op_state, s);
+ op_ret = RGWBucketAdminOp::unlink(driver, op_state, s);
}
class RGWOp_Bucket_Remove : public RGWRESTOp {
/* FIXME We're abusing the owner of the bucket to pass the user, so that it can be forwarded to
* the master. This user is actually the OP caller, not the bucket owner. */
- op_ret = store->get_bucket(s, s->user.get(), string(), bucket_name, &bucket, y);
+ op_ret = driver->get_bucket(s, s->user.get(), string(), bucket_name, &bucket, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "get_bucket returned ret=" << op_ret << dendl;
if (op_ret == -ENOENT) {
RGWQuotaInfo quota;
if (!use_http_params) {
bool empty;
- op_ret = get_json_input(store->ctx(), s, quota, QUOTA_INPUT_MAX_LEN, &empty);
+ op_ret = get_json_input(driver->ctx(), s, quota, QUOTA_INPUT_MAX_LEN, &empty);
if (op_ret < 0) {
if (!empty)
return;
}
if (use_http_params) {
std::unique_ptr<rgw::sal::Bucket> bucket;
- op_ret = store->get_bucket(s, nullptr, uid.tenant, bucket_name, &bucket, s->yield);
+ op_ret = driver->get_bucket(s, nullptr, uid.tenant, bucket_name, &bucket, s->yield);
if (op_ret < 0) {
return;
}
op_state.set_bucket_name(bucket_name);
op_state.set_quota(quota);
- op_ret = RGWBucketAdminOp::set_quota(store, op_state, s);
+ op_ret = RGWBucketAdminOp::set_quota(driver, op_state, s);
}
class RGWOp_Sync_Bucket : public RGWRESTOp {
op_state.set_tenant(tenant);
op_state.set_sync_bucket(sync_bucket);
- op_ret = RGWBucketAdminOp::sync_bucket(store, op_state, s);
+ op_ret = RGWBucketAdminOp::sync_bucket(driver, op_state, s);
}
class RGWOp_Object_Remove: public RGWRESTOp {
op_state.set_bucket_name(bucket);
op_state.set_object(object);
- op_ret = RGWBucketAdminOp::remove_object(store, op_state, s);
+ op_ret = RGWBucketAdminOp::remove_object(driver, op_state, s);
}
return new RGWOp_Object_Remove;
return new RGWOp_Bucket_Remove;
-}
\ No newline at end of file
+}
RGWRESTMgr_Bucket() = default;
~RGWRESTMgr_Bucket() override = default;
- RGWHandler_REST* get_handler(rgw::sal::Store* store,
+ RGWHandler_REST* get_handler(rgw::sal::Driver* driver,
req_state*,
const rgw::auth::StrategyRegistry& auth_registry,
const std::string&) override {
using namespace std;
void RGWOp_ZoneConfig_Get::send_response() {
- const RGWZoneParams& zone_params = static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->get_zone_params();
+ const RGWZoneParams& zone_params = static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->get_zone_params();
set_req_state_err(s, op_ret);
dump_errno(s);
int verify_permission(optional_yield) override {
return check_caps(s->user->get_caps());
}
- void execute(optional_yield) override {} /* store already has the info we need, just need to send response */
+ void execute(optional_yield) override {} /* driver already has the info we need, just need to send response */
void send_response() override ;
const char* name() const override {
return "get_zone_config";
RGWRESTMgr_Config() = default;
~RGWRESTMgr_Config() override = default;
- RGWHandler_REST* get_handler(rgw::sal::Store* ,
+ RGWHandler_REST* get_handler(rgw::sal::Driver* ,
req_state*,
const rgw::auth::StrategyRegistry& auth_registry,
const std::string&) override {
using namespace std;
-RGWRESTConn::RGWRESTConn(CephContext *_cct, rgw::sal::Store* store,
+RGWRESTConn::RGWRESTConn(CephContext *_cct, rgw::sal::Driver* driver,
const string& _remote_id,
const list<string>& remote_endpoints,
std::optional<string> _api_name,
api_name(_api_name),
host_style(_host_style)
{
- if (store) {
- key = store->get_zone()->get_system_key();
- self_zone_group = store->get_zone()->get_zonegroup().get_id();
+ if (driver) {
+ key = driver->get_zone()->get_system_key();
+ self_zone_group = driver->get_zone()->get_zonegroup().get_id();
}
}
public:
RGWRESTConn(CephContext *_cct,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
const std::string& _remote_id,
const std::list<std::string>& endpoints,
std::optional<std::string> _api_name,
public:
- S3RESTConn(CephContext *_cct, rgw::sal::Store* store, const std::string& _remote_id, const std::list<std::string>& endpoints, std::optional<std::string> _api_name, HostStyle _host_style = PathStyle) :
- RGWRESTConn(_cct, store, _remote_id, endpoints, _api_name, _host_style) {}
+ S3RESTConn(CephContext *_cct, rgw::sal::Driver* driver, const std::string& _remote_id, const std::list<std::string>& endpoints, std::optional<std::string> _api_name, HostStyle _host_style = PathStyle) :
+ RGWRESTConn(_cct, driver, _remote_id, endpoints, _api_name, _host_style) {}
S3RESTConn(CephContext *_cct, const std::string& _remote_id, const std::list<std::string>& endpoints, RGWAccessKey _cred, std::string _zone_group, std::optional<std::string> _api_name, HostStyle _host_style = PathStyle):
RGWRESTConn(_cct, _remote_id, endpoints, _cred, _zone_group, _api_name, _host_style) {}
~S3RESTConn() override = default;
return nullptr;
}
-int RGWHandler_REST_IAM::init(rgw::sal::Store* store,
+int RGWHandler_REST_IAM::init(rgw::sal::Driver* driver,
req_state *s,
rgw::io::BasicClient *cio)
{
return ret;
}
- return RGWHandler_REST::init(store, s, cio);
+ return RGWHandler_REST::init(driver, s, cio);
}
int RGWHandler_REST_IAM::authorize(const DoutPrefixProvider* dpp, optional_yield y)
{
- return RGW_Auth_S3::authorize(dpp, store, auth_registry, s, y);
+ return RGW_Auth_S3::authorize(dpp, driver, auth_registry, s, y);
}
int RGWHandler_REST_IAM::init_from_header(req_state* s,
}
RGWHandler_REST*
-RGWRESTMgr_IAM::get_handler(rgw::sal::Store* store,
+RGWRESTMgr_IAM::get_handler(rgw::sal::Driver* driver,
req_state* const s,
const rgw::auth::StrategyRegistry& auth_registry,
const std::string& frontend_prefix)
bl_post_body(bl_post_body) {}
~RGWHandler_REST_IAM() override = default;
- int init(rgw::sal::Store* store,
+ int init(rgw::sal::Driver* driver,
req_state *s,
rgw::io::BasicClient *cio) override;
int authorize(const DoutPrefixProvider* dpp, optional_yield y) override;
return this;
}
- RGWHandler_REST* get_handler(rgw::sal::Store* store,
+ RGWHandler_REST* get_handler(rgw::sal::Driver* driver,
req_state*,
const rgw::auth::StrategyRegistry&,
const std::string&) override;
formatter->open_array_section("storage_backends");
// for now, just return the backend that is accessible
formatter->open_object_section("dummy");
- formatter->dump_string("name", store->get_name());
- formatter->dump_string("cluster_id", store->get_cluster_id(this, y));
+ formatter->dump_string("name", driver->get_name());
+ formatter->dump_string("cluster_id", driver->get_cluster_id(this, y));
formatter->close_section();
formatter->close_section();
formatter->close_section();
RGWRESTMgr_Info() = default;
~RGWRESTMgr_Info() override = default;
- RGWHandler_REST* get_handler(rgw::sal::Store* store,
+ RGWHandler_REST* get_handler(rgw::sal::Driver* driver,
req_state*,
const rgw::auth::StrategyRegistry& auth_registry,
const std::string&) override {
if (period.empty()) {
ldpp_dout(this, 5) << "Missing period id trying to use current" << dendl;
- period = store->get_zone()->get_current_period_id();
+ period = driver->get_zone()->get_current_period_id();
if (period.empty()) {
ldpp_dout(this, 5) << "Missing period id" << dendl;
op_ret = -EINVAL;
}
}
- RGWMetadataLog meta_log{s->cct, static_cast<rgw::sal::RadosStore*>(store)->svc()->zone, static_cast<rgw::sal::RadosStore*>(store)->svc()->cls, period};
+ RGWMetadataLog meta_log{s->cct, static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone, static_cast<rgw::sal::RadosStore*>(driver)->svc()->cls, period};
meta_log.init_list_entries(shard_id, {}, {}, marker, &handle);
for (list<cls_log_entry>::iterator iter = entries.begin();
iter != entries.end(); ++iter) {
cls_log_entry& entry = *iter;
- static_cast<rgw::sal::RadosStore*>(store)->ctl()->meta.mgr->dump_log_entry(entry, s->formatter);
+ static_cast<rgw::sal::RadosStore*>(driver)->ctl()->meta.mgr->dump_log_entry(entry, s->formatter);
flusher.flush();
}
s->formatter->close_section();
void RGWOp_MDLog_Info::execute(optional_yield y) {
num_objects = s->cct->_conf->rgw_md_log_max_shards;
- period = static_cast<rgw::sal::RadosStore*>(store)->svc()->mdlog->read_oldest_log_period(y, s);
+ period = static_cast<rgw::sal::RadosStore*>(driver)->svc()->mdlog->read_oldest_log_period(y, s);
op_ret = period.get_error();
}
if (period.empty()) {
ldpp_dout(this, 5) << "Missing period id trying to use current" << dendl;
- period = store->get_zone()->get_current_period_id();
+ period = driver->get_zone()->get_current_period_id();
if (period.empty()) {
ldpp_dout(this, 5) << "Missing period id" << dendl;
return;
}
}
- RGWMetadataLog meta_log{s->cct, static_cast<rgw::sal::RadosStore*>(store)->svc()->zone, static_cast<rgw::sal::RadosStore*>(store)->svc()->cls, period};
+ RGWMetadataLog meta_log{s->cct, static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone, static_cast<rgw::sal::RadosStore*>(driver)->svc()->cls, period};
op_ret = meta_log.get_info(this, shard_id, &info);
}
if (period.empty()) {
ldpp_dout(this, 5) << "Missing period id trying to use current" << dendl;
- period = store->get_zone()->get_current_period_id();
+ period = driver->get_zone()->get_current_period_id();
if (period.empty()) {
ldpp_dout(this, 5) << "Missing period id" << dendl;
return;
}
}
- RGWMetadataLog meta_log{s->cct, static_cast<rgw::sal::RadosStore*>(store)->svc()->zone, static_cast<rgw::sal::RadosStore*>(store)->svc()->cls, period};
+ RGWMetadataLog meta_log{s->cct, static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone, static_cast<rgw::sal::RadosStore*>(driver)->svc()->cls, period};
op_ret = meta_log.trim(this, shard_id, {}, {}, {}, marker);
}
if (period.empty()) {
ldpp_dout(this, 5) << "Missing period id trying to use current" << dendl;
- period = store->get_zone()->get_current_period_id();
+ period = driver->get_zone()->get_current_period_id();
}
if (period.empty() ||
return;
}
- RGWMetadataLog meta_log{s->cct, static_cast<rgw::sal::RadosStore*>(store)->svc()->zone, static_cast<rgw::sal::RadosStore*>(store)->svc()->cls, period};
+ RGWMetadataLog meta_log{s->cct, static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone, static_cast<rgw::sal::RadosStore*>(driver)->svc()->cls, period};
unsigned dur;
dur = (unsigned)strict_strtol(duration_str.c_str(), 10, &err);
if (!err.empty() || dur <= 0) {
if (period.empty()) {
ldpp_dout(this, 5) << "Missing period id trying to use current" << dendl;
- period = store->get_zone()->get_current_period_id();
+ period = driver->get_zone()->get_current_period_id();
}
if (period.empty() ||
return;
}
- RGWMetadataLog meta_log{s->cct, static_cast<rgw::sal::RadosStore*>(store)->svc()->zone, static_cast<rgw::sal::RadosStore*>(store)->svc()->cls, period};
+ RGWMetadataLog meta_log{s->cct, static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone, static_cast<rgw::sal::RadosStore*>(driver)->svc()->cls, period};
op_ret = meta_log.unlock(s, shard_id, zone_id, locker_id);
}
return;
}
- if (store->ctx()->_conf->subsys.should_gather<ceph_subsys_rgw, 20>()) {
+ if (driver->ctx()->_conf->subsys.should_gather<ceph_subsys_rgw, 20>()) {
for (set<int>::iterator iter = updated_shards.begin(); iter != updated_shards.end(); ++iter) {
ldpp_dout(this, 20) << __func__ << "(): updated shard=" << *iter << dendl;
}
}
- store->wakeup_meta_sync_shards(updated_shards);
+ driver->wakeup_meta_sync_shards(updated_shards);
op_ret = 0;
}
b.name = bn;
b.bucket_id = bucket_instance;
}
- op_ret = store->get_bucket(s, nullptr, b, &bucket, y);
+ op_ret = driver->get_bucket(s, nullptr, b, &bucket, y);
if (op_ret < 0) {
ldpp_dout(this, 5) << "could not get bucket info for bucket=" << bucket_name << dendl;
return;
send_response();
do {
list<rgw_bi_log_entry> entries;
- int ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->bilog_rados->log_list(s, bucket->get_info(), log_layout, shard_id,
+ int ret = static_cast<rgw::sal::RadosStore*>(driver)->svc()->bilog_rados->log_list(s, bucket->get_info(), log_layout, shard_id,
marker, max_entries - count,
entries, &truncated);
if (ret < 0) {
b.name = bn;
b.bucket_id = bucket_instance;
}
- op_ret = store->get_bucket(s, nullptr, b, &bucket, y);
+ op_ret = driver->get_bucket(s, nullptr, b, &bucket, y);
if (op_ret < 0) {
ldpp_dout(this, 5) << "could not get bucket info for bucket=" << bucket_name << dendl;
return;
b.name = bn;
b.bucket_id = bucket_instance;
}
- op_ret = store->get_bucket(s, nullptr, b, &bucket, y);
+ op_ret = driver->get_bucket(s, nullptr, b, &bucket, y);
if (op_ret < 0) {
ldpp_dout(this, 5) << "could not get bucket info for bucket=" << bucket_name << dendl;
return;
}
- op_ret = bilog_trim(this, static_cast<rgw::sal::RadosStore*>(store),
+ op_ret = bilog_trim(this, static_cast<rgw::sal::RadosStore*>(driver),
bucket->get_info(), gen, shard_id,
start_marker, end_marker);
if (op_ret < 0) {
// Note that last_marker is updated to be the marker of the last
// entry listed
- op_ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->datalog_rados->list_entries(this, shard_id,
+ op_ret = static_cast<rgw::sal::RadosStore*>(driver)->svc()->datalog_rados->list_entries(this, shard_id,
max_entries, entries,
marker, &last_marker,
&truncated);
return;
}
- op_ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->datalog_rados->get_info(this, shard_id, &info);
+ op_ret = static_cast<rgw::sal::RadosStore*>(driver)->svc()->datalog_rados->get_info(this, shard_id, &info);
}
void RGWOp_DATALog_ShardInfo::send_response() {
return;
}
- if (store->ctx()->_conf->subsys.should_gather<ceph_subsys_rgw, 20>()) {
+ if (driver->ctx()->_conf->subsys.should_gather<ceph_subsys_rgw, 20>()) {
for (bc::flat_map<int, bc::flat_set<rgw_data_notify_entry> >::iterator iter = updated_shards.begin(); iter != updated_shards.end(); ++iter) {
ldpp_dout(this, 20) << __func__ << "(): updated shard=" << iter->first << dendl;
bc::flat_set<rgw_data_notify_entry>& entries = iter->second;
}
}
- store->wakeup_data_sync_shards(this, source_zone, updated_shards);
+ driver->wakeup_data_sync_shards(this, source_zone, updated_shards);
op_ret = 0;
}
return;
}
- if (store->ctx()->_conf->subsys.should_gather<ceph_subsys_rgw, 20>()) {
+ if (driver->ctx()->_conf->subsys.should_gather<ceph_subsys_rgw, 20>()) {
for (bc::flat_map<int, bc::flat_set<rgw_data_notify_entry> >::iterator iter =
updated_shards.begin(); iter != updated_shards.end(); ++iter) {
ldpp_dout(this, 20) << __func__ << "(): updated shard=" << iter->first << dendl;
}
}
- store->wakeup_data_sync_shards(this, source_zone, updated_shards);
+ driver->wakeup_data_sync_shards(this, source_zone, updated_shards);
op_ret = 0;
}
return;
}
- op_ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->datalog_rados->trim_entries(this, shard_id, marker);
+ op_ret = static_cast<rgw::sal::RadosStore*>(driver)->svc()->datalog_rados->trim_entries(this, shard_id, marker);
}
// not in header to avoid pulling in rgw_sync.h
void RGWOp_MDLog_Status::execute(optional_yield y)
{
- auto sync = static_cast<rgw::sal::RadosStore*>(store)->getRados()->get_meta_sync_manager();
+ auto sync = static_cast<rgw::sal::RadosStore*>(driver)->getRados()->get_meta_sync_manager();
if (sync == nullptr) {
ldpp_dout(this, 1) << "no sync manager" << dendl;
op_ret = -ENOENT;
// read the bucket instance info for num_shards
std::unique_ptr<rgw::sal::Bucket> bucket;
- op_ret = store->get_bucket(s, nullptr, b, &bucket, y);
+ op_ret = driver->get_bucket(s, nullptr, b, &bucket, y);
if (op_ret < 0) {
ldpp_dout(this, 4) << "failed to read bucket info: " << cpp_strerror(op_ret) << dendl;
return;
}
}
- const auto& local_zone_id = store->get_zone()->get_id();
+ const auto& local_zone_id = driver->get_zone()->get_id();
if (!merge) {
rgw_sync_bucket_pipe pipe;
op_ret = rgw_read_bucket_full_sync_status(
this,
- static_cast<rgw::sal::RadosStore*>(store),
+ static_cast<rgw::sal::RadosStore*>(driver),
pipe,
&status.sync_status,
s->yield);
op_ret = rgw_read_bucket_inc_sync_status(
this,
- static_cast<rgw::sal::RadosStore*>(store),
+ static_cast<rgw::sal::RadosStore*>(driver),
pipe,
status.sync_status.incremental_gen,
&status.inc_status);
rgw_zone_id source_zone_id(source_zone);
RGWBucketSyncPolicyHandlerRef source_handler;
- op_ret = store->get_sync_policy_handler(s, source_zone_id, source_bucket, &source_handler, y);
+ op_ret = driver->get_sync_policy_handler(s, source_zone_id, source_bucket, &source_handler, y);
if (op_ret < 0) {
ldpp_dout(this, -1) << "could not get bucket sync policy handler (r=" << op_ret << ")" << dendl;
return;
if (*pipe.dest.bucket != pinfo->bucket) {
opt_dest_info.emplace();
std::unique_ptr<rgw::sal::Bucket> dest_bucket;
- op_ret = store->get_bucket(s, nullptr, *pipe.dest.bucket, &dest_bucket, y);
+ op_ret = driver->get_bucket(s, nullptr, *pipe.dest.bucket, &dest_bucket, y);
if (op_ret < 0) {
ldpp_dout(this, 4) << "failed to read target bucket info (bucket=: " << cpp_strerror(op_ret) << dendl;
return;
op_ret = rgw_read_bucket_full_sync_status(
this,
- static_cast<rgw::sal::RadosStore*>(store),
+ static_cast<rgw::sal::RadosStore*>(driver),
pipe,
&status.sync_status,
s->yield);
}
current_status.resize(status.sync_status.shards_done_with_gen.size());
- int r = rgw_read_bucket_inc_sync_status(this, static_cast<rgw::sal::RadosStore*>(store),
+ int r = rgw_read_bucket_inc_sync_status(this, static_cast<rgw::sal::RadosStore*>(driver),
pipe, status.sync_status.incremental_gen, ¤t_status);
if (r < 0) {
ldpp_dout(this, -1) << "ERROR: rgw_read_bucket_inc_sync_status() on pipe=" << pipe << " returned ret=" << r << dendl;
void RGWOp_DATALog_Status::execute(optional_yield y)
{
const auto source_zone = s->info.args.get("source-zone");
- auto sync = store->get_data_sync_manager(source_zone);
+ auto sync = driver->get_data_sync_manager(source_zone);
if (sync == nullptr) {
ldpp_dout(this, 1) << "no sync manager for source-zone " << source_zone << dendl;
op_ret = -ENOENT;
RGWRESTMgr_Log() = default;
~RGWRESTMgr_Log() override = default;
- RGWHandler_REST* get_handler(rgw::sal::Store* store,
+ RGWHandler_REST* get_handler(rgw::sal::Driver* driver,
req_state* const,
const rgw::auth::StrategyRegistry& auth_registry,
const std::string& frontend_prefixs) override {
frame_metadata_key(s, metadata_key);
- auto meta_mgr = static_cast<rgw::sal::RadosStore*>(store)->ctl()->meta.mgr;
+ auto meta_mgr = static_cast<rgw::sal::RadosStore*>(driver)->ctl()->meta.mgr;
/* Get keys */
op_ret = meta_mgr->get(metadata_key, s->formatter, s->yield, s);
marker = "3:bf885d8f:root::sorry_janefonda_665:head";
*/
- op_ret = store->meta_list_keys_init(this, metadata_key, marker, &handle);
+ op_ret = driver->meta_list_keys_init(this, metadata_key, marker, &handle);
if (op_ret < 0) {
ldpp_dout(this, 5) << "ERROR: can't get key: " << cpp_strerror(op_ret) << dendl;
return;
do {
list<string> keys;
left = (max_entries_specified ? max_entries - count : max);
- op_ret = store->meta_list_keys_next(this, handle, left, keys, &truncated);
+ op_ret = driver->meta_list_keys_next(this, handle, left, keys, &truncated);
if (op_ret < 0) {
ldpp_dout(this, 5) << "ERROR: lists_keys_next(): " << cpp_strerror(op_ret)
<< dendl;
encode_json("count", count, s->formatter);
if (truncated) {
string esc_marker =
- rgw::to_base64(store->meta_get_marker(handle));
+ rgw::to_base64(driver->meta_get_marker(handle));
encode_json("marker", esc_marker, s->formatter);
}
s->formatter->close_section();
}
- store->meta_list_keys_complete(handle);
+ driver->meta_list_keys_complete(handle);
op_ret = 0;
}
}
}
- op_ret = static_cast<rgw::sal::RadosStore*>(store)->ctl()->meta.mgr->put(metadata_key, bl, s->yield, s, sync_type,
+ op_ret = static_cast<rgw::sal::RadosStore*>(driver)->ctl()->meta.mgr->put(metadata_key, bl, s->yield, s, sync_type,
false, &ondisk_version);
if (op_ret < 0) {
ldpp_dout(s, 5) << "ERROR: can't put key: " << cpp_strerror(op_ret) << dendl;
string metadata_key;
frame_metadata_key(s, metadata_key);
- op_ret = static_cast<rgw::sal::RadosStore*>(store)->ctl()->meta.mgr->remove(metadata_key, s->yield, s);
+ op_ret = static_cast<rgw::sal::RadosStore*>(driver)->ctl()->meta.mgr->remove(metadata_key, s->yield, s);
if (op_ret < 0) {
ldpp_dout(s, 5) << "ERROR: can't remove key: " << cpp_strerror(op_ret) << dendl;
return;
RGWRESTMgr_Metadata() = default;
~RGWRESTMgr_Metadata() override = default;
- RGWHandler_REST* get_handler(rgw::sal::Store* store,
+ RGWHandler_REST* get_handler(rgw::sal::Driver* driver,
req_state* const s,
const rgw::auth::StrategyRegistry& auth_registry,
const std::string& frontend_prefix) override {
return;
}
- std::unique_ptr<rgw::sal::RGWOIDCProvider> provider = store->get_oidc_provider();
+ std::unique_ptr<rgw::sal::RGWOIDCProvider> provider = driver->get_oidc_provider();
provider->set_url(provider_url);
provider->set_tenant(s->user->get_tenant());
provider->set_client_ids(client_ids);
void RGWDeleteOIDCProvider::execute(optional_yield y)
{
- std::unique_ptr<rgw::sal::RGWOIDCProvider> provider = store->get_oidc_provider();
+ std::unique_ptr<rgw::sal::RGWOIDCProvider> provider = driver->get_oidc_provider();
provider->set_arn(provider_arn);
provider->set_tenant(s->user->get_tenant());
op_ret = provider->delete_obj(s, y);
void RGWGetOIDCProvider::execute(optional_yield y)
{
- std::unique_ptr<rgw::sal::RGWOIDCProvider> provider = store->get_oidc_provider();
+ std::unique_ptr<rgw::sal::RGWOIDCProvider> provider = driver->get_oidc_provider();
provider->set_arn(provider_arn);
provider->set_tenant(s->user->get_tenant());
op_ret = provider->get(s);
void RGWListOIDCProviders::execute(optional_yield y)
{
vector<std::unique_ptr<rgw::sal::RGWOIDCProvider>> result;
- op_ret = store->get_oidc_providers(s, s->user->get_tenant(), result);
+ op_ret = driver->get_oidc_providers(s, s->user->get_tenant(), result);
if (op_ret == 0) {
s->formatter->open_array_section("ListOpenIDConnectProvidersResponse");
if (ratelimit_scope == "bucket" && !bucket_name.empty() && !global) {
std::unique_ptr<rgw::sal::Bucket> bucket;
- int r = store->get_bucket(s, nullptr, tenant_name, bucket_name, &bucket, y);
+ int r = driver->get_bucket(s, nullptr, tenant_name, bucket_name, &bucket, y);
if (r != 0) {
op_ret = r;
ldpp_dout(this, 0) << "Error on getting bucket info" << dendl;
RGWRateLimitInfo ratelimit_info;
rgw_user user(uid_str);
std::unique_ptr<rgw::sal::User> user_sal;
- user_sal = store->get_user(user);
+ user_sal = driver->get_user(user);
if (!rgw::sal::User::empty(user_sal)) {
op_ret = user_sal->load_user(this, y);
if (op_ret) {
flusher.flush();
}
if (global) {
- std::string realm_id = store->get_zone()->get_realm_id();
+ std::string realm_id = driver->get_zone()->get_realm_id();
RGWPeriodConfig period_config;
- op_ret = period_config.read(this, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, realm_id, y);
+ op_ret = period_config.read(this, static_cast<rgw::sal::RadosStore*>(driver)->svc()->sysobj, realm_id, y);
if (op_ret && op_ret != -ENOENT) {
ldpp_dout(this, 0) << "Error on period config read" << dendl;
return;
if (ratelimit_scope == "user" && !uid_str.empty() && !global) {
rgw_user user(uid_str);
std::unique_ptr<rgw::sal::User> user_sal;
- user_sal = store->get_user(user);
+ user_sal = driver->get_user(user);
if (!rgw::sal::User::empty(user_sal)) {
op_ret = user_sal->load_user(this, y);
if (op_ret) {
if (ratelimit_scope == "bucket" && !bucket_name.empty() && !global) {
ldpp_dout(this, 0) << "getting bucket info" << dendl;
std::unique_ptr<rgw::sal::Bucket> bucket;
- op_ret = store->get_bucket(this, nullptr, tenant_name, bucket_name, &bucket, y);
+ op_ret = driver->get_bucket(this, nullptr, tenant_name, bucket_name, &bucket, y);
if (op_ret) {
ldpp_dout(this, 0) << "Error on getting bucket info" << dendl;
return;
return;
}
if (global) {
- std::string realm_id = store->get_zone()->get_realm_id();
+ std::string realm_id = driver->get_zone()->get_realm_id();
RGWPeriodConfig period_config;
- op_ret = period_config.read(s, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, realm_id, y);
+ op_ret = period_config.read(s, static_cast<rgw::sal::RadosStore*>(driver)->svc()->sysobj, realm_id, y);
if (op_ret && op_ret != -ENOENT) {
ldpp_dout(this, 0) << "Error on period config read" << dendl;
return;
have_max_read_bytes, max_read_bytes, have_max_write_bytes, max_write_bytes,
have_enabled, enabled, ratelimit_configured, ratelimit_info);
period_config.bucket_ratelimit = ratelimit_info;
- op_ret = period_config.write(s, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, realm_id, y);
+ op_ret = period_config.write(s, static_cast<rgw::sal::RadosStore*>(driver)->svc()->sysobj, realm_id, y);
return;
}
if (ratelimit_scope == "anon") {
have_max_read_bytes, max_read_bytes, have_max_write_bytes, max_write_bytes,
have_enabled, enabled, ratelimit_configured, ratelimit_info);
period_config.anon_ratelimit = ratelimit_info;
- op_ret = period_config.write(s, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, realm_id, y);
+ op_ret = period_config.write(s, static_cast<rgw::sal::RadosStore*>(driver)->svc()->sysobj, realm_id, y);
return;
}
if (ratelimit_scope == "user") {
have_max_read_bytes, max_read_bytes, have_max_write_bytes, max_write_bytes,
have_enabled, enabled, ratelimit_configured, ratelimit_info);
period_config.user_ratelimit = ratelimit_info;
- op_ret = period_config.write(s, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, realm_id, y);
+ op_ret = period_config.write(s, static_cast<rgw::sal::RadosStore*>(driver)->svc()->sysobj, realm_id, y);
return;
}
}
RGWRESTMgr_Ratelimit() = default;
~RGWRESTMgr_Ratelimit() override = default;
- RGWHandler_REST *get_handler(rgw::sal::Store* store,
+ RGWHandler_REST *get_handler(rgw::sal::Driver* driver,
req_state*,
const rgw::auth::StrategyRegistry& auth_registry,
const std::string&) override {
return new RGWHandler_Ratelimit(auth_registry);
}
-};
\ No newline at end of file
+};
}
string role_name = s->info.args.get("RoleName");
- std::unique_ptr<rgw::sal::RGWRole> role = store->get_role(role_name,
+ std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name,
s->user->get_tenant());
if (op_ret = role->get(s, y); op_ret < 0) {
if (op_ret == -ENOENT) {
return;
}
std::string user_tenant = s->user->get_tenant();
- std::unique_ptr<rgw::sal::RGWRole> role = store->get_role(role_name,
+ std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name,
user_tenant,
role_path,
trust_policy,
std::string role_id;
- if (!store->is_meta_master()) {
+ if (!driver->is_meta_master()) {
RGWXMLDecoder::XMLParser parser;
if (!parser.init()) {
ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl;
RGWAccessKey cred = it->second;
key.key = cred.key;
}
- op_ret = store->forward_iam_request_to_master(s, key, nullptr, bl_post_body, &parser, s->info, y);
+ op_ret = driver->forward_iam_request_to_master(s, key, nullptr, bl_post_body, &parser, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << op_ret << dendl;
return;
return;
}
- if (!store->is_meta_master()) {
+ if (!driver->is_meta_master()) {
is_master = false;
RGWXMLDecoder::XMLParser parser;
if (!parser.init()) {
RGWAccessKey cred = it->second;
key.key = cred.key;
}
- master_op_ret = store->forward_iam_request_to_master(s, key, nullptr, bl_post_body, &parser, s->info, y);
+ master_op_ret = driver->forward_iam_request_to_master(s, key, nullptr, bl_post_body, &parser, s->info, y);
if (master_op_ret < 0) {
op_ret = master_op_ret;
ldpp_dout(this, 0) << "forward_iam_request_to_master returned ret=" << op_ret << dendl;
if (op_ret < 0) {
return;
}
- std::unique_ptr<rgw::sal::RGWRole> role = store->get_role(role_name,
+ std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name,
s->user->get_tenant());
op_ret = role->get(s, y);
return;
}
- if (!store->is_meta_master()) {
+ if (!driver->is_meta_master()) {
RGWXMLDecoder::XMLParser parser;
if (!parser.init()) {
ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl;
RGWAccessKey cred = it->second;
key.key = cred.key;
}
- op_ret = store->forward_iam_request_to_master(s, key, nullptr, bl_post_body, &parser, s->info, y);
+ op_ret = driver->forward_iam_request_to_master(s, key, nullptr, bl_post_body, &parser, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << op_ret << dendl;
return;
return;
}
vector<std::unique_ptr<rgw::sal::RGWRole>> result;
- op_ret = store->get_roles(s, y, path_prefix, s->user->get_tenant(), result);
+ op_ret = driver->get_roles(s, y, path_prefix, s->user->get_tenant(), result);
if (op_ret == 0) {
s->formatter->open_array_section("ListRolesResponse");
return;
}
- if (!store->is_meta_master()) {
+ if (!driver->is_meta_master()) {
RGWXMLDecoder::XMLParser parser;
if (!parser.init()) {
ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl;
RGWAccessKey cred = it->second;
key.key = cred.key;
}
- op_ret = store->forward_iam_request_to_master(s, key, nullptr, bl_post_body, &parser, s->info, y);
+ op_ret = driver->forward_iam_request_to_master(s, key, nullptr, bl_post_body, &parser, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << op_ret << dendl;
return;
return;
}
- if (!store->is_meta_master()) {
+ if (!driver->is_meta_master()) {
RGWXMLDecoder::XMLParser parser;
if (!parser.init()) {
ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl;
RGWAccessKey cred = it->second;
key.key = cred.key;
}
- op_ret = store->forward_iam_request_to_master(s, key, nullptr, bl_post_body, &parser, s->info, y);
+ op_ret = driver->forward_iam_request_to_master(s, key, nullptr, bl_post_body, &parser, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << op_ret << dendl;
return;
return;
}
- if (!store->is_meta_master()) {
+ if (!driver->is_meta_master()) {
RGWXMLDecoder::XMLParser parser;
if (!parser.init()) {
ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl;
RGWAccessKey cred = it->second;
key.key = cred.key;
}
- op_ret = store->forward_iam_request_to_master(s, key, nullptr, bl_post_body, &parser, s->info, y);
+ op_ret = driver->forward_iam_request_to_master(s, key, nullptr, bl_post_body, &parser, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << op_ret << dendl;
return;
return;
}
- if (!store->is_meta_master()) {
+ if (!driver->is_meta_master()) {
RGWXMLDecoder::XMLParser parser;
if (!parser.init()) {
ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl;
RGWAccessKey cred = it->second;
key.key = cred.key;
}
- op_ret = store->forward_iam_request_to_master(s, key, nullptr, bl_post_body, &parser, s->info, y);
+ op_ret = driver->forward_iam_request_to_master(s, key, nullptr, bl_post_body, &parser, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << op_ret << dendl;
return;
return;
}
- if (!store->is_meta_master()) {
+ if (!driver->is_meta_master()) {
RGWXMLDecoder::XMLParser parser;
if (!parser.init()) {
ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl;
RGWAccessKey cred = it->second;
key.key = cred.key;
}
- op_ret = store->forward_iam_request_to_master(s, key, nullptr, bl_post_body, &parser, s->info, y);
+ op_ret = driver->forward_iam_request_to_master(s, key, nullptr, bl_post_body, &parser, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << op_ret << dendl;
return;
s->formatter->dump_string("RequestId", s->trans_id);
s->formatter->close_section();
s->formatter->close_section();
-}
\ No newline at end of file
+}
ldpp_dout(dpp, 20) << "Read " << obj_tags.count() << "tags" << dendl;
// forward bucket tags requests to meta master zone
- if (!store->is_meta_master()) {
+ if (!driver->is_meta_master()) {
/* only need to keep this data around if we're not meta master */
in_data = std::move(data);
}
}
};
- set<rgw_zone_id> get_zone_ids_from_names(rgw::sal::Store* store,
+ set<rgw_zone_id> get_zone_ids_from_names(rgw::sal::Driver* driver,
const vector<string>& zone_names) const {
set<rgw_zone_id> ids;
for (auto& name : zone_names) {
std::unique_ptr<rgw::sal::Zone> zone;
- int ret = store->get_zone()->get_zonegroup().get_zone_by_name(name, &zone);
+ int ret = driver->get_zone()->get_zonegroup().get_zone_by_name(name, &zone);
if (ret >= 0) {
rgw_zone_id id = zone->get_id();
ids.insert(std::move(id));
return ids;
}
- vector<string> get_zone_names_from_ids(rgw::sal::Store* store,
+ vector<string> get_zone_names_from_ids(rgw::sal::Driver* driver,
const set<rgw_zone_id>& zone_ids) const {
vector<string> names;
for (auto& id : zone_ids) {
std::unique_ptr<rgw::sal::Zone> zone;
- int ret = store->get_zone()->get_zonegroup().get_zone_by_id(id.id, &zone);
+ int ret = driver->get_zone()->get_zonegroup().get_zone_by_id(id.id, &zone);
if (ret >= 0) {
names.emplace_back(zone->get_name());
}
return true;
}
- int to_sync_policy_pipe(req_state *s, rgw::sal::Store* store,
+ int to_sync_policy_pipe(req_state *s, rgw::sal::Driver* driver,
rgw_sync_bucket_pipes *pipe,
bool *enabled) const {
if (!is_valid(s->cct)) {
destination.bucket);
if (source && !source->zone_names.empty()) {
- pipe->source.zones = get_zone_ids_from_names(store, source->zone_names);
+ pipe->source.zones = get_zone_ids_from_names(driver, source->zone_names);
} else {
pipe->source.set_all_zones(true);
}
if (!destination.zone_names.empty()) {
- pipe->dest.zones = get_zone_ids_from_names(store, destination.zone_names);
+ pipe->dest.zones = get_zone_ids_from_names(driver, destination.zone_names);
} else {
pipe->dest.set_all_zones(true);
}
return 0;
}
- void from_sync_policy_pipe(rgw::sal::Store* store,
+ void from_sync_policy_pipe(rgw::sal::Driver* driver,
const rgw_sync_bucket_pipes& pipe,
bool enabled) {
id = pipe.id;
source.reset();
} else if (pipe.source.zones) {
source.emplace();
- source->zone_names = get_zone_names_from_ids(store, *pipe.source.zones);
+ source->zone_names = get_zone_names_from_ids(driver, *pipe.source.zones);
}
if (!pipe.dest.all_zones &&
pipe.dest.zones) {
- destination.zone_names = get_zone_names_from_ids(store, *pipe.dest.zones);
+ destination.zone_names = get_zone_names_from_ids(driver, *pipe.dest.zones);
}
if (pipe.params.dest.acl_translation) {
encode_xml("Rule", rules, f);
}
- int to_sync_policy_groups(req_state *s, rgw::sal::Store* store,
+ int to_sync_policy_groups(req_state *s, rgw::sal::Driver* driver,
vector<rgw_sync_policy_group> *result) const {
result->resize(2);
for (auto& rule : rules) {
rgw_sync_bucket_pipes pipe;
bool enabled;
- int r = rule.to_sync_policy_pipe(s, store, &pipe, &enabled);
+ int r = rule.to_sync_policy_pipe(s, driver, &pipe, &enabled);
if (r < 0) {
ldpp_dout(s, 5) << "NOTICE: failed to convert replication configuration into sync policy pipe (rule.id=" << rule.id << "): " << cpp_strerror(-r) << dendl;
return r;
return 0;
}
- void from_sync_policy_group(rgw::sal::Store* store,
+ void from_sync_policy_group(rgw::sal::Driver* driver,
const rgw_sync_policy_group& group) {
bool enabled = (group.status == rgw_sync_policy_group::Status::ENABLED);
for (auto& pipe : group.pipes) {
auto& rule = rules.emplace_back();
- rule.from_sync_policy_pipe(store, pipe, enabled);
+ rule.from_sync_policy_pipe(driver, pipe, enabled);
}
}
};
auto iter = policy->groups.find(enabled_group_id);
if (iter != policy->groups.end()) {
- conf.from_sync_policy_group(store, iter->second);
+ conf.from_sync_policy_group(driver, iter->second);
}
iter = policy->groups.find(disabled_group_id);
if (iter != policy->groups.end()) {
- conf.from_sync_policy_group(store, iter->second);
+ conf.from_sync_policy_group(driver, iter->second);
}
}
return -ERR_MALFORMED_XML;
}
- r = conf.to_sync_policy_groups(s, store, &sync_policy_groups);
+ r = conf.to_sync_policy_groups(s, driver, &sync_policy_groups);
if (r < 0) {
return r;
}
// forward requests to meta master zone
- if (!store->is_meta_master()) {
+ if (!driver->is_meta_master()) {
/* only need to keep this data around if we're not meta master */
in_data = std::move(data);
}
std::unique_ptr<rgw::sal::ZoneGroup> zonegroup;
string api_name;
- int ret = store->get_zonegroup(s->bucket->get_info().zonegroup, &zonegroup);
+ int ret = driver->get_zonegroup(s->bucket->get_info().zonegroup, &zonegroup);
if (ret >= 0) {
api_name = zonegroup->get_api_name();
} else {
return -EINVAL;
}
- if (!store->is_meta_master()) {
+ if (!driver->is_meta_master()) {
/* only need to keep this data around if we're not meta master */
in_data.append(data);
}
dump_start(s);
}
-static int create_s3_policy(req_state *s, rgw::sal::Store* store,
+static int create_s3_policy(req_state *s, rgw::sal::Driver* driver,
RGWAccessControlPolicy_S3& s3policy,
ACLOwner& owner)
{
if (!s->canned_acl.empty())
return -ERR_INVALID_REQUEST;
- return s3policy.create_from_headers(s, store, s->info.env, owner);
+ return s3policy.create_from_headers(s, driver, s->info.env, owner);
}
return s3policy.create_canned(owner, s->bucket_owner, s->canned_acl);
if (r) return r;
}
- r = create_s3_policy(s, store, s3policy, s->owner);
+ r = create_s3_policy(s, driver, s3policy, s->owner);
if (r < 0)
return r;
}
RGWAccessControlPolicy_S3 s3policy(s->cct);
- ret = create_s3_policy(s, store, s3policy, s->owner);
+ ret = create_s3_policy(s, driver, s3policy, s->owner);
if (ret < 0)
return ret;
return -EINVAL;
}
- s->object = store->get_object(rgw_obj_key(object_str));
+ s->object = driver->get_object(rgw_obj_key(object_str));
rebuild_key(s->object.get());
if (! storage_class.empty()) {
s->dest_placement.storage_class = storage_class;
- if (!store->valid_placement(s->dest_placement)) {
+ if (!driver->valid_placement(s->dest_placement)) {
ldpp_dout(this, 0) << "NOTICE: invalid dest placement: " << s->dest_placement.to_str() << dendl;
err_msg = "The storage class you specified is not valid";
return -EINVAL;
RGWAccessControlPolicy_S3 s3policy(s->cct);
/* build a policy for the target object */
- int r = create_s3_policy(s, store, s3policy, s->owner);
+ int r = create_s3_policy(s, driver, s3policy, s->owner);
if (r < 0)
return r;
return ret;
}
-int RGWPutACLs_ObjStore_S3::get_policy_from_state(rgw::sal::Store* store,
+int RGWPutACLs_ObjStore_S3::get_policy_from_state(rgw::sal::Driver* driver,
req_state *s,
stringstream& ss)
{
s->canned_acl.clear();
}
- int r = create_s3_policy(s, store, s3policy, owner);
+ int r = create_s3_policy(s, driver, s3policy, owner);
if (r < 0)
return r;
}
// forward bucket cors requests to meta master zone
- if (!store->is_meta_master()) {
+ if (!driver->is_meta_master()) {
/* only need to keep this data around if we're not meta master */
in_data.append(data);
}
int RGWInitMultipart_ObjStore_S3::get_params(optional_yield y)
{
RGWAccessControlPolicy_S3 s3policy(s->cct);
- op_ret = create_s3_policy(s, store, s3policy, s->owner);
+ op_ret = create_s3_policy(s, driver, s3policy, s->owner);
if (op_ret < 0)
return op_ret;
if (isSTSEnabled) {
RGWHandler_REST_STS sts_handler(auth_registry, post_body);
- sts_handler.init(store, s, s->cio);
+ sts_handler.init(driver, s, s->cio);
auto op = sts_handler.get_op();
if (op) {
return op;
if (isIAMEnabled) {
RGWHandler_REST_IAM iam_handler(auth_registry, data);
- iam_handler.init(store, s, s->cio);
+ iam_handler.init(driver, s, s->cio);
auto op = iam_handler.get_op();
if (op) {
return op;
if (isPSEnabled) {
RGWHandler_REST_PSTopic_AWS topic_handler(auth_registry, post_body);
- topic_handler.init(store, s, s->cio);
+ topic_handler.init(driver, s, s->cio);
auto op = topic_handler.get_op();
if (op) {
return op;
return RGWHandler_REST_PSNotifs_S3::create_put_op();
} else if (is_replication_op()) {
RGWBucketSyncPolicyHandlerRef sync_policy_handler;
- int ret = store->get_sync_policy_handler(s, nullopt, nullopt,
+ int ret = driver->get_sync_policy_handler(s, nullopt, nullopt,
&sync_policy_handler, null_yield);
if (ret < 0 || !sync_policy_handler ||
sync_policy_handler->is_legacy_config()) {
return new RGWOptionsCORS_ObjStore_S3;
}
-int RGWHandler_REST_S3::init_from_header(rgw::sal::Store* store,
+int RGWHandler_REST_S3::init_from_header(rgw::sal::Driver* driver,
req_state* s,
RGWFormat default_formatter,
bool configurable_format)
if (s->bucket) {
s->object = s->bucket->get_object(rgw_obj_key(encoded_obj_str, s->info.args.get("versionId")));
} else {
- s->object = store->get_object(rgw_obj_key(encoded_obj_str, s->info.args.get("versionId")));
+ s->object = driver->get_object(rgw_obj_key(encoded_obj_str, s->info.args.get("versionId")));
}
}
} else {
if (s->bucket) {
s->object = s->bucket->get_object(rgw_obj_key(req_name, s->info.args.get("versionId")));
} else {
- s->object = store->get_object(rgw_obj_key(req_name, s->info.args.get("versionId")));
+ s->object = driver->get_object(rgw_obj_key(req_name, s->info.args.get("versionId")));
}
}
return 0;
return 0;
}
-int RGWHandler_REST_S3::init(rgw::sal::Store* store, req_state *s,
+int RGWHandler_REST_S3::init(rgw::sal::Driver* driver, req_state *s,
rgw::io::BasicClient *cio)
{
int ret;
ldpp_dout(s, 0) << "failed to parse copy location" << dendl;
return -EINVAL; // XXX why not -ERR_INVALID_BUCKET_NAME or -ERR_BAD_URL?
}
- s->src_object = store->get_object(key);
+ s->src_object = driver->get_object(key);
}
const char *sc = s->info.env->get("HTTP_X_AMZ_STORAGE_CLASS");
s->info.storage_class = sc;
}
- return RGWHandler_REST::init(store, s, cio);
+ return RGWHandler_REST::init(driver, s, cio);
}
int RGWHandler_REST_S3::authorize(const DoutPrefixProvider *dpp, optional_yield y)
{
if (s->info.args.exists("Action") && s->info.args.get("Action") == "AssumeRoleWithWebIdentity") {
- return RGW_Auth_STS::authorize(dpp, store, auth_registry, s, y);
+ return RGW_Auth_STS::authorize(dpp, driver, auth_registry, s, y);
}
- return RGW_Auth_S3::authorize(dpp, store, auth_registry, s, y);
+ return RGW_Auth_S3::authorize(dpp, driver, auth_registry, s, y);
}
enum class AwsVersion {
* it tries AWS v4 before AWS v2
*/
int RGW_Auth_S3::authorize(const DoutPrefixProvider *dpp,
- rgw::sal::Store* const store,
+ rgw::sal::Driver* const driver,
const rgw::auth::StrategyRegistry& auth_registry,
req_state* const s, optional_yield y)
{
/* neither keystone and rados enabled; warn and exit! */
- if (!store->ctx()->_conf->rgw_s3_auth_use_rados &&
- !store->ctx()->_conf->rgw_s3_auth_use_keystone &&
- !store->ctx()->_conf->rgw_s3_auth_use_ldap) {
+ if (!driver->ctx()->_conf->rgw_s3_auth_use_rados &&
+ !driver->ctx()->_conf->rgw_s3_auth_use_keystone &&
+ !driver->ctx()->_conf->rgw_s3_auth_use_ldap) {
ldpp_dout(dpp, 0) << "WARNING: no authorization backend enabled! Users will never authenticate." << dendl;
return -EPERM;
}
return ret;
}
-int RGWHandler_Auth_S3::init(rgw::sal::Store* store, req_state *state,
+int RGWHandler_Auth_S3::init(rgw::sal::Driver* driver, req_state *state,
rgw::io::BasicClient *cio)
{
- int ret = RGWHandler_REST_S3::init_from_header(store, state, RGWFormat::JSON, true);
+ int ret = RGWHandler_REST_S3::init_from_header(driver, state, RGWFormat::JSON, true);
if (ret < 0)
return ret;
- return RGWHandler_REST::init(store, state, cio);
+ return RGWHandler_REST::init(driver, state, cio);
}
-RGWHandler_REST* RGWRESTMgr_S3::get_handler(rgw::sal::Store* store,
+RGWHandler_REST* RGWRESTMgr_S3::get_handler(rgw::sal::Driver* driver,
req_state* const s,
const rgw::auth::StrategyRegistry& auth_registry,
const std::string& frontend_prefix)
{
bool is_s3website = enable_s3website && (s->prot_flags & RGW_REST_WEBSITE);
int ret =
- RGWHandler_REST_S3::init_from_header(store, s,
+ RGWHandler_REST_S3::init_from_header(driver, s,
is_s3website ? RGWFormat::HTML :
RGWFormat::XML, true);
if (ret < 0)
return state->exists;
}
-int RGWHandler_REST_S3Website::init(rgw::sal::Store* store, req_state *s,
+int RGWHandler_REST_S3Website::init(rgw::sal::Driver* driver, req_state *s,
rgw::io::BasicClient* cio)
{
// save the original object name before retarget() replaces it with the
original_object_name = "";
}
- return RGWHandler_REST_S3::init(store, s, cio);
+ return RGWHandler_REST_S3::init(driver, s, cio);
}
int RGWHandler_REST_S3Website::retarget(RGWOp* op, RGWOp** new_op, optional_yield y) {
if (getop.get() == NULL) {
return -1; // Trigger double error handler
}
- getop->init(store, s, this);
+ getop->init(driver, s, this);
getop->range_str = NULL;
getop->if_mod = NULL;
getop->if_unmod = NULL;
/* This is okay. It's an error, so nothing will run after this, and it can be
* called by abort_early(), which can be called before s->object or s->bucket
* are set up. Note, it won't have bucket. */
- s->object = store->get_object(errordoc_key);
+ s->object = driver->get_object(errordoc_key);
ret = init_permissions(getop.get(), y);
if (ret < 0) {
//return error.
/*RGWUserInfo user_info;
user_info.user_id = base64_token.id;
- if (rgw_get_user_info_by_uid(store, user_info.user_id, user_info) >= 0) {
+ if (rgw_get_user_info_by_uid(driver, user_info.user_id, user_info) >= 0) {
if (user_info.type != TYPE_LDAP) {
ldpp_dout(dpp, 10) << "ERROR: User id of type: " << user_info.type << " is already present" << dendl;
return nullptr;
std::unique_ptr<rgw::sal::User> user;
const std::string access_key_id(_access_key_id);
/* TODO(rzarzynski): we need to have string-view taking variant. */
- if (store->get_user_by_access_key(dpp, access_key_id, y, &user) < 0) {
+ if (driver->get_user_by_access_key(dpp, access_key_id, y, &user) < 0) {
ldpp_dout(dpp, 5) << "error reading user info, uid=" << access_key_id
<< " can't authenticate" << dendl;
return result_t::deny(-ERR_INVALID_ACCESS_KEY);
rgw::auth::RoleApplier::Role r;
rgw::auth::RoleApplier::TokenAttrs t_attrs;
if (! token.roleId.empty()) {
- std::unique_ptr<rgw::sal::RGWRole> role = store->get_role(token.roleId);
+ std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(token.roleId);
if (role->get_by_id(dpp, y) < 0) {
return result_t::deny(-EPERM);
}
}
}
- user = store->get_user(token.user);
+ user = driver->get_user(token.user);
if (! token.user.empty() && token.acct_type != TYPE_ROLE) {
// get user info
int ret = user->load_user(dpp, y);
RGWPutACLs_ObjStore_S3() {}
~RGWPutACLs_ObjStore_S3() override {}
- int get_policy_from_state(rgw::sal::Store* store, req_state *s, std::stringstream& ss) override;
+ int get_policy_from_state(rgw::sal::Driver* driver, req_state *s, std::stringstream& ss) override;
void send_response() override;
int get_params(optional_yield y) override;
};
class RGW_Auth_S3 {
public:
static int authorize(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
const rgw::auth::StrategyRegistry& auth_registry,
req_state *s, optional_yield y);
};
static int validate_bucket_name(const std::string& bucket);
static int validate_object_name(const std::string& bucket);
- int init(rgw::sal::Store* store,
+ int init(rgw::sal::Driver* driver,
req_state *s,
rgw::io::BasicClient *cio) override;
int authorize(const DoutPrefixProvider *dpp, optional_yield y) override {
- return RGW_Auth_S3::authorize(dpp, store, auth_registry, s, y);
+ return RGW_Auth_S3::authorize(dpp, driver, auth_registry, s, y);
}
int postauth_init(optional_yield) override { return 0; }
};
protected:
const rgw::auth::StrategyRegistry& auth_registry;
public:
- static int init_from_header(rgw::sal::Store* store, req_state *s, RGWFormat default_formatter,
+ static int init_from_header(rgw::sal::Driver* driver, req_state *s, RGWFormat default_formatter,
bool configurable_format);
explicit RGWHandler_REST_S3(const rgw::auth::StrategyRegistry& auth_registry)
}
~RGWHandler_REST_S3() override = default;
- int init(rgw::sal::Store* store,
+ int init(rgw::sal::Driver* driver,
req_state *s,
rgw::io::BasicClient *cio) override;
int authorize(const DoutPrefixProvider *dpp, optional_yield y) override;
~RGWRESTMgr_S3() override = default;
- RGWHandler_REST *get_handler(rgw::sal::Store* store,
+ RGWHandler_REST *get_handler(rgw::sal::Driver* driver,
req_state* s,
const rgw::auth::StrategyRegistry& auth_registry,
const std::string& frontend_prefix) override;
using result_t = rgw::auth::Engine::result_t;
protected:
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
const rgw::auth::RemoteApplier::Factory* const apl_factory;
acl_strategy_t get_acl_strategy() const;
optional_yield y) const override;
public:
LDAPEngine(CephContext* const cct,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
const VersionAbstractor& ver_abstractor,
const rgw::auth::RemoteApplier::Factory* const apl_factory)
: AWSEngine(cct, ver_abstractor),
- store(store),
+ driver(driver),
apl_factory(apl_factory) {
init(cct);
}
};
class LocalEngine : public AWSEngine {
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
const rgw::auth::LocalApplier::Factory* const apl_factory;
result_t authenticate(const DoutPrefixProvider* dpp,
optional_yield y) const override;
public:
LocalEngine(CephContext* const cct,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
const VersionAbstractor& ver_abstractor,
const rgw::auth::LocalApplier::Factory* const apl_factory)
: AWSEngine(cct, ver_abstractor),
- store(store),
+ driver(driver),
apl_factory(apl_factory) {
}
};
class STSEngine : public AWSEngine {
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
const rgw::auth::LocalApplier::Factory* const local_apl_factory;
const rgw::auth::RemoteApplier::Factory* const remote_apl_factory;
const rgw::auth::RoleApplier::Factory* const role_apl_factory;
optional_yield y) const override;
public:
STSEngine(CephContext* const cct,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
const VersionAbstractor& ver_abstractor,
const rgw::auth::LocalApplier::Factory* const local_apl_factory,
const rgw::auth::RemoteApplier::Factory* const remote_apl_factory,
const rgw::auth::RoleApplier::Factory* const role_apl_factory)
: AWSEngine(cct, ver_abstractor),
- store(store),
+ driver(driver),
local_apl_factory(local_apl_factory),
remote_apl_factory(remote_apl_factory),
role_apl_factory(role_apl_factory) {
using RGWHandler_REST_S3::RGWHandler_REST_S3;
~RGWHandler_REST_S3Website() override = default;
- int init(rgw::sal::Store* store, req_state *s, rgw::io::BasicClient* cio) override;
+ int init(rgw::sal::Driver* driver, req_state *s, rgw::io::BasicClient* cio) override;
int error_handler(int err_no, std::string *error_content, optional_yield y) override;
};
}
auto provider_arn = rgw::ARN(idp_url, "oidc-provider", tenant);
string p_arn = provider_arn.to_string();
- std::unique_ptr<rgw::sal::RGWOIDCProvider> provider = store->get_oidc_provider();
+ std::unique_ptr<rgw::sal::RGWOIDCProvider> provider = driver->get_oidc_provider();
provider->set_arn(p_arn);
provider->set_tenant(tenant);
auto ret = provider->get(dpp);
string role_arn = s->info.args.get("RoleArn");
string role_tenant = get_role_tenant(role_arn);
string role_name = get_role_name(role_arn);
- std::unique_ptr<rgw::sal::RGWRole> role = store->get_role(role_name, role_tenant);
+ std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name, role_tenant);
int ret = role->get(dpp, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "Role not found: name:" << role_name << " tenant: " << role_tenant << dendl;
int RGWREST_STS::verify_permission(optional_yield y)
{
- STS::STSService _sts(s->cct, store, s->user->get_id(), s->auth.identity.get());
+ STS::STSService _sts(s->cct, driver, s->user->get_id(), s->auth.identity.get());
sts = std::move(_sts);
string rArn = s->info.args.get("RoleArn");
return;
}
- STS::STSService sts(s->cct, store, s->user->get_id(), s->auth.identity.get());
+ STS::STSService sts(s->cct, driver, s->user->get_id(), s->auth.identity.get());
STS::GetSessionTokenRequest req(duration, serialNumber, tokenCode);
const auto& [ret, creds] = sts.getSessionToken(this, req);
}
int RGW_Auth_STS::authorize(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
const rgw::auth::StrategyRegistry& auth_registry,
req_state *s, optional_yield y)
{
return nullptr;
}
-int RGWHandler_REST_STS::init(rgw::sal::Store* store,
+int RGWHandler_REST_STS::init(rgw::sal::Driver* driver,
req_state *s,
rgw::io::BasicClient *cio)
{
return ret;
}
- return RGWHandler_REST::init(store, s, cio);
+ return RGWHandler_REST::init(driver, s, cio);
}
int RGWHandler_REST_STS::authorize(const DoutPrefixProvider* dpp, optional_yield y)
{
if (s->info.args.exists("Action") && s->info.args.get("Action") == "AssumeRoleWithWebIdentity") {
- return RGW_Auth_STS::authorize(dpp, store, auth_registry, s, y);
+ return RGW_Auth_STS::authorize(dpp, driver, auth_registry, s, y);
}
- return RGW_Auth_S3::authorize(dpp, store, auth_registry, s, y);
+ return RGW_Auth_S3::authorize(dpp, driver, auth_registry, s, y);
}
int RGWHandler_REST_STS::init_from_header(req_state* s,
}
RGWHandler_REST*
-RGWRESTMgr_STS::get_handler(rgw::sal::Store* store,
+RGWRESTMgr_STS::get_handler(rgw::sal::Driver* driver,
req_state* const s,
const rgw::auth::StrategyRegistry& auth_registry,
const std::string& frontend_prefix)
class WebTokenEngine : public rgw::auth::Engine {
static constexpr std::string_view princTagsNamespace = "https://aws.amazon.com/tags";
CephContext* const cct;
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
using result_t = rgw::auth::Engine::result_t;
using Pair = std::pair<std::string, std::string>;
public:
WebTokenEngine(CephContext* const cct,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
const rgw::auth::TokenExtractor* const extractor,
const rgw::auth::WebIdentityApplier::Factory* const apl_factory)
: cct(cct),
- store(store),
+ driver(driver),
extractor(extractor),
apl_factory(apl_factory) {
}
class DefaultStrategy : public rgw::auth::Strategy,
public rgw::auth::TokenExtractor,
public rgw::auth::WebIdentityApplier::Factory {
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
ImplicitTenants& implicit_tenant_context;
/* The engine. */
const std::unordered_multimap<std::string, std::string>& token,
boost::optional<std::multimap<std::string, std::string>> role_tags,
boost::optional<std::set<std::pair<std::string, std::string>>> principal_tags) const override {
- auto apl = rgw::auth::add_sysreq(cct, store, s,
- rgw::auth::WebIdentityApplier(cct, store, role_session, role_tenant, token, role_tags, principal_tags));
+ auto apl = rgw::auth::add_sysreq(cct, driver, s,
+ rgw::auth::WebIdentityApplier(cct, driver, role_session, role_tenant, token, role_tags, principal_tags));
return aplptr_t(new decltype(apl)(std::move(apl)));
}
public:
DefaultStrategy(CephContext* const cct,
ImplicitTenants& implicit_tenant_context,
- rgw::sal::Store* store)
- : store(store),
+ rgw::sal::Driver* driver)
+ : driver(driver),
implicit_tenant_context(implicit_tenant_context),
- web_token_engine(cct, store,
+ web_token_engine(cct, driver,
static_cast<rgw::auth::TokenExtractor*>(this),
static_cast<rgw::auth::WebIdentityApplier::Factory*>(this)) {
/* When the constructor's body is being executed, all member engines
class RGW_Auth_STS {
public:
static int authorize(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
const rgw::auth::StrategyRegistry& auth_registry,
req_state *s, optional_yield y);
};
post_body(post_body) {}
~RGWHandler_REST_STS() override = default;
- int init(rgw::sal::Store* store,
+ int init(rgw::sal::Driver* driver,
req_state *s,
rgw::io::BasicClient *cio) override;
int authorize(const DoutPrefixProvider* dpp, optional_yield y) override;
return this;
}
- RGWHandler_REST* get_handler(rgw::sal::Store* store,
+ RGWHandler_REST* get_handler(rgw::sal::Driver* driver,
req_state*,
const rgw::auth::StrategyRegistry&,
const std::string&) override;
}
static int get_swift_container_settings(req_state * const s,
- rgw::sal::Store* const store,
+ rgw::sal::Driver* const driver,
RGWAccessControlPolicy * const policy,
bool * const has_policy,
uint32_t * rw_mask,
if (read_list || write_list) {
RGWAccessControlPolicy_SWIFT swift_policy(s->cct);
- const auto r = swift_policy.create(s, store,
+ const auto r = swift_policy.create(s, driver,
s->user->get_id(),
s->user->get_display_name(),
read_list,
bool has_policy;
uint32_t policy_rw_mask = 0;
- int r = get_swift_container_settings(s, store, &policy, &has_policy,
+ int r = get_swift_container_settings(s, driver, &policy, &has_policy,
&policy_rw_mask, &cors_config, &has_cors);
if (r < 0) {
return r;
policy.create_default(s->user->get_id(), s->user->get_display_name());
}
- location_constraint = store->get_zone()->get_zonegroup().get_api_name();
+ location_constraint = driver->get_zone()->get_zonegroup().get_api_name();
get_rmattrs_from_headers(s, CONT_PUT_ATTR_PREFIX,
CONT_REMOVE_ATTR_PREFIX, rmattr_names);
placement_rule.init(s->info.env->get("HTTP_X_STORAGE_POLICY", ""), s->info.storage_class);
std::unique_ptr<rgw::sal::Bucket> bucket;
if (bucket_name.compare(s->bucket->get_name()) != 0) {
- r = store->get_bucket(s, s->user.get(), s->user->get_id().tenant, bucket_name, &bucket, s->yield);
+ r = driver->get_bucket(s, s->user.get(), s->user->get_id().tenant, bucket_name, &bucket, s->yield);
if (r < 0) {
ldpp_dout(this, 0) << "could not get bucket info for bucket="
<< bucket_name << dendl;
}
static int get_swift_account_settings(req_state * const s,
- rgw::sal::Store* const store,
+ rgw::sal::Driver* const driver,
RGWAccessControlPolicy_SWIFTAcct* const policy,
bool * const has_policy)
{
const char * const acl_attr = s->info.env->get("HTTP_X_ACCOUNT_ACCESS_CONTROL");
if (acl_attr) {
RGWAccessControlPolicy_SWIFTAcct swift_acct_policy(s->cct);
- const bool r = swift_acct_policy.create(s, store,
+ const bool r = swift_acct_policy.create(s, driver,
s->user->get_id(),
s->user->get_display_name(),
string(acl_attr));
}
int ret = get_swift_account_settings(s,
- store,
+ driver,
// FIXME: we need to carry unique_ptr in generic class
// and allocate appropriate ACL class in the ctor
static_cast<RGWAccessControlPolicy_SWIFTAcct *>(&policy),
return -EINVAL;
}
- int r = get_swift_container_settings(s, store, &policy, &has_policy,
+ int r = get_swift_container_settings(s, driver, &policy, &has_policy,
&policy_rw_mask, &cors_config, &has_cors);
if (r < 0) {
return r;
s->formatter->close_section();
}
else {
- pair.second.list_data(*(s->formatter), s->cct->_conf, store);
+ pair.second.list_data(*(s->formatter), s->cct->_conf, driver);
}
}
void RGWInfo_ObjStore_SWIFT::list_swift_data(Formatter& formatter,
const ConfigProxy& config,
- rgw::sal::Store* store)
+ rgw::sal::Driver* driver)
{
formatter.open_object_section("swift");
formatter.dump_int("max_file_size", config->rgw_max_put_size);
}
formatter.open_array_section("policies");
- const rgw::sal::ZoneGroup& zonegroup = store->get_zone()->get_zonegroup();
+ const rgw::sal::ZoneGroup& zonegroup = driver->get_zone()->get_zonegroup();
std::set<std::string> targets;
if (zonegroup.get_placement_target_names(targets)) {
void RGWInfo_ObjStore_SWIFT::list_tempauth_data(Formatter& formatter,
const ConfigProxy& config,
- rgw::sal::Store* store)
+ rgw::sal::Driver* driver)
{
formatter.open_object_section("tempauth");
formatter.dump_bool("account_acls", true);
}
void RGWInfo_ObjStore_SWIFT::list_tempurl_data(Formatter& formatter,
const ConfigProxy& config,
- rgw::sal::Store* store)
+ rgw::sal::Driver* driver)
{
formatter.open_object_section("tempurl");
formatter.open_array_section("methods");
void RGWInfo_ObjStore_SWIFT::list_slo_data(Formatter& formatter,
const ConfigProxy& config,
- rgw::sal::Store* store)
+ rgw::sal::Driver* driver)
{
formatter.open_object_section("slo");
formatter.dump_int("max_manifest_segments", config->rgw_max_slo_entries);
}
-void RGWFormPost::init(rgw::sal::Store* const store,
+void RGWFormPost::init(rgw::sal::Driver* const driver,
req_state* const s,
RGWHandler* const dialect_handler)
{
prefix = std::move(s->object->get_name());
s->object->set_key(rgw_obj_key());
- return RGWPostObj_ObjStore::init(store, s, dialect_handler);
+ return RGWPostObj_ObjStore::init(driver, s, dialect_handler);
}
std::size_t RGWFormPost::get_max_file_size() /*const*/
const rgw_user uid(s->account_name);
if (uid.tenant.empty()) {
const rgw_user tenanted_uid(uid.id, uid.id);
- user = store->get_user(tenanted_uid);
+ user = driver->get_user(tenanted_uid);
if (user->load_user(s, s->yield) >= 0) {
/* Succeeded. */
}
if (!found) {
- user = store->get_user(uid);
+ user = driver->get_user(uid);
if (user->load_user(s, s->yield) < 0) {
throw -EPERM;
}
/* Need to get user info of bucket owner. */
std::unique_ptr<rgw::sal::Bucket> bucket;
- int ret = store->get_bucket(s, user.get(), user->get_tenant(), bucket_name, &bucket, s->yield);
+ int ret = driver->get_bucket(s, user.get(), user->get_tenant(), bucket_name, &bucket, s->yield);
if (ret < 0) {
throw ret;
}
ldpp_dout(this, 20) << "temp url user (bucket owner): " << bucket->get_info().owner
<< dendl;
- user = store->get_user(bucket->get_info().owner);
+ user = driver->get_user(bucket->get_info().owner);
if (user->load_user(s, s->yield) < 0) {
throw -EPERM;
}
* only. They will be picked up by ::get_data(). */
break;
} else {
- /* Control part ahead. Receive, parse and store for later usage. */
+ /* Control part ahead. Receive, parse and save for later usage. */
bool boundary;
ret = read_data(part.data, s->cct->_conf->rgw_max_chunk_size,
boundary, stream_done);
class RGWGetErrorPage : public RGWGetObj_ObjStore_SWIFT {
public:
- RGWGetErrorPage(rgw::sal::Store* const store,
+ RGWGetErrorPage(rgw::sal::Driver* const driver,
RGWHandler_REST* const handler,
req_state* const s,
const int http_ret) {
/* Calling a virtual from the base class is safe as the subobject should
* be properly initialized and we haven't overridden the init method. */
- init(store, s, handler);
+ init(driver, s, handler);
set_get_data(true);
set_custom_http_response(http_ret);
}
* fault situation by sending the original message. */
return 0;
}
- } get_errpage_op(store, handler, s, http_ret);
+ } get_errpage_op(driver, handler, s, http_ret);
/* This is okay. It's an error, so nothing will run after this, and it can be
* called by abort_early(), which can be called before s->object or s->bucket
if (!rgw::sal::Bucket::empty(s->bucket.get())) {
s->object = s->bucket->get_object(rgw_obj_key(std::to_string(http_ret) + error_doc));
} else {
- s->object = store->get_object(rgw_obj_key(std::to_string(http_ret) + error_doc));
+ s->object = driver->get_object(rgw_obj_key(std::to_string(http_ret) + error_doc));
}
RGWOp* newop = &get_errpage_op;
RGWRequest req(0);
- return rgw_process_authenticated(handler, newop, &req, s, y, store, true);
+ return rgw_process_authenticated(handler, newop, &req, s, y, driver, true);
}
int RGWSwiftWebsiteHandler::error_handler(const int err_no,
if (op_override) {
handler->put_op(op);
- op_override->init(store, s, handler);
+ op_override->init(driver, s, handler);
*new_op = op_override;
} else {
if (op_override) {
handler->put_op(op);
- op_override->init(store, s, handler);
+ op_override->init(driver, s, handler);
*new_op = op_override;
} else {
if (!s->object) {
/* Need an object, even an empty one */
- s->object = store->get_object(rgw_obj_key());
+ s->object = driver->get_object(rgw_obj_key());
}
ldpp_dout(s, 10) << "s->object=" <<
}
}
-int RGWHandler_REST_SWIFT::init_from_header(rgw::sal::Store* store,
+int RGWHandler_REST_SWIFT::init_from_header(rgw::sal::Driver* driver,
req_state* const s,
const std::string& frontend_prefix)
{
s->init_state.url_bucket = first;
if (req.size()) {
- s->object = store->get_object(
+ s->object = driver->get_object(
rgw_obj_key(req, s->info.env->get("HTTP_X_OBJECT_VERSION_ID", ""))); /* rgw swift extension */
s->info.effective_uri.append("/" + s->object->get_name());
}
return 0;
}
-int RGWHandler_REST_SWIFT::init(rgw::sal::Store* store, req_state* s,
+int RGWHandler_REST_SWIFT::init(rgw::sal::Driver* driver, req_state* s,
rgw::io::BasicClient *cio)
{
struct req_init_state *t = &s->init_state;
bool result = RGWCopyObj::parse_copy_location(copy_source, t->src_bucket, key, s);
if (!result)
return -ERR_BAD_URL;
- s->src_object = store->get_object(key);
+ s->src_object = driver->get_object(key);
if (!s->src_object)
return -ERR_BAD_URL;
}
s->info.storage_class = s->info.env->get("HTTP_X_OBJECT_STORAGE_CLASS", "");
- return RGWHandler_REST::init(store, s, cio);
+ return RGWHandler_REST::init(driver, s, cio);
}
RGWHandler_REST*
-RGWRESTMgr_SWIFT::get_handler(rgw::sal::Store* store,
+RGWRESTMgr_SWIFT::get_handler(rgw::sal::Driver* driver,
req_state* const s,
const rgw::auth::StrategyRegistry& auth_registry,
const std::string& frontend_prefix)
{
- int ret = RGWHandler_REST_SWIFT::init_from_header(store, s, frontend_prefix);
+ int ret = RGWHandler_REST_SWIFT::init_from_header(driver, s, frontend_prefix);
if (ret < 0) {
ldpp_dout(s, 10) << "init_from_header returned err=" << ret << dendl;
return nullptr;
}
RGWHandler_REST* RGWRESTMgr_SWIFT_Info::get_handler(
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
req_state* const s,
const rgw::auth::StrategyRegistry& auth_registry,
const std::string& frontend_prefix)
struct info
{
bool is_admin_info;
- std::function<void (Formatter&, const ConfigProxy&, rgw::sal::Store*)> list_data;
+ std::function<void (Formatter&, const ConfigProxy&, rgw::sal::Driver*)> list_data;
};
static const std::vector<std::pair<std::string, struct info>> swift_info;
void execute(optional_yield y) override;
void send_response() override;
- static void list_swift_data(Formatter& formatter, const ConfigProxy& config, rgw::sal::Store* store);
- static void list_tempauth_data(Formatter& formatter, const ConfigProxy& config, rgw::sal::Store* store);
- static void list_tempurl_data(Formatter& formatter, const ConfigProxy& config, rgw::sal::Store* store);
- static void list_slo_data(Formatter& formatter, const ConfigProxy& config, rgw::sal::Store* store);
+ static void list_swift_data(Formatter& formatter, const ConfigProxy& config, rgw::sal::Driver* driver);
+ static void list_tempauth_data(Formatter& formatter, const ConfigProxy& config, rgw::sal::Driver* driver);
+ static void list_tempurl_data(Formatter& formatter, const ConfigProxy& config, rgw::sal::Driver* driver);
+ static void list_slo_data(Formatter& formatter, const ConfigProxy& config, rgw::sal::Driver* driver);
static bool is_expired(const std::string& expires, const DoutPrefixProvider* dpp);
};
RGWFormPost() = default;
~RGWFormPost() = default;
- void init(rgw::sal::Store* store,
+ void init(rgw::sal::Driver* driver,
req_state* s,
RGWHandler* dialect_handler) override;
class RGWSwiftWebsiteHandler {
- rgw::sal::Store* const store;
+ rgw::sal::Driver* const driver;
req_state* const s;
RGWHandler_REST* const handler;
RGWOp* get_ws_index_op();
RGWOp* get_ws_listing_op();
public:
- RGWSwiftWebsiteHandler(rgw::sal::Store* const store,
+ RGWSwiftWebsiteHandler(rgw::sal::Driver* const driver,
req_state* const s,
RGWHandler_REST* const handler)
- : store(store),
+ : driver(driver),
s(s),
handler(handler) {
}
return false;
}
- static int init_from_header(rgw::sal::Store* store, req_state* s,
+ static int init_from_header(rgw::sal::Driver* driver, req_state* s,
const std::string& frontend_prefix);
public:
explicit RGWHandler_REST_SWIFT(const rgw::auth::Strategy& auth_strategy)
int validate_bucket_name(const std::string& bucket);
- int init(rgw::sal::Store* store, req_state *s, rgw::io::BasicClient *cio) override;
+ int init(rgw::sal::Driver* driver, req_state *s, rgw::io::BasicClient *cio) override;
int authorize(const DoutPrefixProvider *dpp, optional_yield y) override;
int postauth_init(optional_yield y) override;
return website_handler->retarget_bucket(op, new_op);
}
- int init(rgw::sal::Store* const store,
+ int init(rgw::sal::Driver* const driver,
req_state* const s,
rgw::io::BasicClient* const cio) override {
- website_handler = boost::in_place<RGWSwiftWebsiteHandler>(store, s, this);
- return RGWHandler_REST_SWIFT::init(store, s, cio);
+ website_handler = boost::in_place<RGWSwiftWebsiteHandler>(driver, s, this);
+ return RGWHandler_REST_SWIFT::init(driver, s, cio);
}
};
return website_handler->retarget_object(op, new_op);
}
- int init(rgw::sal::Store* const store,
+ int init(rgw::sal::Driver* const driver,
req_state* const s,
rgw::io::BasicClient* const cio) override {
- website_handler = boost::in_place<RGWSwiftWebsiteHandler>(store, s, this);
- return RGWHandler_REST_SWIFT::init(store, s, cio);
+ website_handler = boost::in_place<RGWSwiftWebsiteHandler>(driver, s, this);
+ return RGWHandler_REST_SWIFT::init(driver, s, cio);
}
};
RGWRESTMgr_SWIFT() = default;
~RGWRESTMgr_SWIFT() override = default;
- RGWHandler_REST *get_handler(rgw::sal::Store* store,
+ RGWHandler_REST *get_handler(rgw::sal::Driver* driver,
req_state *s,
const rgw::auth::StrategyRegistry& auth_registry,
const std::string& frontend_prefix) override;
return new RGWGetCrossDomainPolicy_ObjStore_SWIFT();
}
- int init(rgw::sal::Store* const store,
+ int init(rgw::sal::Driver* const driver,
req_state* const state,
rgw::io::BasicClient* const cio) override {
state->dialect = "swift";
state->formatter = new JSONFormatter;
state->format = RGWFormat::JSON;
- return RGWHandler::init(store, state, cio);
+ return RGWHandler::init(driver, state, cio);
}
int authorize(const DoutPrefixProvider *dpp, optional_yield) override {
RGWRESTMgr_SWIFT_CrossDomain() = default;
~RGWRESTMgr_SWIFT_CrossDomain() override = default;
- RGWHandler_REST* get_handler(rgw::sal::Store* store,
+ RGWHandler_REST* get_handler(rgw::sal::Driver* driver,
req_state* const s,
const rgw::auth::StrategyRegistry&,
const std::string&) override {
return new RGWGetHealthCheck_ObjStore_SWIFT();
}
- int init(rgw::sal::Store* const store,
+ int init(rgw::sal::Driver* const driver,
req_state* const state,
rgw::io::BasicClient* const cio) override {
state->dialect = "swift";
state->formatter = new JSONFormatter;
state->format = RGWFormat::JSON;
- return RGWHandler::init(store, state, cio);
+ return RGWHandler::init(driver, state, cio);
}
int authorize(const DoutPrefixProvider *dpp, optional_yield y) override {
RGWRESTMgr_SWIFT_HealthCheck() = default;
~RGWRESTMgr_SWIFT_HealthCheck() override = default;
- RGWHandler_REST* get_handler(rgw::sal::Store* store,
+ RGWHandler_REST* get_handler(rgw::sal::Driver* driver,
req_state* const s,
const rgw::auth::StrategyRegistry&,
const std::string&) override {
return new RGWInfo_ObjStore_SWIFT();
}
- int init(rgw::sal::Store* const store,
+ int init(rgw::sal::Driver* const driver,
req_state* const state,
rgw::io::BasicClient* const cio) override {
state->dialect = "swift";
state->formatter = new JSONFormatter;
state->format = RGWFormat::JSON;
- return RGWHandler::init(store, state, cio);
+ return RGWHandler::init(driver, state, cio);
}
int authorize(const DoutPrefixProvider *dpp, optional_yield) override {
RGWRESTMgr_SWIFT_Info() = default;
~RGWRESTMgr_SWIFT_Info() override = default;
- RGWHandler_REST *get_handler(rgw::sal::Store* store,
+ RGWHandler_REST *get_handler(rgw::sal::Driver* driver,
req_state* s,
const rgw::auth::StrategyRegistry& auth_registry,
const std::string& frontend_prefix) override;
RESTArgs::get_string(s, "uid", uid_str, &uid_str);
RESTArgs::get_string(s, "bucket", bucket_name, &bucket_name);
- std::unique_ptr<rgw::sal::User> user = store->get_user(rgw_user(uid_str));
+ std::unique_ptr<rgw::sal::User> user = driver->get_user(rgw_user(uid_str));
std::unique_ptr<rgw::sal::Bucket> bucket;
if (!bucket_name.empty()) {
- store->get_bucket(nullptr, user.get(), std::string(), bucket_name, &bucket, null_yield);
+ driver->get_bucket(nullptr, user.get(), std::string(), bucket_name, &bucket, null_yield);
}
RESTArgs::get_epoch(s, "start", 0, &start);
}
}
- op_ret = RGWUsage::show(this, store, user.get(), bucket.get(), start, end, show_entries, show_summary, &categories, flusher);
+ op_ret = RGWUsage::show(this, driver, user.get(), bucket.get(), start, end, show_entries, show_summary, &categories, flusher);
}
class RGWOp_Usage_Delete : public RGWRESTOp {
RESTArgs::get_string(s, "uid", uid_str, &uid_str);
RESTArgs::get_string(s, "bucket", bucket_name, &bucket_name);
- std::unique_ptr<rgw::sal::User> user = store->get_user(rgw_user(uid_str));
+ std::unique_ptr<rgw::sal::User> user = driver->get_user(rgw_user(uid_str));
std::unique_ptr<rgw::sal::Bucket> bucket;
if (!bucket_name.empty()) {
- store->get_bucket(nullptr, user.get(), std::string(), bucket_name, &bucket, null_yield);
+ driver->get_bucket(nullptr, user.get(), std::string(), bucket_name, &bucket, null_yield);
}
RESTArgs::get_epoch(s, "start", 0, &start);
}
}
- op_ret = RGWUsage::trim(this, store, user.get(), bucket.get(), start, end);
+ op_ret = RGWUsage::trim(this, driver, user.get(), bucket.get(), start, end);
}
RGWOp *RGWHandler_Usage::op_get()
RGWRESTMgr_Usage() = default;
~RGWRESTMgr_Usage() override = default;
- RGWHandler_REST* get_handler(rgw::sal::Store* store,
+ RGWHandler_REST* get_handler(rgw::sal::Driver* driver,
req_state*,
const rgw::auth::StrategyRegistry& auth_registry,
const std::string&) override {
bufferlist bl = bufferlist::static_from_string(policy);
- std::unique_ptr<rgw::sal::User> user = store->get_user(rgw_user(user_name));
+ std::unique_ptr<rgw::sal::User> user = driver->get_user(rgw_user(user_name));
op_ret = user->load_user(s, s->yield);
if (op_ret < 0) {
}
ceph::bufferlist in_data;
- op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
+ op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "ERROR: forward_request_to_master returned ret=" << op_ret << dendl;
return;
return;
}
- std::unique_ptr<rgw::sal::User> user = store->get_user(rgw_user(user_name));
+ std::unique_ptr<rgw::sal::User> user = driver->get_user(rgw_user(user_name));
op_ret = user->read_attrs(s, s->yield);
if (op_ret == -ENOENT) {
ldpp_dout(this, 0) << "ERROR: attrs not found for user" << user_name << dendl;
return;
}
- std::unique_ptr<rgw::sal::User> user = store->get_user(rgw_user(user_name));
+ std::unique_ptr<rgw::sal::User> user = driver->get_user(rgw_user(user_name));
op_ret = user->read_attrs(s, s->yield);
if (op_ret == -ENOENT) {
ldpp_dout(this, 0) << "ERROR: attrs not found for user" << user_name << dendl;
return;
}
- std::unique_ptr<rgw::sal::User> user = store->get_user(rgw_user(user_name));
+ std::unique_ptr<rgw::sal::User> user = driver->get_user(rgw_user(user_name));
op_ret = user->load_user(s, s->yield);
if (op_ret < 0) {
op_ret = -ERR_NO_SUCH_ENTITY;
}
ceph::bufferlist in_data;
- op_ret = store->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
+ op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y);
if (op_ret < 0) {
// a policy might've been uploaded to this site when there was no sync
// req. in earlier releases, proceed deletion
return role_path_oid_prefix;
}
-RGWRoleMetadataHandler::RGWRoleMetadataHandler(Store* store,
+RGWRoleMetadataHandler::RGWRoleMetadataHandler(Driver* driver,
RGWSI_Role_RADOS *role_svc)
{
- this->store = store;
+ this->driver = driver;
base_init(role_svc->ctx(), role_svc->get_be_handler());
}
return nullptr;
}
- return new RGWRoleMetadataObject(info, objv, mtime, store);
+ return new RGWRoleMetadataObject(info, objv, mtime, driver);
}
int RGWRoleMetadataHandler::do_get(RGWSI_MetaBackend_Handler::Op *op,
optional_yield y,
const DoutPrefixProvider *dpp)
{
- std::unique_ptr<rgw::sal::RGWRole> role = store->get_role(entry);
+ std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(entry);
int ret = role->read_info(dpp, y);
if (ret < 0) {
return ret;
RGWRoleInfo info = role->get_info();
RGWRoleMetadataObject *rdo = new RGWRoleMetadataObject(info, objv_tracker.read_version,
- mtime, store);
+ mtime, driver);
*obj = rdo;
return 0;
optional_yield y,
const DoutPrefixProvider *dpp)
{
- std::unique_ptr<rgw::sal::RGWRole> role = store->get_role(entry);
+ std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(entry);
int ret = role->read_info(dpp, y);
if (ret < 0) {
return ret == -ENOENT? 0 : ret;
int put_checked(const DoutPrefixProvider *dpp) override {
auto& info = mdo->get_role_info();
auto mtime = mdo->get_mtime();
- auto* store = mdo->get_store();
+ auto* driver = mdo->get_driver();
info.mtime = mtime;
- std::unique_ptr<rgw::sal::RGWRole> role = store->get_role(info);
+ std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(info);
int ret = role->create(dpp, true, info.id, y);
if (ret == -EEXIST) {
ret = role->update(dpp, y);
class RGWRoleMetadataObject: public RGWMetadataObject {
RGWRoleInfo info;
- Store* store;
+ Driver* driver;
public:
RGWRoleMetadataObject() = default;
RGWRoleMetadataObject(RGWRoleInfo& info,
const obj_version& v,
real_time m,
- Store* store) : RGWMetadataObject(v,m), info(info), store(store) {}
+ Driver* driver) : RGWMetadataObject(v,m), info(info), driver(driver) {}
void dump(Formatter *f) const override {
info.dump(f);
return info;
}
- Store* get_store() {
- return store;
+ Driver* get_driver() {
+ return driver;
}
};
class RGWRoleMetadataHandler: public RGWMetadataHandler_GenericMetaBE
{
public:
- RGWRoleMetadataHandler(Store* store, RGWSI_Role_RADOS *role_svc);
+ RGWRoleMetadataHandler(Driver* driver, RGWSI_Role_RADOS *role_svc);
std::string get_type() final { return "roles"; }
bool from_remote_zone) override;
private:
- Store* store;
+ Driver* driver;
};
} } // namespace rgw::sal
#define dout_subsys ceph_subsys_rgw
extern "C" {
-extern rgw::sal::Store* newStore(void);
+extern rgw::sal::Driver* newRadosStore(void);
#ifdef WITH_RADOSGW_DBSTORE
-extern rgw::sal::Store* newDBStore(CephContext *cct);
+extern rgw::sal::Driver* newDBStore(CephContext *cct);
#endif
#ifdef WITH_RADOSGW_MOTR
-extern rgw::sal::Store* newMotrStore(CephContext *cct);
+extern rgw::sal::Driver* newMotrStore(CephContext *cct);
#endif
#ifdef WITH_RADOSGW_DAOS
-extern rgw::sal::Store* newDaosStore(CephContext *cct);
+extern rgw::sal::Driver* newDaosStore(CephContext *cct);
#endif
-extern rgw::sal::Store* newBaseFilter(rgw::sal::Store* next);
+extern rgw::sal::Driver* newBaseFilter(rgw::sal::Driver* next);
}
compressed = rhs.compressed;
}
-rgw::sal::Store* StoreManager::init_storage_provider(const DoutPrefixProvider* dpp,
+rgw::sal::Driver* DriverManager::init_storage_provider(const DoutPrefixProvider* dpp,
CephContext* cct,
const Config& cfg,
bool use_gc_thread,
bool use_cache,
bool use_gc)
{
- rgw::sal::Store* store{nullptr};
+ rgw::sal::Driver* driver{nullptr};
if (cfg.store_name.compare("rados") == 0) {
- store = newStore();
- RGWRados* rados = static_cast<rgw::sal::RadosStore* >(store)->getRados();
+ driver = newRadosStore();
+ RGWRados* rados = static_cast<rgw::sal::RadosStore* >(driver)->getRados();
if ((*rados).set_use_cache(use_cache)
.set_use_datacache(false)
.set_run_sync_thread(run_sync_thread)
.set_run_reshard_thread(run_reshard_thread)
.init_begin(cct, dpp) < 0) {
- delete store;
+ delete driver;
return nullptr;
}
- if (store->initialize(cct, dpp) < 0) {
- delete store;
+ if (driver->initialize(cct, dpp) < 0) {
+ delete driver;
return nullptr;
}
if (rados->init_complete(dpp) < 0) {
- delete store;
+ delete driver;
return nullptr;
}
}
else if (cfg.store_name.compare("d3n") == 0) {
- store = new rgw::sal::RadosStore();
+ driver = new rgw::sal::RadosStore();
RGWRados* rados = new D3nRGWDataCache<RGWRados>;
- dynamic_cast<rgw::sal::RadosStore*>(store)->setRados(rados);
- rados->set_store(static_cast<rgw::sal::RadosStore* >(store));
+ dynamic_cast<rgw::sal::RadosStore*>(driver)->setRados(rados);
+ rados->set_store(static_cast<rgw::sal::RadosStore* >(driver));
if ((*rados).set_use_cache(use_cache)
.set_use_datacache(true)
.set_run_sync_thread(run_sync_thread)
.set_run_reshard_thread(run_reshard_thread)
.init_begin(cct, dpp) < 0) {
- delete store;
+ delete driver;
return nullptr;
}
- if (store->initialize(cct, dpp) < 0) {
- delete store;
+ if (driver->initialize(cct, dpp) < 0) {
+ delete driver;
return nullptr;
}
if (rados->init_complete(dpp) < 0) {
- delete store;
+ delete driver;
return nullptr;
}
}
#ifdef WITH_RADOSGW_DBSTORE
else if (cfg.store_name.compare("dbstore") == 0) {
- store = newDBStore(cct);
+ driver = newDBStore(cct);
- if ((*(rgw::sal::DBStore*)store).set_run_lc_thread(use_lc_thread)
+ if ((*(rgw::sal::DBStore*)driver).set_run_lc_thread(use_lc_thread)
.initialize(cct, dpp) < 0) {
- delete store;
+ delete driver;
return nullptr;
}
}
#ifdef WITH_RADOSGW_MOTR
else if (cfg.store_name.compare("motr") == 0) {
- store = newMotrStore(cct);
- if (store == nullptr) {
+ driver = newMotrStore(cct);
+ if (driver == nullptr) {
ldpp_dout(dpp, 0) << "newMotrStore() failed!" << dendl;
- return store;
+ return driver;
}
- ((rgw::sal::MotrStore *)store)->init_metadata_cache(dpp, cct);
+ ((rgw::sal::MotrStore *)driver)->init_metadata_cache(dpp, cct);
/* XXX: temporary - create testid user */
rgw_user testid_user("tenant", "tester", "ns");
- std::unique_ptr<rgw::sal::User> user = store->get_user(testid_user);
+ std::unique_ptr<rgw::sal::User> user = driver->get_user(testid_user);
user->get_info().user_id = testid_user;
user->get_info().display_name = "Motr Explorer";
user->get_info().user_email = "tester@seagate.com";
// Read user info and compare.
rgw_user ruser("", "tester", "");
- std::unique_ptr<rgw::sal::User> suser = store->get_user(ruser);
+ std::unique_ptr<rgw::sal::User> suser = driver->get_user(ruser);
suser->get_info().user_id = ruser;
rc = suser->load_user(dpp, null_yield);
if (rc != 0) {
#ifdef WITH_RADOSGW_DAOS
else if (cfg.store_name.compare("daos") == 0) {
- store = newDaosStore(cct);
- if (store == nullptr) {
+ driver = newDaosStore(cct);
+ if (driver == nullptr) {
ldpp_dout(dpp, 0) << "newDaosStore() failed!" << dendl;
- return store;
+ return driver;
}
- int ret = store->initialize(cct, dpp);
+ int ret = driver->initialize(cct, dpp);
if (ret != 0) {
ldpp_dout(dpp, 20) << "ERROR: store->initialize() failed: " << ret << dendl;
- delete store;
+ delete driver;
return nullptr;
}
}
#endif
if (cfg.filter_name.compare("base") == 0) {
- rgw::sal::Store* next = store;
- store = newBaseFilter(next);
+ rgw::sal::Driver* next = driver;
+ driver = newBaseFilter(next);
- if (store->initialize(cct, dpp) < 0) {
- delete store;
+ if (driver->initialize(cct, dpp) < 0) {
+ delete driver;
delete next;
return nullptr;
}
}
- return store;
+ return driver;
}
-rgw::sal::Store* StoreManager::init_raw_storage_provider(const DoutPrefixProvider* dpp, CephContext* cct, const Config& cfg)
+rgw::sal::Driver* DriverManager::init_raw_storage_provider(const DoutPrefixProvider* dpp, CephContext* cct, const Config& cfg)
{
- rgw::sal::Store* store = nullptr;
+ rgw::sal::Driver* driver = nullptr;
if (cfg.store_name.compare("rados") == 0) {
- store = newStore();
- RGWRados* rados = static_cast<rgw::sal::RadosStore* >(store)->getRados();
+ driver = newRadosStore();
+ RGWRados* rados = static_cast<rgw::sal::RadosStore* >(driver)->getRados();
rados->set_context(cct);
int ret = rados->init_svc(true, dpp);
if (ret < 0) {
ldout(cct, 0) << "ERROR: failed to init services (ret=" << cpp_strerror(-ret) << ")" << dendl;
- delete store;
+ delete driver;
return nullptr;
}
if (rados->init_rados() < 0) {
- delete store;
+ delete driver;
return nullptr;
}
- if (store->initialize(cct, dpp) < 0) {
- delete store;
+ if (driver->initialize(cct, dpp) < 0) {
+ delete driver;
return nullptr;
}
} else if (cfg.store_name.compare("dbstore") == 0) {
#ifdef WITH_RADOSGW_DBSTORE
- store = newDBStore(cct);
+ driver = newDBStore(cct);
- if ((*(rgw::sal::DBStore*)store).initialize(cct, dpp) < 0) {
- delete store;
+ if ((*(rgw::sal::DBStore*)driver).initialize(cct, dpp) < 0) {
+ delete driver;
return nullptr;
}
#else
- store = nullptr;
+ driver = nullptr;
#endif
} else if (cfg.store_name.compare("motr") == 0) {
#ifdef WITH_RADOSGW_MOTR
- store = newMotrStore(cct);
+ driver = newMotrStore(cct);
#else
- store = nullptr;
+ driver = nullptr;
#endif
} else if (cfg.store_name.compare("daos") == 0) {
#ifdef WITH_RADOSGW_DAOS
- store = newDaosStore(cct);
+ driver = newDaosStore(cct);
- if (store->initialize(cct, dpp) < 0) {
- delete store;
+ if (driver->initialize(cct, dpp) < 0) {
+ delete driver;
return nullptr;
}
#else
- store = nullptr;
+ driver = nullptr;
#endif
}
if (cfg.filter_name.compare("base") == 0) {
- rgw::sal::Store* next = store;
- store = newBaseFilter(next);
+ rgw::sal::Driver* next = driver;
+ driver = newBaseFilter(next);
- if (store->initialize(cct, dpp) < 0) {
- delete store;
+ if (driver->initialize(cct, dpp) < 0) {
+ delete driver;
delete next;
return nullptr;
}
}
- return store;
+ return driver;
}
-void StoreManager::close_storage(rgw::sal::Store* store)
+void DriverManager::close_storage(rgw::sal::Driver* driver)
{
- if (!store)
+ if (!driver)
return;
- store->finalize();
+ driver->finalize();
- delete store;
+ delete driver;
}
-StoreManager::Config StoreManager::get_config(bool admin, CephContext* cct)
+DriverManager::Config DriverManager::get_config(bool admin, CephContext* cct)
{
- StoreManager::Config cfg;
+ DriverManager::Config cfg;
// Get the store backend
const auto& config_store = g_conf().get_val<std::string>("rgw_backend_store");
return cfg;
}
-auto StoreManager::create_config_store(const DoutPrefixProvider* dpp,
+auto DriverManager::create_config_store(const DoutPrefixProvider* dpp,
std::string_view type)
-> std::unique_ptr<rgw::sal::ConfigStore>
{
/**
* @brief Base singleton representing a Store or Filter
*
- * The Store is the base abstraction of the SAL layer. It represents a base storage
+ * The Driver is the base abstraction of the SAL layer. It represents a base storage
* mechanism, or a intermediate stacking layer. There is a single instance of a given
- * Store per RGW, and this Store mediates all access to it's backing.
+ * Driver per RGW, and this Driver mediates all access to its backing.
*
- * A store contains, loosely, @a User, @a Bucket, and @a Object entities. The @a Object
+ * A Driver contains, loosely, @a User, @a Bucket, and @a Object entities. The @a Object
* contains data, and it's associated metadata. The @a Bucket contains Objects, and
* metadata about the bucket. Both Buckets and Objects are owned by a @a User, which is
* the basic unit of access control.
*
- * A store also has metadata and some global responsibilities. For example, a store is
+ * A Driver also has metadata and some global responsibilities. For example, a driver is
* responsible for managing the LifeCycle activities for it's data.
*/
-class Store {
+class Driver {
public:
- Store() {}
- virtual ~Store() = default;
+ Driver() {}
+ virtual ~Driver() = default;
- /** Post-creation initialization of store */
+ /** Post-creation initialization of driver */
virtual int initialize(CephContext *cct, const DoutPrefixProvider *dpp) = 0;
- /** Name of this store provider (e.g., "rados") */
+ /** Name of this driver provider (e.g., "rados") */
virtual const std::string get_name() const = 0;
/** Get cluster unique identifier */
virtual std::string get_cluster_id(const DoutPrefixProvider* dpp, optional_yield y) = 0;
- /** Get a User from a rgw_user. Does not query store for user info, so quick */
+ /** Get a User from a rgw_user. Does not query driver for user info, so quick */
virtual std::unique_ptr<User> get_user(const rgw_user& u) = 0;
- /** Lookup a User by access key. Queries store for user info. */
+ /** Lookup a User by access key. Queries driver for user info. */
virtual int get_user_by_access_key(const DoutPrefixProvider* dpp, const std::string& key, optional_yield y, std::unique_ptr<User>* user) = 0;
- /** Lookup a User by email address. Queries store for user info. */
+ /** Lookup a User by email address. Queries driver for user info. */
virtual int get_user_by_email(const DoutPrefixProvider* dpp, const std::string& email, optional_yield y, std::unique_ptr<User>* user) = 0;
- /** Lookup a User by swift username. Queries store for user info. */
+ /** Lookup a User by swift username. Queries driver for user info. */
virtual int get_user_by_swift(const DoutPrefixProvider* dpp, const std::string& user_str, optional_yield y, std::unique_ptr<User>* user) = 0;
/** Get a basic Object. This Object is not looked up, and is incomplete, since is
* does not have a bucket. This should only be used when an Object is needed before
* there is a Bucket, otherwise use the get_object() in the Bucket class. */
virtual std::unique_ptr<Object> get_object(const rgw_obj_key& k) = 0;
- /** Get a Bucket by info. Does not query the store, just uses the give bucket info. */
+ /** Get a Bucket by info. Does not query the driver, just uses the given bucket info. */
virtual int get_bucket(User* u, const RGWBucketInfo& i, std::unique_ptr<Bucket>* bucket) = 0;
- /** Lookup a Bucket by key. Queries store for bucket info. */
+ /** Lookup a Bucket by key. Queries driver for bucket info. */
virtual int get_bucket(const DoutPrefixProvider* dpp, User* u, const rgw_bucket& b, std::unique_ptr<Bucket>* bucket, optional_yield y) = 0;
- /** Lookup a Bucket by name. Queries store for bucket info. */
+ /** Lookup a Bucket by name. Queries driver for bucket info. */
virtual int get_bucket(const DoutPrefixProvider* dpp, User* u, const std::string& tenant, const std::string& name, std::unique_ptr<Bucket>* bucket, optional_yield y) = 0;
- /** For multisite, this Store is the zone's master */
+ /** For multisite, this driver is the zone's master */
virtual bool is_meta_master() = 0;
/** For multisite, forward an OP to the zone's master */
virtual int forward_request_to_master(const DoutPrefixProvider *dpp, User* user, obj_version* objv,
bufferlist& in_data,
RGWXMLDecoder::XMLParser* parser, req_info& info,
optional_yield y) = 0;
- /** Get zone info for this store */
+ /** Get zone info for this driver */
virtual Zone* get_zone() = 0;
/** Get a unique ID specific to this zone. */
virtual std::string zone_unique_id(uint64_t unique_num) = 0;
virtual int get_zonegroup(const std::string& id, std::unique_ptr<ZoneGroup>* zonegroup) = 0;
/** List all zones in all zone groups by ID */
virtual int list_all_zones(const DoutPrefixProvider* dpp, std::list<std::string>& zone_ids) = 0;
- /** Get statistics about the cluster represented by this Store */
+ /** Get statistics about the cluster represented by this driver */
virtual int cluster_stat(RGWClusterStat& stats) = 0;
/** Get a @a Lifecycle object. Used to manage/run lifecycle transitions */
virtual std::unique_ptr<Lifecycle> get_lifecycle(void) = 0;
/** Get access to the coroutine registry. Used to create new coroutine managers */
virtual RGWCoroutinesManagerRegistry* get_cr_registry() = 0;
- /** Log usage data to the store. Usage data is things like bytes sent/received and
+ /** Log usage data to the driver. Usage data is things like bytes sent/received and
* op count */
virtual int log_usage(const DoutPrefixProvider *dpp, std::map<rgw_user_bucket, RGWUsageBatch>& usage_info) = 0;
- /** Log OP data to the store. Data is opaque to SAL */
+ /** Log OP data to the driver. Data is opaque to SAL */
virtual int log_op(const DoutPrefixProvider *dpp, std::string& oid, bufferlist& bl) = 0;
- /** Register this Store to the service map. Somewhat Rados specific; may be removed*/
+ /** Register this driver to the service map. Somewhat Rados specific; may be removed*/
virtual int register_to_service_map(const DoutPrefixProvider *dpp, const std::string& daemon_type,
const std::map<std::string, std::string>& meta) = 0;
/** Get default quota info. Used as fallback if a user or bucket has no quota set*/
/** Check to see if this placement rule is valid */
virtual bool valid_placement(const rgw_placement_rule& rule) = 0;
- /** Clean up a store for termination */
+ /** Clean up a driver for termination */
virtual void finalize(void) = 0;
- /** Get the Ceph context associated with this store. May be removed. */
+ /** Get the Ceph context associated with this driver. May be removed. */
virtual CephContext* ctx(void) = 0;
/** Get the location of where lua packages are installed */
virtual const std::string& get_luarocks_path() const = 0;
/** Set the location of where lua packages are installed */
virtual void set_luarocks_path(const std::string& path) = 0;
- /** Register admin APIs unique to this store */
+ /** Register admin APIs unique to this driver */
virtual void register_admin_apis(RGWRESTMgr* mgr) = 0;
};
optional_yield y) = 0;
/** Check to see if two placement rules match */
virtual bool placement_rules_match(rgw_placement_rule& r1, rgw_placement_rule& r2) = 0;
- /** Dump store-specific object layout info in JSON */
+ /** Dump driver-specific object layout info in JSON */
virtual int dump_obj_layout(const DoutPrefixProvider *dpp, optional_yield y, Formatter* f) = 0;
/** Get the cached attributes for this object */
} } // namespace rgw::sal
/**
- * @brief A manager for Stores
+ * @brief A manager for Drivers
*
- * This will manage the singleton instances of the various stores. Stores come in two
- * varieties: Full and Raw. A full store is suitable for use in a radosgw daemon. It
- * has full access to the cluster, if any. A raw store is a stripped down store, used
+ * This will manage the singleton instances of the various drivers. Drivers come in two
+ * varieties: Full and Raw. A full driver is suitable for use in a radosgw daemon. It
+ * has full access to the cluster, if any. A raw driver is a stripped down driver, used
* for admin commands.
*/
-class StoreManager {
+class DriverManager {
public:
struct Config {
/** Name of store to create */
std::string filter_name;
};
- StoreManager() {}
- /** Get a full store by service name */
- static rgw::sal::Store* get_storage(const DoutPrefixProvider* dpp,
+ DriverManager() {}
+ /** Get a full driver by service name */
+ static rgw::sal::Driver* get_storage(const DoutPrefixProvider* dpp,
CephContext* cct,
const Config& cfg,
bool use_gc_thread,
bool run_reshard_thread,
bool use_cache = true,
bool use_gc = true) {
- rgw::sal::Store* store = init_storage_provider(dpp, cct, cfg, use_gc_thread,
+ rgw::sal::Driver* driver = init_storage_provider(dpp, cct, cfg, use_gc_thread,
use_lc_thread,
quota_threads,
run_sync_thread,
run_reshard_thread,
use_cache, use_gc);
- return store;
+ return driver;
}
- /** Get a stripped down store by service name */
- static rgw::sal::Store* get_raw_storage(const DoutPrefixProvider* dpp,
+ /** Get a stripped down driver by service name */
+ static rgw::sal::Driver* get_raw_storage(const DoutPrefixProvider* dpp,
CephContext* cct, const Config& cfg) {
- rgw::sal::Store* store = init_raw_storage_provider(dpp, cct, cfg);
- return store;
+ rgw::sal::Driver* driver = init_raw_storage_provider(dpp, cct, cfg);
+ return driver;
}
- /** Initialize a new full Store */
- static rgw::sal::Store* init_storage_provider(const DoutPrefixProvider* dpp,
+ /** Initialize a new full Driver */
+ static rgw::sal::Driver* init_storage_provider(const DoutPrefixProvider* dpp,
CephContext* cct,
const Config& cfg,
bool use_gc_thread,
bool run_reshard_thread,
bool use_metadata_cache,
bool use_gc);
- /** Initialize a new raw Store */
- static rgw::sal::Store* init_raw_storage_provider(const DoutPrefixProvider* dpp,
+ /** Initialize a new raw Driver */
+ static rgw::sal::Driver* init_raw_storage_provider(const DoutPrefixProvider* dpp,
CephContext* cct,
const Config& cfg);
- /** Close a Store when it's no longer needed */
- static void close_storage(rgw::sal::Store* store);
+ /** Close a Driver when it's no longer needed */
+ static void close_storage(rgw::sal::Driver* driver);
- /** Get the config for stores/filters */
+ /** Get the config for drivers/filters */
static Config get_config(bool admin, CephContext* cct);
/** Create a ConfigStore */
const std::string& get_bucket_name() { return bucket->get_name(); }
};
-class DaosStore : public StoreStore {
+class DaosStore : public StoreDriver {
private:
std::string luarocks_path;
DaosZone zone;
optional_yield y,
MultipartUpload* upload,
std::unique_ptr<rgw::sal::Object> _head_obj,
- DBStore* _store,
+ DBStore* _driver,
const rgw_user& _owner,
const rgw_placement_rule *_ptail_placement_rule,
uint64_t _part_num, const std::string& _part_num_str):
StoreWriter(dpp, y),
- store(_store),
+ store(_driver),
owner(_owner),
ptail_placement_rule(_ptail_placement_rule),
head_obj(std::move(_head_obj)),
oid(head_obj->get_name() + "." + upload_id +
"." + std::to_string(part_num)),
meta_obj(((DBMultipartUpload*)upload)->get_meta_obj()),
- op_target(_store->getDB(), head_obj->get_bucket()->get_info(), head_obj->get_obj(), upload_id),
+ op_target(_driver->getDB(), head_obj->get_bucket()->get_info(), head_obj->get_obj(), upload_id),
parent_op(&op_target),
part_num_str(_part_num_str) {}
DBAtomicWriter::DBAtomicWriter(const DoutPrefixProvider *dpp,
optional_yield y,
std::unique_ptr<rgw::sal::Object> _head_obj,
- DBStore* _store,
+ DBStore* _driver,
const rgw_user& _owner,
const rgw_placement_rule *_ptail_placement_rule,
uint64_t _olh_epoch,
const std::string& _unique_tag) :
StoreWriter(dpp, y),
- store(_store),
+ store(_driver),
owner(_owner),
ptail_placement_rule(_ptail_placement_rule),
olh_epoch(_olh_epoch),
unique_tag(_unique_tag),
- obj(_store, _head_obj->get_key(), _head_obj->get_bucket()),
- op_target(_store->getDB(), obj.get_bucket()->get_info(), obj.get_obj()),
+ obj(_driver, _head_obj->get_key(), _head_obj->get_bucket()),
+ op_target(_driver->getDB(), obj.get_bucket()->get_info(), obj.get_obj()),
parent_op(&op_target) {}
int DBAtomicWriter::prepare(optional_yield y)
void *newDBStore(CephContext *cct)
{
- rgw::sal::DBStore *store = new rgw::sal::DBStore();
+ rgw::sal::DBStore *driver = new rgw::sal::DBStore();
DBStoreManager *dbsm = new DBStoreManager(cct);
DB *db = dbsm->getDB();
if (!db) {
delete dbsm;
- delete store;
+ delete driver;
return nullptr;
}
- store->setDBStoreManager(dbsm);
- store->setDB(db);
- db->set_store((rgw::sal::Store*)store);
+ driver->setDBStoreManager(dbsm);
+ driver->setDB(db);
+ db->set_driver(static_cast<rgw::sal::Driver*>(driver));
db->set_context(cct);
- return store;
+ return driver;
}
}
optional_yield y) override;
};
- class DBStore : public StoreStore {
+ class DBStore : public StoreDriver {
private:
/* DBStoreManager is used in case multiple
* connections are needed one for each tenant.
return 0;
}
-int FilterStore::initialize(CephContext *cct, const DoutPrefixProvider *dpp)
+int FilterDriver::initialize(CephContext *cct, const DoutPrefixProvider *dpp)
{
zone = std::make_unique<FilterZone>(next->get_zone()->clone());
return 0;
}
-const std::string FilterStore::get_name() const
+const std::string FilterDriver::get_name() const
{
std::string name = "filter<" + next->get_name() + ">";
return name;
}
-std::string FilterStore::get_cluster_id(const DoutPrefixProvider* dpp, optional_yield y)
+std::string FilterDriver::get_cluster_id(const DoutPrefixProvider* dpp, optional_yield y)
{
return next->get_cluster_id(dpp, y);
}
-std::unique_ptr<User> FilterStore::get_user(const rgw_user &u)
+std::unique_ptr<User> FilterDriver::get_user(const rgw_user &u)
{
std::unique_ptr<User> user = next->get_user(u);
return std::make_unique<FilterUser>(std::move(user));
}
-int FilterStore::get_user_by_access_key(const DoutPrefixProvider* dpp, const std::string& key, optional_yield y, std::unique_ptr<User>* user)
+int FilterDriver::get_user_by_access_key(const DoutPrefixProvider* dpp, const std::string& key, optional_yield y, std::unique_ptr<User>* user)
{
std::unique_ptr<User> nu;
int ret;
return 0;
}
-int FilterStore::get_user_by_email(const DoutPrefixProvider* dpp, const std::string& email, optional_yield y, std::unique_ptr<User>* user)
+int FilterDriver::get_user_by_email(const DoutPrefixProvider* dpp, const std::string& email, optional_yield y, std::unique_ptr<User>* user)
{
std::unique_ptr<User> nu;
int ret;
return 0;
}
-int FilterStore::get_user_by_swift(const DoutPrefixProvider* dpp, const std::string& user_str, optional_yield y, std::unique_ptr<User>* user)
+int FilterDriver::get_user_by_swift(const DoutPrefixProvider* dpp, const std::string& user_str, optional_yield y, std::unique_ptr<User>* user)
{
std::unique_ptr<User> nu;
int ret;
return 0;
}
-std::unique_ptr<Object> FilterStore::get_object(const rgw_obj_key& k)
+std::unique_ptr<Object> FilterDriver::get_object(const rgw_obj_key& k)
{
std::unique_ptr<Object> o = next->get_object(k);
return std::make_unique<FilterObject>(std::move(o));
}
-int FilterStore::get_bucket(const DoutPrefixProvider* dpp, User* u, const rgw_bucket& b, std::unique_ptr<Bucket>* bucket, optional_yield y)
+int FilterDriver::get_bucket(const DoutPrefixProvider* dpp, User* u, const rgw_bucket& b, std::unique_ptr<Bucket>* bucket, optional_yield y)
{
std::unique_ptr<Bucket> nb;
int ret;
return 0;
}
-int FilterStore::get_bucket(User* u, const RGWBucketInfo& i, std::unique_ptr<Bucket>* bucket)
+int FilterDriver::get_bucket(User* u, const RGWBucketInfo& i, std::unique_ptr<Bucket>* bucket)
{
std::unique_ptr<Bucket> nb;
int ret;
return 0;
}
-int FilterStore::get_bucket(const DoutPrefixProvider* dpp, User* u, const std::string& tenant, const std::string& name, std::unique_ptr<Bucket>* bucket, optional_yield y)
+int FilterDriver::get_bucket(const DoutPrefixProvider* dpp, User* u, const std::string& tenant, const std::string& name, std::unique_ptr<Bucket>* bucket, optional_yield y)
{
std::unique_ptr<Bucket> nb;
int ret;
return 0;
}
-bool FilterStore::is_meta_master()
+bool FilterDriver::is_meta_master()
{
return next->is_meta_master();
}
-int FilterStore::forward_request_to_master(const DoutPrefixProvider *dpp,
+int FilterDriver::forward_request_to_master(const DoutPrefixProvider *dpp,
User* user, obj_version* objv,
bufferlist& in_data,
JSONParser* jp, req_info& info,
return next->forward_request_to_master(dpp, user, objv, in_data, jp, info, y);
}
-int FilterStore::forward_iam_request_to_master(const DoutPrefixProvider *dpp,
+int FilterDriver::forward_iam_request_to_master(const DoutPrefixProvider *dpp,
const RGWAccessKey& key,
obj_version* objv,
bufferlist& in_data,
return next->forward_iam_request_to_master(dpp, key, objv, in_data, parser, info, y);
}
-std::string FilterStore::zone_unique_id(uint64_t unique_num)
+std::string FilterDriver::zone_unique_id(uint64_t unique_num)
{
return next->zone_unique_id(unique_num);
}
-std::string FilterStore::zone_unique_trans_id(uint64_t unique_num)
+std::string FilterDriver::zone_unique_trans_id(uint64_t unique_num)
{
return next->zone_unique_trans_id(unique_num);
}
-int FilterStore::get_zonegroup(const std::string& id,
+int FilterDriver::get_zonegroup(const std::string& id,
std::unique_ptr<ZoneGroup>* zonegroup)
{
std::unique_ptr<ZoneGroup> ngz;
return 0;
}
-int FilterStore::cluster_stat(RGWClusterStat& stats)
+int FilterDriver::cluster_stat(RGWClusterStat& stats)
{
return next->cluster_stat(stats);
}
-std::unique_ptr<Lifecycle> FilterStore::get_lifecycle(void)
+std::unique_ptr<Lifecycle> FilterDriver::get_lifecycle(void)
{
std::unique_ptr<Lifecycle> lc = next->get_lifecycle();
return std::make_unique<FilterLifecycle>(std::move(lc));
}
-std::unique_ptr<Completions> FilterStore::get_completions(void)
+std::unique_ptr<Completions> FilterDriver::get_completions(void)
{
std::unique_ptr<Completions> c = next->get_completions();
return std::make_unique<FilterCompletions>(std::move(c));
}
-std::unique_ptr<Notification> FilterStore::get_notification(rgw::sal::Object* obj,
+std::unique_ptr<Notification> FilterDriver::get_notification(rgw::sal::Object* obj,
rgw::sal::Object* src_obj, req_state* s,
rgw::notify::EventType event_type,
const std::string* object_name)
return std::make_unique<FilterNotification>(std::move(n));
}
-std::unique_ptr<Notification> FilterStore::get_notification(const DoutPrefixProvider* dpp,
+std::unique_ptr<Notification> FilterDriver::get_notification(const DoutPrefixProvider* dpp,
rgw::sal::Object* obj, rgw::sal::Object* src_obj,
rgw::notify::EventType event_type,
rgw::sal::Bucket* _bucket, std::string& _user_id,
return std::make_unique<FilterNotification>(std::move(n));
}
-RGWLC* FilterStore::get_rgwlc()
+RGWLC* FilterDriver::get_rgwlc()
{
return next->get_rgwlc();
}
-RGWCoroutinesManagerRegistry* FilterStore::get_cr_registry()
+RGWCoroutinesManagerRegistry* FilterDriver::get_cr_registry()
{
return next->get_cr_registry();
}
-int FilterStore::log_usage(const DoutPrefixProvider *dpp, std::map<rgw_user_bucket, RGWUsageBatch>& usage_info)
+int FilterDriver::log_usage(const DoutPrefixProvider *dpp, std::map<rgw_user_bucket, RGWUsageBatch>& usage_info)
{
return next->log_usage(dpp, usage_info);
}
-int FilterStore::log_op(const DoutPrefixProvider *dpp, std::string& oid, bufferlist& bl)
+int FilterDriver::log_op(const DoutPrefixProvider *dpp, std::string& oid, bufferlist& bl)
{
return next->log_op(dpp, oid, bl);
}
-int FilterStore::register_to_service_map(const DoutPrefixProvider *dpp,
+int FilterDriver::register_to_service_map(const DoutPrefixProvider *dpp,
const std::string& daemon_type,
const std::map<std::string, std::string>& meta)
{
return next->register_to_service_map(dpp, daemon_type, meta);
}
-void FilterStore::get_quota(RGWQuota& quota)
+void FilterDriver::get_quota(RGWQuota& quota)
{
return next->get_quota(quota);
}
-void FilterStore::get_ratelimit(RGWRateLimitInfo& bucket_ratelimit,
+void FilterDriver::get_ratelimit(RGWRateLimitInfo& bucket_ratelimit,
RGWRateLimitInfo& user_ratelimit,
RGWRateLimitInfo& anon_ratelimit)
{
return next->get_ratelimit(bucket_ratelimit, user_ratelimit, anon_ratelimit);
}
-int FilterStore::set_buckets_enabled(const DoutPrefixProvider* dpp,
+int FilterDriver::set_buckets_enabled(const DoutPrefixProvider* dpp,
std::vector<rgw_bucket>& buckets, bool enabled)
{
return next->set_buckets_enabled(dpp, buckets, enabled);
}
-uint64_t FilterStore::get_new_req_id()
+uint64_t FilterDriver::get_new_req_id()
{
return next->get_new_req_id();
}
-int FilterStore::get_sync_policy_handler(const DoutPrefixProvider* dpp,
+int FilterDriver::get_sync_policy_handler(const DoutPrefixProvider* dpp,
std::optional<rgw_zone_id> zone,
std::optional<rgw_bucket> bucket,
RGWBucketSyncPolicyHandlerRef* phandler,
return next->get_sync_policy_handler(dpp, zone, bucket, phandler, y);
}
-RGWDataSyncStatusManager* FilterStore::get_data_sync_manager(const rgw_zone_id& source_zone)
+RGWDataSyncStatusManager* FilterDriver::get_data_sync_manager(const rgw_zone_id& source_zone)
{
return next->get_data_sync_manager(source_zone);
}
-void FilterStore::wakeup_meta_sync_shards(std::set<int>& shard_ids)
+void FilterDriver::wakeup_meta_sync_shards(std::set<int>& shard_ids)
{
return next->wakeup_meta_sync_shards(shard_ids);
}
-void FilterStore::wakeup_data_sync_shards(const DoutPrefixProvider *dpp,
+void FilterDriver::wakeup_data_sync_shards(const DoutPrefixProvider *dpp,
const rgw_zone_id& source_zone,
boost::container::flat_map<int, boost::container::flat_set<rgw_data_notify_entry>>& shard_ids)
{
return next->wakeup_data_sync_shards(dpp, source_zone, shard_ids);
}
-int FilterStore::clear_usage(const DoutPrefixProvider *dpp)
+int FilterDriver::clear_usage(const DoutPrefixProvider *dpp)
{
return next->clear_usage(dpp);
}
-int FilterStore::read_all_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch,
+int FilterDriver::read_all_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch,
uint64_t end_epoch, uint32_t max_entries,
bool* is_truncated, RGWUsageIter& usage_iter,
std::map<rgw_user_bucket, rgw_usage_log_entry>& usage)
is_truncated, usage_iter, usage);
}
-int FilterStore::trim_all_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch,
+int FilterDriver::trim_all_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch,
uint64_t end_epoch)
{
return next->trim_all_usage(dpp, start_epoch, end_epoch);
}
-int FilterStore::get_config_key_val(std::string name, bufferlist* bl)
+int FilterDriver::get_config_key_val(std::string name, bufferlist* bl)
{
return next->get_config_key_val(name, bl);
}
-int FilterStore::meta_list_keys_init(const DoutPrefixProvider *dpp,
+int FilterDriver::meta_list_keys_init(const DoutPrefixProvider *dpp,
const std::string& section,
const std::string& marker, void** phandle)
{
return next->meta_list_keys_init(dpp, section, marker, phandle);
}
-int FilterStore::meta_list_keys_next(const DoutPrefixProvider *dpp, void* handle,
+int FilterDriver::meta_list_keys_next(const DoutPrefixProvider *dpp, void* handle,
int max, std::list<std::string>& keys,
bool* truncated)
{
return next->meta_list_keys_next(dpp, handle, max, keys, truncated);
}
-void FilterStore::meta_list_keys_complete(void* handle)
+void FilterDriver::meta_list_keys_complete(void* handle)
{
next->meta_list_keys_complete(handle);
}
-std::string FilterStore::meta_get_marker(void* handle)
+std::string FilterDriver::meta_get_marker(void* handle)
{
return next->meta_get_marker(handle);
}
-int FilterStore::meta_remove(const DoutPrefixProvider* dpp, std::string& metadata_key,
+int FilterDriver::meta_remove(const DoutPrefixProvider* dpp, std::string& metadata_key,
optional_yield y)
{
return next->meta_remove(dpp, metadata_key, y);
}
-const RGWSyncModuleInstanceRef& FilterStore::get_sync_module()
+const RGWSyncModuleInstanceRef& FilterDriver::get_sync_module()
{
return next->get_sync_module();
}
-std::unique_ptr<LuaManager> FilterStore::get_lua_manager()
+std::unique_ptr<LuaManager> FilterDriver::get_lua_manager()
{
std::unique_ptr<LuaManager> nm = next->get_lua_manager();
return std::make_unique<FilterLuaManager>(std::move(nm));
}
-std::unique_ptr<RGWRole> FilterStore::get_role(std::string name,
+std::unique_ptr<RGWRole> FilterDriver::get_role(std::string name,
std::string tenant,
std::string path,
std::string trust_policy,
return next->get_role(name, tenant, path, trust_policy, max_session_duration_str, tags);
}
-std::unique_ptr<RGWRole> FilterStore::get_role(std::string id)
+std::unique_ptr<RGWRole> FilterDriver::get_role(std::string id)
{
return next->get_role(id);
}
-std::unique_ptr<RGWRole> FilterStore::get_role(const RGWRoleInfo& info)
+std::unique_ptr<RGWRole> FilterDriver::get_role(const RGWRoleInfo& info)
{
return next->get_role(info);
}
-int FilterStore::get_roles(const DoutPrefixProvider *dpp,
+int FilterDriver::get_roles(const DoutPrefixProvider *dpp,
optional_yield y,
const std::string& path_prefix,
const std::string& tenant,
return next->get_roles(dpp, y, path_prefix, tenant, roles);
}
-std::unique_ptr<RGWOIDCProvider> FilterStore::get_oidc_provider()
+std::unique_ptr<RGWOIDCProvider> FilterDriver::get_oidc_provider()
{
return next->get_oidc_provider();
}
-int FilterStore::get_oidc_providers(const DoutPrefixProvider *dpp,
+int FilterDriver::get_oidc_providers(const DoutPrefixProvider *dpp,
const std::string& tenant,
std::vector<std::unique_ptr<RGWOIDCProvider>>& providers)
{
return next->get_oidc_providers(dpp, tenant, providers);
}
-std::unique_ptr<Writer> FilterStore::get_append_writer(const DoutPrefixProvider *dpp,
+std::unique_ptr<Writer> FilterDriver::get_append_writer(const DoutPrefixProvider *dpp,
optional_yield y,
std::unique_ptr<rgw::sal::Object> _head_obj,
const rgw_user& owner,
return std::make_unique<FilterWriter>(std::move(writer), std::move(_head_obj));
}
-std::unique_ptr<Writer> FilterStore::get_atomic_writer(const DoutPrefixProvider *dpp,
+std::unique_ptr<Writer> FilterDriver::get_atomic_writer(const DoutPrefixProvider *dpp,
optional_yield y,
std::unique_ptr<rgw::sal::Object> _head_obj,
const rgw_user& owner,
return std::make_unique<FilterWriter>(std::move(writer), std::move(_head_obj));
}
-const std::string& FilterStore::get_compression_type(const rgw_placement_rule& rule)
+const std::string& FilterDriver::get_compression_type(const rgw_placement_rule& rule)
{
return next->get_compression_type(rule);
}
-bool FilterStore::valid_placement(const rgw_placement_rule& rule)
+bool FilterDriver::valid_placement(const rgw_placement_rule& rule)
{
return next->valid_placement(rule);
}
-void FilterStore::finalize(void)
+void FilterDriver::finalize(void)
{
next->finalize();
}
-CephContext* FilterStore::ctx(void)
+CephContext* FilterDriver::ctx(void)
{
return next->ctx();
}
-const std::string& FilterStore::get_luarocks_path() const
+const std::string& FilterDriver::get_luarocks_path() const
{
return next->get_luarocks_path();
}
-void FilterStore::set_luarocks_path(const std::string& path)
+void FilterDriver::set_luarocks_path(const std::string& path)
{
next->set_luarocks_path(path);
}
extern "C" {
-rgw::sal::Store* newBaseFilter(rgw::sal::Store* next)
+rgw::sal::Driver* newBaseFilter(rgw::sal::Driver* next)
{
- rgw::sal::FilterStore* store = new rgw::sal::FilterStore(next);
+ rgw::sal::FilterDriver* driver = new rgw::sal::FilterDriver(next);
- return store;
+ return driver;
}
}
}
};
-class FilterStore : public Store {
+class FilterDriver : public Driver {
protected:
- Store* next;
+ Driver* next;
private:
std::unique_ptr<FilterZone> zone;
public:
- FilterStore(Store* _next) : next(_next) {}
- virtual ~FilterStore() = default;
+ FilterDriver(Driver* _next) : next(_next) {}
+ virtual ~FilterDriver() = default;
virtual int initialize(CephContext *cct, const DoutPrefixProvider *dpp) override;
virtual const std::string get_name() const override;
namespace rgw { namespace sal {
- class Store;
+ class Driver;
class User;
class Bucket;
class BucketList;
int delete_parts(const DoutPrefixProvider *dpp);
};
-class MotrStore : public StoreStore {
+class MotrStore : public StoreDriver {
private:
std::string luarocks_path;
MotrZone zone;
namespace rgw { namespace sal {
-class StoreStore : public Store {
+class StoreDriver : public Driver {
public:
- StoreStore() {}
- virtual ~StoreStore() = default;
+ StoreDriver() {}
+ virtual ~StoreDriver() = default;
virtual uint64_t get_new_req_id() override {
return ceph::util::generate_random_number<uint64_t>();
}
int AssumedRoleUser::generateAssumedRoleUser(CephContext* cct,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
const string& roleId,
const rgw::ARN& roleArn,
const string& roleSessionName)
if (auto r_arn = rgw::ARN::parse(arn); r_arn) {
auto pos = r_arn->resource.find_last_of('/');
string roleName = r_arn->resource.substr(pos + 1);
- std::unique_ptr<rgw::sal::RGWRole> role = store->get_role(roleName, r_arn->account);
+ std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(roleName, r_arn->account);
if (int ret = role->get(dpp, y); ret < 0) {
if (ret == -ENOENT) {
ldpp_dout(dpp, 0) << "Role doesn't exist: " << roleName << dendl;
int STSService::storeARN(const DoutPrefixProvider *dpp, string& arn, optional_yield y)
{
int ret = 0;
- std::unique_ptr<rgw::sal::User> user = store->get_user(user_id);
+ std::unique_ptr<rgw::sal::User> user = driver->get_user(user_id);
if ((ret = user->load_user(dpp, y)) < 0) {
return -ERR_NO_SUCH_ENTITY;
}
//Generate Assumed Role User
response.assumeRoleResp.retCode = response.assumeRoleResp.user.generateAssumedRoleUser(cct,
- store,
+ driver,
roleId,
r_arn.get(),
req.getRoleSessionName());
response.packedPolicySize = (policy.size() / req.getMaxPolicySize()) * 100;
//Generate Assumed Role User
- response.retCode = response.user.generateAssumedRoleUser(cct, store, roleId, r_arn.get(), req.getRoleSessionName());
+ response.retCode = response.user.generateAssumedRoleUser(cct, driver, roleId, r_arn.get(), req.getRoleSessionName());
if (response.retCode < 0) {
return response;
}
std::string assumeRoleId;
public:
int generateAssumedRoleUser( CephContext* cct,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
const std::string& roleId,
const rgw::ARN& roleArn,
const std::string& roleSessionName);
class STSService {
CephContext* cct;
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
rgw_user user_id;
std::unique_ptr<rgw::sal::RGWRole> role;
rgw::auth::Identity* identity;
int storeARN(const DoutPrefixProvider *dpp, std::string& arn, optional_yield y);
public:
STSService() = default;
- STSService(CephContext* cct, rgw::sal::Store* store, rgw_user user_id,
+ STSService(CephContext* cct, rgw::sal::Driver* driver, rgw_user user_id,
rgw::auth::Identity* identity)
- : cct(cct), store(store), user_id(user_id), identity(identity) {}
+ : cct(cct), driver(driver), user_id(user_id), identity(identity) {}
std::tuple<int, rgw::sal::RGWRole*> getRoleInfo(const DoutPrefixProvider *dpp, const std::string& arn, optional_yield y);
AssumeRoleResponse assumeRole(const DoutPrefixProvider *dpp, AssumeRoleRequest& req, optional_yield y);
GetSessionTokenResponse getSessionToken(const DoutPrefixProvider *dpp, GetSessionTokenRequest& req);
rgw_user uid(s->account_name);
if (uid.tenant.empty()) {
rgw_user tenanted_uid(uid.id, uid.id);
- user = store->get_user(tenanted_uid);
+ user = driver->get_user(tenanted_uid);
if (user->load_user(dpp, s->yield) >= 0) {
/* Succeeded */
found = true;
}
if (!found) {
- user = store->get_user(uid);
+ user = driver->get_user(uid);
if (user->load_user(dpp, s->yield) < 0) {
throw -EPERM;
}
b.tenant = std::move(bucket_tenant);
b.name = std::move(bucket_name);
std::unique_ptr<rgw::sal::Bucket> bucket;
- int ret = store->get_bucket(dpp, nullptr, b, &bucket, s->yield);
+ int ret = driver->get_bucket(dpp, nullptr, b, &bucket, s->yield);
if (ret < 0) {
throw ret;
}
<< dendl;
std::unique_ptr<rgw::sal::User> user;
- user = store->get_user(bucket->get_info().owner);
+ user = driver->get_user(bucket->get_info().owner);
if (user->load_user(dpp, s->yield) < 0) {
throw -EPERM;
}
ldpp_dout(dpp, 10) << "swift user=" << swift_user << dendl;
std::unique_ptr<rgw::sal::User> user;
- ret = store->get_user_by_swift(dpp, swift_user, s->yield, &user);
+ ret = driver->get_user_by_swift(dpp, swift_user, s->yield, &user);
if (ret < 0) {
ldpp_dout(dpp, 0) << "NOTICE: couldn't map swift user" << dendl;
throw ret;
}
std::unique_ptr<rgw::sal::User> user;
- ret = store->get_user_by_swift(dpp, swift_user, s->yield, &user);
+ ret = driver->get_user_by_swift(dpp, swift_user, s->yield, &user);
if (ret < 0) {
throw ret;
}
user_str = user_name;
- ret = store->get_user_by_swift(s, user_str, s->yield, &user);
+ ret = driver->get_user_by_swift(s, user_str, s->yield, &user);
if (ret < 0) {
ret = -EACCES;
goto done;
end_header(s);
}
-int RGWHandler_SWIFT_Auth::init(rgw::sal::Store* store, req_state *state,
+int RGWHandler_SWIFT_Auth::init(rgw::sal::Driver* driver, req_state *state,
rgw::io::BasicClient *cio)
{
state->dialect = "swift-auth";
state->formatter = new JSONFormatter;
state->format = RGWFormat::JSON;
- return RGWHandler::init(store, state, cio);
+ return RGWHandler::init(driver, state, cio);
}
int RGWHandler_SWIFT_Auth::authorize(const DoutPrefixProvider *dpp, optional_yield)
using result_t = rgw::auth::Engine::result_t;
CephContext* const cct;
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
const TempURLApplier::Factory* const apl_factory;
/* Helper methods. */
public:
TempURLEngine(CephContext* const cct,
- rgw::sal::Store* _store ,
+ rgw::sal::Driver* _driver,
const TempURLApplier::Factory* const apl_factory)
: cct(cct),
- store(_store),
+ driver(_driver),
apl_factory(apl_factory) {
}
using result_t = rgw::auth::Engine::result_t;
CephContext* const cct;
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
const rgw::auth::TokenExtractor* const extractor;
const rgw::auth::LocalApplier::Factory* const apl_factory;
public:
SignedTokenEngine(CephContext* const cct,
- rgw::sal::Store* _store,
+ rgw::sal::Driver* _driver,
const rgw::auth::TokenExtractor* const extractor,
const rgw::auth::LocalApplier::Factory* const apl_factory)
: cct(cct),
- store(_store),
+ driver(_driver),
extractor(extractor),
apl_factory(apl_factory) {
}
using result_t = rgw::auth::Engine::result_t;
CephContext* const cct;
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
const rgw::auth::TokenExtractor* const extractor;
const rgw::auth::LocalApplier::Factory* const apl_factory;
public:
ExternalTokenEngine(CephContext* const cct,
- rgw::sal::Store* _store,
+ rgw::sal::Driver* _driver,
const rgw::auth::TokenExtractor* const extractor,
const rgw::auth::LocalApplier::Factory* const apl_factory)
: cct(cct),
- store(_store),
+ driver(_driver),
extractor(extractor),
apl_factory(apl_factory) {
}
public rgw::auth::RemoteApplier::Factory,
public rgw::auth::LocalApplier::Factory,
public rgw::auth::swift::TempURLApplier::Factory {
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
ImplicitTenants& implicit_tenant_context;
/* The engines. */
acl_strategy_t&& extra_acl_strategy,
const rgw::auth::RemoteApplier::AuthInfo &info) const override {
auto apl = \
- rgw::auth::add_3rdparty(store, rgw_user(s->account_name),
- rgw::auth::add_sysreq(cct, store, s,
- rgw::auth::RemoteApplier(cct, store, std::move(extra_acl_strategy), info,
+ rgw::auth::add_3rdparty(driver, rgw_user(s->account_name),
+ rgw::auth::add_sysreq(cct, driver, s,
+ rgw::auth::RemoteApplier(cct, driver, std::move(extra_acl_strategy), info,
implicit_tenant_context,
rgw::auth::ImplicitTenants::IMPLICIT_TENANTS_SWIFT)));
/* TODO(rzarzynski): replace with static_ptr. */
const std::optional<uint32_t>& perm_mask,
const std::string& access_key_id) const override {
auto apl = \
- rgw::auth::add_3rdparty(store, rgw_user(s->account_name),
- rgw::auth::add_sysreq(cct, store, s,
+ rgw::auth::add_3rdparty(driver, rgw_user(s->account_name),
+ rgw::auth::add_sysreq(cct, driver, s,
rgw::auth::LocalApplier(cct, user_info, subuser, perm_mask, access_key_id)));
/* TODO(rzarzynski): replace with static_ptr. */
return aplptr_t(new decltype(apl)(std::move(apl)));
public:
DefaultStrategy(CephContext* const cct,
ImplicitTenants& implicit_tenant_context,
- rgw::sal::Store* _store)
- : store(_store),
+ rgw::sal::Driver* _driver)
+ : driver(_driver),
implicit_tenant_context(implicit_tenant_context),
tempurl_engine(cct,
- store,
+ driver,
static_cast<rgw::auth::swift::TempURLApplier::Factory*>(this)),
signed_engine(cct,
- store,
+ driver,
static_cast<rgw::auth::TokenExtractor*>(&auth_token_extractor),
static_cast<rgw::auth::LocalApplier::Factory*>(this)),
external_engine(cct,
- store,
+ driver,
static_cast<rgw::auth::TokenExtractor*>(&auth_token_extractor),
static_cast<rgw::auth::LocalApplier::Factory*>(this)),
anon_engine(cct,
~RGWHandler_SWIFT_Auth() override {}
RGWOp *op_get() override;
- int init(rgw::sal::Store* store, req_state *state, rgw::io::BasicClient *cio) override;
+ int init(rgw::sal::Driver* driver, req_state *state, rgw::io::BasicClient *cio) override;
int authorize(const DoutPrefixProvider *dpp, optional_yield y) override;
int postauth_init(optional_yield) override { return 0; }
int read_permissions(RGWOp *op, optional_yield) override { return 0; }
return this;
}
- RGWHandler_REST* get_handler(rgw::sal::Store* store,
+ RGWHandler_REST* get_handler(rgw::sal::Driver* driver,
req_state*,
const rgw::auth::StrategyRegistry&,
const std::string&) override {
seed::info.sha1_bl.clear();
bl.clear();
s = NULL;
- store = NULL;
+ driver = nullptr;
}
-void seed::init(req_state *p_req, rgw::sal::Store* p_store)
+void seed::init(req_state *_req, rgw::sal::Driver* _driver)
{
- s = p_req;
- store = p_store;
+ s = _req;
+ driver = _driver;
}
int seed::get_torrent_file(rgw::sal::Object* object,
bufferlist bl; // bufflist ready to send
req_state *s{nullptr};
- rgw::sal::Store* store{nullptr};
+ rgw::sal::Driver* driver{nullptr};
SHA1 h;
TorrentBencode dencode;
~seed();
int get_params();
- void init(req_state *p_req, rgw::sal::Store* p_store);
+ void init(req_state *_req, rgw::sal::Driver* _driver);
int get_torrent_file(rgw::sal::Object* object,
uint64_t &total_len,
ceph::bufferlist &bl_data,
formatter->close_section(); // categories
}
-int RGWUsage::show(const DoutPrefixProvider *dpp, rgw::sal::Store* store,
+int RGWUsage::show(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver,
rgw::sal::User* user , rgw::sal::Bucket* bucket,
uint64_t start_epoch, uint64_t end_epoch, bool show_log_entries,
bool show_log_sum,
ret = user->read_usage(dpp, start_epoch, end_epoch, max_entries, &is_truncated,
usage_iter, usage);
} else {
- ret = store->read_all_usage(dpp, start_epoch, end_epoch, max_entries, &is_truncated,
+ ret = driver->read_all_usage(dpp, start_epoch, end_epoch, max_entries, &is_truncated,
usage_iter, usage);
}
return 0;
}
-int RGWUsage::trim(const DoutPrefixProvider *dpp, rgw::sal::Store* store,
+int RGWUsage::trim(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver,
rgw::sal::User* user , rgw::sal::Bucket* bucket,
uint64_t start_epoch, uint64_t end_epoch)
{
} else if (user) {
return user->trim_usage(dpp, start_epoch, end_epoch);
} else {
- return store->trim_all_usage(dpp, start_epoch, end_epoch);
+ return driver->trim_all_usage(dpp, start_epoch, end_epoch);
}
}
-int RGWUsage::clear(const DoutPrefixProvider *dpp, rgw::sal::Store* store)
+int RGWUsage::clear(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver)
{
- return store->clear_usage(dpp);
+ return driver->clear_usage(dpp);
}
class RGWUsage
{
public:
- static int show(const DoutPrefixProvider *dpp, rgw::sal::Store* store,
+ static int show(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver,
rgw::sal::User* user , rgw::sal::Bucket* bucket,
uint64_t start_epoch, uint64_t end_epoch, bool show_log_entries,
bool show_log_sum,
std::map<std::string, bool> *categories, RGWFormatterFlusher& flusher);
- static int trim(const DoutPrefixProvider *dpp, rgw::sal::Store* store,
+ static int trim(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver,
rgw::sal::User* user , rgw::sal::Bucket* bucket,
uint64_t start_epoch, uint64_t end_epoch);
- static int clear(const DoutPrefixProvider *dpp, rgw::sal::Store* store);
+ static int clear(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver);
};
using namespace std;
-int rgw_user_sync_all_stats(const DoutPrefixProvider *dpp, rgw::sal::Store* store,
+int rgw_user_sync_all_stats(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver,
rgw::sal::User* user, optional_yield y)
{
rgw::sal::BucketList user_buckets;
- CephContext *cct = store->ctx();
+ CephContext *cct = driver->ctx();
size_t max_entries = cct->_conf->rgw_list_buckets_max_chunk;
string marker;
int ret;
}
int rgw_user_get_all_buckets_stats(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
rgw::sal::User* user,
map<string, bucket_meta_entry>& buckets_usage_map,
optional_yield y)
{
- CephContext *cct = store->ctx();
+ CephContext *cct = driver->ctx();
size_t max_entries = cct->_conf->rgw_list_buckets_max_chunk;
bool done;
string marker;
list<string> zones;
int r = zone_svc->list_zones(dpp, zones);
if (r < 0) {
- ldpp_dout(dpp, 10) << "WARNING: store->list_zones() returned r=" << r << dendl;
+ ldpp_dout(dpp, 10) << "WARNING: zone_svc->list_zones() returned r=" << r << dendl;
}
set<rgw_pool> pools;
class DB {
private:
const std::string db_name;
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
const std::string user_table;
const std::string bucket_table;
const std::string quota_table;
struct DBOps dbops; // DB operations, make it private?
- void set_store(rgw::sal::Store* _store) {
- store = _store;
+ void set_driver(rgw::sal::Driver* _driver) {
+ driver = _driver;
}
void set_context(CephContext *_cct) {
}
}
-void check_bad_user_bucket_mapping(rgw::sal::Store* store, rgw::sal::User* user,
+void check_bad_user_bucket_mapping(rgw::sal::Driver* driver, rgw::sal::User* user,
bool fix,
optional_yield y,
const DoutPrefixProvider *dpp)
rgw::sal::BucketList user_buckets;
string marker;
- CephContext *cct = store->ctx();
+ CephContext *cct = driver->ctx();
size_t max_entries = cct->_conf->rgw_list_buckets_max_chunk;
do {
int ret = user->list_buckets(dpp, marker, string(), max_entries, false, user_buckets, y);
if (ret < 0) {
- ldout(store->ctx(), 0) << "failed to read user buckets: "
+ ldout(driver->ctx(), 0) << "failed to read user buckets: "
<< cpp_strerror(-ret) << dendl;
return;
}
auto& bucket = i->second;
std::unique_ptr<rgw::sal::Bucket> actual_bucket;
- int r = store->get_bucket(dpp, user, user->get_tenant(), bucket->get_name(), &actual_bucket, null_yield);
+ int r = driver->get_bucket(dpp, user, user->get_tenant(), bucket->get_name(), &actual_bucket, null_yield);
if (r < 0) {
- ldout(store->ctx(), 0) << "could not get bucket info for bucket=" << bucket << dendl;
+ ldout(driver->ctx(), 0) << "could not get bucket info for bucket=" << bucket << dendl;
continue;
}
return rgw_obj_key::oid_to_key_in_ns(oid, &key, empty_ns);
}
-int rgw_remove_object(const DoutPrefixProvider *dpp, rgw::sal::Store* store, rgw::sal::Bucket* bucket, rgw_obj_key& key)
+int rgw_remove_object(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, rgw::sal::Bucket* bucket, rgw_obj_key& key)
{
if (key.instance.empty()) {
key.instance = "null";
*sink = msg;
}
-int RGWBucket::init(rgw::sal::Store* _store, RGWBucketAdminOpState& op_state,
+int RGWBucket::init(rgw::sal::Driver* _driver, RGWBucketAdminOpState& op_state,
optional_yield y, const DoutPrefixProvider *dpp, std::string *err_msg)
{
- if (!_store) {
+ if (!_driver) {
set_err_msg(err_msg, "no storage!");
return -EINVAL;
}
- store = _store;
+ driver = _driver;
std::string bucket_name = op_state.get_bucket_name();
if (bucket_name.empty() && op_state.get_user_id().empty())
return -EINVAL;
- user = store->get_user(op_state.get_user_id());
+ user = driver->get_user(op_state.get_user_id());
std::string tenant = user->get_tenant();
// split possible tenant/name
bucket_name = bucket_name.substr(pos + 1);
}
- int r = store->get_bucket(dpp, user.get(), tenant, bucket_name, &bucket, y);
+ int r = driver->get_bucket(dpp, user.get(), tenant, bucket_name, &bucket, y);
if (r < 0) {
set_err_msg(err_msg, "failed to fetch bucket info for bucket=" + bucket_name);
return r;
return 0;
}
-bool rgw_find_bucket_by_id(const DoutPrefixProvider *dpp, CephContext *cct, rgw::sal::Store* store,
+bool rgw_find_bucket_by_id(const DoutPrefixProvider *dpp, CephContext *cct, rgw::sal::Driver* driver,
const string& marker, const string& bucket_id, rgw_bucket* bucket_out)
{
void *handle = NULL;
bool truncated = false;
string s;
- int ret = store->meta_list_keys_init(dpp, "bucket.instance", marker, &handle);
+ int ret = driver->meta_list_keys_init(dpp, "bucket.instance", marker, &handle);
if (ret < 0) {
cerr << "ERROR: can't get key: " << cpp_strerror(-ret) << std::endl;
- store->meta_list_keys_complete(handle);
+ driver->meta_list_keys_complete(handle);
return -ret;
}
do {
list<string> keys;
- ret = store->meta_list_keys_next(dpp, handle, 1000, keys, &truncated);
+ ret = driver->meta_list_keys_next(dpp, handle, 1000, keys, &truncated);
if (ret < 0) {
cerr << "ERROR: lists_keys_next(): " << cpp_strerror(-ret) << std::endl;
- store->meta_list_keys_complete(handle);
+ driver->meta_list_keys_complete(handle);
return -ret;
}
for (list<string>::iterator iter = keys.begin(); iter != keys.end(); ++iter) {
continue;
}
if (bucket_id == bucket_out->bucket_id) {
- store->meta_list_keys_complete(handle);
+ driver->meta_list_keys_complete(handle);
return true;
}
}
} while (truncated);
- store->meta_list_keys_complete(handle);
+ driver->meta_list_keys_complete(handle);
return false;
}
bucket = op_state.get_bucket()->clone();
- int ret = rgw_remove_object(dpp, store, bucket.get(), key);
+ int ret = rgw_remove_object(dpp, driver, bucket.get(), key);
if (ret < 0) {
set_err_msg(err_msg, "unable to remove object" + cpp_strerror(-ret));
return ret;
int RGWBucket::sync(RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, std::string *err_msg)
{
- if (!store->is_meta_master()) {
+ if (!driver->is_meta_master()) {
set_err_msg(err_msg, "ERROR: failed to update bucket sync: only allowed on meta master zone");
return -EINVAL;
}
RGWAccessControlPolicy_S3 policy(g_ceph_context);
int ret = decode_bl(bl, policy);
if (ret < 0) {
- ldout(store->ctx(),0) << "failed to decode RGWAccessControlPolicy" << dendl;
+ ldout(driver->ctx(),0) << "failed to decode RGWAccessControlPolicy" << dendl;
}
policy.to_xml(o);
return 0;
}
int rgw_object_get_attr(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store, rgw::sal::Object* obj,
+ rgw::sal::Driver* driver, rgw::sal::Object* obj,
const char* attr_name, bufferlist& out_bl, optional_yield y)
{
std::unique_ptr<rgw::sal::Object::ReadOp> rop = obj->get_read_op();
bufferlist bl;
std::unique_ptr<rgw::sal::Object> obj = bucket->get_object(rgw_obj_key(object_name));
- ret = rgw_object_get_attr(dpp, store, obj.get(), RGW_ATTR_ACL, bl, y);
+ ret = rgw_object_get_attr(dpp, driver, obj.get(), RGW_ATTR_ACL, bl, y);
if (ret < 0){
return ret;
}
ret = decode_bl(bl, policy);
if (ret < 0) {
- ldout(store->ctx(),0) << "failed to decode RGWAccessControlPolicy" << dendl;
+ ldout(driver->ctx(),0) << "failed to decode RGWAccessControlPolicy" << dendl;
}
return ret;
}
ret = decode_bl(aiter->second, policy);
if (ret < 0) {
- ldout(store->ctx(),0) << "failed to decode RGWAccessControlPolicy" << dendl;
+ ldout(driver->ctx(),0) << "failed to decode RGWAccessControlPolicy" << dendl;
}
return ret;
}
-int RGWBucketAdminOp::get_policy(rgw::sal::Store* store, RGWBucketAdminOpState& op_state,
+int RGWBucketAdminOp::get_policy(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state,
RGWAccessControlPolicy& policy, const DoutPrefixProvider *dpp)
{
RGWBucket bucket;
- int ret = bucket.init(store, op_state, null_yield, dpp);
+ int ret = bucket.init(driver, op_state, null_yield, dpp);
if (ret < 0)
return ret;
/* Wrappers to facilitate RESTful interface */
-int RGWBucketAdminOp::get_policy(rgw::sal::Store* store, RGWBucketAdminOpState& op_state,
+int RGWBucketAdminOp::get_policy(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state,
RGWFormatterFlusher& flusher, const DoutPrefixProvider *dpp)
{
- RGWAccessControlPolicy policy(store->ctx());
+ RGWAccessControlPolicy policy(driver->ctx());
- int ret = get_policy(store, op_state, policy, dpp);
+ int ret = get_policy(driver, op_state, policy, dpp);
if (ret < 0)
return ret;
return 0;
}
-int RGWBucketAdminOp::dump_s3_policy(rgw::sal::Store* store, RGWBucketAdminOpState& op_state,
+int RGWBucketAdminOp::dump_s3_policy(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state,
ostream& os, const DoutPrefixProvider *dpp)
{
- RGWAccessControlPolicy_S3 policy(store->ctx());
+ RGWAccessControlPolicy_S3 policy(driver->ctx());
- int ret = get_policy(store, op_state, policy, dpp);
+ int ret = get_policy(driver, op_state, policy, dpp);
if (ret < 0)
return ret;
return 0;
}
-int RGWBucketAdminOp::unlink(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp)
+int RGWBucketAdminOp::unlink(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp)
{
RGWBucket bucket;
- int ret = bucket.init(store, op_state, null_yield, dpp);
+ int ret = bucket.init(driver, op_state, null_yield, dpp);
if (ret < 0)
return ret;
- return static_cast<rgw::sal::RadosStore*>(store)->ctl()->bucket->unlink_bucket(op_state.get_user_id(), op_state.get_bucket()->get_info().bucket, null_yield, dpp, true);
+ return static_cast<rgw::sal::RadosStore*>(driver)->ctl()->bucket->unlink_bucket(op_state.get_user_id(), op_state.get_bucket()->get_info().bucket, null_yield, dpp, true);
}
-int RGWBucketAdminOp::link(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, string *err)
+int RGWBucketAdminOp::link(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, string *err)
{
if (!op_state.is_user_op()) {
set_err_msg(err, "empty user id");
}
RGWBucket bucket;
- int ret = bucket.init(store, op_state, null_yield, dpp, err);
+ int ret = bucket.init(driver, op_state, null_yield, dpp, err);
if (ret < 0)
return ret;
return -EIO;
}
- int r = static_cast<rgw::sal::RadosStore*>(store)->ctl()->bucket->unlink_bucket(owner.get_id(), old_bucket->get_info().bucket, null_yield, dpp, false);
+ int r = static_cast<rgw::sal::RadosStore*>(driver)->ctl()->bucket->unlink_bucket(owner.get_id(), old_bucket->get_info().bucket, null_yield, dpp, false);
if (r < 0) {
set_err_msg(err, "could not unlink policy from user " + owner.get_id().to_str());
return r;
rgw::sal::Attrs ep_attrs;
rgw_ep_info ep_data{ep, ep_attrs};
- r = static_cast<rgw::sal::RadosStore*>(store)->ctl()->bucket->link_bucket(op_state.get_user_id(), loc_bucket->get_info().bucket, loc_bucket->get_info().creation_time, null_yield, dpp, true, &ep_data);
+ r = static_cast<rgw::sal::RadosStore*>(driver)->ctl()->bucket->link_bucket(op_state.get_user_id(), loc_bucket->get_info().bucket, loc_bucket->get_info().creation_time, null_yield, dpp, true, &ep_data);
if (r < 0) {
set_err_msg(err, "failed to relink bucket");
return r;
if (*loc_bucket != *old_bucket) {
// like RGWRados::delete_bucket -- excepting no bucket_index work.
- r = static_cast<rgw::sal::RadosStore*>(store)->ctl()->bucket->remove_bucket_entrypoint_info(
+ r = static_cast<rgw::sal::RadosStore*>(driver)->ctl()->bucket->remove_bucket_entrypoint_info(
old_bucket->get_key(), null_yield, dpp,
RGWBucketCtl::Bucket::RemoveParams()
.set_objv_tracker(&ep_data.ep_objv));
set_err_msg(err, "failed to unlink old bucket " + old_bucket->get_tenant() + "/" + old_bucket->get_name());
return r;
}
- r = static_cast<rgw::sal::RadosStore*>(store)->ctl()->bucket->remove_bucket_instance_info(
+ r = static_cast<rgw::sal::RadosStore*>(driver)->ctl()->bucket->remove_bucket_instance_info(
old_bucket->get_key(), old_bucket->get_info(),
null_yield, dpp,
RGWBucketCtl::BucketInstance::RemoveParams()
return 0;
}
-int RGWBucketAdminOp::chown(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, const string& marker, const DoutPrefixProvider *dpp, string *err)
+int RGWBucketAdminOp::chown(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const string& marker, const DoutPrefixProvider *dpp, string *err)
{
RGWBucket bucket;
- int ret = bucket.init(store, op_state, null_yield, dpp, err);
+ int ret = bucket.init(driver, op_state, null_yield, dpp, err);
if (ret < 0)
return ret;
}
-int RGWBucketAdminOp::check_index(rgw::sal::Store* store, RGWBucketAdminOpState& op_state,
+int RGWBucketAdminOp::check_index(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state,
RGWFormatterFlusher& flusher, optional_yield y, const DoutPrefixProvider *dpp)
{
int ret;
RGWBucket bucket;
- ret = bucket.init(store, op_state, null_yield, dpp);
+ ret = bucket.init(driver, op_state, null_yield, dpp);
if (ret < 0)
return ret;
return 0;
}
-int RGWBucketAdminOp::remove_bucket(rgw::sal::Store* store, RGWBucketAdminOpState& op_state,
+int RGWBucketAdminOp::remove_bucket(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state,
optional_yield y, const DoutPrefixProvider *dpp,
bool bypass_gc, bool keep_index_consistent)
{
std::unique_ptr<rgw::sal::Bucket> bucket;
- std::unique_ptr<rgw::sal::User> user = store->get_user(op_state.get_user_id());
+ std::unique_ptr<rgw::sal::User> user = driver->get_user(op_state.get_user_id());
- int ret = store->get_bucket(dpp, user.get(), user->get_tenant(), op_state.get_bucket_name(),
+ int ret = driver->get_bucket(dpp, user.get(), user->get_tenant(), op_state.get_bucket_name(),
&bucket, y);
if (ret < 0)
return ret;
return ret;
}
-int RGWBucketAdminOp::remove_object(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp)
+int RGWBucketAdminOp::remove_object(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp)
{
RGWBucket bucket;
- int ret = bucket.init(store, op_state, null_yield, dpp);
+ int ret = bucket.init(driver, op_state, null_yield, dpp);
if (ret < 0)
return ret;
return bucket.remove_object(dpp, op_state);
}
-int RGWBucketAdminOp::sync_bucket(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, string *err_msg)
+int RGWBucketAdminOp::sync_bucket(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, string *err_msg)
{
RGWBucket bucket;
- int ret = bucket.init(store, op_state, null_yield, dpp, err_msg);
+ int ret = bucket.init(driver, op_state, null_yield, dpp, err_msg);
if (ret < 0)
{
return ret;
return bucket.sync(op_state, dpp, err_msg);
}
-static int bucket_stats(rgw::sal::Store* store,
+static int bucket_stats(rgw::sal::Driver* driver,
const std::string& tenant_name,
const std::string& bucket_name,
Formatter *formatter,
map<RGWObjCategory, RGWStorageStats> stats;
real_time mtime;
- int ret = store->get_bucket(dpp, nullptr, tenant_name, bucket_name, &bucket, null_yield);
+ int ret = driver->get_bucket(dpp, nullptr, tenant_name, bucket_name, &bucket, null_yield);
if (ret < 0) {
return ret;
}
return 0;
}
-int RGWBucketAdminOp::limit_check(rgw::sal::Store* store,
+int RGWBucketAdminOp::limit_check(rgw::sal::Driver* driver,
RGWBucketAdminOpState& op_state,
const std::list<std::string>& user_ids,
RGWFormatterFlusher& flusher, optional_yield y,
{
int ret = 0;
const size_t max_entries =
- store->ctx()->_conf->rgw_list_buckets_max_chunk;
+ driver->ctx()->_conf->rgw_list_buckets_max_chunk;
const size_t safe_max_objs_per_shard =
- store->ctx()->_conf->rgw_safe_max_objects_per_shard;
+ driver->ctx()->_conf->rgw_safe_max_objects_per_shard;
uint16_t shard_warn_pct =
- store->ctx()->_conf->rgw_shard_warning_threshold;
+ driver->ctx()->_conf->rgw_shard_warning_threshold;
if (shard_warn_pct > 100)
shard_warn_pct = 90;
string marker;
rgw::sal::BucketList buckets;
do {
- std::unique_ptr<rgw::sal::User> user = store->get_user(rgw_user(user_id));
+ std::unique_ptr<rgw::sal::User> user = driver->get_user(rgw_user(user_id));
ret = user->list_buckets(dpp, marker, string(), max_entries, false, buckets, y);
return ret;
} /* RGWBucketAdminOp::limit_check */
-int RGWBucketAdminOp::info(rgw::sal::Store* store,
+int RGWBucketAdminOp::info(rgw::sal::Driver* driver,
RGWBucketAdminOpState& op_state,
RGWFormatterFlusher& flusher,
optional_yield y,
int ret = 0;
const std::string& bucket_name = op_state.get_bucket_name();
if (!bucket_name.empty()) {
- ret = bucket.init(store, op_state, null_yield, dpp);
+ ret = bucket.init(driver, op_state, null_yield, dpp);
if (-ENOENT == ret)
return -ERR_NO_SUCH_BUCKET;
else if (ret < 0)
Formatter *formatter = flusher.get_formatter();
flusher.start(0);
- CephContext *cct = store->ctx();
+ CephContext *cct = driver->ctx();
const size_t max_entries = cct->_conf->rgw_list_buckets_max_chunk;
formatter->open_array_section("buckets");
rgw::sal::BucketList buckets;
- std::unique_ptr<rgw::sal::User> user = store->get_user(op_state.get_user_id());
+ std::unique_ptr<rgw::sal::User> user = driver->get_user(op_state.get_user_id());
std::string marker;
const std::string empty_end_marker;
constexpr bool no_need_stats = false; // set need_stats to false
}
if (show_stats) {
- bucket_stats(store, user_id.tenant, obj_name, formatter, dpp);
+ bucket_stats(driver, user_id.tenant, obj_name, formatter, dpp);
} else {
formatter->dump_string("bucket", obj_name);
}
formatter->close_section();
} else if (!bucket_name.empty()) {
- ret = bucket_stats(store, user_id.tenant, bucket_name, formatter, dpp);
+ ret = bucket_stats(driver, user_id.tenant, bucket_name, formatter, dpp);
if (ret < 0) {
return ret;
}
bool truncated = true;
formatter->open_array_section("buckets");
- ret = store->meta_list_keys_init(dpp, "bucket", string(), &handle);
+ ret = driver->meta_list_keys_init(dpp, "bucket", string(), &handle);
while (ret == 0 && truncated) {
std::list<std::string> buckets;
constexpr int max_keys = 1000;
- ret = store->meta_list_keys_next(dpp, handle, max_keys, buckets,
+ ret = driver->meta_list_keys_next(dpp, handle, max_keys, buckets,
&truncated);
for (auto& bucket_name : buckets) {
if (show_stats) {
- bucket_stats(store, user_id.tenant, bucket_name, formatter, dpp);
+ bucket_stats(driver, user_id.tenant, bucket_name, formatter, dpp);
} else {
formatter->dump_string("bucket", bucket_name);
}
}
}
- store->meta_list_keys_complete(handle);
+ driver->meta_list_keys_complete(handle);
formatter->close_section();
}
return 0;
}
-int RGWBucketAdminOp::set_quota(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp)
+int RGWBucketAdminOp::set_quota(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp)
{
RGWBucket bucket;
- int ret = bucket.init(store, op_state, null_yield, dpp);
+ int ret = bucket.init(driver, op_state, null_yield, dpp);
if (ret < 0)
return ret;
return bucket.set_quota(op_state, dpp);
}
using bucket_instance_ls = std::vector<RGWBucketInfo>;
-void get_stale_instances(rgw::sal::Store* store, const std::string& bucket_name,
+void get_stale_instances(rgw::sal::Driver* driver, const std::string& bucket_name,
const vector<std::string>& lst,
bucket_instance_ls& stale_instances,
const DoutPrefixProvider *dpp)
RGWBucketInfo binfo;
std::unique_ptr<rgw::sal::Bucket> bucket;
rgw_bucket rbucket;
- rgw_bucket_parse_bucket_key(store->ctx(), bucket_instance, &rbucket, nullptr);
- int r = store->get_bucket(dpp, nullptr, rbucket, &bucket, null_yield);
+ rgw_bucket_parse_bucket_key(driver->ctx(), bucket_instance, &rbucket, nullptr);
+ int r = driver->get_bucket(dpp, nullptr, rbucket, &bucket, null_yield);
if (r < 0){
// this can only happen if someone deletes us right when we're processing
ldpp_dout(dpp, -1) << "Bucket instance is invalid: " << bucket_instance
auto [tenant, bname] = split_tenant(bucket_name);
RGWBucketInfo cur_bucket_info;
std::unique_ptr<rgw::sal::Bucket> cur_bucket;
- int r = store->get_bucket(dpp, nullptr, tenant, bname, &cur_bucket, null_yield);
+ int r = driver->get_bucket(dpp, nullptr, tenant, bname, &cur_bucket, null_yield);
if (r < 0) {
if (r == -ENOENT) {
// bucket doesn't exist, everything is stale then
// bucket and walk through these instances to make sure no one else interferes
// with these
{
- RGWBucketReshardLock reshard_lock(static_cast<rgw::sal::RadosStore*>(store), cur_bucket->get_info(), true);
+ RGWBucketReshardLock reshard_lock(static_cast<rgw::sal::RadosStore*>(driver), cur_bucket->get_info(), true);
r = reshard_lock.lock(dpp);
if (r < 0) {
// most likely bucket is under reshard, return the sureshot stale instances
return;
}
-static int process_stale_instances(rgw::sal::Store* store, RGWBucketAdminOpState& op_state,
+static int process_stale_instances(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state,
RGWFormatterFlusher& flusher,
const DoutPrefixProvider *dpp,
std::function<void(const bucket_instance_ls&,
Formatter *,
- rgw::sal::Store*)> process_f)
+ rgw::sal::Driver*)> process_f)
{
std::string marker;
void *handle;
Formatter *formatter = flusher.get_formatter();
static constexpr auto default_max_keys = 1000;
- int ret = store->meta_list_keys_init(dpp, "bucket.instance", marker, &handle);
+ int ret = driver->meta_list_keys_init(dpp, "bucket.instance", marker, &handle);
if (ret < 0) {
cerr << "ERROR: can't get key: " << cpp_strerror(-ret) << std::endl;
return ret;
bool truncated;
formatter->open_array_section("keys");
- auto g = make_scope_guard([&store, &handle, &formatter]() {
- store->meta_list_keys_complete(handle);
+ auto g = make_scope_guard([&driver, &handle, &formatter]() {
+ driver->meta_list_keys_complete(handle);
formatter->close_section(); // keys
formatter->flush(cout);
});
do {
list<std::string> keys;
- ret = store->meta_list_keys_next(dpp, handle, default_max_keys, keys, &truncated);
+ ret = driver->meta_list_keys_next(dpp, handle, default_max_keys, keys, &truncated);
if (ret < 0 && ret != -ENOENT) {
cerr << "ERROR: lists_keys_next(): " << cpp_strerror(-ret) << std::endl;
return ret;
}
for (const auto& kv: bucket_instance_map) {
bucket_instance_ls stale_lst;
- get_stale_instances(store, kv.first, kv.second, stale_lst, dpp);
- process_f(stale_lst, formatter, store);
+ get_stale_instances(driver, kv.first, kv.second, stale_lst, dpp);
+ process_f(stale_lst, formatter, driver);
}
}
} while (truncated);
return 0;
}
-int RGWBucketAdminOp::list_stale_instances(rgw::sal::Store* store,
+int RGWBucketAdminOp::list_stale_instances(rgw::sal::Driver* driver,
RGWBucketAdminOpState& op_state,
RGWFormatterFlusher& flusher,
const DoutPrefixProvider *dpp)
{
auto process_f = [](const bucket_instance_ls& lst,
Formatter *formatter,
- rgw::sal::Store*){
+ rgw::sal::Driver*){
for (const auto& binfo: lst)
formatter->dump_string("key", binfo.bucket.get_key());
};
- return process_stale_instances(store, op_state, flusher, dpp, process_f);
+ return process_stale_instances(driver, op_state, flusher, dpp, process_f);
}
-int RGWBucketAdminOp::clear_stale_instances(rgw::sal::Store* store,
+int RGWBucketAdminOp::clear_stale_instances(rgw::sal::Driver* driver,
RGWBucketAdminOpState& op_state,
RGWFormatterFlusher& flusher,
const DoutPrefixProvider *dpp)
{
auto process_f = [dpp](const bucket_instance_ls& lst,
Formatter *formatter,
- rgw::sal::Store* store){
+ rgw::sal::Driver* driver){
for (const auto &binfo: lst) {
std::unique_ptr<rgw::sal::Bucket> bucket;
- store->get_bucket(nullptr, binfo, &bucket);
+ driver->get_bucket(nullptr, binfo, &bucket);
int ret = bucket->purge_instance(dpp);
if (ret == 0){
auto md_key = "bucket.instance:" + binfo.bucket.get_key();
- ret = store->meta_remove(dpp, md_key, null_yield);
+ ret = driver->meta_remove(dpp, md_key, null_yield);
}
formatter->open_object_section("delete_status");
formatter->dump_string("bucket_instance", binfo.bucket.get_key());
}
};
- return process_stale_instances(store, op_state, flusher, dpp, process_f);
+ return process_stale_instances(driver, op_state, flusher, dpp, process_f);
}
-static int fix_single_bucket_lc(rgw::sal::Store* store,
+static int fix_single_bucket_lc(rgw::sal::Driver* driver,
const std::string& tenant_name,
const std::string& bucket_name,
const DoutPrefixProvider *dpp)
{
std::unique_ptr<rgw::sal::Bucket> bucket;
- int ret = store->get_bucket(dpp, nullptr, tenant_name, bucket_name, &bucket, null_yield);
+ int ret = driver->get_bucket(dpp, nullptr, tenant_name, bucket_name, &bucket, null_yield);
if (ret < 0) {
// TODO: Should we handle the case where the bucket could've been removed between
// listing and fetching?
return ret;
}
- return rgw::lc::fix_lc_shard_entry(dpp, store, store->get_rgwlc()->get_lc(), bucket.get());
+ return rgw::lc::fix_lc_shard_entry(dpp, driver, driver->get_rgwlc()->get_lc(), bucket.get());
}
static void format_lc_status(Formatter* formatter,
formatter->close_section(); // bucket_entry
}
-static void process_single_lc_entry(rgw::sal::Store* store,
+static void process_single_lc_entry(rgw::sal::Driver* driver,
Formatter *formatter,
const std::string& tenant_name,
const std::string& bucket_name,
const DoutPrefixProvider *dpp)
{
- int ret = fix_single_bucket_lc(store, tenant_name, bucket_name, dpp);
+ int ret = fix_single_bucket_lc(driver, tenant_name, bucket_name, dpp);
format_lc_status(formatter, tenant_name, bucket_name, -ret);
}
-int RGWBucketAdminOp::fix_lc_shards(rgw::sal::Store* store,
+int RGWBucketAdminOp::fix_lc_shards(rgw::sal::Driver* driver,
RGWBucketAdminOpState& op_state,
RGWFormatterFlusher& flusher,
const DoutPrefixProvider *dpp)
if (const std::string& bucket_name = op_state.get_bucket_name();
! bucket_name.empty()) {
const rgw_user user_id = op_state.get_user_id();
- process_single_lc_entry(store, formatter, user_id.tenant, bucket_name, dpp);
+ process_single_lc_entry(driver, formatter, user_id.tenant, bucket_name, dpp);
formatter->flush(cout);
} else {
- int ret = store->meta_list_keys_init(dpp, "bucket", marker, &handle);
+ int ret = driver->meta_list_keys_init(dpp, "bucket", marker, &handle);
if (ret < 0) {
std::cerr << "ERROR: can't get key: " << cpp_strerror(-ret) << std::endl;
return ret;
{
formatter->open_array_section("lc_fix_status");
- auto sg = make_scope_guard([&store, &handle, &formatter](){
- store->meta_list_keys_complete(handle);
+ auto sg = make_scope_guard([&driver, &handle, &formatter](){
+ driver->meta_list_keys_complete(handle);
formatter->close_section(); // lc_fix_status
formatter->flush(cout);
});
do {
list<std::string> keys;
- ret = store->meta_list_keys_next(dpp, handle, default_max_keys, keys, &truncated);
+ ret = driver->meta_list_keys_next(dpp, handle, default_max_keys, keys, &truncated);
if (ret < 0 && ret != -ENOENT) {
std::cerr << "ERROR: lists_keys_next(): " << cpp_strerror(-ret) << std::endl;
return ret;
} if (ret != -ENOENT) {
for (const auto &key:keys) {
auto [tenant_name, bucket_name] = split_tenant(key);
- process_single_lc_entry(store, formatter, tenant_name, bucket_name, dpp);
+ process_single_lc_entry(driver, formatter, tenant_name, bucket_name, dpp);
}
}
formatter->flush(cout); // regularly flush every 1k entries
}
static bool has_object_expired(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
rgw::sal::Bucket* bucket,
const rgw_obj_key& key, utime_t& delete_at)
{
std::unique_ptr<rgw::sal::Object> obj = bucket->get_object(key);
bufferlist delete_at_bl;
- int ret = rgw_object_get_attr(dpp, store, obj.get(), RGW_ATTR_DELETE_AT, delete_at_bl, null_yield);
+ int ret = rgw_object_get_attr(dpp, driver, obj.get(), RGW_ATTR_DELETE_AT, delete_at_bl, null_yield);
if (ret < 0) {
return false; // no delete at attr, proceed
}
}
static int fix_bucket_obj_expiry(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
rgw::sal::Bucket* bucket,
RGWFormatterFlusher& flusher, bool dry_run)
{
for (const auto& obj : results.objs) {
rgw_obj_key key(obj.key);
utime_t delete_at;
- if (has_object_expired(dpp, store, bucket, key, delete_at)) {
+ if (has_object_expired(dpp, driver, bucket, key, delete_at)) {
formatter->open_object_section("object_status");
formatter->dump_string("object", key.name);
formatter->dump_stream("delete_at") << delete_at;
if (!dry_run) {
- ret = rgw_remove_object(dpp, store, bucket, key);
+ ret = rgw_remove_object(dpp, driver, bucket, key);
formatter->dump_int("status", ret);
}
return 0;
}
-int RGWBucketAdminOp::fix_obj_expiry(rgw::sal::Store* store,
+int RGWBucketAdminOp::fix_obj_expiry(rgw::sal::Driver* driver,
RGWBucketAdminOpState& op_state,
RGWFormatterFlusher& flusher,
const DoutPrefixProvider *dpp, bool dry_run)
{
RGWBucket admin_bucket;
- int ret = admin_bucket.init(store, op_state, null_yield, dpp);
+ int ret = admin_bucket.init(driver, op_state, null_yield, dpp);
if (ret < 0) {
ldpp_dout(dpp, -1) << "failed to initialize bucket" << dendl;
return ret;
}
std::unique_ptr<rgw::sal::Bucket> bucket;
- ret = store->get_bucket(nullptr, admin_bucket.get_bucket_info(), &bucket);
+ ret = driver->get_bucket(nullptr, admin_bucket.get_bucket_info(), &bucket);
if (ret < 0) {
return ret;
}
- return fix_bucket_obj_expiry(dpp, store, bucket.get(), flusher, dry_run);
+ return fix_bucket_obj_expiry(dpp, driver, bucket.get(), flusher, dry_run);
}
void RGWBucketCompleteInfo::dump(Formatter *f) const {
RGWSI_BucketIndex *bi{nullptr};
} svc;
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
- RGWBucketInstanceMetadataHandler(rgw::sal::Store* store)
- : store(store) {}
+ RGWBucketInstanceMetadataHandler(rgw::sal::Driver* driver)
+ : driver(driver) {}
void init(RGWSI_Zone *zone_svc,
RGWSI_Bucket *bucket_svc,
/* update lifecyle policy */
{
std::unique_ptr<rgw::sal::Bucket> bucket;
- ret = bihandler->store->get_bucket(nullptr, bci.info, &bucket);
+ ret = bihandler->driver->get_bucket(nullptr, bci.info, &bucket);
if (ret < 0) {
ldpp_dout(dpp, 0) << __func__ << " failed to get_bucket(...) for "
<< bci.info.bucket.name
return ret;
}
- auto lc = bihandler->store->get_rgwlc();
+ auto lc = bihandler->driver->get_rgwlc();
auto lc_it = bci.attrs.find(RGW_ATTR_LC);
if (lc_it != bci.attrs.end()) {
class RGWArchiveBucketInstanceMetadataHandler : public RGWBucketInstanceMetadataHandler {
public:
- RGWArchiveBucketInstanceMetadataHandler(rgw::sal::Store* store)
- : RGWBucketInstanceMetadataHandler(store) {}
+ RGWArchiveBucketInstanceMetadataHandler(rgw::sal::Driver* driver)
+ : RGWBucketInstanceMetadataHandler(driver) {}
// N.B. replication of lifecycle policy relies on logic in RGWBucketInstanceMetadataHandler::do_put(...), override with caution
nullptr, &attrs,
y, dpp);
if (ret < 0 && ret != -ENOENT) {
- ldpp_dout(dpp, 0) << "ERROR: store->get_bucket_entrypoint_info() returned: "
+ ldpp_dout(dpp, 0) << "ERROR: read_bucket_entrypoint_info() returned: "
<< cpp_strerror(-ret) << dendl;
}
pattrs = &attrs;
}
// TODO: remove RGWRados dependency for bucket listing
-int RGWBucketCtl::chown(rgw::sal::Store* store, rgw::sal::Bucket* bucket,
+int RGWBucketCtl::chown(rgw::sal::Driver* driver, rgw::sal::Bucket* bucket,
const rgw_user& user_id, const std::string& display_name,
const std::string& marker, optional_yield y, const DoutPrefixProvider *dpp)
{
//Loop through objects and update object acls to point to bucket owner
do {
- RGWObjectCtx obj_ctx(store);
+ RGWObjectCtx obj_ctx(driver);
results.objs.clear();
int ret = bucket->list(dpp, params, max_entries, results, y);
if (ret < 0) {
continue;
} else {
bufferlist& bl = aiter->second;
- RGWAccessControlPolicy policy(store->ctx());
+ RGWAccessControlPolicy policy(driver->ctx());
ACLOwner owner;
try {
decode(policy, bl);
return new RGWBucketMetadataHandler();
}
-RGWBucketInstanceMetadataHandlerBase* RGWBucketInstanceMetaHandlerAllocator::alloc(rgw::sal::Store* store)
+RGWBucketInstanceMetadataHandlerBase* RGWBucketInstanceMetaHandlerAllocator::alloc(rgw::sal::Driver* driver)
{
- return new RGWBucketInstanceMetadataHandler(store);
+ return new RGWBucketInstanceMetadataHandler(driver);
}
RGWBucketMetadataHandlerBase* RGWArchiveBucketMetaHandlerAllocator::alloc()
return new RGWArchiveBucketMetadataHandler();
}
-RGWBucketInstanceMetadataHandlerBase* RGWArchiveBucketInstanceMetaHandlerAllocator::alloc(rgw::sal::Store* store)
+RGWBucketInstanceMetadataHandlerBase* RGWArchiveBucketInstanceMetaHandlerAllocator::alloc(rgw::sal::Driver* driver)
{
- return new RGWArchiveBucketInstanceMetadataHandler(store);
+ return new RGWArchiveBucketInstanceMetadataHandler(driver);
}
};
/**
- * Store a list of the user's buckets, with associated functinos.
+ * Store a list of the user's buckets, with associated functions.
*/
class RGWUserBuckets {
std::map<std::string, RGWBucketEnt> buckets;
class RGWBucketInstanceMetaHandlerAllocator {
public:
- static RGWBucketInstanceMetadataHandlerBase *alloc(rgw::sal::Store* store);
+ static RGWBucketInstanceMetadataHandlerBase *alloc(rgw::sal::Driver* driver);
};
class RGWArchiveBucketMetaHandlerAllocator {
class RGWArchiveBucketInstanceMetaHandlerAllocator {
public:
- static RGWBucketInstanceMetadataHandlerBase *alloc(rgw::sal::Store* store);
+ static RGWBucketInstanceMetadataHandlerBase *alloc(rgw::sal::Driver* driver);
};
-extern int rgw_remove_object(const DoutPrefixProvider *dpp, rgw::sal::Store* store, rgw::sal::Bucket* bucket, rgw_obj_key& key);
+extern int rgw_remove_object(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, rgw::sal::Bucket* bucket, rgw_obj_key& key);
-extern int rgw_object_get_attr(rgw::sal::Store* store, rgw::sal::Object* obj,
+extern int rgw_object_get_attr(rgw::sal::Driver* driver, rgw::sal::Object* obj,
const char* attr_name, bufferlist& out_bl,
optional_yield y);
-extern void check_bad_user_bucket_mapping(rgw::sal::Store* store, rgw::sal::User* user, bool fix, optional_yield y, const DoutPrefixProvider *dpp);
+extern void check_bad_user_bucket_mapping(rgw::sal::Driver* driver, rgw::sal::User* user, bool fix, optional_yield y, const DoutPrefixProvider *dpp);
struct RGWBucketAdminOpState {
rgw_user uid;
*/
class RGWBucket {
RGWUserBuckets buckets;
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
RGWAccessHandle handle;
std::unique_ptr<rgw::sal::Bucket> bucket;
RGWObjVersionTracker ep_objv; // entrypoint object version
public:
- RGWBucket() : store(NULL), handle(NULL), failure(false) {}
- int init(rgw::sal::Store* storage, RGWBucketAdminOpState& op_state, optional_yield y,
+ RGWBucket() : driver(NULL), handle(NULL), failure(false) {}
+ int init(rgw::sal::Driver* storage, RGWBucketAdminOpState& op_state, optional_yield y,
const DoutPrefixProvider *dpp, std::string *err_msg = NULL);
int check_bad_index_multipart(RGWBucketAdminOpState& op_state,
class RGWBucketAdminOp {
public:
- static int get_policy(rgw::sal::Store* store, RGWBucketAdminOpState& op_state,
+ static int get_policy(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state,
RGWFormatterFlusher& flusher, const DoutPrefixProvider *dpp);
- static int get_policy(rgw::sal::Store* store, RGWBucketAdminOpState& op_state,
+ static int get_policy(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state,
RGWAccessControlPolicy& policy, const DoutPrefixProvider *dpp);
- static int dump_s3_policy(rgw::sal::Store* store, RGWBucketAdminOpState& op_state,
+ static int dump_s3_policy(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state,
std::ostream& os, const DoutPrefixProvider *dpp);
- static int unlink(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp);
- static int link(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, std::string *err_msg = NULL);
- static int chown(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, const std::string& marker, const DoutPrefixProvider *dpp, std::string *err_msg = NULL);
+ static int unlink(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp);
+ static int link(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, std::string *err_msg = NULL);
+ static int chown(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const std::string& marker, const DoutPrefixProvider *dpp, std::string *err_msg = NULL);
- static int check_index(rgw::sal::Store* store, RGWBucketAdminOpState& op_state,
+ static int check_index(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state,
RGWFormatterFlusher& flusher, optional_yield y, const DoutPrefixProvider *dpp);
- static int remove_bucket(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, optional_yield y,
+ static int remove_bucket(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, optional_yield y,
const DoutPrefixProvider *dpp, bool bypass_gc = false, bool keep_index_consistent = true);
- static int remove_object(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp);
- static int info(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y, const DoutPrefixProvider *dpp);
- static int limit_check(rgw::sal::Store* store, RGWBucketAdminOpState& op_state,
+ static int remove_object(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp);
+ static int info(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y, const DoutPrefixProvider *dpp);
+ static int limit_check(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state,
const std::list<std::string>& user_ids,
RGWFormatterFlusher& flusher, optional_yield y,
const DoutPrefixProvider *dpp,
bool warnings_only = false);
- static int set_quota(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp);
+ static int set_quota(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp);
- static int list_stale_instances(rgw::sal::Store* store, RGWBucketAdminOpState& op_state,
+ static int list_stale_instances(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state,
RGWFormatterFlusher& flusher, const DoutPrefixProvider *dpp);
- static int clear_stale_instances(rgw::sal::Store* store, RGWBucketAdminOpState& op_state,
+ static int clear_stale_instances(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state,
RGWFormatterFlusher& flusher, const DoutPrefixProvider *dpp);
- static int fix_lc_shards(rgw::sal::Store* store, RGWBucketAdminOpState& op_state,
+ static int fix_lc_shards(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state,
RGWFormatterFlusher& flusher, const DoutPrefixProvider *dpp);
- static int fix_obj_expiry(rgw::sal::Store* store, RGWBucketAdminOpState& op_state,
+ static int fix_obj_expiry(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state,
RGWFormatterFlusher& flusher, const DoutPrefixProvider *dpp, bool dry_run = false);
- static int sync_bucket(rgw::sal::Store* store, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, std::string *err_msg = NULL);
+ static int sync_bucket(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, std::string *err_msg = NULL);
};
struct rgw_ep_info {
const DoutPrefixProvider *dpp,
bool update_entrypoint = true);
- int chown(rgw::sal::Store* store, rgw::sal::Bucket* bucket,
+ int chown(rgw::sal::Driver* driver, rgw::sal::Bucket* bucket,
const rgw_user& user_id, const std::string& display_name,
const std::string& marker, optional_yield y, const DoutPrefixProvider *dpp);
};
-bool rgw_find_bucket_by_id(const DoutPrefixProvider *dpp, CephContext *cct, rgw::sal::Store* store, const std::string& marker,
+bool rgw_find_bucket_by_id(const DoutPrefixProvider *dpp, CephContext *cct, rgw::sal::Driver* driver, const std::string& marker,
const std::string& bucket_id, rgw_bucket* bucket_out);
string error_oid = RGWDataSyncStatusManager::shard_obj_name(sc->source_zone, shard_id) + ".retry";
auto& shard_keys = omapkeys[shard_id];
shard_keys = std::make_shared<RGWRadosGetOmapKeysCR::Result>();
- spawn(new RGWRadosGetOmapKeysCR(env->store, rgw_raw_obj(env->svc->zone->get_zone_params().log_pool, error_oid),
+ spawn(new RGWRadosGetOmapKeysCR(env->driver, rgw_raw_obj(env->svc->zone->get_zone_params().log_pool, error_oid),
marker, max_entries, shard_keys), false);
++shard_id;
static constexpr uint32_t lock_duration = 30;
RGWDataSyncCtx *sc;
RGWDataSyncEnv *sync_env;
- rgw::sal::RadosStore* store; // RGWDataSyncEnv also has a pointer to store
+ rgw::sal::RadosStore* driver; // RGWDataSyncEnv also has a pointer to driver
const rgw_pool& pool;
const uint32_t num_shards;
uint64_t instance_id,
RGWSyncTraceNodeRef& _tn_parent,
rgw_data_sync_status *status)
- : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env), store(sync_env->store),
+ : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env), driver(sync_env->driver),
pool(sync_env->svc->zone->get_zone_params().log_pool),
num_shards(num_shards), status(status),
tn(sync_env->sync_tracer->add_node(_tn_parent, "init_data_sync_status")) {
int ret;
reenter(this) {
using LockCR = RGWSimpleRadosLockCR;
- yield call(new LockCR(sync_env->async_rados, store,
+ yield call(new LockCR(sync_env->async_rados, driver,
rgw_raw_obj{pool, sync_status_oid},
lock_name, cookie, lock_duration));
if (retcode < 0) {
}
/* take lock again, we just recreated the object */
- yield call(new LockCR(sync_env->async_rados, store,
+ yield call(new LockCR(sync_env->async_rados, driver,
rgw_raw_obj{pool, sync_status_oid},
lock_name, cookie, lock_duration));
if (retcode < 0) {
tn->log(0, SSTR("ERROR: failed to write sync status info with " << retcode));
return set_cr_error(retcode);
}
- yield call(new RGWSimpleRadosUnlockCR(sync_env->async_rados, store,
+ yield call(new RGWSimpleRadosUnlockCR(sync_env->async_rados, driver,
rgw_raw_obj{pool, sync_status_oid},
lock_name, cookie));
return set_cr_done();
};
RGWRemoteDataLog::RGWRemoteDataLog(const DoutPrefixProvider *dpp,
- rgw::sal::RadosStore* store,
+ rgw::sal::RadosStore* driver,
RGWAsyncRadosProcessor *async_rados)
- : RGWCoroutinesManager(store->ctx(), store->getRados()->get_cr_registry()),
- dpp(dpp), store(store),
- cct(store->ctx()), cr_registry(store->getRados()->get_cr_registry()),
+ : RGWCoroutinesManager(driver->ctx(), driver->getRados()->get_cr_registry()),
+ dpp(dpp), driver(driver),
+ cct(driver->ctx()), cr_registry(driver->getRados()->get_cr_registry()),
async_rados(async_rados),
- http_manager(store->ctx(), completion_mgr),
+ http_manager(driver->ctx(), completion_mgr),
data_sync_cr(NULL),
initialized(false)
{
RGWSyncTraceManager *_sync_tracer, RGWSyncModuleInstanceRef& _sync_module,
PerfCounters* counters)
{
- sync_env.init(dpp, cct, store, store->svc(), async_rados, &http_manager, _error_logger,
+ sync_env.init(dpp, cct, driver, driver->svc(), async_rados, &http_manager, _error_logger,
_sync_tracer, _sync_module, counters);
sc.init(&sync_env, _conn, _source_zone);
RGWDataSyncCtx *sc;
RGWDataSyncEnv *sync_env = sc->env;
- rgw::sal::RadosStore* store = sync_env->store;
+ rgw::sal::RadosStore* driver = sync_env->driver;
rgw_data_sync_status *sync_status;
int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
entries_index = std::make_unique<RGWShardedOmapCRManager>(
- sync_env->async_rados, store, this,
+ sync_env->async_rados, driver, this,
cct->_conf->rgw_data_log_num_shards,
sync_env->svc->zone->get_zone_params().log_pool,
oid_prefix);
if (ret < 0) {
yield call(sync_env->error_logger->log_error_cr(
dpp, sc->conn->get_remote_id(), "data.init", "",
- -ret, string("failed to store sync status: ") +
+ -ret, string("failed to store sync status: ") +
cpp_strerror(-ret)));
req_ret = ret;
}
}
if (complete->timestamp != ceph::real_time{}) {
tn->log(10, SSTR("writing " << *complete << " to error repo for retry"));
- yield call(rgw::error_repo::write_cr(sync_env->store->svc()->rados, error_repo,
+ yield call(rgw::error_repo::write_cr(sync_env->driver->svc()->rados, error_repo,
rgw::error_repo::encode_key(complete->bs, complete->gen),
complete->timestamp));
if (retcode < 0) {
}
}
} else if (complete->retry) {
- yield call(rgw::error_repo::remove_cr(sync_env->store->svc()->rados, error_repo,
+ yield call(rgw::error_repo::remove_cr(sync_env->driver->svc()->rados, error_repo,
rgw::error_repo::encode_key(complete->bs, complete->gen),
complete->timestamp));
if (retcode < 0) {
}
};
-rgw_raw_obj datalog_oid_for_error_repo(RGWDataSyncCtx *sc, rgw::sal::RadosStore* store,
+rgw_raw_obj datalog_oid_for_error_repo(RGWDataSyncCtx *sc, rgw::sal::RadosStore* driver,
rgw_pool& pool, rgw_bucket_shard& bs) {
- int datalog_shard = store->svc()->datalog_rados->choose_oid(bs);
+ int datalog_shard = driver->svc()->datalog_rados->choose_oid(bs);
string oid = RGWDataSyncStatusManager::shard_obj_name(sc->source_zone, datalog_shard);
return rgw_raw_obj(pool, oid + ".retry");
}
for (sid = 0; sid < each->num_shards; sid++) {
bs.bucket = source_bs.bucket;
bs.shard_id = sid;
- error_repo = datalog_oid_for_error_repo(sc, sync_env->store, pool, source_bs);
+ error_repo = datalog_oid_for_error_repo(sc, sync_env->driver, pool, source_bs);
tn->log(10, SSTR("writing shard_id " << sid << " of gen " << each->gen << " to error repo for retry"));
- yield_spawn_window(rgw::error_repo::write_cr(sync_env->store->svc()->rados, error_repo,
+ yield_spawn_window(rgw::error_repo::write_cr(sync_env->driver->svc()->rados, error_repo,
rgw::error_repo::encode_key(bs, each->gen),
timestamp), cct->_conf->rgw_data_sync_spawn_window,
[&](uint64_t stack_id, int ret) {
});
// once everything succeeds, remove the full sync obligation from the error repo
- yield call(rgw::error_repo::remove_cr(sync_env->store->svc()->rados, error_repo,
+ yield call(rgw::error_repo::remove_cr(sync_env->driver->svc()->rados, error_repo,
error_marker, timestamp));
return set_cr_done();
}
lease_cr.get(), tn);
}
-static ceph::real_time timestamp_for_bucket_shard(rgw::sal::RadosStore* store,
+static ceph::real_time timestamp_for_bucket_shard(rgw::sal::RadosStore* driver,
const rgw_data_sync_status& sync_status,
const rgw_bucket_shard& bs) {
- int datalog_shard = store->svc()->datalog_rados->choose_oid(bs);
+ int datalog_shard = driver->svc()->datalog_rados->choose_oid(bs);
auto status = sync_status.sync_markers.find(datalog_shard);
if (status == sync_status.sync_markers.end()) {
return ceph::real_clock::zero();
if (retcode < 0) {
tn->log(10, SSTR("full sync: failed to read remote bucket info. Writing "
<< source_bs.shard_id << " to error repo for retry"));
- yield call(rgw::error_repo::write_cr(sync_env->store->svc()->rados, error_repo,
+ yield call(rgw::error_repo::write_cr(sync_env->driver->svc()->rados, error_repo,
rgw::error_repo::encode_key(source_bs, std::nullopt),
timestamp));
if (retcode < 0) {
for (sid = 0; sid < each->num_shards; sid++) {
source_bs.shard_id = sid;
// use the error repo and sync status timestamp from the datalog shard corresponding to source_bs
- error_repo = datalog_oid_for_error_repo(sc, sync_env->store, pool, source_bs);
- timestamp = timestamp_for_bucket_shard(sync_env->store, sync_status, source_bs);
+ error_repo = datalog_oid_for_error_repo(sc, sync_env->driver, pool, source_bs);
+ timestamp = timestamp_for_bucket_shard(sync_env->driver, sync_status, source_bs);
if (retcode < 0) {
tn->log(10, SSTR("Write " << source_bs.shard_id << " to error repo for retry"));
- yield_spawn_window(rgw::error_repo::write_cr(sync_env->store->svc()->rados, error_repo,
+ yield_spawn_window(rgw::error_repo::write_cr(sync_env->driver->svc()->rados, error_repo,
rgw::error_repo::encode_key(source_bs, each->gen),
timestamp), cct->_conf->rgw_data_sync_spawn_window, std::nullopt);
} else {
return set_cr_error(-ECANCELED);
}
omapvals = std::make_shared<RGWRadosGetOmapValsCR::Result>();
- yield call(new RGWRadosGetOmapValsCR(sc->env->store,
+ yield call(new RGWRadosGetOmapValsCR(sc->env->driver,
rgw_raw_obj(pool, oid),
sync_marker.marker,
OMAP_GET_MAX_ENTRIES, omapvals));
}
// clean up full sync index, ignoring errors
- yield call(new RGWRadosRemoveCR(sc->env->store, {pool, oid}));
+ yield call(new RGWRadosRemoveCR(sc->env->driver, {pool, oid}));
// transition to incremental sync
return set_cr_done();
if (error_retry_time <= ceph::coarse_real_clock::now()) {
/* process bucket shards that previously failed */
omapvals = std::make_shared<RGWRadosGetOmapValsCR::Result>();
- yield call(new RGWRadosGetOmapValsCR(sc->env->store, error_repo,
+ yield call(new RGWRadosGetOmapValsCR(sc->env->driver, error_repo,
error_marker, max_error_entries,
omapvals));
error_entries = std::move(omapvals->entries);
}
if (retcode < 0) {
tn->log(1, SSTR("failed to parse bucket shard: " << error_marker));
- spawn(rgw::error_repo::remove_cr(sc->env->store->svc()->rados,
+ spawn(rgw::error_repo::remove_cr(sc->env->driver->svc()->rados,
error_repo, error_marker,
entry_timestamp),
false);
if (lease_cr) {
lease_cr->abort();
}
- auto store = sync_env->store;
- lease_cr.reset(new RGWContinuousLeaseCR(sync_env->async_rados, store,
+ auto driver = sync_env->driver;
+ lease_cr.reset(new RGWContinuousLeaseCR(sync_env->async_rados, driver,
rgw_raw_obj(pool, status_oid),
lock_name, lock_duration, this));
lease_stack.reset(spawn(lease_cr.get(), false));
uid(handler->uid),
info(handler->info) {}
int operate() override {
- auto user_ctl = sync_env->store->getRados()->ctl.user;
+ auto user_ctl = sync_env->driver->getRados()->ctl.user;
ret = user_ctl->get_info_by_uid(sync_env->dpp, uid, &info->user_info, null_yield);
if (ret < 0) {
* the correct policy configuration. This can happen if there are multiple
* policy rules, and some depend on the object tagging */
yield call(new RGWStatRemoteObjCR(sync_env->async_rados,
- sync_env->store,
+ sync_env->driver,
sc->source_zone,
sync_pipe.info.source_bs.bucket,
key,
std::move(dest_params),
need_retry);
- call(new RGWFetchRemoteObjCR(sync_env->async_rados, sync_env->store, sc->source_zone,
+ call(new RGWFetchRemoteObjCR(sync_env->async_rados, sync_env->driver, sc->source_zone,
nullopt,
sync_pipe.info.source_bs.bucket,
std::nullopt, sync_pipe.dest_bucket_info,
real_time& mtime, bool versioned, uint64_t versioned_epoch, rgw_zone_set *zones_trace)
{
auto sync_env = sc->env;
- return new RGWRemoveObjCR(sync_env->dpp, sync_env->async_rados, sync_env->store, sc->source_zone,
+ return new RGWRemoveObjCR(sync_env->dpp, sync_env->async_rados, sync_env->driver, sc->source_zone,
sync_pipe.dest_bucket_info, key, versioned, versioned_epoch,
NULL, NULL, false, &mtime, zones_trace);
}
rgw_bucket_entry_owner& owner, bool versioned, uint64_t versioned_epoch, rgw_zone_set *zones_trace)
{
auto sync_env = sc->env;
- return new RGWRemoveObjCR(sync_env->dpp, sync_env->async_rados, sync_env->store, sc->source_zone,
+ return new RGWRemoveObjCR(sync_env->dpp, sync_env->async_rados, sync_env->driver, sc->source_zone,
sync_pipe.dest_bucket_info, key, versioned, versioned_epoch,
&owner.id, &owner.display_name, true, &mtime, zones_trace);
}
RGWMetadataHandler *alloc_bucket_meta_handler() override {
return RGWArchiveBucketMetaHandlerAllocator::alloc();
}
- RGWBucketInstanceMetadataHandlerBase *alloc_bucket_instance_meta_handler(rgw::sal::Store* store) override {
- return RGWArchiveBucketInstanceMetaHandlerAllocator::alloc(store);
+ RGWBucketInstanceMetadataHandlerBase *alloc_bucket_instance_meta_handler(rgw::sal::Driver* driver) override {
+ return RGWArchiveBucketInstanceMetaHandlerAllocator::alloc(driver);
}
};
(sync_pipe.dest_bucket_info.flags & BUCKET_VERSIONS_SUSPENDED)) {
ldout(sc->cct, 0) << "SYNC_ARCHIVE: sync_object: enabling object versioning for archive bucket" << dendl;
sync_pipe.dest_bucket_info.flags = (sync_pipe.dest_bucket_info.flags & ~BUCKET_VERSIONS_SUSPENDED) | BUCKET_VERSIONED;
- int op_ret = sync_env->store->getRados()->put_bucket_instance_info(sync_pipe.dest_bucket_info, false, real_time(), NULL, sync_env->dpp);
+ int op_ret = sync_env->driver->getRados()->put_bucket_instance_info(sync_pipe.dest_bucket_info, false, real_time(), NULL, sync_env->dpp);
if (op_ret < 0) {
ldpp_dout(sync_env->dpp, 0) << "SYNC_ARCHIVE: sync_object: error versioning archive bucket" << dendl;
return NULL;
versioned_epoch = 0;
dest_key = key;
if (key.instance.empty()) {
- sync_env->store->getRados()->gen_rand_obj_instance_name(&(*dest_key));
+ sync_env->driver->getRados()->gen_rand_obj_instance_name(&(*dest_key));
}
}
ldout(sc->cct, 0) << "SYNC_ARCHIVE: create_delete_marker: b=" << sync_pipe.info.source_bs.bucket << " k=" << key << " mtime=" << mtime
<< " versioned=" << versioned << " versioned_epoch=" << versioned_epoch << dendl;
auto sync_env = sc->env;
- return new RGWRemoveObjCR(sync_env->dpp, sync_env->async_rados, sync_env->store, sc->source_zone,
+ return new RGWRemoveObjCR(sync_env->dpp, sync_env->async_rados, sync_env->driver, sc->source_zone,
sync_pipe.dest_bucket_info, key, versioned, versioned_epoch,
&owner.id, &owner.display_name, true, &mtime, zones_trace);
}
CephContext *RGWDataSyncStatusManager::get_cct() const
{
- return store->ctx();
+ return driver->ctx();
}
int RGWDataSyncStatusManager::init(const DoutPrefixProvider *dpp)
{
RGWZone *zone_def;
- if (!(zone_def = store->svc()->zone->find_zone(source_zone))) {
+ if (!(zone_def = driver->svc()->zone->find_zone(source_zone))) {
ldpp_dout(this, 0) << "ERROR: failed to find zone config info for zone=" << source_zone << dendl;
return -EIO;
}
- if (!store->svc()->sync_modules->get_manager()->supports_data_export(zone_def->tier_type)) {
+ if (!driver->svc()->sync_modules->get_manager()->supports_data_export(zone_def->tier_type)) {
return -ENOTSUP;
}
- const RGWZoneParams& zone_params = store->svc()->zone->get_zone_params();
+ const RGWZoneParams& zone_params = driver->svc()->zone->get_zone_params();
if (sync_module == nullptr) {
- sync_module = store->get_sync_module();
+ sync_module = driver->get_sync_module();
}
- conn = store->svc()->zone->get_zone_conn(source_zone);
+ conn = driver->svc()->zone->get_zone_conn(source_zone);
if (!conn) {
ldpp_dout(this, 0) << "connection object to zone " << source_zone << " does not exist" << dendl;
return -EINVAL;
}
- error_logger = new RGWSyncErrorLogger(store, RGW_SYNC_ERROR_LOG_SHARD_PREFIX, ERROR_LOGGER_SHARDS);
+ error_logger = new RGWSyncErrorLogger(driver, RGW_SYNC_ERROR_LOG_SHARD_PREFIX, ERROR_LOGGER_SHARDS);
- int r = source_log.init(source_zone, conn, error_logger, store->getRados()->get_sync_tracer(),
+ int r = source_log.init(source_zone, conn, error_logger, driver->getRados()->get_sync_tracer(),
sync_module, counters);
if (r < 0) {
ldpp_dout(this, 0) << "ERROR: failed to init remote log, r=" << r << dendl;
int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
- yield call(new RGWRadosRemoveCR(sync_env->store, obj, &objv));
+ yield call(new RGWRadosRemoveCR(sync_env->driver, obj, &objv));
if (retcode < 0 && retcode != -ENOENT) {
ldout(cct, 20) << "ERROR: failed to remove bucket shard status for: " << sync_pair <<
". with error: " << retcode << dendl;
class RGWReadRecoveringBucketShardsCoroutine : public RGWCoroutine {
RGWDataSyncCtx *sc;
RGWDataSyncEnv *sync_env;
- rgw::sal::RadosStore* store;
+ rgw::sal::RadosStore* driver;
const int shard_id;
int max_entries;
RGWReadRecoveringBucketShardsCoroutine(RGWDataSyncCtx *_sc, const int _shard_id,
set<string>& _recovering_buckets, const int _max_entries)
: RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env),
- store(sync_env->store), shard_id(_shard_id), max_entries(_max_entries),
+ driver(sync_env->driver), shard_id(_shard_id), max_entries(_max_entries),
recovering_buckets(_recovering_buckets), max_omap_entries(OMAP_READ_MAX_ENTRIES)
{
error_oid = RGWDataSyncStatusManager::shard_obj_name(sc->source_zone, shard_id) + ".retry";
count = 0;
do {
omapkeys = std::make_shared<RGWRadosGetOmapKeysCR::Result>();
- yield call(new RGWRadosGetOmapKeysCR(store, rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, error_oid),
+ yield call(new RGWRadosGetOmapKeysCR(driver, rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, error_oid),
marker, max_omap_entries, omapkeys));
if (retcode == -ENOENT) {
class RGWReadPendingBucketShardsCoroutine : public RGWCoroutine {
RGWDataSyncCtx *sc;
RGWDataSyncEnv *sync_env;
- rgw::sal::RadosStore* store;
+ rgw::sal::RadosStore* driver;
const int shard_id;
int max_entries;
set<string>& _pending_buckets,
rgw_data_sync_marker* _sync_marker, const int _max_entries)
: RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env),
- store(sync_env->store), shard_id(_shard_id), max_entries(_max_entries),
+ driver(sync_env->driver), shard_id(_shard_id), max_entries(_max_entries),
pending_buckets(_pending_buckets), sync_marker(_sync_marker)
{
status_oid = RGWDataSyncStatusManager::shard_obj_name(sc->source_zone, shard_id);
int RGWRemoteDataLog::read_shard_status(const DoutPrefixProvider *dpp, int shard_id, set<string>& pending_buckets, set<string>& recovering_buckets, rgw_data_sync_marker *sync_marker, const int max_entries)
{
// cannot run concurrently with run_sync(), so run in a separate manager
- RGWCoroutinesManager crs(store->ctx(), store->getRados()->get_cr_registry());
- RGWHTTPManager http_manager(store->ctx(), crs.get_completion_mgr());
+ RGWCoroutinesManager crs(driver->ctx(), driver->getRados()->get_cr_registry());
+ RGWHTTPManager http_manager(driver->ctx(), crs.get_completion_mgr());
int ret = http_manager.start();
if (ret < 0) {
ldpp_dout(dpp, 0) << "failed in http_manager.start() ret=" << ret << dendl;
RGWDataSyncCtx sc_local = sc;
sc_local.env = &sync_env_local;
list<RGWCoroutinesStack *> stacks;
- RGWCoroutinesStack* recovering_stack = new RGWCoroutinesStack(store->ctx(), &crs);
+ RGWCoroutinesStack* recovering_stack = new RGWCoroutinesStack(driver->ctx(), &crs);
recovering_stack->call(new RGWReadRecoveringBucketShardsCoroutine(&sc_local, shard_id, recovering_buckets, max_entries));
stacks.push_back(recovering_stack);
- RGWCoroutinesStack* pending_stack = new RGWCoroutinesStack(store->ctx(), &crs);
+ RGWCoroutinesStack* pending_stack = new RGWCoroutinesStack(driver->ctx(), &crs);
pending_stack->call(new RGWReadPendingBucketShardsCoroutine(&sc_local, shard_id, pending_buckets, sync_marker, max_entries));
stacks.push_back(pending_stack);
ret = crs.run(dpp, stacks);
CephContext *RGWBucketPipeSyncStatusManager::get_cct() const
{
- return store->ctx();
+ return driver->ctx();
}
void rgw_bucket_entry_owner::decode_json(JSONObj *obj)
if (sc->env->ostr) {
RGWZone* z;
- if ((z = sc->env->store->svc()->zone->find_zone(sc->source_zone))) {
+ if ((z = sc->env->driver->svc()->zone->find_zone(sc->source_zone))) {
zone_name = z->name;
}
}
if (retcode < 0) {
return set_cr_error(retcode);
}
- call(new RGWRadosRemoveOidCR(sync_env->store, std::move(status_obj)));
+ call(new RGWRadosRemoveOidCR(sync_env->driver, std::move(status_obj)));
if (retcode < 0) {
ldpp_dout(dpp, 20) << "failed to remove shard status object: " << cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
int RGWSyncGetBucketInfoCR::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
- yield call(new RGWGetBucketInstanceInfoCR(sync_env->async_rados, sync_env->store, bucket, pbucket_info, pattrs, dpp));
+ yield call(new RGWGetBucketInstanceInfoCR(sync_env->async_rados, sync_env->driver, bucket, pbucket_info, pattrs, dpp));
if (retcode == -ENOENT) {
/* bucket instance info has not been synced in yet, fetch it now */
yield {
tn->log(10, SSTR("no local info for bucket:" << ": fetching metadata"));
string raw_key = string("bucket.instance:") + bucket.get_key();
- meta_sync_env.init(dpp, cct, sync_env->store, sync_env->svc->zone->get_master_conn(), sync_env->async_rados,
+ meta_sync_env.init(dpp, cct, sync_env->driver, sync_env->svc->zone->get_master_conn(), sync_env->async_rados,
sync_env->http_manager, sync_env->error_logger, sync_env->sync_tracer);
call(new RGWMetaSyncSingleEntryCR(&meta_sync_env, raw_key,
return set_cr_error(retcode);
}
- yield call(new RGWGetBucketInstanceInfoCR(sync_env->async_rados, sync_env->store, bucket, pbucket_info, pattrs, dpp));
+ yield call(new RGWGetBucketInstanceInfoCR(sync_env->async_rados, sync_env->driver, bucket, pbucket_info, pattrs, dpp));
}
if (retcode < 0) {
tn->log(0, SSTR("ERROR: failed to retrieve bucket info for bucket=" << bucket_str{bucket}));
reenter(this) {
for (i = 0; i < 2; ++i) {
yield call(new RGWBucketGetSyncPolicyHandlerCR(sync_env->async_rados,
- sync_env->store,
+ sync_env->driver,
get_policy_params,
policy,
dpp));
// remote indicates stopped state
tn->log(20, "remote bilog indicates that sync was stopped");
if (!bucket_lease_cr) {
- bucket_lease_cr.reset(new RGWContinuousLeaseCR(env->async_rados, env->store, status_obj,
+ bucket_lease_cr.reset(new RGWContinuousLeaseCR(env->async_rados, env->driver, status_obj,
lock_name, lock_duration, this));
yield spawn(bucket_lease_cr.get(), false);
while (!bucket_lease_cr->is_locked()) {
// if the state wasn't Incremental, take a bucket-wide lease to prevent
// different shards from duplicating the init and full sync
if (!bucket_lease_cr) {
- bucket_lease_cr.reset(new RGWContinuousLeaseCR(env->async_rados, env->store, status_obj,
+ bucket_lease_cr.reset(new RGWContinuousLeaseCR(env->async_rados, env->driver, status_obj,
lock_name, lock_duration, this));
yield spawn(bucket_lease_cr.get(), false);
while (!bucket_lease_cr->is_locked()) {
}
sync_module.reset(new RGWDefaultSyncModuleInstance());
- auto async_rados = store->svc()->rados->get_async_processor();
+ auto async_rados = driver->svc()->rados->get_async_processor();
- sync_env.init(this, store->ctx(), store,
- store->svc(), async_rados, &http_manager,
- error_logger.get(), store->getRados()->get_sync_tracer(),
+ sync_env.init(this, driver->ctx(), driver,
+ driver->svc(), async_rados, &http_manager,
+ error_logger.get(), driver->getRados()->get_sync_tracer(),
sync_module, nullptr);
sync_env.ostr = ostr;
for (auto& pipe : pipes) {
auto& szone = pipe.source.zone;
- auto conn = store->svc()->zone->get_zone_conn(szone);
+ auto conn = driver->svc()->zone->get_zone_conn(szone);
if (!conn) {
ldpp_dout(this, 0) << "connection object to zone " << szone << " does not exist" << dendl;
return -EINVAL;
}
RGWZone* z;
- if (!(z = store->svc()->zone->find_zone(szone))) {
+ if (!(z = driver->svc()->zone->find_zone(szone))) {
ldpp_dout(this, 0) << "zone " << szone << " does not exist" << dendl;
return -EINVAL;
}
tl::expected<std::unique_ptr<RGWBucketPipeSyncStatusManager>, int>
RGWBucketPipeSyncStatusManager::construct(
const DoutPrefixProvider* dpp,
- rgw::sal::RadosStore* store,
+ rgw::sal::RadosStore* driver,
std::optional<rgw_zone_id> source_zone,
std::optional<rgw_bucket> source_bucket,
const rgw_bucket& dest_bucket,
std::ostream* ostr)
{
std::unique_ptr<RGWBucketPipeSyncStatusManager> self{
- new RGWBucketPipeSyncStatusManager(store, source_zone, source_bucket,
+ new RGWBucketPipeSyncStatusManager(driver, source_zone, source_bucket,
dest_bucket)};
auto r = self->do_init(dpp, ostr);
if (r < 0) {
// practice we only do one zone at a time.
for (auto& source : sources) {
list<RGWCoroutinesStack*> stacks;
- RGWCoroutinesStack *stack = new RGWCoroutinesStack(store->ctx(), &cr_mgr);
+ RGWCoroutinesStack *stack = new RGWCoroutinesStack(driver->ctx(), &cr_mgr);
pretty_print(source.sc.env, "Initializing sync state of bucket {} with zone {}.\n",
source.info.bucket.name, source.zone_name);
stack->call(new RGWSimpleRadosWriteCR<rgw_bucket_sync_status>(
<< ret << dendl;
return tl::unexpected(ret);
}
- auto stack = new RGWCoroutinesStack(store->ctx(), &cr_mgr);
+ auto stack = new RGWCoroutinesStack(driver->ctx(), &cr_mgr);
std::vector<rgw_bucket_sync_pair_info> pairs(num_shards);
for (auto shard = 0u; shard < num_shards; ++shard) {
auto& pair = pairs[shard];
{
list<RGWCoroutinesStack *> stacks;
for (auto& source : sources) {
- auto stack = new RGWCoroutinesStack(store->ctx(), &cr_mgr);
+ auto stack = new RGWCoroutinesStack(driver->ctx(), &cr_mgr);
stack->call(new rgw::bucket_sync_run::SourceCR(
source.sc, source.info, source.dest, source.handler,
source.zone_name));
class RGWCollectBucketSyncStatusCR : public RGWShardCollectCR {
static constexpr int max_concurrent_shards = 16;
- rgw::sal::RadosStore* const store;
+ rgw::sal::RadosStore* const driver;
RGWDataSyncCtx *const sc;
RGWDataSyncEnv *const env;
const uint64_t gen;
return r;
}
public:
- RGWCollectBucketSyncStatusCR(rgw::sal::RadosStore* store, RGWDataSyncCtx *sc,
+ RGWCollectBucketSyncStatusCR(rgw::sal::RadosStore* driver, RGWDataSyncCtx *sc,
const rgw_bucket_sync_pair_info& sync_pair,
uint64_t gen,
Vector *status)
: RGWShardCollectCR(sc->cct, max_concurrent_shards),
- store(store), sc(sc), env(sc->env), gen(gen), sync_pair(sync_pair),
+ driver(driver), sc(sc), env(sc->env), gen(gen), sync_pair(sync_pair),
i(status->begin()), end(status->end())
{}
};
int rgw_read_bucket_full_sync_status(const DoutPrefixProvider *dpp,
- rgw::sal::RadosStore *store,
+ rgw::sal::RadosStore *driver,
const rgw_sync_bucket_pipe& pipe,
rgw_bucket_sync_status *status,
optional_yield y)
{
auto get_oid = RGWBucketPipeSyncStatusManager::full_status_oid;
- const rgw_raw_obj obj{store->svc()->zone->get_zone_params().log_pool,
+ const rgw_raw_obj obj{driver->svc()->zone->get_zone_params().log_pool,
get_oid(*pipe.source.zone, *pipe.source.bucket, *pipe.dest.bucket)};
- auto svc = store->svc()->sysobj;
+ auto svc = driver->svc()->sysobj;
auto sysobj = svc->get_obj(obj);
bufferlist bl;
int ret = sysobj.rop().read(dpp, &bl, y);
}
int rgw_read_bucket_inc_sync_status(const DoutPrefixProvider *dpp,
- rgw::sal::RadosStore *store,
+ rgw::sal::RadosStore *driver,
const rgw_sync_bucket_pipe& pipe,
uint64_t gen,
std::vector<rgw_bucket_shard_sync_info> *status)
RGWDataSyncEnv env;
RGWSyncModuleInstanceRef module; // null sync module
- env.init(dpp, store->ctx(), store, store->svc(), store->svc()->rados->get_async_processor(),
+ env.init(dpp, driver->ctx(), driver, driver->svc(), driver->svc()->rados->get_async_processor(),
nullptr, nullptr, nullptr, module, nullptr);
RGWDataSyncCtx sc;
sc.init(&env, nullptr, *pipe.source.zone);
- RGWCoroutinesManager crs(store->ctx(), store->getRados()->get_cr_registry());
- return crs.run(dpp, new RGWCollectBucketSyncStatusCR(store, &sc,
+ RGWCoroutinesManager crs(driver->ctx(), driver->getRados()->get_cr_registry());
+ return crs.run(dpp, new RGWCollectBucketSyncStatusCR(driver, &sc,
sync_pair,
gen,
status));
struct RGWDataSyncEnv {
const DoutPrefixProvider *dpp{nullptr};
CephContext *cct{nullptr};
- rgw::sal::RadosStore* store{nullptr};
+ rgw::sal::RadosStore* driver{nullptr};
RGWServices *svc{nullptr};
RGWAsyncRadosProcessor *async_rados{nullptr};
RGWHTTPManager *http_manager{nullptr};
RGWDataSyncEnv() {}
- void init(const DoutPrefixProvider *_dpp, CephContext *_cct, rgw::sal::RadosStore* _store, RGWServices *_svc,
+ void init(const DoutPrefixProvider *_dpp, CephContext *_cct, rgw::sal::RadosStore* _driver, RGWServices *_svc,
RGWAsyncRadosProcessor *_async_rados, RGWHTTPManager *_http_manager,
RGWSyncErrorLogger *_error_logger, RGWSyncTraceManager *_sync_tracer,
RGWSyncModuleInstanceRef& _sync_module,
PerfCounters* _counters) {
dpp = _dpp;
cct = _cct;
- store = _store;
+ driver = _driver;
svc = _svc;
async_rados = _async_rados;
http_manager = _http_manager;
class RGWRemoteDataLog : public RGWCoroutinesManager {
const DoutPrefixProvider *dpp;
- rgw::sal::RadosStore* store;
+ rgw::sal::RadosStore* driver;
CephContext *cct;
RGWCoroutinesManagerRegistry *cr_registry;
RGWAsyncRadosProcessor *async_rados;
};
class RGWDataSyncStatusManager : public DoutPrefixProvider {
- rgw::sal::RadosStore* store;
+ rgw::sal::RadosStore* driver;
rgw_zone_id source_zone;
RGWRESTConn *conn;
int num_shards;
public:
- RGWDataSyncStatusManager(rgw::sal::RadosStore* _store, RGWAsyncRadosProcessor *async_rados,
+ RGWDataSyncStatusManager(rgw::sal::RadosStore* _driver, RGWAsyncRadosProcessor *async_rados,
const rgw_zone_id& _source_zone, PerfCounters* counters)
- : store(_store), source_zone(_source_zone), conn(NULL), error_logger(NULL),
+ : driver(_driver), source_zone(_source_zone), conn(NULL), error_logger(NULL),
sync_module(nullptr), counters(counters),
- source_log(this, store, async_rados), num_shards(0) {}
- RGWDataSyncStatusManager(rgw::sal::RadosStore* _store, RGWAsyncRadosProcessor *async_rados,
+ source_log(this, driver, async_rados), num_shards(0) {}
+ RGWDataSyncStatusManager(rgw::sal::RadosStore* _driver, RGWAsyncRadosProcessor *async_rados,
const rgw_zone_id& _source_zone, PerfCounters* counters,
const RGWSyncModuleInstanceRef& _sync_module)
- : store(_store), source_zone(_source_zone), conn(NULL), error_logger(NULL),
+ : driver(_driver), source_zone(_source_zone), conn(NULL), error_logger(NULL),
sync_module(_sync_module), counters(counters),
- source_log(this, store, async_rados), num_shards(0) {}
+ source_log(this, driver, async_rados), num_shards(0) {}
~RGWDataSyncStatusManager() {
finalize();
}
optional_yield y);
class RGWBucketPipeSyncStatusManager : public DoutPrefixProvider {
- rgw::sal::RadosStore* store;
+ rgw::sal::RadosStore* driver;
RGWDataSyncEnv sync_env;
- RGWCoroutinesManager cr_mgr{store->ctx(),
- store->getRados()->get_cr_registry()};
+ RGWCoroutinesManager cr_mgr{driver->ctx(),
+ driver->getRados()->get_cr_registry()};
- RGWHTTPManager http_manager{store->ctx(), cr_mgr.get_completion_mgr()};
+ RGWHTTPManager http_manager{driver->ctx(), cr_mgr.get_completion_mgr()};
std::optional<rgw_zone_id> source_zone;
std::optional<rgw_bucket> source_bucket;
std::unique_ptr<RGWSyncErrorLogger> error_logger =
- std::make_unique<RGWSyncErrorLogger>(store, RGW_SYNC_ERROR_LOG_SHARD_PREFIX,
+ std::make_unique<RGWSyncErrorLogger>(driver, RGW_SYNC_ERROR_LOG_SHARD_PREFIX,
ERROR_LOGGER_SHARDS);
RGWSyncModuleInstanceRef sync_module;
std::vector<source> sources;
int do_init(const DoutPrefixProvider *dpp, std::ostream* ostr);
- RGWBucketPipeSyncStatusManager(rgw::sal::RadosStore* store,
+ RGWBucketPipeSyncStatusManager(rgw::sal::RadosStore* driver,
std::optional<rgw_zone_id> source_zone,
std::optional<rgw_bucket> source_bucket,
const rgw_bucket& dest_bucket)
- : store(store), source_zone(source_zone), source_bucket(source_bucket),
+ : driver(driver), source_zone(source_zone), source_bucket(source_bucket),
dest_bucket(dest_bucket) {}
int remote_info(const DoutPrefixProvider *dpp, source& s,
uint64_t* num_shards);
public:
static tl::expected<std::unique_ptr<RGWBucketPipeSyncStatusManager>, int>
- construct(const DoutPrefixProvider* dpp, rgw::sal::RadosStore* store,
+ construct(const DoutPrefixProvider* dpp, rgw::sal::RadosStore* driver,
std::optional<rgw_zone_id> source_zone,
std::optional<rgw_bucket> source_bucket,
const rgw_bucket& dest_bucket, std::ostream *ostream);
/// read the full sync status with respect to a source bucket
int rgw_read_bucket_full_sync_status(const DoutPrefixProvider *dpp,
- rgw::sal::RadosStore *store,
+ rgw::sal::RadosStore *driver,
const rgw_sync_bucket_pipe& pipe,
rgw_bucket_sync_status *status,
optional_yield y);
/// read the incremental sync status of all bucket shards from the given source zone
int rgw_read_bucket_inc_sync_status(const DoutPrefixProvider *dpp,
- rgw::sal::RadosStore *store,
+ rgw::sal::RadosStore *driver,
const rgw_sync_bucket_pipe& pipe,
uint64_t gen,
std::vector<rgw_bucket_shard_sync_info> *status);
return path;
}
-static int read_upload_status(const DoutPrefixProvider *dpp, rgw::sal::Store *store,
+static int read_upload_status(const DoutPrefixProvider *dpp, rgw::sal::Driver *driver,
const rgw_raw_obj *status_obj, rgw_lc_multipart_upload_info *status)
{
int ret = 0;
- rgw::sal::RadosStore *rados = dynamic_cast<rgw::sal::RadosStore*>(store);
+ rgw::sal::RadosStore *rados = dynamic_cast<rgw::sal::RadosStore*>(driver);
if (!rados) {
ldpp_dout(dpp, 0) << "ERROR: Not a RadosStore. Cannot be transitioned to cloud." << dendl;
return 0;
}
-static int put_upload_status(const DoutPrefixProvider *dpp, rgw::sal::Store *store,
+static int put_upload_status(const DoutPrefixProvider *dpp, rgw::sal::Driver *driver,
const rgw_raw_obj *status_obj, rgw_lc_multipart_upload_info *status)
{
int ret = 0;
- rgw::sal::RadosStore *rados = dynamic_cast<rgw::sal::RadosStore*>(store);
+ rgw::sal::RadosStore *rados = dynamic_cast<rgw::sal::RadosStore*>(driver);
if (!rados) {
ldpp_dout(dpp, 0) << "ERROR: Not a RadosStore. Cannot be transitioned to cloud." << dendl;
return ret;
}
-static int delete_upload_status(const DoutPrefixProvider *dpp, rgw::sal::Store *store,
+static int delete_upload_status(const DoutPrefixProvider *dpp, rgw::sal::Driver *driver,
const rgw_raw_obj *status_obj)
{
int ret = 0;
- rgw::sal::RadosStore *rados = dynamic_cast<rgw::sal::RadosStore*>(store);
+ rgw::sal::RadosStore *rados = dynamic_cast<rgw::sal::RadosStore*>(driver);
if (!rados) {
ldpp_dout(dpp, 0) << "ERROR: Not a RadosStore. Cannot be transitioned to cloud." << dendl;
target_obj_name += get_key_instance(tier_ctx.obj->get_key());
}
- ret = tier_ctx.store->get_bucket(nullptr, b, &dest_bucket);
+ ret = tier_ctx.driver->get_bucket(nullptr, b, &dest_bucket);
if (ret < 0) {
ldpp_dout(tier_ctx.dpp, 0) << "ERROR: failed to initialize dest_bucket - " << tier_ctx.target_bucket_name << " , reterr = " << ret << dendl;
return ret;
target_obj_name += get_key_instance(tier_ctx.obj->get_key());
}
- ret = tier_ctx.store->get_bucket(nullptr, b, &dest_bucket);
+ ret = tier_ctx.driver->get_bucket(nullptr, b, &dest_bucket);
if (ret < 0) {
ldpp_dout(tier_ctx.dpp, 0) << "ERROR: failed to initialize dest_bucket - " << tier_ctx.target_bucket_name << " , ret = " << ret << dendl;
return ret;
target_obj_name += get_key_instance(tier_ctx.obj->get_key());
}
- ret = tier_ctx.store->get_bucket(nullptr, b, &dest_bucket);
+ ret = tier_ctx.driver->get_bucket(nullptr, b, &dest_bucket);
if (ret < 0) {
ldpp_dout(tier_ctx.dpp, 0) << "ERROR: failed to initialize dest_bucket - " << tier_ctx.target_bucket_name << " , ret = " << ret << dendl;
return ret;
/* ignore error, best effort */
}
/* remove status obj */
- ret = delete_upload_status(tier_ctx.dpp, tier_ctx.store, &status_obj);
+ ret = delete_upload_status(tier_ctx.dpp, tier_ctx.driver, &status_obj);
if (ret < 0) {
ldpp_dout(tier_ctx.dpp, 0) << "ERROR: failed to remove sync status obj obj=" << status_obj << " ret=" << ret << dendl;
// ignore error, best effort
}
dest_obj.init(target_bucket, target_obj_name);
- rgw_pool pool = static_cast<rgw::sal::RadosStore*>(tier_ctx.store)->svc()->zone->get_zone_params().log_pool;
+ rgw_pool pool = static_cast<rgw::sal::RadosStore*>(tier_ctx.driver)->svc()->zone->get_zone_params().log_pool;
status_obj = rgw_raw_obj(pool, "lc_multipart_" + tier_ctx.obj->get_oid());
- ret = read_upload_status(tier_ctx.dpp, tier_ctx.store, &status_obj, &status);
+ ret = read_upload_status(tier_ctx.dpp, tier_ctx.driver, &status_obj, &status);
if (ret < 0 && ret != -ENOENT) {
ldpp_dout(tier_ctx.dpp, 0) << "ERROR: failed to read sync status of object " << src_obj << " ret=" << ret << dendl;
status.mtime = obj_properties.mtime;
status.etag = obj_properties.etag;
- ret = put_upload_status(tier_ctx.dpp, tier_ctx.store, &status_obj, &status);
+ ret = put_upload_status(tier_ctx.dpp, tier_ctx.driver, &status_obj, &status);
if (ret < 0) {
ldpp_dout(tier_ctx.dpp, 0) << "ERROR: failed to store multipart upload state, ret=" << ret << dendl;
// continue with upload anyway
}
}
/* remove status obj */
- ret = delete_upload_status(tier_ctx.dpp, tier_ctx.store, &status_obj);
+ ret = delete_upload_status(tier_ctx.dpp, tier_ctx.driver, &status_obj);
if (ret < 0) {
ldpp_dout(tier_ctx.dpp, 0) << "ERROR: failed to abort multipart upload obj=" << tier_ctx.obj << " upload_id=" << status.upload_id << " part number " << cur_part << " (" << cpp_strerror(-ret) << ")" << dendl;
// ignore error, best effort
/* Source */
rgw_bucket_dir_entry& o;
- rgw::sal::Store *store;
+ rgw::sal::Driver *driver;
RGWBucketInfo& bucket_info;
std::string storage_class;
bool target_bucket_created{true};
RGWLCCloudTierCtx(CephContext* _cct, const DoutPrefixProvider *_dpp,
- rgw_bucket_dir_entry& _o, rgw::sal::Store *_store,
+ rgw_bucket_dir_entry& _o, rgw::sal::Driver *_driver,
RGWBucketInfo &_binfo, rgw::sal::Object *_obj,
RGWRESTConn& _conn, std::string& _bucket,
std::string& _storage_class) :
- cct(_cct), dpp(_dpp), o(_o), store(_store), bucket_info(_binfo),
+ cct(_cct), dpp(_dpp), o(_o), driver(_driver), bucket_info(_binfo),
obj(_obj), conn(_conn), target_bucket_name(_bucket),
target_storage_class(_storage_class) {}
};
cls_timeindex_add(op, utime_t(delete_at), keyext, hebl);
string shard_name = objexp_hint_get_shardname(objexp_key_shard(obj_key, cct->_conf->rgw_objexp_hints_num_shards));
- auto obj = rados_svc->obj(rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, shard_name));
+ auto obj = rados_svc->obj(rgw_raw_obj(driver->svc()->zone->get_zone_params().log_pool, shard_name));
int r = obj.open(dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): failed to open obj=" << obj << " (r=" << r << ")" << dendl;
cls_timeindex_list(op, utime_t(start_time), utime_t(end_time), marker, max_entries, entries,
out_marker, truncated);
- auto obj = rados_svc->obj(rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, oid));
+ auto obj = rados_svc->obj(rgw_raw_obj(driver->svc()->zone->get_zone_params().log_pool, oid));
int r = obj.open(dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): failed to open obj=" << obj << " (r=" << r << ")" << dendl;
const string& from_marker,
const string& to_marker)
{
- auto obj = rados_svc->obj(rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, oid));
+ auto obj = rados_svc->obj(rgw_raw_obj(driver->svc()->zone->get_zone_params().log_pool, oid));
int r = obj.open(dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): failed to open obj=" << obj << " (r=" << r << ")" << dendl;
RGWBucketInfo bucket_info;
std::unique_ptr<rgw::sal::Bucket> bucket;
- int ret = store->get_bucket(dpp, nullptr, rgw_bucket(hint.tenant, hint.bucket_name, hint.bucket_id), &bucket, null_yield);
+ int ret = driver->get_bucket(dpp, nullptr, rgw_bucket(hint.tenant, hint.bucket_name, hint.bucket_id), &bucket, null_yield);
if (-ENOENT == ret) {
ldpp_dout(dpp, 15) << "NOTICE: cannot find bucket = " \
<< hint.bucket_name << ". The object must be already removed" << dendl;
ldpp_dout(dpp, 15) << "got removal hint for: " << iter->key_ts.sec() \
<< " - " << iter->key_ext << dendl;
- int ret = objexp_hint_parse(dpp, store->ctx(), *iter, &hint);
+ int ret = objexp_hint_parse(dpp, driver->ctx(), *iter, &hint);
if (ret < 0) {
ldpp_dout(dpp, 1) << "cannot parse removal hint for " << hint.obj_key << dendl;
continue;
bool truncated = false;
bool done = true;
- CephContext *cct = store->ctx();
+ CephContext *cct = driver->ctx();
int num_entries = cct->_conf->rgw_objexp_chunk_size;
int max_secs = cct->_conf->rgw_objexp_gc_interval;
utime_t time(max_secs, 0);
l.set_duration(time);
- int ret = l.lock_exclusive(&static_cast<rgw::sal::RadosStore*>(store)->getRados()->objexp_pool_ctx, shard);
+ int ret = l.lock_exclusive(&static_cast<rgw::sal::RadosStore*>(driver)->getRados()->objexp_pool_ctx, shard);
if (ret == -EBUSY) { /* already locked by another processor */
ldpp_dout(dpp, 5) << __func__ << "(): failed to acquire lock on " << shard << dendl;
return false;
marker = out_marker;
} while (truncated);
- l.unlock(&static_cast<rgw::sal::RadosStore*>(store)->getRados()->objexp_pool_ctx, shard);
+ l.unlock(&static_cast<rgw::sal::RadosStore*>(driver)->getRados()->objexp_pool_ctx, shard);
return done;
}
const utime_t& last_run,
const utime_t& round_start)
{
- CephContext * const cct = store->ctx();
+ CephContext * const cct = driver->ctx();
int num_shards = cct->_conf->rgw_objexp_hints_num_shards;
bool all_done = true;
void RGWObjectExpirer::start_processor()
{
- worker = new OEWorker(store->ctx(), this);
+ worker = new OEWorker(driver->ctx(), this);
worker->create("rgw_obj_expirer");
}
class RGWObjExpStore {
CephContext *cct;
RGWSI_RADOS *rados_svc;
- rgw::sal::RadosStore* store;
+ rgw::sal::RadosStore* driver;
public:
- RGWObjExpStore(CephContext *_cct, RGWSI_RADOS *_rados_svc, rgw::sal::RadosStore* _store) : cct(_cct),
+ RGWObjExpStore(CephContext *_cct, RGWSI_RADOS *_rados_svc, rgw::sal::RadosStore* _driver) : cct(_cct),
rados_svc(_rados_svc),
- store(_store) {}
+ driver(_driver) {}
int objexp_hint_add(const DoutPrefixProvider *dpp,
const ceph::real_time& delete_at,
class RGWObjectExpirer {
protected:
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
RGWObjExpStore exp_store;
class OEWorker : public Thread, public DoutPrefixProvider {
std::atomic<bool> down_flag = { false };
public:
- explicit RGWObjectExpirer(rgw::sal::Store* _store)
- : store(_store),
- exp_store(_store->ctx(), static_cast<rgw::sal::RadosStore*>(store)->svc()->rados, static_cast<rgw::sal::RadosStore*>(store)),
+ explicit RGWObjectExpirer(rgw::sal::Driver* _driver)
+ : driver(_driver),
+ exp_store(_driver->ctx(), static_cast<rgw::sal::RadosStore*>(driver)->svc()->rados, static_cast<rgw::sal::RadosStore*>(driver)),
worker(NULL) {
}
~RGWObjectExpirer() {
realm_epoch++;
}
-static int read_sync_status(const DoutPrefixProvider *dpp, rgw::sal::Store* store, rgw_meta_sync_status *sync_status)
+static int read_sync_status(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, rgw_meta_sync_status *sync_status)
{
- rgw::sal::RadosStore* rados_store = static_cast<rgw::sal::RadosStore*>(store);
+ rgw::sal::RadosStore* rados_store = static_cast<rgw::sal::RadosStore*>(driver);
// initialize a sync status manager to read the status
RGWMetaSyncStatusManager mgr(rados_store, rados_store->svc()->rados->get_async_processor());
int r = mgr.init(dpp);
}
int RGWPeriod::update_sync_status(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store, /* for now */
+ rgw::sal::Driver* driver, /* for now */
const RGWPeriod ¤t_period,
std::ostream& error_stream,
bool force_if_stale)
{
rgw_meta_sync_status status;
- int r = read_sync_status(dpp, store, &status);
+ int r = read_sync_status(dpp, driver, &status);
if (r < 0) {
ldpp_dout(dpp, 0) << "period failed to read sync status: "
<< cpp_strerror(-r) << dendl;
}
int RGWPeriod::commit(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
RGWRealm& realm, const RGWPeriod& current_period,
std::ostream& error_stream, optional_yield y,
bool force_if_stale)
// did the master zone change?
if (master_zone != current_period.get_master_zone()) {
// store the current metadata sync status in the period
- int r = update_sync_status(dpp, store, current_period, error_stream, force_if_stale);
+ int r = update_sync_status(dpp, driver, current_period, error_stream, force_if_stale);
if (r < 0) {
ldpp_dout(dpp, 0) << "failed to update metadata sync status: "
<< cpp_strerror(-r) << dendl;
dest.arn_topic = topic_name;
// the topic ARN will be sent in the reply
const rgw::ARN arn(rgw::Partition::aws, rgw::Service::sns,
- store->get_zone()->get_zonegroup().get_name(),
+ driver->get_zone()->get_zonegroup().get_name(),
s->user->get_tenant(), topic_name);
topic_arn = arn.to_string();
return 0;
return;
}
- ps.emplace(static_cast<rgw::sal::RadosStore*>(store), s->owner.get_id().tenant);
+ ps.emplace(static_cast<rgw::sal::RadosStore*>(driver), s->owner.get_id().tenant);
op_ret = ps->create_topic(this, topic_name, dest, topic_arn, opaque_data, y);
if (op_ret < 0) {
ldpp_dout(this, 1) << "failed to create topic '" << topic_name << "', ret=" << op_ret << dendl;
};
void RGWPSListTopicsOp::execute(optional_yield y) {
- ps.emplace(static_cast<rgw::sal::RadosStore*>(store), s->owner.get_id().tenant);
+ ps.emplace(static_cast<rgw::sal::RadosStore*>(driver), s->owner.get_id().tenant);
op_ret = ps->get_topics(&result);
// if there are no topics it is not considered an error
op_ret = op_ret == -ENOENT ? 0 : op_ret;
if (op_ret < 0) {
return;
}
- ps.emplace(static_cast<rgw::sal::RadosStore*>(store), s->owner.get_id().tenant);
+ ps.emplace(static_cast<rgw::sal::RadosStore*>(driver), s->owner.get_id().tenant);
op_ret = ps->get_topic(topic_name, &result);
if (op_ret < 0) {
ldpp_dout(this, 1) << "failed to get topic '" << topic_name << "', ret=" << op_ret << dendl;
if (op_ret < 0) {
return;
}
- ps.emplace(static_cast<rgw::sal::RadosStore*>(store), s->owner.get_id().tenant);
+ ps.emplace(static_cast<rgw::sal::RadosStore*>(driver), s->owner.get_id().tenant);
op_ret = ps->get_topic(topic_name, &result);
if (op_ret < 0) {
ldpp_dout(this, 1) << "failed to get topic '" << topic_name << "', ret=" << op_ret << dendl;
if (op_ret < 0) {
return;
}
- ps.emplace(static_cast<rgw::sal::RadosStore*>(store), s->owner.get_id().tenant);
+ ps.emplace(static_cast<rgw::sal::RadosStore*>(driver), s->owner.get_id().tenant);
op_ret = ps->remove_topic(this, topic_name, y);
if (op_ret < 0) {
ldpp_dout(this, 1) << "failed to remove topic '" << topic_name << ", ret=" << op_ret << dendl;
}
int RGWHandler_REST_PSTopic_AWS::authorize(const DoutPrefixProvider* dpp, optional_yield y) {
- return RGW_Auth_S3::authorize(dpp, store, auth_registry, s, y);
+ return RGW_Auth_S3::authorize(dpp, driver, auth_registry, s, y);
}
namespace {
return;
}
- ps.emplace(static_cast<rgw::sal::RadosStore*>(store), s->owner.get_id().tenant);
+ ps.emplace(static_cast<rgw::sal::RadosStore*>(driver), s->owner.get_id().tenant);
auto b = ps->get_bucket(bucket_info.bucket);
ceph_assert(b);
return ret;
}
- std::unique_ptr<rgw::sal::User> user = store->get_user(s->owner.get_id());
+ std::unique_ptr<rgw::sal::User> user = driver->get_user(s->owner.get_id());
std::unique_ptr<rgw::sal::Bucket> bucket;
- ret = store->get_bucket(this, user.get(), s->owner.get_id().tenant, bucket_name, &bucket, y);
+ ret = driver->get_bucket(this, user.get(), s->owner.get_id().tenant, bucket_name, &bucket, y);
if (ret < 0) {
ldpp_dout(this, 1) << "failed to get bucket info, cannot verify ownership" << dendl;
return ret;
return;
}
- ps.emplace(static_cast<rgw::sal::RadosStore*>(store), s->owner.get_id().tenant);
+ ps.emplace(static_cast<rgw::sal::RadosStore*>(driver), s->owner.get_id().tenant);
auto b = ps->get_bucket(bucket_info.bucket);
ceph_assert(b);
return ret;
}
- std::unique_ptr<rgw::sal::User> user = store->get_user(s->owner.get_id());
+ std::unique_ptr<rgw::sal::User> user = driver->get_user(s->owner.get_id());
std::unique_ptr<rgw::sal::Bucket> bucket;
- ret = store->get_bucket(this, user.get(), s->owner.get_id().tenant, bucket_name, &bucket, y);
+ ret = driver->get_bucket(this, user.get(), s->owner.get_id().tenant, bucket_name, &bucket, y);
if (ret < 0) {
return ret;
}
};
void RGWPSListNotifsOp::execute(optional_yield y) {
- ps.emplace(static_cast<rgw::sal::RadosStore*>(store), s->owner.get_id().tenant);
+ ps.emplace(static_cast<rgw::sal::RadosStore*>(driver), s->owner.get_id().tenant);
auto b = ps->get_bucket(bucket_info.bucket);
ceph_assert(b);
return ret;
}
- std::unique_ptr<rgw::sal::User> user = store->get_user(s->owner.get_id());
+ std::unique_ptr<rgw::sal::User> user = driver->get_user(s->owner.get_id());
std::unique_ptr<rgw::sal::Bucket> bucket;
- ret = store->get_bucket(this, user.get(), s->owner.get_id().tenant, bucket_name, &bucket, y);
+ ret = driver->get_bucket(this, user.get(), s->owner.get_id().tenant, bucket_name, &bucket, y);
if (ret < 0) {
return ret;
}
period.set_id(period_id);
period.set_epoch(epoch);
- op_ret = period.init(this, store->ctx(), static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, realm_id, y, realm_name);
+ op_ret = period.init(this, driver->ctx(), static_cast<rgw::sal::RadosStore*>(driver)->svc()->sysobj, realm_id, y, realm_name);
if (op_ret < 0)
ldpp_dout(this, 5) << "failed to read period" << dendl;
}
void RGWOp_Period_Post::execute(optional_yield y)
{
- auto cct = store->ctx();
+ auto cct = driver->ctx();
// initialize the period without reading from rados
- period.init(this, cct, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, y, false);
+ period.init(this, cct, static_cast<rgw::sal::RadosStore*>(driver)->svc()->sysobj, y, false);
// decode the period from input
const auto max_size = cct->_conf->rgw_max_put_param_size;
}
// require period.realm_id to match our realm
- if (period.get_realm() != static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->get_realm().get_id()) {
+ if (period.get_realm() != static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->get_realm().get_id()) {
error_stream << "period with realm id " << period.get_realm()
- << " doesn't match current realm " << static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->get_realm().get_id() << std::endl;
+ << " doesn't match current realm " << static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->get_realm().get_id() << std::endl;
op_ret = -EINVAL;
return;
}
// period that we haven't restarted with yet. we also don't want to modify
// the objects in use by RGWRados
RGWRealm realm(period.get_realm());
- op_ret = realm.init(this, cct, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, y);
+ op_ret = realm.init(this, cct, static_cast<rgw::sal::RadosStore*>(driver)->svc()->sysobj, y);
if (op_ret < 0) {
ldpp_dout(this, -1) << "failed to read current realm: "
<< cpp_strerror(-op_ret) << dendl;
}
RGWPeriod current_period;
- op_ret = current_period.init(this, cct, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, realm.get_id(), y);
+ op_ret = current_period.init(this, cct, static_cast<rgw::sal::RadosStore*>(driver)->svc()->sysobj, realm.get_id(), y);
if (op_ret < 0) {
ldpp_dout(this, -1) << "failed to read current period: "
<< cpp_strerror(-op_ret) << dendl;
// if period id is empty, handle as 'period commit'
if (period.get_id().empty()) {
- op_ret = period.commit(this, store, realm, current_period, error_stream, y);
+ op_ret = period.commit(this, driver, realm, current_period, error_stream, y);
if (op_ret < 0) {
ldpp_dout(this, -1) << "master zone failed to commit period" << dendl;
}
}
// if it's not period commit, nobody is allowed to push to the master zone
- if (period.get_master_zone() == static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->get_zone_params().get_id()) {
+ if (period.get_master_zone() == static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->get_zone_params().get_id()) {
ldpp_dout(this, 10) << "master zone rejecting period id="
<< period.get_id() << " epoch=" << period.get_epoch() << dendl;
op_ret = -EINVAL; // XXX: error code
return;
}
- auto period_history = static_cast<rgw::sal::RadosStore*>(store)->svc()->mdlog->get_period_history();
+ auto period_history = static_cast<rgw::sal::RadosStore*>(driver)->svc()->mdlog->get_period_history();
// decide whether we can set_current_period() or set_latest_epoch()
if (period.get_id() != current_period.get_id()) {
class RGWRESTMgr_Period : public RGWRESTMgr {
public:
- RGWHandler_REST* get_handler(rgw::sal::Store* store,
+ RGWHandler_REST* get_handler(rgw::sal::Driver* driver,
req_state*,
const rgw::auth::StrategyRegistry& auth_registry,
const std::string&) override {
// read realm
realm.reset(new RGWRealm(id, name));
- op_ret = realm->init(this, g_ceph_context, static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj, y);
+ op_ret = realm->init(this, g_ceph_context, static_cast<rgw::sal::RadosStore*>(driver)->svc()->sysobj, y);
if (op_ret < 0)
ldpp_dout(this, -1) << "failed to read realm id=" << id
<< " name=" << name << dendl;
{
{
// read default realm
- RGWRealm realm(store->ctx(), static_cast<rgw::sal::RadosStore*>(store)->svc()->sysobj);
+ RGWRealm realm(driver->ctx(), static_cast<rgw::sal::RadosStore*>(driver)->svc()->sysobj);
[[maybe_unused]] int ret = realm.read_default_id(this, default_id, y);
}
- op_ret = static_cast<rgw::sal::RadosStore*>(store)->svc()->zone->list_realms(this, realms);
+ op_ret = static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->list_realms(this, realms);
if (op_ret < 0)
ldpp_dout(this, -1) << "failed to list realms" << dendl;
}
}
RGWHandler_REST*
-RGWRESTMgr_Realm::get_handler(rgw::sal::Store* store,
+RGWRESTMgr_Realm::get_handler(rgw::sal::Driver* driver,
req_state*,
const rgw::auth::StrategyRegistry& auth_registry,
const std::string&)
public:
RGWRESTMgr_Realm();
- RGWHandler_REST* get_handler(rgw::sal::Store* store,
+ RGWHandler_REST* get_handler(rgw::sal::Driver* driver,
req_state*,
const rgw::auth::StrategyRegistry& auth_registry,
const std::string&) override;
void RGWOp_User_List::execute(optional_yield y)
{
- RGWUserAdminOpState op_state(store);
+ RGWUserAdminOpState op_state(driver);
uint32_t max_entries;
std::string marker;
op_state.max_entries = max_entries;
op_state.marker = marker;
- op_ret = RGWUserAdminOp_User::list(this, store, op_state, flusher);
+ op_ret = RGWUserAdminOp_User::list(this, driver, op_state, flusher);
}
class RGWOp_User_Info : public RGWRESTOp {
void RGWOp_User_Info::execute(optional_yield y)
{
- RGWUserAdminOpState op_state(store);
+ RGWUserAdminOpState op_state(driver);
std::string uid_str, access_key_str;
bool fetch_stats;
op_state.set_fetch_stats(fetch_stats);
op_state.set_sync_stats(sync_stats);
- op_ret = RGWUserAdminOp_User::info(s, store, op_state, flusher, y);
+ op_ret = RGWUserAdminOp_User::info(s, driver, op_state, flusher, y);
}
class RGWOp_User_Create : public RGWRESTOp {
const int32_t default_max_buckets =
s->cct->_conf.get_val<int64_t>("rgw_user_max_buckets");
- RGWUserAdminOpState op_state(store);
+ RGWUserAdminOpState op_state(driver);
RESTArgs::get_string(s, "uid", uid_str, &uid_str);
rgw_user uid(uid_str);
if (!default_placement_str.empty()) {
rgw_placement_rule target_rule;
target_rule.from_str(default_placement_str);
- if (!store->valid_placement(target_rule)) {
+ if (!driver->valid_placement(target_rule)) {
ldpp_dout(this, 0) << "NOTICE: invalid dest placement: " << target_rule.to_str() << dendl;
op_ret = -EINVAL;
return;
}
bufferlist data;
- op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = driver->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
}
- op_ret = RGWUserAdminOp_User::create(s, store, op_state, flusher, y);
+ op_ret = RGWUserAdminOp_User::create(s, driver, op_state, flusher, y);
}
class RGWOp_User_Modify : public RGWRESTOp {
bool quota_set;
int32_t max_buckets;
- RGWUserAdminOpState op_state(store);
+ RGWUserAdminOpState op_state(driver);
RESTArgs::get_string(s, "uid", uid_str, &uid_str);
rgw_user uid(uid_str);
if (!default_placement_str.empty()) {
rgw_placement_rule target_rule;
target_rule.from_str(default_placement_str);
- if (!store->valid_placement(target_rule)) {
+ if (!driver->valid_placement(target_rule)) {
ldpp_dout(this, 0) << "NOTICE: invalid dest placement: " << target_rule.to_str() << dendl;
op_ret = -EINVAL;
return;
}
bufferlist data;
- op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = driver->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
}
- op_ret = RGWUserAdminOp_User::modify(s, store, op_state, flusher, y);
+ op_ret = RGWUserAdminOp_User::modify(s, driver, op_state, flusher, y);
}
class RGWOp_User_Remove : public RGWRESTOp {
std::string uid_str;
bool purge_data;
- RGWUserAdminOpState op_state(store);
+ RGWUserAdminOpState op_state(driver);
RESTArgs::get_string(s, "uid", uid_str, &uid_str);
rgw_user uid(uid_str);
op_state.set_purge_data(purge_data);
bufferlist data;
- op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = driver->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
}
- op_ret = RGWUserAdminOp_User::remove(s, store, op_state, flusher, s->yield);
+ op_ret = RGWUserAdminOp_User::remove(s, driver, op_state, flusher, s->yield);
}
class RGWOp_Subuser_Create : public RGWRESTOp {
uint32_t perm_mask = 0;
int32_t key_type = KEY_TYPE_SWIFT;
- RGWUserAdminOpState op_state(store);
+ RGWUserAdminOpState op_state(driver);
RESTArgs::get_string(s, "uid", uid_str, &uid_str);
rgw_user uid(uid_str);
op_state.set_key_type(key_type);
bufferlist data;
- op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = driver->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
}
- op_ret = RGWUserAdminOp_Subuser::create(s, store, op_state, flusher, y);
+ op_ret = RGWUserAdminOp_Subuser::create(s, driver, op_state, flusher, y);
}
class RGWOp_Subuser_Modify : public RGWRESTOp {
std::string key_type_str;
std::string perm_str;
- RGWUserAdminOpState op_state(store);
+ RGWUserAdminOpState op_state(driver);
uint32_t perm_mask;
int32_t key_type = KEY_TYPE_SWIFT;
op_state.set_key_type(key_type);
bufferlist data;
- op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = driver->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
}
- op_ret = RGWUserAdminOp_Subuser::modify(s, store, op_state, flusher, y);
+ op_ret = RGWUserAdminOp_Subuser::modify(s, driver, op_state, flusher, y);
}
class RGWOp_Subuser_Remove : public RGWRESTOp {
std::string subuser;
bool purge_keys;
- RGWUserAdminOpState op_state(store);
+ RGWUserAdminOpState op_state(driver);
RESTArgs::get_string(s, "uid", uid_str, &uid_str);
rgw_user uid(uid_str);
op_state.set_purge_keys();
bufferlist data;
- op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = driver->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
}
- op_ret = RGWUserAdminOp_Subuser::remove(s, store, op_state, flusher, y);
+ op_ret = RGWUserAdminOp_Subuser::remove(s, driver, op_state, flusher, y);
}
class RGWOp_Key_Create : public RGWRESTOp {
bool gen_key;
- RGWUserAdminOpState op_state(store);
+ RGWUserAdminOpState op_state(driver);
RESTArgs::get_string(s, "uid", uid_str, &uid_str);
rgw_user uid(uid_str);
op_state.set_key_type(key_type);
}
- op_ret = RGWUserAdminOp_Key::create(s, store, op_state, flusher, y);
+ op_ret = RGWUserAdminOp_Key::create(s, driver, op_state, flusher, y);
}
class RGWOp_Key_Remove : public RGWRESTOp {
std::string access_key;
std::string key_type_str;
- RGWUserAdminOpState op_state(store);
+ RGWUserAdminOpState op_state(driver);
RESTArgs::get_string(s, "uid", uid_str, &uid_str);
rgw_user uid(uid_str);
op_state.set_key_type(key_type);
}
- op_ret = RGWUserAdminOp_Key::remove(s, store, op_state, flusher, y);
+ op_ret = RGWUserAdminOp_Key::remove(s, driver, op_state, flusher, y);
}
class RGWOp_Caps_Add : public RGWRESTOp {
std::string uid_str;
std::string caps;
- RGWUserAdminOpState op_state(store);
+ RGWUserAdminOpState op_state(driver);
RESTArgs::get_string(s, "uid", uid_str, &uid_str);
rgw_user uid(uid_str);
op_state.set_caps(caps);
bufferlist data;
- op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = driver->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
}
- op_ret = RGWUserAdminOp_Caps::add(s, store, op_state, flusher, y);
+ op_ret = RGWUserAdminOp_Caps::add(s, driver, op_state, flusher, y);
}
class RGWOp_Caps_Remove : public RGWRESTOp {
std::string uid_str;
std::string caps;
- RGWUserAdminOpState op_state(store);
+ RGWUserAdminOpState op_state(driver);
RESTArgs::get_string(s, "uid", uid_str, &uid_str);
rgw_user uid(uid_str);
op_state.set_caps(caps);
bufferlist data;
- op_ret = store->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
+ op_ret = driver->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
}
- op_ret = RGWUserAdminOp_Caps::remove(s, store, op_state, flusher, y);
+ op_ret = RGWUserAdminOp_Caps::remove(s, driver, op_state, flusher, y);
}
struct UserQuotas {
void RGWOp_Quota_Info::execute(optional_yield y)
{
- RGWUserAdminOpState op_state(store);
+ RGWUserAdminOpState op_state(driver);
std::string uid_str;
std::string quota_type;
op_state.set_user_id(uid);
RGWUser user;
- op_ret = user.init(s, store, op_state, y);
+ op_ret = user.init(s, driver, op_state, y);
if (op_ret < 0)
return;
void RGWOp_Quota_Set::execute(optional_yield y)
{
- RGWUserAdminOpState op_state(store);
+ RGWUserAdminOpState op_state(driver);
std::string uid_str;
std::string quota_type;
op_state.set_user_id(uid);
RGWUser user;
- op_ret = user.init(s, store, op_state, y);
+ op_ret = user.init(s, driver, op_state, y);
if (op_ret < 0) {
ldpp_dout(this, 20) << "failed initializing user info: " << op_ret << dendl;
return;
if (set_all) {
UserQuotas quotas;
- if ((op_ret = get_json_input(store->ctx(), s, quotas, QUOTA_INPUT_MAX_LEN, NULL)) < 0) {
+ if ((op_ret = get_json_input(driver->ctx(), s, quotas, QUOTA_INPUT_MAX_LEN, NULL)) < 0) {
ldpp_dout(this, 20) << "failed to retrieve input" << dendl;
return;
}
if (!use_http_params) {
bool empty;
- op_ret = get_json_input(store->ctx(), s, quota, QUOTA_INPUT_MAX_LEN, &empty);
+ op_ret = get_json_input(driver->ctx(), s, quota, QUOTA_INPUT_MAX_LEN, &empty);
if (op_ret < 0) {
ldpp_dout(this, 20) << "failed to retrieve input" << dendl;
if (!empty)
RGWRESTMgr_User() = default;
~RGWRESTMgr_User() override = default;
- RGWHandler_REST *get_handler(rgw::sal::Store* store,
+ RGWHandler_REST *get_handler(rgw::sal::Driver* driver,
req_state*,
const rgw::auth::StrategyRegistry& auth_registry,
const std::string&) override {
extern "C" {
-void* newStore(void)
+void* newRadosStore(void)
{
rgw::sal::RadosStore* store = new rgw::sal::RadosStore();
if (store) {
virtual RGWBucketSyncPolicyHandlerRef get_sync_policy_handler() override;
};
-class RadosStore : public StoreStore {
+class RadosStore : public StoreDriver {
private:
RGWRados* rados;
RGWUserCtl* user_ctl;
: StoreObject(_k),
store(_st),
acls(),
- rados_ctx(new RGWObjectCtx(dynamic_cast<Store*>(store))),
+ rados_ctx(new RGWObjectCtx(dynamic_cast<Driver*>(store))),
rados_ctx_owned(true) {
}
RadosObject(RadosStore *_st, const rgw_obj_key& _k, Bucket* _b)
: StoreObject(_k, _b),
store(_st),
acls(),
- rados_ctx(new RGWObjectCtx(dynamic_cast<Store*>(store))) ,
+ rados_ctx(new RGWObjectCtx(dynamic_cast<Driver*>(store))) ,
rados_ctx_owned(true) {
}
RadosObject(RadosObject& _o) : StoreObject(_o) {
RGWCtlDef::_meta::~_meta() {}
-int RGWCtlDef::init(RGWServices& svc, rgw::sal::Store* store, const DoutPrefixProvider *dpp)
+int RGWCtlDef::init(RGWServices& svc, rgw::sal::Driver* driver, const DoutPrefixProvider *dpp)
{
meta.mgr.reset(new RGWMetadataManager(svc.meta));
auto sync_module = svc.sync_modules->get_sync_module();
if (sync_module) {
meta.bucket.reset(sync_module->alloc_bucket_meta_handler());
- meta.bucket_instance.reset(sync_module->alloc_bucket_instance_meta_handler(store));
+ meta.bucket_instance.reset(sync_module->alloc_bucket_instance_meta_handler(driver));
} else {
meta.bucket.reset(RGWBucketMetaHandlerAllocator::alloc());
- meta.bucket_instance.reset(RGWBucketInstanceMetaHandlerAllocator::alloc(store));
+ meta.bucket_instance.reset(RGWBucketInstanceMetaHandlerAllocator::alloc(driver));
}
meta.otp.reset(RGWOTPMetaHandlerAllocator::alloc());
- meta.role = std::make_unique<rgw::sal::RGWRoleMetadataHandler>(store, svc.role);
+ meta.role = std::make_unique<rgw::sal::RGWRoleMetadataHandler>(driver, svc.role);
user.reset(new RGWUserCtl(svc.zone, svc.user, (RGWUserMetadataHandler *)meta.user.get()));
bucket.reset(new RGWBucketCtl(svc.zone,
return 0;
}
-int RGWCtl::init(RGWServices *_svc, rgw::sal::Store* store, const DoutPrefixProvider *dpp)
+int RGWCtl::init(RGWServices *_svc, rgw::sal::Driver* driver, const DoutPrefixProvider *dpp)
{
svc = _svc;
cct = svc->cct;
- int r = _ctl.init(*svc, store, dpp);
+ int r = _ctl.init(*svc, driver, dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to start init ctls (" << cpp_strerror(-r) << dendl;
return r;
RGWCtlDef();
~RGWCtlDef();
- int init(RGWServices& svc, rgw::sal::Store* store, const DoutPrefixProvider *dpp);
+ int init(RGWServices& svc, rgw::sal::Driver* driver, const DoutPrefixProvider *dpp);
};
struct RGWCtl {
RGWBucketCtl *bucket{nullptr};
RGWOTPCtl *otp{nullptr};
- int init(RGWServices *_svc, rgw::sal::Store* store, const DoutPrefixProvider *dpp);
+ int init(RGWServices *_svc, rgw::sal::Driver* driver, const DoutPrefixProvider *dpp);
};
#endif
return RGWBucketMetaHandlerAllocator::alloc();
}
-RGWBucketInstanceMetadataHandlerBase* RGWSyncModuleInstance::alloc_bucket_instance_meta_handler(rgw::sal::Store* store)
+RGWBucketInstanceMetadataHandlerBase* RGWSyncModuleInstance::alloc_bucket_instance_meta_handler(rgw::sal::Driver* driver)
{
- return RGWBucketInstanceMetaHandlerAllocator::alloc(store);
+ return RGWBucketInstanceMetaHandlerAllocator::alloc(driver);
}
RGWStatRemoteObjCBCR::RGWStatRemoteObjCBCR(RGWDataSyncCtx *_sc,
int RGWCallStatRemoteObjCR::operate(const DoutPrefixProvider *dpp) {
reenter(this) {
yield {
- call(new RGWStatRemoteObjCR(sync_env->async_rados, sync_env->store,
+ call(new RGWStatRemoteObjCR(sync_env->async_rados, sync_env->driver,
sc->source_zone,
src_bucket, key, &mtime, &size, &etag, &attrs, &headers));
}
return false;
}
virtual RGWMetadataHandler *alloc_bucket_meta_handler();
- virtual RGWBucketInstanceMetadataHandlerBase *alloc_bucket_instance_meta_handler(rgw::sal::Store* store);
+ virtual RGWBucketInstanceMetadataHandlerBase *alloc_bucket_instance_meta_handler(rgw::sal::Driver* driver);
// indication whether the sync module start with full sync (default behavior)
// incremental sync would follow anyway
ldpp_dout(dpp, 0) << "ERROR: failed to abort multipart upload dest obj=" << dest_obj << " upload_id=" << upload_id << " retcode=" << retcode << dendl;
/* ignore error, best effort */
}
- yield call(new RGWRadosRemoveCR(sc->env->store, status_obj));
+ yield call(new RGWRadosRemoveCR(sc->env->driver, status_obj));
if (retcode < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to remove sync status obj obj=" << status_obj << " retcode=" << retcode << dendl;
/* ignore error, best effort */
}
/* remove status obj */
- yield call(new RGWRadosRemoveCR(sync_env->store, status_obj));
+ yield call(new RGWRadosRemoveCR(sync_env->driver, status_obj));
if (retcode < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to abort multipart upload obj=" << src_obj << " upload_id=" << status.upload_id << " part number " << status.cur_part << " (" << cpp_strerror(-retcode) << ")" << dendl;
/* ignore error, best effort */
}
yield {
- bucket.reset(new rgw::sal::RadosBucket(sync_env->store, src_bucket));
- src_obj.reset(new rgw::sal::RadosObject(sync_env->store, key, bucket.get()));
+ bucket.reset(new rgw::sal::RadosBucket(sync_env->driver, src_bucket));
+ src_obj.reset(new rgw::sal::RadosObject(sync_env->driver, key, bucket.get()));
/* init output */
target_bucket.name = target_bucket_name; /* this is only possible because we only use bucket name for
uri resolution */
- dest_bucket.reset(new rgw::sal::RadosBucket(sync_env->store, target_bucket));
- dest_obj.reset(new rgw::sal::RadosObject(sync_env->store, rgw_obj_key(target_obj_name), dest_bucket.get()));
+ dest_bucket.reset(new rgw::sal::RadosBucket(sync_env->driver, target_bucket));
+ dest_obj.reset(new rgw::sal::RadosObject(sync_env->driver, rgw_obj_key(target_obj_name), dest_bucket.get()));
rgw_sync_aws_src_obj_properties src_properties;
src_properties.mtime = mtime;
void init(CephContext *cct, const JSONFormattable& config) {
string elastic_endpoint = config["endpoint"];
id = string("elastic:") + elastic_endpoint;
- conn.reset(new RGWRESTConn(cct, (rgw::sal::Store*)nullptr, id, { elastic_endpoint }, nullopt /* region */ ));
+ conn.reset(new RGWRESTConn(cct, (rgw::sal::Driver*)nullptr, id, { elastic_endpoint }, nullopt /* region */ ));
explicit_custom_meta = config["explicit_custom_meta"](true);
index_buckets.init(config["index_buckets_list"], true); /* approve all buckets by default */
allow_owners.init(config["approved_owners_list"], true); /* approve all bucket owners by default */
protected:
RGWOp *op_get() override {
if (s->info.args.exists("query")) {
- return new RGWMetadataSearch_ObjStore_S3(store->get_sync_module());
+ return new RGWMetadataSearch_ObjStore_S3(driver->get_sync_module());
}
if (!s->init_state.url_bucket.empty() &&
s->info.args.exists("mdsearch")) {
};
-RGWHandler_REST* RGWRESTMgr_MDSearch_S3::get_handler(rgw::sal::Store* store,
+RGWHandler_REST* RGWRESTMgr_MDSearch_S3::get_handler(rgw::sal::Driver* driver,
req_state* const s,
const rgw::auth::StrategyRegistry& auth_registry,
const std::string& frontend_prefix)
{
int ret =
- RGWHandler_REST_S3::init_from_header(store, s,
+ RGWHandler_REST_S3::init_from_header(driver, s,
RGWFormat::XML, true);
if (ret < 0) {
return nullptr;
public:
explicit RGWRESTMgr_MDSearch_S3() {}
- RGWHandler_REST *get_handler(rgw::sal::Store* store,
+ RGWHandler_REST *get_handler(rgw::sal::Driver* driver,
req_state* s,
const rgw::auth::StrategyRegistry& auth_registry,
const std::string& frontend_prefix) override;
}
}
-RGWDataAccess::RGWDataAccess(rgw::sal::Store* _store) : store(_store)
+RGWDataAccess::RGWDataAccess(rgw::sal::Driver* _driver) : driver(_driver)
{
}
int RGWDataAccess::Bucket::init(const DoutPrefixProvider *dpp, optional_yield y)
{
std::unique_ptr<rgw::sal::Bucket> bucket;
- int ret = sd->store->get_bucket(dpp, nullptr, tenant, name, &bucket, y);
+ int ret = sd->driver->get_bucket(dpp, nullptr, tenant, name, &bucket, y);
if (ret < 0) {
return ret;
}
const DoutPrefixProvider *dpp,
optional_yield y)
{
- rgw::sal::Store* store = sd->store;
- CephContext *cct = store->ctx();
+ rgw::sal::Driver* driver = sd->driver;
+ CephContext *cct = driver->ctx();
string tag;
append_rand_alpha(cct, tag, tag, 32);
RGWBucketInfo& bucket_info = bucket->bucket_info;
- rgw::BlockingAioThrottle aio(store->ctx()->_conf->rgw_put_obj_min_window_size);
+ rgw::BlockingAioThrottle aio(driver->ctx()->_conf->rgw_put_obj_min_window_size);
std::unique_ptr<rgw::sal::Bucket> b;
- store->get_bucket(NULL, bucket_info, &b);
+ driver->get_bucket(NULL, bucket_info, &b);
std::unique_ptr<rgw::sal::Object> obj = b->get_object(key);
auto& owner = bucket->policy.get_owner();
- string req_id = store->zone_unique_id(store->get_new_req_id());
+ string req_id = driver->zone_unique_id(driver->get_new_req_id());
std::unique_ptr<rgw::sal::Writer> processor;
- processor = store->get_atomic_writer(dpp, y, std::move(obj),
+ processor = driver->get_atomic_writer(dpp, y, std::move(obj),
owner.get_id(),
nullptr, olh_epoch, req_id);
CompressorRef plugin;
boost::optional<RGWPutObj_Compress> compressor;
- const auto& compression_type = store->get_compression_type(bucket_info.placement_rule);
+ const auto& compression_type = driver->get_compression_type(bucket_info.placement_rule);
if (compression_type != "none") {
- plugin = Compressor::create(store->ctx(), compression_type);
+ plugin = Compressor::create(driver->ctx(), compression_type);
if (!plugin) {
ldpp_dout(dpp, 1) << "Cannot load plugin for compression type "
<< compression_type << dendl;
} else {
- compressor.emplace(store->ctx(), plugin, filter);
+ compressor.emplace(driver->ctx(), plugin, filter);
filter = &*compressor;
}
}
class RGWDataAccess
{
- rgw::sal::Store* store;
+ rgw::sal::Driver* driver;
public:
- RGWDataAccess(rgw::sal::Store* _store);
+ RGWDataAccess(rgw::sal::Driver* _driver);
class Object;
class Bucket;
user = usr;
- store = user->get_store();
+ driver = user->get_driver();
}
int RGWAccessKeyPool::init(RGWUserAdminOpState& op_state)
return 0;
}
-RGWUserAdminOpState::RGWUserAdminOpState(rgw::sal::Store* store)
+RGWUserAdminOpState::RGWUserAdminOpState(rgw::sal::Driver* driver)
{
- user = store->get_user(rgw_user(RGW_USER_ANON_ID));
+ user = driver->get_user(rgw_user(RGW_USER_ANON_ID));
}
void RGWUserAdminOpState::set_user_id(const rgw_user& id)
if (!id.empty()) {
switch (key_type) {
case KEY_TYPE_SWIFT:
- if (store->get_user_by_swift(dpp, id, y, &duplicate_check) >= 0) {
+ if (driver->get_user_by_swift(dpp, id, y, &duplicate_check) >= 0) {
set_err_msg(err_msg, "existing swift key in RGW system:" + id);
return -ERR_KEY_EXIST;
}
break;
case KEY_TYPE_S3:
- if (store->get_user_by_access_key(dpp, id, y, &duplicate_check) >= 0) {
+ if (driver->get_user_by_access_key(dpp, id, y, &duplicate_check) >= 0) {
set_err_msg(err_msg, "existing S3 key in RGW system:" + id);
return -ERR_KEY_EXIST;
}
if (!validate_access_key(id))
continue;
- } while (!store->get_user_by_access_key(dpp, id, y, &duplicate_check));
+ } while (!driver->get_user_by_access_key(dpp, id, y, &duplicate_check));
}
if (key_type == KEY_TYPE_SWIFT) {
}
// check that the access key doesn't exist
- if (store->get_user_by_swift(dpp, id, y, &duplicate_check) >= 0) {
+ if (driver->get_user_by_swift(dpp, id, y, &duplicate_check) >= 0) {
set_err_msg(err_msg, "cannot create existing swift key");
return -ERR_KEY_EXIST;
}
user = usr;
subusers_allowed = true;
- store = user->get_store();
+ driver = user->get_driver();
}
int RGWSubUserPool::init(RGWUserAdminOpState& op_state)
init_default();
}
-int RGWUser::init(const DoutPrefixProvider *dpp, rgw::sal::Store* storage,
+int RGWUser::init(const DoutPrefixProvider *dpp, rgw::sal::Driver* _driver,
RGWUserAdminOpState& op_state, optional_yield y)
{
init_default();
- int ret = init_storage(storage);
+ int ret = init_storage(_driver);
if (ret < 0)
return ret;
clear_populated();
}
-int RGWUser::init_storage(rgw::sal::Store* storage)
+int RGWUser::init_storage(rgw::sal::Driver* _driver)
{
- if (!storage) {
+ if (!_driver) {
return -EINVAL;
}
- store = storage;
+ driver = _driver;
clear_populated();
}
if (!user_id.empty() && (user_id.compare(RGW_USER_ANON_ID) != 0)) {
- user = store->get_user(user_id);
+ user = driver->get_user(user_id);
found = (user->load_user(dpp, y) >= 0);
op_state.found_by_uid = found;
}
- if (store->ctx()->_conf.get_val<bool>("rgw_user_unique_email")) {
+ if (driver->ctx()->_conf.get_val<bool>("rgw_user_unique_email")) {
if (!user_email.empty() && !found) {
- found = (store->get_user_by_email(dpp, user_email, y, &user) >= 0);
+ found = (driver->get_user_by_email(dpp, user_email, y, &user) >= 0);
op_state.found_by_email = found;
}
}
if (!swift_user.empty() && !found) {
- found = (store->get_user_by_swift(dpp, swift_user, y, &user) >= 0);
+ found = (driver->get_user_by_swift(dpp, swift_user, y, &user) >= 0);
op_state.found_by_key = found;
}
if (!access_key.empty() && !found) {
- found = (store->get_user_by_access_key(dpp, access_key, y, &user) >= 0);
+ found = (driver->get_user_by_access_key(dpp, access_key, y, &user) >= 0);
op_state.found_by_key = found;
}
std::string subprocess_msg;
rgw::sal::User* user = op_state.get_user();
- if (!store) {
+ if (!driver) {
set_err_msg(err_msg, "couldn't initialize storage");
return -EINVAL;
}
}
}
- std::unique_ptr<rgw::sal::User> old_user = store->get_user(op_state.get_user_info().user_id);
- std::unique_ptr<rgw::sal::User> new_user = store->get_user(op_state.get_new_uid());
+ std::unique_ptr<rgw::sal::User> old_user = driver->get_user(op_state.get_user_info().user_id);
+ std::unique_ptr<rgw::sal::User> new_user = driver->get_user(op_state.get_new_uid());
if (old_user->get_tenant() != new_user->get_tenant()) {
set_err_msg(err_msg, "users have to be under the same tenant namespace "
+ old_user->get_tenant() + " != " + new_user->get_tenant());
// create a stub user and write only the uid index and buckets object
std::unique_ptr<rgw::sal::User> user;
- user = store->get_user(new_user->get_id());
+ user = driver->get_user(new_user->get_id());
const bool exclusive = !op_state.get_overwrite_new_user(); // overwrite if requested
//unlink and link buckets to new user
string marker;
- CephContext *cct = store->ctx();
+ CephContext *cct = driver->ctx();
size_t max_buckets = cct->_conf->rgw_list_buckets_max_chunk;
rgw::sal::BucketList buckets;
if (!user_email.empty())
user_info.user_email = user_email;
- CephContext *cct = store->ctx();
+ CephContext *cct = driver->ctx();
if (op_state.max_buckets_specified) {
user_info.max_buckets = op_state.get_max_buckets();
} else {
rgw::sal::BucketList buckets;
string marker;
- CephContext *cct = store->ctx();
+ CephContext *cct = driver->ctx();
size_t max_buckets = cct->_conf->rgw_list_buckets_max_chunk;
do {
ret = user->list_buckets(dpp, marker, string(), max_buckets, false, buckets, y);
if (!op_email.empty()) {
// make sure we are not adding a duplicate email
if (old_email != op_email) {
- ret = store->get_user_by_email(dpp, op_email, y, &duplicate_check);
+ ret = driver->get_user_by_email(dpp, op_email, y, &duplicate_check);
if (ret >= 0 && duplicate_check->get_id().compare(user_id) != 0) {
set_err_msg(err_msg, "cannot add duplicate email");
return -ERR_EMAIL_EXIST;
}
string marker;
- CephContext *cct = store->ctx();
+ CephContext *cct = driver->ctx();
size_t max_buckets = cct->_conf->rgw_list_buckets_max_chunk;
- std::unique_ptr<rgw::sal::User> user = store->get_user(user_id);
+ std::unique_ptr<rgw::sal::User> user = driver->get_user(user_id);
do {
ret = user->list_buckets(dpp, marker, string(), max_buckets, false, buckets, y);
if (ret < 0) {
marker = iter->first;
}
- ret = store->set_buckets_enabled(dpp, bucket_names, !suspended);
+ ret = driver->set_buckets_enabled(dpp, bucket_names, !suspended);
if (ret < 0) {
set_err_msg(err_msg, "failed to modify bucket");
return ret;
op_state.max_entries = 1000;
}
- int ret = store->meta_list_keys_init(dpp, metadata_key, op_state.marker, &handle);
+ int ret = driver->meta_list_keys_init(dpp, metadata_key, op_state.marker, &handle);
if (ret < 0) {
return ret;
}
do {
std::list<std::string> keys;
left = op_state.max_entries - count;
- ret = store->meta_list_keys_next(dpp, handle, left, keys, &truncated);
+ ret = driver->meta_list_keys_next(dpp, handle, left, keys, &truncated);
if (ret < 0 && ret != -ENOENT) {
return ret;
} if (ret != -ENOENT) {
formatter->dump_bool("truncated", truncated);
formatter->dump_int("count", count);
if (truncated) {
- formatter->dump_string("marker", store->meta_get_marker(handle));
+ formatter->dump_string("marker", driver->meta_get_marker(handle));
}
// close result object section
formatter->close_section();
- store->meta_list_keys_complete(handle);
+ driver->meta_list_keys_complete(handle);
flusher.flush();
return 0;
}
-int RGWUserAdminOp_User::list(const DoutPrefixProvider *dpp, rgw::sal::Store* store, RGWUserAdminOpState& op_state,
+int RGWUserAdminOp_User::list(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher)
{
RGWUser user;
- int ret = user.init_storage(store);
+ int ret = user.init_storage(driver);
if (ret < 0)
return ret;
}
int RGWUserAdminOp_User::info(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store, RGWUserAdminOpState& op_state,
+ rgw::sal::Driver* driver, RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher,
optional_yield y)
{
RGWUser user;
std::unique_ptr<rgw::sal::User> ruser;
- int ret = user.init(dpp, store, op_state, y);
+ int ret = user.init(dpp, driver, op_state, y);
if (ret < 0)
return ret;
if (ret < 0)
return ret;
- ruser = store->get_user(info.user_id);
+ ruser = driver->get_user(info.user_id);
if (op_state.sync_stats) {
- ret = rgw_user_sync_all_stats(dpp, store, ruser.get(), y);
+ ret = rgw_user_sync_all_stats(dpp, driver, ruser.get(), y);
if (ret < 0) {
return ret;
}
}
int RGWUserAdminOp_User::create(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher, optional_yield y)
{
RGWUserInfo info;
RGWUser user;
- int ret = user.init(dpp, store, op_state, y);
+ int ret = user.init(dpp, driver, op_state, y);
if (ret < 0)
return ret;
}
int RGWUserAdminOp_User::modify(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher, optional_yield y)
{
RGWUserInfo info;
RGWUser user;
- int ret = user.init(dpp, store, op_state, y);
+ int ret = user.init(dpp, driver, op_state, y);
if (ret < 0)
return ret;
Formatter *formatter = flusher.get_formatter();
}
int RGWUserAdminOp_User::remove(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store, RGWUserAdminOpState& op_state,
+ rgw::sal::Driver* driver, RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher, optional_yield y)
{
RGWUserInfo info;
RGWUser user;
- int ret = user.init(dpp, store, op_state, y);
+ int ret = user.init(dpp, driver, op_state, y);
if (ret < 0)
return ret;
}
int RGWUserAdminOp_Subuser::create(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher,
optional_yield y)
{
RGWUserInfo info;
RGWUser user;
- int ret = user.init(dpp, store, op_state, y);
+ int ret = user.init(dpp, driver, op_state, y);
if (ret < 0)
return ret;
}
int RGWUserAdminOp_Subuser::modify(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store, RGWUserAdminOpState& op_state,
+ rgw::sal::Driver* driver, RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher, optional_yield y)
{
RGWUserInfo info;
RGWUser user;
- int ret = user.init(dpp, store, op_state, y);
+ int ret = user.init(dpp, driver, op_state, y);
if (ret < 0)
return ret;
}
int RGWUserAdminOp_Subuser::remove(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher,
optional_yield y)
{
RGWUserInfo info;
RGWUser user;
- int ret = user.init(dpp, store, op_state, y);
+ int ret = user.init(dpp, driver, op_state, y);
if (ret < 0)
return ret;
}
int RGWUserAdminOp_Key::create(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store, RGWUserAdminOpState& op_state,
+ rgw::sal::Driver* driver, RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher,
optional_yield y)
{
RGWUserInfo info;
RGWUser user;
- int ret = user.init(dpp, store, op_state, y);
+ int ret = user.init(dpp, driver, op_state, y);
if (ret < 0)
return ret;
}
int RGWUserAdminOp_Key::remove(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher,
optional_yield y)
{
RGWUserInfo info;
RGWUser user;
- int ret = user.init(dpp, store, op_state, y);
+ int ret = user.init(dpp, driver, op_state, y);
if (ret < 0)
return ret;
}
int RGWUserAdminOp_Caps::add(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher, optional_yield y)
{
RGWUserInfo info;
RGWUser user;
- int ret = user.init(dpp, store, op_state, y);
+ int ret = user.init(dpp, driver, op_state, y);
if (ret < 0)
return ret;
int RGWUserAdminOp_Caps::remove(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher, optional_yield y)
{
RGWUserInfo info;
RGWUser user;
- int ret = user.init(dpp, store, op_state, y);
+ int ret = user.init(dpp, driver, op_state, y);
if (ret < 0)
return ret;
uint64_t count;
};
-extern int rgw_user_sync_all_stats(const DoutPrefixProvider *dpp, rgw::sal::Store* store, rgw::sal::User* user, optional_yield y);
+extern int rgw_user_sync_all_stats(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, rgw::sal::User* user, optional_yield y);
extern int rgw_user_get_all_buckets_stats(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store, rgw::sal::User* user,
+ rgw::sal::Driver* driver, rgw::sal::User* user,
std::map<std::string, bucket_meta_entry>& buckets_usage_map, optional_yield y);
/**
std::string generate_subuser();
- RGWUserAdminOpState(rgw::sal::Store* store);
+ RGWUserAdminOpState(rgw::sal::Driver* driver);
};
class RGWUser;
std::map<std::string, int, ltstr_nocase> key_type_map;
rgw_user user_id;
- rgw::sal::Store* store{nullptr};
+ rgw::sal::Driver* driver{nullptr};
std::map<std::string, RGWAccessKey> *swift_keys{nullptr};
std::map<std::string, RGWAccessKey> *access_keys{nullptr};
RGWUser *user{nullptr};
rgw_user user_id;
- rgw::sal::Store* store{nullptr};
+ rgw::sal::Driver* driver{nullptr};
bool subusers_allowed{false};
std::map<std::string, RGWSubUser> *subuser_map{nullptr};
private:
RGWUserInfo old_info;
- rgw::sal::Store* store{nullptr};
+ rgw::sal::Driver* driver{nullptr};
rgw_user user_id;
bool info_stored{false};
public:
RGWUser();
- int init(const DoutPrefixProvider *dpp, rgw::sal::Store* storage, RGWUserAdminOpState& op_state,
+ int init(const DoutPrefixProvider *dpp, rgw::sal::Driver* storage, RGWUserAdminOpState& op_state,
optional_yield y);
- int init_storage(rgw::sal::Store* storage);
+ int init_storage(rgw::sal::Driver* storage);
int init(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y);
int init_members(RGWUserAdminOpState& op_state);
- rgw::sal::Store* get_store() { return store; }
+ rgw::sal::Driver* get_driver() { return driver; }
/* API Contracted Members */
RGWUserCapPool caps;
class RGWUserAdminOp_User
{
public:
- static int list(const DoutPrefixProvider *dpp, rgw::sal::Store* store,
+ static int list(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher);
static int info(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher,
optional_yield y);
static int create(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher,
optional_yield y);
static int modify(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y);
- static int remove(const DoutPrefixProvider *dpp, rgw::sal::Store* store,
+ static int remove(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y);
};
{
public:
static int create(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher,
optional_yield y);
static int modify(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher,
optional_yield y);
static int remove(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher,
optional_yield y);
};
class RGWUserAdminOp_Key
{
public:
- static int create(const DoutPrefixProvider *dpp, rgw::sal::Store* store,
+ static int create(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher,
optional_yield y);
static int remove(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher,
optional_yield y);
};
{
public:
static int add(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher,
optional_yield y);
static int remove(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher,
optional_yield y);
};
}
int commit_period(const DoutPrefixProvider* dpp, optional_yield y,
- sal::ConfigStore* cfgstore, sal::Store* store,
+ sal::ConfigStore* cfgstore, sal::Driver* driver,
RGWRealm& realm, sal::RealmWriter& realm_writer,
const RGWPeriod& current_period,
RGWPeriod& info, std::ostream& error_stream,
bool force_if_stale)
{
- auto zone_svc = static_cast<rgw::sal::RadosStore*>(store)->svc()->zone; // XXX
+ auto zone_svc = static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone; // XXX
ldpp_dout(dpp, 20) << __func__ << " realm " << realm.id
<< " period " << current_period.id << dendl;
// did the master zone change?
if (info.master_zone != current_period.master_zone) {
// store the current metadata sync status in the period
- int r = info.update_sync_status(dpp, store, current_period,
+ int r = info.update_sync_status(dpp, driver, current_period,
error_stream, force_if_stale);
if (r < 0) {
ldpp_dout(dpp, 0) << "failed to update metadata sync status: "
// gather the metadata sync status for each shard; only for use on master zone
int update_sync_status(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
const RGWPeriod ¤t_period,
std::ostream& error_stream, bool force_if_stale);
// commit a staging period; only for use on master zone
int commit(const DoutPrefixProvider *dpp,
- rgw::sal::Store* store,
+ rgw::sal::Driver* driver,
RGWRealm& realm, const RGWPeriod ¤t_period,
std::ostream& error_stream, optional_yield y,
bool force_if_stale = false);
/// Validates the given 'staging' period and tries to commit it as the
/// realm's new current period.
int commit_period(const DoutPrefixProvider* dpp, optional_yield y,
- sal::ConfigStore* cfgstore, sal::Store* store,
+ sal::ConfigStore* cfgstore, sal::Driver* driver,
RGWRealm& realm, sal::RealmWriter& realm_writer,
const RGWPeriod& current_period,
RGWPeriod& info, std::ostream& error_stream,
<< std::endl;
}
RGWPutObjRequest req(cct,
- g_rgwlib->get_store()->get_user(fs_private->get_user()->user_id),
+ g_rgwlib->get_driver()->get_user(fs_private->get_user()->user_id),
bucket_name, obj_name, bl);
int rc = g_rgwlib->get_fe()->execute_req(&req);
int rc2 = req.get_ret();