This is the first part of Project Zipper, the Store Abstraction Layer.
It introduces the basic framework, and wraps RGWRados in RGWRadosStore.
The goal over the next few weeks is to do the same for user, bucket, and
object. This will make most of the remaining users of RGWRados wrapped
in SAL classes, allowing it to be completely absorbed into the private
RGWRadosStore. This will also expose all the APIs that need to be
pushed up to higher layers in the SAL.
Signed-off-by: Daniel Gryniewicz <dang@redhat.com>
rgw_rest_role.cc
rgw_rest_s3.cc
rgw_role.cc
+ rgw_sal.cc
rgw_string.cc
rgw_tag.cc
rgw_tag_s3.cc
RGWObjectCtx rados_ctx(store, s); // XXX holds std::map
- auto sysobj_ctx = store->svc.sysobj->init_obj_ctx();
+ auto sysobj_ctx = store->svc()->sysobj->init_obj_ctx();
s->sysobj_ctx = &sysobj_ctx;
/* XXX and -then- stash req_state pointers everywhere they are needed */
<< e.what() << dendl;
}
if (should_log) {
- rgw_log_op(store, nullptr /* !rest */, s,
+ rgw_log_op(store->getRados(), nullptr /* !rest */, s,
(op ? op->name() : "unknown"), olog);
}
r = rgw_perf_start(g_ceph_context);
- rgw_rest_init(g_ceph_context, store, store->svc.zone->get_zonegroup());
+ rgw_rest_init(g_ceph_context, store->svc()->zone->get_zonegroup());
mutex.lock();
init_timer.cancel_all_events();
ldh->init();
ldh->bind();
- rgw_log_usage_init(g_ceph_context, store);
+ rgw_log_usage_init(g_ceph_context, store->getRados());
// XXX ex-RGWRESTMgr_lib, mgr->set_logging(true)
fe->run();
- r = store->register_to_service_map("rgw-nfs", service_map_meta);
+ r = store->getRados()->register_to_service_map("rgw-nfs", service_map_meta);
if (r < 0) {
derr << "ERROR: failed to register to service map: " << cpp_strerror(-r) << dendl;
/* ignore error */
return 0;
} /* RGWLib::stop() */
- int RGWLibIO::set_uid(RGWRados *store, const rgw_user& uid)
+ int RGWLibIO::set_uid(rgw::sal::RGWRadosStore *store, const rgw_user& uid)
{
- int ret = store->ctl.user->get_info_by_uid(uid, &user_info, null_yield);
+ int ret = store->ctl()->user->get_info_by_uid(uid, &user_info, null_yield);
if (ret < 0) {
derr << "ERROR: failed reading user info: uid=" << uid << " ret="
<< ret << dendl;
#define SECRET_KEY_LEN 40
#define PUBLIC_ID_LEN 20
-static RGWRados *store = NULL;
+static rgw::sal::RGWRadosStore *store = NULL;
static const DoutPrefixProvider* dpp() {
struct GlobalPrefix : public DoutPrefixProvider {
}
class StoreDestructor {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
public:
- explicit StoreDestructor(RGWRados *_s) : store(_s) {}
+ explicit StoreDestructor(rgw::sal::RGWRadosStore *_s) : store(_s) {}
~StoreDestructor() {
RGWStoreManager::close_storage(store);
rgw_http_client_cleanup();
RGWBucketInfo& bucket_info, rgw_bucket& bucket, map<string, bufferlist> *pattrs = nullptr)
{
if (!bucket_name.empty()) {
- auto obj_ctx = store->svc.sysobj->init_obj_ctx();
+ auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
int r;
if (bucket_id.empty()) {
- r = store->get_bucket_info(obj_ctx, tenant_name, bucket_name, bucket_info, nullptr, null_yield, pattrs);
+ r = store->getRados()->get_bucket_info(obj_ctx, tenant_name, bucket_name, bucket_info, nullptr, null_yield, pattrs);
} else {
string bucket_instance_id = bucket_name + ":" + bucket_id;
- r = store->get_bucket_instance_info(obj_ctx, bucket_instance_id, bucket_info, NULL, pattrs, null_yield);
+ r = store->getRados()->get_bucket_instance_info(obj_ctx, bucket_instance_id, bucket_info, NULL, pattrs, null_yield);
}
if (r < 0) {
cerr << "could not get bucket info for bucket=" << bucket_name << std::endl;
}
}
-int set_bucket_quota(RGWRados *store, int opt_cmd,
+int set_bucket_quota(rgw::sal::RGWRadosStore *store, int opt_cmd,
const string& tenant_name, const string& bucket_name,
int64_t max_size, int64_t max_objects,
bool have_max_size, bool have_max_objects)
{
RGWBucketInfo bucket_info;
map<string, bufferlist> attrs;
- auto obj_ctx = store->svc.sysobj->init_obj_ctx();
- int r = store->get_bucket_info(obj_ctx, tenant_name, bucket_name, bucket_info, NULL, null_yield, &attrs);
+ auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
+ int r = store->getRados()->get_bucket_info(obj_ctx, tenant_name, bucket_name, bucket_info, NULL, null_yield, &attrs);
if (r < 0) {
cerr << "could not get bucket info for bucket=" << bucket_name << ": " << cpp_strerror(-r) << std::endl;
return -r;
set_quota_info(bucket_info.quota, opt_cmd, max_size, max_objects, have_max_size, have_max_objects);
- r = store->put_bucket_instance_info(bucket_info, false, real_time(), &attrs);
+ r = store->getRados()->put_bucket_instance_info(bucket_info, false, real_time(), &attrs);
if (r < 0) {
cerr << "ERROR: failed writing bucket instance info: " << cpp_strerror(-r) << std::endl;
return -r;
return rgw_obj_key::oid_to_key_in_ns(name, &k, ns);
}
-int check_min_obj_stripe_size(RGWRados *store, RGWBucketInfo& bucket_info, rgw_obj& obj, uint64_t min_stripe_size, bool *need_rewrite)
+int check_min_obj_stripe_size(rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket_info, rgw_obj& obj, uint64_t min_stripe_size, bool *need_rewrite)
{
map<string, bufferlist> attrs;
uint64_t obj_size;
RGWObjectCtx obj_ctx(store);
- RGWRados::Object op_target(store, bucket_info, obj_ctx, obj);
+ RGWRados::Object op_target(store->getRados(), bucket_info, obj_ctx, obj);
RGWRados::Object::Read read_op(&op_target);
read_op.params.attrs = &attrs;
RGWObjectCtx obj_ctx(store);
- RGWRados::Object op_target(store, bucket_info, obj_ctx, obj);
+ RGWRados::Object op_target(store->getRados(), bucket_info, obj_ctx, obj);
RGWRados::Object::Read read_op(&op_target);
int ret = read_op.prepare(null_yield);
string status = (needs_fixing ? "needs_fixing" : "ok");
if ((needs_fixing || remove_bad) && fix) {
- ret = store->fix_head_obj_locator(bucket_info, needs_fixing, remove_bad, key);
+ ret = store->getRados()->fix_head_obj_locator(bucket_info, needs_fixing, remove_bad, key);
if (ret < 0) {
cerr << "ERROR: fix_head_object_locator() returned ret=" << ret << std::endl;
goto done;
bool needs_fixing;
string status;
- int ret = store->fix_tail_obj_locator(bucket_info, key, fix, &needs_fixing, null_yield);
+ int ret = store->getRados()->fix_tail_obj_locator(bucket_info, key, fix, &needs_fixing, null_yield);
if (ret < 0) {
cerr << "ERROR: fix_tail_object_locator_underscore() returned ret=" << ret << std::endl;
status = "failed";
map<string, bool> common_prefixes;
string ns;
- RGWRados::Bucket target(store, bucket_info);
+ RGWRados::Bucket target(store->getRados(), bucket_info);
RGWRados::Bucket::List list_op(&target);
string marker;
return 0;
}
-int set_bucket_sync_enabled(RGWRados *store, int opt_cmd, const string& tenant_name, const string& bucket_name)
+int set_bucket_sync_enabled(rgw::sal::RGWRadosStore *store, int opt_cmd, const string& tenant_name, const string& bucket_name)
{
RGWBucketInfo bucket_info;
map<string, bufferlist> attrs;
- auto obj_ctx = store->svc.sysobj->init_obj_ctx();
+ auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
- int r = store->get_bucket_info(obj_ctx, tenant_name, bucket_name, bucket_info, NULL, null_yield, &attrs);
+ int r = store->getRados()->get_bucket_info(obj_ctx, tenant_name, bucket_name, bucket_info, NULL, null_yield, &attrs);
if (r < 0) {
cerr << "could not get bucket info for bucket=" << bucket_name << ": " << cpp_strerror(-r) << std::endl;
return -r;
bucket_info.flags |= BUCKET_DATASYNC_DISABLED;
}
- r = store->put_bucket_instance_info(bucket_info, false, real_time(), &attrs);
+ r = store->getRados()->put_bucket_instance_info(bucket_info, false, real_time(), &attrs);
if (r < 0) {
cerr << "ERROR: failed writing bucket instance info: " << cpp_strerror(-r) << std::endl;
return -r;
int shard_id = bucket_info.num_shards? 0 : -1;
if (opt_cmd == OPT_BUCKET_SYNC_DISABLE) {
- r = store->svc.bilog_rados->log_stop(bucket_info, -1);
+ r = store->svc()->bilog_rados->log_stop(bucket_info, -1);
if (r < 0) {
lderr(store->ctx()) << "ERROR: failed writing stop bilog" << dendl;
return r;
}
} else {
- r = store->svc.bilog_rados->log_start(bucket_info, -1);
+ r = store->svc()->bilog_rados->log_start(bucket_info, -1);
if (r < 0) {
lderr(store->ctx()) << "ERROR: failed writing resync bilog" << dendl;
return r;
}
for (int i = 0; i < shards_num; ++i, ++shard_id) {
- r = store->svc.datalog_rados->add_entry(bucket_info.bucket, shard_id);
+ r = store->svc()->datalog_rados->add_entry(bucket_info.bucket, shard_id);
if (r < 0) {
lderr(store->ctx()) << "ERROR: failed writing data log" << dendl;
return r;
/// search for a matching zone/zonegroup id and return a connection if found
-static boost::optional<RGWRESTConn> get_remote_conn(RGWRados *store,
+static boost::optional<RGWRESTConn> get_remote_conn(rgw::sal::RGWRadosStore *store,
const RGWZoneGroup& zonegroup,
const std::string& remote)
{
boost::optional<RGWRESTConn> conn;
if (remote == zonegroup.get_id()) {
- conn.emplace(store->ctx(), store->svc.zone, remote, zonegroup.endpoints);
+ conn.emplace(store->ctx(), store->svc()->zone, remote, zonegroup.endpoints);
} else {
for (const auto& z : zonegroup.zones) {
const auto& zone = z.second;
if (remote == zone.id) {
- conn.emplace(store->ctx(), store->svc.zone, remote, zone.endpoints);
+ conn.emplace(store->ctx(), store->svc()->zone, remote, zone.endpoints);
break;
}
}
}
/// search each zonegroup for a connection
-static boost::optional<RGWRESTConn> get_remote_conn(RGWRados *store,
+static boost::optional<RGWRESTConn> get_remote_conn(rgw::sal::RGWRadosStore *store,
const RGWPeriodMap& period_map,
const std::string& remote)
{
return -EINVAL;
}
// are we the period's master zone?
- if (store->svc.zone->get_zone_params().get_id() == master_zone) {
+ if (store->svc()->zone->get_zone_params().get_id() == master_zone) {
// read the current period
RGWPeriod current_period;
- int ret = current_period.init(g_ceph_context, store->svc.sysobj, realm.get_id());
+ int ret = current_period.init(g_ceph_context, store->svc()->sysobj, realm.get_id());
if (ret < 0) {
cerr << "Error initializing current period: "
<< cpp_strerror(-ret) << std::endl;
Formatter *formatter, bool force)
{
RGWRealm realm(realm_id, realm_name);
- int ret = realm.init(g_ceph_context, store->svc.sysobj);
+ int ret = realm.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0 ) {
cerr << "Error initializing realm " << cpp_strerror(-ret) << std::endl;
return ret;
epoch = atoi(period_epoch.c_str());
}
RGWPeriod period(period_id, epoch);
- ret = period.init(g_ceph_context, store->svc.sysobj, realm.get_id());
+ ret = period.init(g_ceph_context, store->svc()->sysobj, realm.get_id());
if (ret < 0) {
cerr << "period init failed: " << cpp_strerror(-ret) << std::endl;
return ret;
cerr << "request failed: " << cpp_strerror(-ret) << std::endl;
return ret;
}
- ret = period->init(g_ceph_context, store->svc.sysobj, false);
+ ret = period->init(g_ceph_context, store->svc()->sysobj, false);
if (ret < 0) {
cerr << "faile to init period " << cpp_strerror(-ret) << std::endl;
return ret;
return 0;
}
-static int read_current_period_id(RGWRados* store, const std::string& realm_id,
+static int read_current_period_id(rgw::sal::RGWRadosStore* store, const std::string& realm_id,
const std::string& realm_name,
std::string* period_id)
{
RGWRealm realm(realm_id, realm_name);
- int ret = realm.init(g_ceph_context, store->svc.sysobj);
+ int ret = realm.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
std::cerr << "failed to read realm: " << cpp_strerror(-ret) << std::endl;
return ret;
static void get_md_sync_status(list<string>& status)
{
- RGWMetaSyncStatusManager sync(store, store->svc.rados->get_async_processor());
+ RGWMetaSyncStatusManager sync(store, store->svc()->rados->get_async_processor());
int ret = sync.init();
if (ret < 0) {
push_ss(ss, status) << "incremental sync: " << num_inc << "/" << total_shards << " shards";
map<int, RGWMetadataLogInfo> master_shards_info;
- string master_period = store->svc.zone->get_current_period_id();
+ string master_period = store->svc()->zone->get_current_period_id();
ret = sync.read_master_log_shards_info(master_period, &master_shards_info);
if (ret < 0) {
RGWZone *sz;
- if (!store->svc.zone->find_zone_by_id(source_zone, &sz)) {
+ if (!store->svc()->zone->find_zone_by_id(source_zone, &sz)) {
push_ss(ss, status, tab) << string("zone not found");
flush_ss(ss, status);
return;
}
- if (!store->svc.zone->zone_syncs_from(store->svc.zone->get_zone(), *sz)) {
+ if (!store->svc()->zone->zone_syncs_from(store->svc()->zone->get_zone(), *sz)) {
push_ss(ss, status, tab) << string("not syncing from zone");
flush_ss(ss, status);
return;
}
- RGWDataSyncStatusManager sync(store, store->svc.rados->get_async_processor(), source_zone, nullptr);
+ RGWDataSyncStatusManager sync(store, store->svc()->rados->get_async_processor(), source_zone, nullptr);
int ret = sync.init();
if (ret < 0) {
static void sync_status(Formatter *formatter)
{
- const RGWRealm& realm = store->svc.zone->get_realm();
- const RGWZoneGroup& zonegroup = store->svc.zone->get_zonegroup();
- const RGWZone& zone = store->svc.zone->get_zone();
+ const RGWRealm& realm = store->svc()->zone->get_realm();
+ const RGWZoneGroup& zonegroup = store->svc()->zone->get_zonegroup();
+ const RGWZone& zone = store->svc()->zone->get_zone();
int width = 15;
list<string> md_status;
- if (store->svc.zone->is_meta_master()) {
+ if (store->svc()->zone->is_meta_master()) {
md_status.push_back("no sync (zone is master)");
} else {
get_md_sync_status(md_status);
list<string> data_status;
- auto& zone_conn_map = store->svc.zone->get_zone_conn_map();
+ auto& zone_conn_map = store->svc()->zone->get_zone_conn_map();
for (auto iter : zone_conn_map) {
const string& source_id = iter.first;
string source_str = "source: ";
string s = source_str + source_id;
RGWZone *sz;
- if (store->svc.zone->find_zone_by_id(source_id, &sz)) {
+ if (store->svc()->zone->find_zone_by_id(source_id, &sz)) {
s += string(" (") + sz->name + ")";
}
data_status.push_back(s);
return out << std::setw(h.w) << h.header << std::setw(1) << ' ';
}
-static int remote_bilog_markers(RGWRados *store, const RGWZone& source,
+static int remote_bilog_markers(rgw::sal::RGWRadosStore *store, const RGWZone& source,
RGWRESTConn *conn, const RGWBucketInfo& info,
BucketIndexShardsManager *markers)
{
return 0;
}
-static int bucket_source_sync_status(RGWRados *store, const RGWZone& zone,
+static int bucket_source_sync_status(rgw::sal::RGWRadosStore *store, const RGWZone& zone,
const RGWZone& source, RGWRESTConn *conn,
const RGWBucketInfo& bucket_info,
int width, std::ostream& out)
return 0;
}
-static int bucket_sync_status(RGWRados *store, const RGWBucketInfo& info,
+static int bucket_sync_status(rgw::sal::RGWRadosStore *store, const RGWBucketInfo& info,
const std::string& source_zone_id,
std::ostream& out)
{
- const RGWRealm& realm = store->svc.zone->get_realm();
- const RGWZoneGroup& zonegroup = store->svc.zone->get_zonegroup();
- const RGWZone& zone = store->svc.zone->get_zone();
+ const RGWRealm& realm = store->svc()->zone->get_realm();
+ const RGWZoneGroup& zonegroup = store->svc()->zone->get_zonegroup();
+ const RGWZone& zone = store->svc()->zone->get_zone();
constexpr int width = 15;
out << indented{width, "realm"} << realm.get_id() << " (" << realm.get_name() << ")\n";
return 0;
}
- auto& zone_conn_map = store->svc.zone->get_zone_conn_map();
+ auto& zone_conn_map = store->svc()->zone->get_zone_conn_map();
if (!source_zone_id.empty()) {
auto z = zonegroup.zones.find(source_zone_id);
if (z == zonegroup.zones.end()) {
static int check_pool_support_omap(const rgw_pool& pool)
{
librados::IoCtx io_ctx;
- int ret = store->get_rados_handle()->ioctx_create(pool.to_str().c_str(), io_ctx);
+ int ret = store->getRados()->get_rados_handle()->ioctx_create(pool.to_str().c_str(), io_ctx);
if (ret < 0) {
// the pool may not exist at this moment, we have no way to check if it supports omap.
return 0;
return 0;
}
-int check_reshard_bucket_params(RGWRados *store,
+int check_reshard_bucket_params(rgw::sal::RGWRadosStore *store,
const string& bucket_name,
const string& tenant,
const string& bucket_id,
return -EINVAL;
}
- if (num_shards > (int)store->get_max_bucket_shards()) {
- cerr << "ERROR: num_shards too high, max value: " << store->get_max_bucket_shards() << std::endl;
+ if (num_shards > (int)store->getRados()->get_max_bucket_shards()) {
+ cerr << "ERROR: num_shards too high, max value: " << store->getRados()->get_max_bucket_shards() << std::endl;
return -EINVAL;
}
return 0;
}
-int create_new_bucket_instance(RGWRados *store,
+int create_new_bucket_instance(rgw::sal::RGWRadosStore *store,
int new_num_shards,
const RGWBucketInfo& bucket_info,
map<string, bufferlist>& attrs,
RGWBucketInfo& new_bucket_info)
{
- store->create_bucket_id(&new_bucket_info.bucket.bucket_id);
+ store->getRados()->create_bucket_id(&new_bucket_info.bucket.bucket_id);
new_bucket_info.num_shards = new_num_shards;
new_bucket_info.objv_tracker.clear();
- int ret = store->svc.bi->init_index(new_bucket_info);
+ int ret = store->svc()->bi->init_index(new_bucket_info);
if (ret < 0) {
cerr << "ERROR: failed to init new bucket indexes: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
- ret = store->put_bucket_instance_info(new_bucket_info, true, real_time(), &attrs);
+ ret = store->getRados()->put_bucket_instance_info(new_bucket_info, true, real_time(), &attrs);
if (ret < 0) {
cerr << "ERROR: failed to store new bucket instance info: " << cpp_strerror(-ret) << std::endl;
return -ret;
shard_id);
// call cls_log_trim() until it returns -ENODATA
for (;;) {
- int ret = store->svc.cls->timelog.trim(oid, start_time, end_time,
+ int ret = store->svc()->cls->timelog.trim(oid, start_time, end_time,
start_marker, end_marker, nullptr,
null_yield);
if (ret == -ENODATA) {
// unreachable
}
-const string& get_tier_type(RGWRados *store) {
- return store->svc.zone->get_zone().tier_type;
+const string& get_tier_type(rgw::sal::RGWRadosStore *store) {
+ return store->svc()->zone->get_zone().tier_type;
}
int main(int argc, const char **argv)
}
if (!source_zone_name.empty()) {
- if (!store->svc.zone->find_zone_id_by_name(source_zone_name, &source_zone)) {
+ if (!store->svc()->zone->find_zone_id_by_name(source_zone_name, &source_zone)) {
cerr << "WARNING: cannot find source zone id for name=" << source_zone_name << std::endl;
source_zone = source_zone_name;
}
return EINVAL;
}
RGWPeriod period(period_id);
- int ret = period.init(g_ceph_context, store->svc.sysobj);
+ int ret = period.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
cerr << "period.init failed: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
if (staging) {
RGWRealm realm(realm_id, realm_name);
- int ret = realm.init(g_ceph_context, store->svc.sysobj);
+ int ret = realm.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0 ) {
cerr << "Error initializing realm " << cpp_strerror(-ret) << std::endl;
return -ret;
epoch = 1;
}
RGWPeriod period(period_id, epoch);
- int ret = period.init(g_ceph_context, store->svc.sysobj, realm_id, realm_name);
+ int ret = period.init(g_ceph_context, store->svc()->sysobj, realm_id, realm_name);
if (ret < 0) {
cerr << "period init failed: " << cpp_strerror(-ret) << std::endl;
return -ret;
case OPT_PERIOD_LIST:
{
list<string> periods;
- int ret = store->svc.zone->list_periods(periods);
+ int ret = store->svc()->zone->list_periods(periods);
if (ret < 0) {
cerr << "failed to list periods: " << cpp_strerror(-ret) << std::endl;
return -ret;
if (url.empty()) {
// load current period for endpoints
RGWRealm realm(realm_id, realm_name);
- int ret = realm.init(g_ceph_context, store->svc.sysobj);
+ int ret = realm.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
cerr << "failed to init realm: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
RGWPeriod current_period(realm.get_current_period());
- ret = current_period.init(g_ceph_context, store->svc.sysobj);
+ ret = current_period.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
cerr << "failed to init current period: " << cpp_strerror(-ret) << std::endl;
return -ret;
case OPT_GLOBAL_QUOTA_DISABLE:
{
if (realm_id.empty()) {
- RGWRealm realm(g_ceph_context, store->svc.sysobj);
+ RGWRealm realm(g_ceph_context, store->svc()->sysobj);
if (!realm_name.empty()) {
// look up realm_id for the given realm_name
int ret = realm.read_id(realm_name, realm_id);
}
RGWPeriodConfig period_config;
- int ret = period_config.read(store->svc.sysobj, realm_id);
+ int ret = period_config.read(store->svc()->sysobj, realm_id);
if (ret < 0 && ret != -ENOENT) {
cerr << "ERROR: failed to read period config: "
<< cpp_strerror(-ret) << std::endl;
if (opt_cmd != OPT_GLOBAL_QUOTA_GET) {
// write the modified period config
- ret = period_config.write(store->svc.sysobj, realm_id);
+ ret = period_config.write(store->svc()->sysobj, realm_id);
if (ret < 0) {
cerr << "ERROR: failed to write period config: "
<< cpp_strerror(-ret) << std::endl;
return EINVAL;
}
- RGWRealm realm(realm_name, g_ceph_context, store->svc.sysobj);
+ RGWRealm realm(realm_name, g_ceph_context, store->svc()->sysobj);
int ret = realm.create();
if (ret < 0) {
cerr << "ERROR: couldn't create realm " << realm_name << ": " << cpp_strerror(-ret) << std::endl;
cerr << "missing realm name or id" << std::endl;
return EINVAL;
}
- int ret = realm.init(g_ceph_context, store->svc.sysobj);
+ int ret = realm.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
cerr << "realm.init failed: " << cpp_strerror(-ret) << std::endl;
return -ret;
case OPT_REALM_GET:
{
RGWRealm realm(realm_id, realm_name);
- int ret = realm.init(g_ceph_context, store->svc.sysobj);
+ int ret = realm.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
if (ret == -ENOENT && realm_name.empty() && realm_id.empty()) {
cerr << "missing realm name or id, or default realm not found" << std::endl;
break;
case OPT_REALM_GET_DEFAULT:
{
- RGWRealm realm(g_ceph_context, store->svc.sysobj);
+ RGWRealm realm(g_ceph_context, store->svc()->sysobj);
string default_id;
int ret = realm.read_default_id(default_id);
if (ret == -ENOENT) {
break;
case OPT_REALM_LIST:
{
- RGWRealm realm(g_ceph_context, store->svc.sysobj);
+ RGWRealm realm(g_ceph_context, store->svc()->sysobj);
string default_id;
int ret = realm.read_default_id(default_id);
if (ret < 0 && ret != -ENOENT) {
cerr << "could not determine default realm: " << cpp_strerror(-ret) << std::endl;
}
list<string> realms;
- ret = store->svc.zone->list_realms(realms);
+ ret = store->svc()->zone->list_realms(realms);
if (ret < 0) {
cerr << "failed to list realms: " << cpp_strerror(-ret) << std::endl;
return -ret;
return -ret;
}
list<string> periods;
- ret = store->svc.zone->list_periods(period_id, periods);
+ ret = store->svc()->zone->list_periods(period_id, periods);
if (ret < 0) {
cerr << "list periods failed: " << cpp_strerror(-ret) << std::endl;
return -ret;
cerr << "missing realm name or id" << std::endl;
return EINVAL;
}
- int ret = realm.init(g_ceph_context, store->svc.sysobj);
+ int ret = realm.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
cerr << "realm.init failed: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
RGWRealm realm(realm_id, realm_name);
bool new_realm = false;
- int ret = realm.init(g_ceph_context, store->svc.sysobj);
+ int ret = realm.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0 && ret != -ENOENT) {
cerr << "failed to init realm: " << cpp_strerror(-ret) << std::endl;
return -ret;
case OPT_REALM_DEFAULT:
{
RGWRealm realm(realm_id, realm_name);
- int ret = realm.init(g_ceph_context, store->svc.sysobj);
+ int ret = realm.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
cerr << "failed to init realm: " << cpp_strerror(-ret) << std::endl;
return -ret;
return -ret;
}
RGWRealm realm;
- realm.init(g_ceph_context, store->svc.sysobj, false);
+ realm.init(g_ceph_context, store->svc()->sysobj, false);
try {
decode_json_obj(realm, &p);
} catch (const JSONDecoder::err& e) {
}
RGWZoneGroup zonegroup(zonegroup_id,zonegroup_name);
- int ret = zonegroup.init(g_ceph_context, store->svc.sysobj);
+ int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
cerr << "failed to initialize zonegroup " << zonegroup_name << " id " << zonegroup_id << " :"
<< cpp_strerror(-ret) << std::endl;
return -ret;
}
RGWZoneParams zone(zone_id, zone_name);
- ret = zone.init(g_ceph_context, store->svc.sysobj);
+ ret = zone.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl;
return -ret;
endpoints, ptier_type,
psync_from_all, sync_from, sync_from_rm,
predirect_zone,
- store->svc.sync_modules->get_manager());
+ store->svc()->sync_modules->get_manager());
if (ret < 0) {
cerr << "failed to add zone " << zone_name << " to zonegroup " << zonegroup.get_name() << ": "
<< cpp_strerror(-ret) << std::endl;
return EINVAL;
}
RGWRealm realm(realm_id, realm_name);
- int ret = realm.init(g_ceph_context, store->svc.sysobj);
+ int ret = realm.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
cerr << "failed to init realm: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
- RGWZoneGroup zonegroup(zonegroup_name, is_master, g_ceph_context, store->svc.sysobj, realm.get_id(), endpoints);
+ RGWZoneGroup zonegroup(zonegroup_name, is_master, g_ceph_context, store->svc()->sysobj, realm.get_id(), endpoints);
zonegroup.api_name = (api_name.empty() ? zonegroup_name : api_name);
ret = zonegroup.create();
if (ret < 0) {
}
RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
- int ret = zonegroup.init(g_ceph_context, store->svc.sysobj);
+ int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
return -ret;
return EINVAL;
}
RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
- int ret = zonegroup.init(g_ceph_context, store->svc.sysobj);
+ int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
return -ret;
case OPT_ZONEGROUP_GET:
{
RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
- int ret = zonegroup.init(g_ceph_context, store->svc.sysobj);
+ int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
return -ret;
case OPT_ZONEGROUP_LIST:
{
RGWZoneGroup zonegroup;
- int ret = zonegroup.init(g_ceph_context, store->svc.sysobj, false);
+ int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj, false);
if (ret < 0) {
cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
list<string> zonegroups;
- ret = store->svc.zone->list_zonegroups(zonegroups);
+ ret = store->svc()->zone->list_zonegroups(zonegroups);
if (ret < 0) {
cerr << "failed to list zonegroups: " << cpp_strerror(-ret) << std::endl;
return -ret;
case OPT_ZONEGROUP_MODIFY:
{
RGWRealm realm(realm_id, realm_name);
- int ret = realm.init(g_ceph_context, store->svc.sysobj);
+ int ret = realm.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
cerr << "failed to init realm: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
- ret = zonegroup.init(g_ceph_context, store->svc.sysobj);
+ ret = zonegroup.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
return -ret;
need_update = true;
} else if (!realm_name.empty()) {
// get realm id from name
- RGWRealm realm{g_ceph_context, store->svc.sysobj};
+ RGWRealm realm{g_ceph_context, store->svc()->sysobj};
ret = realm.read_id(realm_name, zonegroup.realm_id);
if (ret < 0) {
cerr << "failed to find realm by name " << realm_name << std::endl;
case OPT_ZONEGROUP_SET:
{
RGWRealm realm(realm_id, realm_name);
- int ret = realm.init(g_ceph_context, store->svc.sysobj);
+ int ret = realm.init(g_ceph_context, store->svc()->sysobj);
bool default_realm_not_exist = (ret == -ENOENT && realm_id.empty() && realm_name.empty());
if (ret < 0 && !default_realm_not_exist ) {
}
RGWZoneGroup zonegroup;
- ret = zonegroup.init(g_ceph_context, store->svc.sysobj, false);
+ ret = zonegroup.init(g_ceph_context, store->svc()->sysobj, false);
if (ret < 0) {
cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
return -ret;
case OPT_ZONEGROUP_REMOVE:
{
RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
- int ret = zonegroup.init(g_ceph_context, store->svc.sysobj);
+ int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
return -ret;
return EINVAL;
}
RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
- int ret = zonegroup.init(g_ceph_context, store->svc.sysobj);
+ int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
return -ret;
case OPT_ZONEGROUP_PLACEMENT_LIST:
{
RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
- int ret = zonegroup.init(g_ceph_context, store->svc.sysobj);
+ int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
- int ret = zonegroup.init(g_ceph_context, store->svc.sysobj);
+ int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
return -ret;
RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
/* if the user didn't provide zonegroup info , create stand alone zone */
if (!zonegroup_id.empty() || !zonegroup_name.empty()) {
- ret = zonegroup.init(g_ceph_context, store->svc.sysobj);
+ ret = zonegroup.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
cerr << "unable to initialize zonegroup " << zonegroup_name << ": " << cpp_strerror(-ret) << std::endl;
return -ret;
}
RGWZoneParams zone(zone_id, zone_name);
- ret = zone.init(g_ceph_context, store->svc.sysobj, false);
+ ret = zone.init(g_ceph_context, store->svc()->sysobj, false);
if (ret < 0) {
cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl;
return -ret;
psync_from_all,
sync_from, sync_from_rm,
predirect_zone,
- store->svc.sync_modules->get_manager());
+ store->svc()->sync_modules->get_manager());
if (ret < 0) {
cerr << "failed to add zone " << zone_name << " to zonegroup " << zonegroup.get_name()
<< ": " << cpp_strerror(-ret) << std::endl;
case OPT_ZONE_DEFAULT:
{
RGWZoneGroup zonegroup(zonegroup_id,zonegroup_name);
- int ret = zonegroup.init(g_ceph_context, store->svc.sysobj);
+ int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
cerr << "WARNING: failed to initialize zonegroup " << zonegroup_name << std::endl;
}
return EINVAL;
}
RGWZoneParams zone(zone_id, zone_name);
- ret = zone.init(g_ceph_context, store->svc.sysobj);
+ ret = zone.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl;
return -ret;
return EINVAL;
}
RGWZoneParams zone(zone_id, zone_name);
- int ret = zone.init(g_ceph_context, store->svc.sysobj);
+ int ret = zone.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
list<string> zonegroups;
- ret = store->svc.zone->list_zonegroups(zonegroups);
+ ret = store->svc()->zone->list_zonegroups(zonegroups);
if (ret < 0) {
cerr << "failed to list zonegroups: " << cpp_strerror(-ret) << std::endl;
return -ret;
for (list<string>::iterator iter = zonegroups.begin(); iter != zonegroups.end(); ++iter) {
RGWZoneGroup zonegroup(string(), *iter);
- int ret = zonegroup.init(g_ceph_context, store->svc.sysobj);
+ int ret = zonegroup.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
cerr << "WARNING: failed to initialize zonegroup " << zonegroup_name << std::endl;
continue;
case OPT_ZONE_GET:
{
RGWZoneParams zone(zone_id, zone_name);
- int ret = zone.init(g_ceph_context, store->svc.sysobj);
+ int ret = zone.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl;
return -ret;
case OPT_ZONE_SET:
{
RGWZoneParams zone(zone_name);
- int ret = zone.init(g_ceph_context, store->svc.sysobj, false);
+ int ret = zone.init(g_ceph_context, store->svc()->sysobj, false);
if (ret < 0) {
return -ret;
}
if(zone.realm_id.empty()) {
RGWRealm realm(realm_id, realm_name);
- int ret = realm.init(g_ceph_context, store->svc.sysobj);
+ int ret = realm.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0 && ret != -ENOENT) {
cerr << "failed to init realm: " << cpp_strerror(-ret) << std::endl;
return -ret;
case OPT_ZONE_LIST:
{
list<string> zones;
- int ret = store->svc.zone->list_zones(zones);
+ int ret = store->svc()->zone->list_zones(zones);
if (ret < 0) {
cerr << "failed to list zones: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
RGWZoneParams zone;
- ret = zone.init(g_ceph_context, store->svc.sysobj, false);
+ ret = zone.init(g_ceph_context, store->svc()->sysobj, false);
if (ret < 0) {
cerr << "failed to init zone: " << cpp_strerror(-ret) << std::endl;
return -ret;
case OPT_ZONE_MODIFY:
{
RGWZoneParams zone(zone_id, zone_name);
- int ret = zone.init(g_ceph_context, store->svc.sysobj);
+ int ret = zone.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
cerr << "failed to init zone: " << cpp_strerror(-ret) << std::endl;
return -ret;
need_zone_update = true;
} else if (!realm_name.empty()) {
// get realm id from name
- RGWRealm realm{g_ceph_context, store->svc.sysobj};
+ RGWRealm realm{g_ceph_context, store->svc()->sysobj};
ret = realm.read_id(realm_name, zone.realm_id);
if (ret < 0) {
cerr << "failed to find realm by name " << realm_name << std::endl;
}
RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
- ret = zonegroup.init(g_ceph_context, store->svc.sysobj);
+ ret = zonegroup.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
return -ret;
endpoints, ptier_type,
psync_from_all, sync_from, sync_from_rm,
predirect_zone,
- store->svc.sync_modules->get_manager());
+ store->svc()->sync_modules->get_manager());
if (ret < 0) {
cerr << "failed to update zonegroup: " << cpp_strerror(-ret) << std::endl;
return -ret;
return EINVAL;
}
RGWZoneParams zone(zone_id,zone_name);
- int ret = zone.init(g_ceph_context, store->svc.sysobj);
+ int ret = zone.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl;
return -ret;
return -ret;
}
RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
- ret = zonegroup.init(g_ceph_context, store->svc.sysobj);
+ ret = zonegroup.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
cerr << "WARNING: failed to initialize zonegroup " << zonegroup_name << std::endl;
} else {
}
RGWZoneParams zone(zone_id, zone_name);
- int ret = zone.init(g_ceph_context, store->svc.sysobj);
+ int ret = zone.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
cerr << "failed to init zone: " << cpp_strerror(-ret) << std::endl;
return -ret;
if (opt_cmd == OPT_ZONE_PLACEMENT_ADD ||
opt_cmd == OPT_ZONE_PLACEMENT_MODIFY) {
RGWZoneGroup zonegroup(zonegroup_id, zonegroup_name);
- ret = zonegroup.init(g_ceph_context, store->svc.sysobj);
+ ret = zonegroup.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
cerr << "failed to init zonegroup: " << cpp_strerror(-ret) << std::endl;
return -ret;
case OPT_ZONE_PLACEMENT_LIST:
{
RGWZoneParams zone(zone_id, zone_name);
- int ret = zone.init(g_ceph_context, store->svc.sysobj);
+ int ret = zone.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
cerr << "unable to initialize zone: " << cpp_strerror(-ret) << std::endl;
return -ret;
return 0;
}
- bool non_master_cmd = (!store->svc.zone->is_meta_master() && !yes_i_really_mean_it);
+ bool non_master_cmd = (!store->svc()->zone->is_meta_master() && !yes_i_really_mean_it);
std::set<int> non_master_ops_list = {OPT_USER_CREATE, OPT_USER_RM,
OPT_USER_MODIFY, OPT_USER_ENABLE,
OPT_USER_SUSPEND, OPT_SUBUSER_CREATE,
// load the period
RGWPeriod period(period_id);
- int ret = period.init(g_ceph_context, store->svc.sysobj);
+ int ret = period.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
cerr << "period init failed: " << cpp_strerror(-ret) << std::endl;
return -ret;
{
// read realm and staging period
RGWRealm realm(realm_id, realm_name);
- int ret = realm.init(g_ceph_context, store->svc.sysobj);
+ int ret = realm.init(g_ceph_context, store->svc()->sysobj);
if (ret < 0) {
cerr << "Error initializing realm: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
RGWPeriod period(RGWPeriod::get_staging_id(realm.get_id()), 1);
- ret = period.init(g_ceph_context, store->svc.sysobj, realm.get_id());
+ ret = period.init(g_ceph_context, store->svc()->sysobj, realm.get_id());
if (ret < 0) {
cerr << "period init failed: " << cpp_strerror(-ret) << std::endl;
return -ret;
cerr << "failed to parse policy: " << e.what() << std::endl;
return -EINVAL;
}
- RGWRole role(g_ceph_context, store->pctl, role_name, path, assume_role_doc, tenant);
+ RGWRole role(g_ceph_context, store->getRados()->pctl, role_name, path, assume_role_doc, tenant);
ret = role.create(true);
if (ret < 0) {
return -ret;
cerr << "ERROR: empty role name" << std::endl;
return -EINVAL;
}
- RGWRole role(g_ceph_context, store->pctl, role_name, tenant);
+ RGWRole role(g_ceph_context, store->getRados()->pctl, role_name, tenant);
ret = role.delete_obj();
if (ret < 0) {
return -ret;
cerr << "ERROR: empty role name" << std::endl;
return -EINVAL;
}
- RGWRole role(g_ceph_context, store->pctl, role_name, tenant);
+ RGWRole role(g_ceph_context, store->getRados()->pctl, role_name, tenant);
ret = role.get();
if (ret < 0) {
return -ret;
return -EINVAL;
}
- RGWRole role(g_ceph_context, store->pctl, role_name, tenant);
+ RGWRole role(g_ceph_context, store->getRados()->pctl, role_name, tenant);
ret = role.get();
if (ret < 0) {
return -ret;
case OPT_ROLE_LIST:
{
vector<RGWRole> result;
- ret = RGWRole::get_roles_by_path_prefix(store, g_ceph_context, path_prefix, tenant, result);
+ ret = RGWRole::get_roles_by_path_prefix(store->getRados(), g_ceph_context, path_prefix, tenant, result);
if (ret < 0) {
return -ret;
}
return -EINVAL;
}
- RGWRole role(g_ceph_context, store->pctl, role_name, tenant);
+ RGWRole role(g_ceph_context, store->getRados()->pctl, role_name, tenant);
ret = role.get();
if (ret < 0) {
return -ret;
cerr << "ERROR: Role name is empty" << std::endl;
return -EINVAL;
}
- RGWRole role(g_ceph_context, store->pctl, role_name, tenant);
+ RGWRole role(g_ceph_context, store->getRados()->pctl, role_name, tenant);
ret = role.get();
if (ret < 0) {
return -ret;
cerr << "ERROR: policy name is empty" << std::endl;
return -EINVAL;
}
- RGWRole role(g_ceph_context, store->pctl, role_name, tenant);
+ RGWRole role(g_ceph_context, store->getRados()->pctl, role_name, tenant);
int ret = role.get();
if (ret < 0) {
return -ret;
cerr << "ERROR: policy name is empty" << std::endl;
return -EINVAL;
}
- RGWRole role(g_ceph_context, store->pctl, role_name, tenant);
+ RGWRole role(g_ceph_context, store->getRados()->pctl, role_name, tenant);
ret = role.get();
if (ret < 0) {
return -ret;
} else {
/* list users in groups of max-keys, then perform user-bucket
* limit-check on each group */
- ret = store->ctl.meta.mgr->list_keys_init(metadata_key, &handle);
+ ret = store->ctl()->meta.mgr->list_keys_init(metadata_key, &handle);
if (ret < 0) {
cerr << "ERROR: buckets limit check can't get user metadata_key: "
<< cpp_strerror(-ret) << std::endl;
}
do {
- ret = store->ctl.meta.mgr->list_keys_next(handle, max, user_ids,
+ ret = store->ctl()->meta.mgr->list_keys_next(handle, max, user_ids,
&truncated);
if (ret < 0 && ret != -ENOENT) {
cerr << "ERROR: buckets limit check lists_keys_next(): "
}
user_ids.clear();
} while (truncated);
- store->ctl.meta.mgr->list_keys_complete(handle);
+ store->ctl()->meta.mgr->list_keys_complete(handle);
}
return -ret;
} /* OPT_BUCKET_LIMIT_CHECK */
map<string, bool> common_prefixes;
string ns;
- RGWRados::Bucket target(store, bucket_info);
+ RGWRados::Bucket target(store->getRados(), bucket_info);
RGWRados::Bucket::List list_op(&target);
list_op.params.prefix = prefix;
formatter->reset();
formatter->open_array_section("logs");
RGWAccessHandle h;
- int r = store->log_list_init(date, &h);
+ int r = store->getRados()->log_list_init(date, &h);
if (r == -ENOENT) {
// no logs.
} else {
}
while (true) {
string name;
- int r = store->log_list_next(h, &name);
+ int r = store->getRados()->log_list_next(h, &name);
if (r == -ENOENT)
break;
if (r < 0) {
if (opt_cmd == OPT_LOG_SHOW) {
RGWAccessHandle h;
- int r = store->log_show_init(oid, &h);
+ int r = store->getRados()->log_show_init(oid, &h);
if (r < 0) {
cerr << "error opening log " << oid << ": " << cpp_strerror(-r) << std::endl;
return -r;
struct rgw_log_entry entry;
// peek at first entry to get bucket metadata
- r = store->log_show_next(h, &entry);
+ r = store->getRados()->log_show_next(h, &entry);
if (r < 0) {
cerr << "error reading log " << oid << ": " << cpp_strerror(-r) << std::endl;
return -r;
formatter->flush(cout);
}
next:
- r = store->log_show_next(h, &entry);
+ r = store->getRados()->log_show_next(h, &entry);
} while (r > 0);
if (r < 0) {
cout << std::endl;
}
if (opt_cmd == OPT_LOG_RM) {
- int r = store->log_remove(oid);
+ int r = store->getRados()->log_remove(oid);
if (r < 0) {
cerr << "error removing log " << oid << ": " << cpp_strerror(-r) << std::endl;
return -r;
exit(1);
}
- int ret = store->svc.zone->add_bucket_placement(pool);
+ int ret = store->svc()->zone->add_bucket_placement(pool);
if (ret < 0)
cerr << "failed to add bucket placement: " << cpp_strerror(-ret) << std::endl;
}
exit(1);
}
- int ret = store->svc.zone->remove_bucket_placement(pool);
+ int ret = store->svc()->zone->remove_bucket_placement(pool);
if (ret < 0)
cerr << "failed to remove bucket placement: " << cpp_strerror(-ret) << std::endl;
}
if (opt_cmd == OPT_POOLS_LIST) {
set<rgw_pool> pools;
- int ret = store->svc.zone->list_placement_set(pools);
+ int ret = store->svc()->zone->list_placement_set(pools);
if (ret < 0) {
cerr << "could not list placement set: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
- ret = RGWUsage::show(store, user_id, bucket_name, start_epoch, end_epoch,
+ ret = RGWUsage::show(store->getRados(), user_id, bucket_name, start_epoch, end_epoch,
show_log_entries, show_log_sum, &categories,
f);
if (ret < 0) {
}
}
- ret = RGWUsage::trim(store, user_id, bucket_name, start_epoch, end_epoch);
+ ret = RGWUsage::trim(store->getRados(), user_id, bucket_name, start_epoch, end_epoch);
if (ret < 0) {
cerr << "ERROR: read_usage() returned ret=" << ret << std::endl;
return 1;
return 1;
}
- ret = RGWUsage::clear(store);
+ ret = RGWUsage::clear(store->getRados());
if (ret < 0) {
return ret;
}
}
RGWOLHInfo olh;
rgw_obj obj(bucket, object);
- ret = store->get_olh(bucket_info, obj, &olh);
+ ret = store->getRados()->get_olh(bucket_info, obj, &olh);
if (ret < 0) {
cerr << "ERROR: failed reading olh: " << cpp_strerror(-ret) << std::endl;
return -ret;
RGWObjState *state;
- ret = store->get_obj_state(&rctx, bucket_info, obj, &state, false, null_yield); /* don't follow olh */
+ ret = store->getRados()->get_obj_state(&rctx, bucket_info, obj, &state, false, null_yield); /* don't follow olh */
if (ret < 0) {
return -ret;
}
- ret = store->bucket_index_read_olh_log(bucket_info, *state, obj, 0, &log, &is_truncated);
+ ret = store->getRados()->bucket_index_read_olh_log(bucket_info, *state, obj, 0, &log, &is_truncated);
if (ret < 0) {
cerr << "ERROR: failed reading olh: " << cpp_strerror(-ret) << std::endl;
return -ret;
rgw_cls_bi_entry entry;
- ret = store->bi_get(bucket_info, obj, bi_index_type, &entry);
+ ret = store->getRados()->bi_get(bucket_info, obj, bi_index_type, &entry);
if (ret < 0) {
cerr << "ERROR: bi_get(): " << cpp_strerror(-ret) << std::endl;
return -ret;
rgw_obj obj(bucket, key);
- ret = store->bi_put(bucket, obj, entry);
+ ret = store->getRados()->bi_put(bucket, obj, entry);
if (ret < 0) {
cerr << "ERROR: bi_put(): " << cpp_strerror(-ret) << std::endl;
return -ret;
int i = (specified_shard_id ? shard_id : 0);
for (; i < max_shards; i++) {
- RGWRados::BucketShard bs(store);
+ RGWRados::BucketShard bs(store->getRados());
int shard_id = (bucket_info.num_shards > 0 ? i : -1);
int ret = bs.init(bucket, shard_id, nullptr /* no RGWBucketInfo */);
marker.clear();
do {
entries.clear();
- ret = store->bi_list(bs, object, marker, max_entries, &entries, &is_truncated);
+ ret = store->getRados()->bi_list(bs, object, marker, max_entries, &entries, &is_truncated);
if (ret < 0) {
cerr << "ERROR: bi_list(): " << cpp_strerror(-ret) << std::endl;
return -ret;
int max_shards = (bucket_info.num_shards > 0 ? bucket_info.num_shards : 1);
for (int i = 0; i < max_shards; i++) {
- RGWRados::BucketShard bs(store);
+ RGWRados::BucketShard bs(store->getRados());
int shard_id = (bucket_info.num_shards > 0 ? i : -1);
int ret = bs.init(bucket, shard_id, nullptr /* no RGWBucketInfo */);
if (ret < 0) {
return -ret;
}
- ret = store->bi_remove(bs);
+ ret = store->getRados()->bi_remove(bs);
if (ret < 0) {
cerr << "ERROR: failed to remove bucket index object: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
}
if (need_rewrite) {
- ret = store->rewrite_obj(bucket_info, obj, dpp(), null_yield);
+ ret = store->getRados()->rewrite_obj(bucket_info, obj, dpp(), null_yield);
if (ret < 0) {
cerr << "ERROR: object rewrite returned: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
if (opt_cmd == OPT_OBJECTS_EXPIRE) {
- if (!store->process_expire_objects()) {
+ if (!store->getRados()->process_expire_objects()) {
cerr << "ERROR: process_expire_objects() processing returned error." << std::endl;
return 1;
}
while (is_truncated) {
RGWRados::ent_map_t result;
int r =
- store->cls_bucket_list_ordered(bucket_info, RGW_NO_SHARD, marker,
+ store->getRados()->cls_bucket_list_ordered(bucket_info, RGW_NO_SHARD, marker,
prefix, 1000, true,
result, &is_truncated, &marker,
null_yield,
if (!need_rewrite) {
formatter->dump_string("status", "Skipped");
} else {
- r = store->rewrite_obj(bucket_info, obj, dpp(), null_yield);
+ r = store->getRados()->rewrite_obj(bucket_info, obj, dpp(), null_yield);
if (r == 0) {
formatter->dump_string("status", "Success");
} else {
rgw_obj_index_key index_key;
key.get_index_key(&index_key);
oid_list.push_back(index_key);
- ret = store->remove_objs_from_index(bucket_info, oid_list);
+ ret = store->getRados()->remove_objs_from_index(bucket_info, oid_list);
if (ret < 0) {
cerr << "ERROR: remove_obj_from_index() returned error: " << cpp_strerror(-ret) << std::endl;
return 1;
uint64_t obj_size;
map<string, bufferlist> attrs;
RGWObjectCtx obj_ctx(store);
- RGWRados::Object op_target(store, bucket_info, obj_ctx, obj);
+ RGWRados::Object op_target(store->getRados(), bucket_info, obj_ctx, obj);
RGWRados::Object::Read read_op(&op_target);
read_op.params.attrs = &attrs;
do {
list<cls_rgw_gc_obj_info> result;
- int ret = store->list_gc_objs(&index, marker, 1000, !include_all, result, &truncated);
+ int ret = store->getRados()->list_gc_objs(&index, marker, 1000, !include_all, result, &truncated);
if (ret < 0) {
cerr << "ERROR: failed to list objs: " << cpp_strerror(-ret) << std::endl;
return 1;
}
if (opt_cmd == OPT_GC_PROCESS) {
- int ret = store->process_gc(!include_all);
+ int ret = store->getRados()->process_gc(!include_all);
if (ret < 0) {
cerr << "ERROR: gc processing returned error: " << cpp_strerror(-ret) << std::endl;
return 1;
max_entries = MAX_LC_LIST_ENTRIES;
}
do {
- int ret = store->list_lc_progress(marker, max_entries, &bucket_lc_map);
+ int ret = store->getRados()->list_lc_progress(marker, max_entries, &bucket_lc_map);
if (ret < 0) {
cerr << "ERROR: failed to list objs: " << cpp_strerror(-ret) << std::endl;
return 1;
}
if (opt_cmd == OPT_LC_PROCESS) {
- int ret = store->process_lc();
+ int ret = store->getRados()->process_lc();
if (ret < 0) {
cerr << "ERROR: lc processing returned error: " << cpp_strerror(-ret) << std::endl;
return 1;
cerr << "ERROR: recalculate doesn't work on buckets" << std::endl;
return EINVAL;
}
- ret = store->ctl.user->reset_stats(user_id);
+ ret = store->ctl()->user->reset_stats(user_id);
if (ret < 0) {
cerr << "ERROR: could not clear user stats: " << cpp_strerror(-ret) << std::endl;
return -ret;
cerr << "ERROR: could not init bucket: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
- ret = store->ctl.bucket->sync_user_stats(user_id, bucket_info);
+ ret = store->ctl()->bucket->sync_user_stats(user_id, bucket_info);
if (ret < 0) {
cerr << "ERROR: could not sync bucket stats: " << cpp_strerror(-ret) << std::endl;
return -ret;
RGWStorageStats stats;
ceph::real_time last_stats_sync;
ceph::real_time last_stats_update;
- int ret = store->ctl.user->read_stats(user_id, &stats, &last_stats_sync, &last_stats_update);
+ int ret = store->ctl()->user->read_stats(user_id, &stats, &last_stats_sync, &last_stats_update);
if (ret < 0) {
if (ret == -ENOENT) { /* in case of ENOENT */
cerr << "User has not been initialized or user does not exist" << std::endl;
}
if (opt_cmd == OPT_METADATA_GET) {
- int ret = store->ctl.meta.mgr->get(metadata_key, formatter, null_yield);
+ int ret = store->ctl()->meta.mgr->get(metadata_key, formatter, null_yield);
if (ret < 0) {
cerr << "ERROR: can't get key: " << cpp_strerror(-ret) << std::endl;
return -ret;
cerr << "ERROR: failed to read input: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
- ret = store->ctl.meta.mgr->put(metadata_key, bl, null_yield, RGWMDLogSyncType::APPLY_ALWAYS);
+ ret = store->ctl()->meta.mgr->put(metadata_key, bl, null_yield, RGWMDLogSyncType::APPLY_ALWAYS);
if (ret < 0) {
cerr << "ERROR: can't put key: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
if (opt_cmd == OPT_METADATA_RM) {
- int ret = store->ctl.meta.mgr->remove(metadata_key, null_yield);
+ int ret = store->ctl()->meta.mgr->remove(metadata_key, null_yield);
if (ret < 0) {
cerr << "ERROR: can't remove key: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
void *handle;
int max = 1000;
- int ret = store->ctl.meta.mgr->list_keys_init(metadata_key, marker, &handle);
+ int ret = store->ctl()->meta.mgr->list_keys_init(metadata_key, marker, &handle);
if (ret < 0) {
cerr << "ERROR: can't get key: " << cpp_strerror(-ret) << std::endl;
return -ret;
do {
list<string> keys;
left = (max_entries_specified ? max_entries - count : max);
- ret = store->ctl.meta.mgr->list_keys_next(handle, left, keys, &truncated);
+ ret = store->ctl()->meta.mgr->list_keys_next(handle, left, keys, &truncated);
if (ret < 0 && ret != -ENOENT) {
cerr << "ERROR: lists_keys_next(): " << cpp_strerror(-ret) << std::endl;
return -ret;
encode_json("truncated", truncated, formatter);
encode_json("count", count, formatter);
if (truncated) {
- encode_json("marker", store->ctl.meta.mgr->get_marker(handle), formatter);
+ encode_json("marker", store->ctl()->meta.mgr->get_marker(handle), formatter);
}
formatter->close_section();
}
formatter->flush(cout);
- store->ctl.meta.mgr->list_keys_complete(handle);
+ store->ctl()->meta.mgr->list_keys_complete(handle);
}
if (opt_cmd == OPT_MDLOG_LIST) {
std::cerr << "No --period given, using current period="
<< period_id << std::endl;
}
- RGWMetadataLog *meta_log = store->svc.mdlog->get_log(period_id);
+ RGWMetadataLog *meta_log = store->svc()->mdlog->get_log(period_id);
formatter->open_array_section("entries");
for (; i < g_ceph_context->_conf->rgw_md_log_max_shards; i++) {
for (list<cls_log_entry>::iterator iter = entries.begin(); iter != entries.end(); ++iter) {
cls_log_entry& entry = *iter;
- store->ctl.meta.mgr->dump_log_entry(entry, formatter);
+ store->ctl()->meta.mgr->dump_log_entry(entry, formatter);
}
formatter->flush(cout);
} while (truncated);
std::cerr << "No --period given, using current period="
<< period_id << std::endl;
}
- RGWMetadataLog *meta_log = store->svc.mdlog->get_log(period_id);
+ RGWMetadataLog *meta_log = store->svc()->mdlog->get_log(period_id);
formatter->open_array_section("entries");
if (opt_cmd == OPT_MDLOG_AUTOTRIM) {
// need a full history for purging old mdlog periods
- store->svc.mdlog->init_oldest_log_period();
+ store->svc()->mdlog->init_oldest_log_period();
- RGWCoroutinesManager crs(store->ctx(), store->get_cr_registry());
+ RGWCoroutinesManager crs(store->ctx(), store->getRados()->get_cr_registry());
RGWHTTPManager http(store->ctx(), crs.get_completion_mgr());
int ret = http.start();
if (ret < 0) {
std::cerr << "missing --period argument" << std::endl;
return EINVAL;
}
- RGWMetadataLog *meta_log = store->svc.mdlog->get_log(period_id);
+ RGWMetadataLog *meta_log = store->svc()->mdlog->get_log(period_id);
ret = meta_log->trim(shard_id, start_time.to_real_time(), end_time.to_real_time(), start_marker, end_marker);
if (ret < 0) {
}
if (opt_cmd == OPT_METADATA_SYNC_STATUS) {
- RGWMetaSyncStatusManager sync(store, store->svc.rados->get_async_processor());
+ RGWMetaSyncStatusManager sync(store, store->svc()->rados->get_async_processor());
int ret = sync.init();
if (ret < 0) {
}
if (opt_cmd == OPT_METADATA_SYNC_INIT) {
- RGWMetaSyncStatusManager sync(store, store->svc.rados->get_async_processor());
+ RGWMetaSyncStatusManager sync(store, store->svc()->rados->get_async_processor());
int ret = sync.init();
if (ret < 0) {
if (opt_cmd == OPT_METADATA_SYNC_RUN) {
- RGWMetaSyncStatusManager sync(store, store->svc.rados->get_async_processor());
+ RGWMetaSyncStatusManager sync(store, store->svc()->rados->get_async_processor());
int ret = sync.init();
if (ret < 0) {
cerr << "ERROR: source zone not specified" << std::endl;
return EINVAL;
}
- RGWDataSyncStatusManager sync(store, store->svc.rados->get_async_processor(), source_zone, nullptr);
+ RGWDataSyncStatusManager sync(store, store->svc()->rados->get_async_processor(), source_zone, nullptr);
int ret = sync.init();
if (ret < 0) {
return EINVAL;
}
- RGWDataSyncStatusManager sync(store, store->svc.rados->get_async_processor(), source_zone, nullptr);
+ RGWDataSyncStatusManager sync(store, store->svc()->rados->get_async_processor(), source_zone, nullptr);
int ret = sync.init();
if (ret < 0) {
}
RGWSyncModuleInstanceRef sync_module;
- int ret = store->svc.sync_modules->get_manager()->create_instance(g_ceph_context, store->svc.zone->get_zone().tier_type,
- store->svc.zone->get_zone_params().tier_config, &sync_module);
+ int ret = store->svc()->sync_modules->get_manager()->create_instance(g_ceph_context, store->svc()->zone->get_zone().tier_type,
+ store->svc()->zone->get_zone_params().tier_config, &sync_module);
if (ret < 0) {
lderr(cct) << "ERROR: failed to init sync module instance, ret=" << ret << dendl;
return ret;
}
- RGWDataSyncStatusManager sync(store, store->svc.rados->get_async_processor(), source_zone, nullptr, sync_module);
+ RGWDataSyncStatusManager sync(store, store->svc()->rados->get_async_processor(), source_zone, nullptr, sync_module);
ret = sync.init();
if (ret < 0) {
return ret;
}
RGWPeriod period;
- ret = period.init(g_ceph_context, store->svc.sysobj, realm_id, realm_name, true);
+ ret = period.init(g_ceph_context, store->svc()->sysobj, realm_id, realm_name, true);
if (ret < 0) {
cerr << "failed to init period " << ": " << cpp_strerror(-ret) << std::endl;
return ret;
}
- if (!store->svc.zone->is_meta_master()) {
+ if (!store->svc()->zone->is_meta_master()) {
cerr << "failed to update bucket sync: only allowed on meta master zone " << std::endl;
cerr << period.get_master_zone() << " | " << period.get_realm() << std::endl;
return EINVAL;
do {
list<rgw_bi_log_entry> entries;
- ret = store->svc.bilog_rados->log_list(bucket_info, shard_id, marker, max_entries - count, entries, &truncated);
+ ret = store->svc()->bilog_rados->log_list(bucket_info, shard_id, marker, max_entries - count, entries, &truncated);
if (ret < 0) {
cerr << "ERROR: list_bi_log_entries(): " << cpp_strerror(-ret) << std::endl;
return -ret;
do {
list<cls_log_entry> entries;
- ret = store->svc.cls->timelog.list(oid, start_time.to_real_time(), end_time.to_real_time(),
+ ret = store->svc()->cls->timelog.list(oid, start_time.to_real_time(), end_time.to_real_time(),
max_entries - count, entries, marker, &marker, &truncated,
null_yield);
if (ret == -ENOENT) {
cerr << "ERROR: could not init bucket: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
- ret = store->svc.bilog_rados->log_trim(bucket_info, shard_id, start_marker, end_marker);
+ ret = store->svc()->bilog_rados->log_trim(bucket_info, shard_id, start_marker, end_marker);
if (ret < 0) {
cerr << "ERROR: trim_bi_log_entries(): " << cpp_strerror(-ret) << std::endl;
return -ret;
return -ret;
}
map<int, string> markers;
- ret = store->svc.bilog_rados->get_log_status(bucket_info, shard_id, &markers);
+ ret = store->svc()->bilog_rados->get_log_status(bucket_info, shard_id, &markers);
if (ret < 0) {
cerr << "ERROR: get_bi_log_status(): " << cpp_strerror(-ret) << std::endl;
return -ret;
}
if (opt_cmd == OPT_BILOG_AUTOTRIM) {
- RGWCoroutinesManager crs(store->ctx(), store->get_cr_registry());
+ RGWCoroutinesManager crs(store->ctx(), store->getRados()->get_cr_registry());
RGWHTTPManager http(store->ctx(), crs.get_completion_mgr());
int ret = http.start();
if (ret < 0) {
if (ret < 0)
return -ret;
- auto datalog_svc = store->svc.datalog_rados;
+ auto datalog_svc = store->svc()->datalog_rados;
RGWDataChangesLog::LogMarker log_marker;
do {
list<cls_log_entry> entries;
RGWDataChangesLogInfo info;
- store->svc.datalog_rados->get_info(i, &info);
+ store->svc()->datalog_rados->get_info(i, &info);
::encode_json("info", info, formatter);
}
if (opt_cmd == OPT_DATALOG_AUTOTRIM) {
- RGWCoroutinesManager crs(store->ctx(), store->get_cr_registry());
+ RGWCoroutinesManager crs(store->ctx(), store->getRados()->get_cr_registry());
RGWHTTPManager http(store->ctx(), crs.get_completion_mgr());
int ret = http.start();
if (ret < 0) {
if (ret < 0)
return -ret;
- ret = store->svc.datalog_rados->trim_entries(start_time.to_real_time(), end_time.to_real_time(), start_marker, end_marker);
+ ret = store->svc()->datalog_rados->trim_entries(start_time.to_real_time(), end_time.to_real_time(), start_marker, end_marker);
if (ret < 0) {
cerr << "ERROR: trim_entries(): " << cpp_strerror(-ret) << std::endl;
return -ret;
}
real_time mtime = real_clock::now();
- string oid = store->svc.cls->mfa.get_mfa_oid(user_id);
-
- int ret = store->ctl.meta.mgr->mutate(RGWSI_MetaBackend_OTP::get_meta_key(user_id),
- mtime, &objv_tracker,
- null_yield,
- MDLOG_STATUS_WRITE,
- [&] {
- return store->svc.cls->mfa.create_mfa(user_id, config, &objv_tracker, mtime, null_yield);
+ string oid = store->svc()->cls->mfa.get_mfa_oid(user_id);
+
+ int ret = store->ctl()->meta.mgr->mutate(RGWSI_MetaBackend_OTP::get_meta_key(user_id),
+ mtime, &objv_tracker,
+ null_yield,
+ MDLOG_STATUS_WRITE,
+ [&] {
+ return store->svc()->cls->mfa.create_mfa(user_id, config, &objv_tracker, mtime, null_yield);
});
if (ret < 0) {
cerr << "MFA creation failed, error: " << cpp_strerror(-ret) << std::endl;
real_time mtime = real_clock::now();
- int ret = store->ctl.meta.mgr->mutate(RGWSI_MetaBackend_OTP::get_meta_key(user_id),
- mtime, &objv_tracker,
- null_yield,
- MDLOG_STATUS_WRITE,
- [&] {
- return store->svc.cls->mfa.remove_mfa(user_id, totp_serial, &objv_tracker, mtime, null_yield);
+ int ret = store->ctl()->meta.mgr->mutate(RGWSI_MetaBackend_OTP::get_meta_key(user_id),
+ mtime, &objv_tracker,
+ null_yield,
+ MDLOG_STATUS_WRITE,
+ [&] {
+ return store->svc()->cls->mfa.remove_mfa(user_id, totp_serial, &objv_tracker, mtime, null_yield);
});
if (ret < 0) {
cerr << "MFA removal failed, error: " << cpp_strerror(-ret) << std::endl;
}
rados::cls::otp::otp_info_t result;
- int ret = store->svc.cls->mfa.get_mfa(user_id, totp_serial, &result, null_yield);
+ int ret = store->svc()->cls->mfa.get_mfa(user_id, totp_serial, &result, null_yield);
if (ret < 0) {
if (ret == -ENOENT || ret == -ENODATA) {
cerr << "MFA serial id not found" << std::endl;
}
list<rados::cls::otp::otp_info_t> result;
- int ret = store->svc.cls->mfa.list_mfa(user_id, &result, null_yield);
+ int ret = store->svc()->cls->mfa.list_mfa(user_id, &result, null_yield);
if (ret < 0) {
cerr << "MFA listing failed, error: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
list<rados::cls::otp::otp_info_t> result;
- int ret = store->svc.cls->mfa.check_mfa(user_id, totp_serial, totp_pin.front(), null_yield);
+ int ret = store->svc()->cls->mfa.check_mfa(user_id, totp_serial, totp_pin.front(), null_yield);
if (ret < 0) {
cerr << "MFA check failed, error: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
rados::cls::otp::otp_info_t config;
- int ret = store->svc.cls->mfa.get_mfa(user_id, totp_serial, &config, null_yield);
+ int ret = store->svc()->cls->mfa.get_mfa(user_id, totp_serial, &config, null_yield);
if (ret < 0) {
if (ret == -ENOENT || ret == -ENODATA) {
cerr << "MFA serial id not found" << std::endl;
ceph::real_time now;
- ret = store->svc.cls->mfa.otp_get_current_time(user_id, &now, null_yield);
+ ret = store->svc()->cls->mfa.otp_get_current_time(user_id, &now, null_yield);
if (ret < 0) {
cerr << "ERROR: failed to fetch current time from osd: " << cpp_strerror(-ret) << std::endl;
return -ret;
/* now update the backend */
real_time mtime = real_clock::now();
- ret = store->ctl.meta.mgr->mutate(RGWSI_MetaBackend_OTP::get_meta_key(user_id),
- mtime, &objv_tracker,
- null_yield,
- MDLOG_STATUS_WRITE,
- [&] {
- return store->svc.cls->mfa.create_mfa(user_id, config, &objv_tracker, mtime, null_yield);
+ ret = store->ctl()->meta.mgr->mutate(RGWSI_MetaBackend_OTP::get_meta_key(user_id),
+ mtime, &objv_tracker,
+ null_yield,
+ MDLOG_STATUS_WRITE,
+ [&] {
+ return store->svc()->cls->mfa.create_mfa(user_id, config, &objv_tracker, mtime, null_yield);
});
if (ret < 0) {
cerr << "MFA update failed, error: " << cpp_strerror(-ret) << std::endl;
}
if (opt_cmd == OPT_RESHARD_STALE_INSTANCES_LIST) {
- if (!store->svc.zone->can_reshard() && !yes_i_really_mean_it) {
+ if (!store->svc()->zone->can_reshard() && !yes_i_really_mean_it) {
cerr << "Resharding disabled in a multisite env, stale instances unlikely from resharding" << std::endl;
cerr << "These instances may not be safe to delete." << std::endl;
cerr << "Use --yes-i-really-mean-it to force displaying these instances." << std::endl;
}
if (opt_cmd == OPT_RESHARD_STALE_INSTANCES_DELETE) {
- if (!store->svc.zone->can_reshard()) {
+ if (!store->svc()->zone->can_reshard()) {
cerr << "Resharding disabled in a multisite env. Stale instances are not safe to be deleted." << std::endl;
return EINVAL;
}
dest_config.oid_prefix = sub_oid_prefix;
dest_config.push_endpoint = sub_push_endpoint;
- auto psmodule = static_cast<RGWPSSyncModuleInstance *>(store->get_sync_module().get());
+ auto psmodule = static_cast<RGWPSSyncModuleInstance *>(store->getRados()->get_sync_module().get());
auto conf = psmodule->get_effective_conf();
if (dest_config.bucket_name.empty()) {
#include "common/config.h"
#include <boost/intrusive_ptr.hpp>
-
-class RGWRados;
+#include "rgw_sal.h"
namespace rgw {
class RGWLibAdmin
{
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
boost::intrusive_ptr<CephContext> cct;
public:
- RGWRados* get_store()
+ rgw::sal::RGWRadosStore* get_store()
{
return store;
}
}
// process the request
- RGWRequest req{env.store->get_new_req_id()};
+ RGWRequest req{env.store->getRados()->get_new_req_id()};
auto& socket = stream.lowest_layer();
StreamIO real_client{cct, stream, parser, yield, buffer, is_ssl,
void stop();
void join();
void pause();
- void unpause(RGWRados* store, rgw_auth_registry_ptr_t);
+ void unpause(rgw::sal::RGWRadosStore* store, rgw_auth_registry_ptr_t);
};
unsigned short parse_port(const char *input, boost::system::error_code& ec)
}
}
-void AsioFrontend::unpause(RGWRados* const store,
+void AsioFrontend::unpause(rgw::sal::RGWRadosStore* const store,
rgw_auth_registry_ptr_t auth_registry)
{
env.store = store;
}
void RGWAsioFrontend::unpause_with_new_config(
- RGWRados* const store,
+ rgw::sal::RGWRadosStore* const store,
rgw_auth_registry_ptr_t auth_registry
) {
impl->unpause(store, std::move(auth_registry));
void join() override;
void pause_for_new_config() override;
- void unpause_with_new_config(RGWRados *store,
+ void unpause_with_new_config(rgw::sal::RGWRadosStore *store,
rgw_auth_registry_ptr_t auth_registry) override;
};
#include "cls/user/cls_user_types.h"
+#include "rgw_sal.h"
+
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_rgw
#define BUCKET_TAG_TIMEOUT 30
+using namespace rgw::sal;
+
/*
* The tenant_name is always returned on purpose. May be empty, of course.
* Get all the buckets owned by a user and fill up an RGWUserBuckets with them.
* Returns: 0 on success, -ERR# on failure.
*/
-int rgw_read_user_buckets(RGWRados * store,
+int rgw_read_user_buckets(RGWRadosStore * store,
const rgw_user& user_id,
RGWUserBuckets& buckets,
const string& marker,
bool *is_truncated,
uint64_t default_amount)
{
- return store->ctl.user->list_buckets(user_id, marker, end_marker,
+ return store->ctl()->user->list_buckets(user_id, marker, end_marker,
max, need_stats, &buckets,
is_truncated, default_amount);
}
}
}
-void check_bad_user_bucket_mapping(RGWRados *store, const rgw_user& user_id,
+void check_bad_user_bucket_mapping(RGWRadosStore *store, const rgw_user& user_id,
bool fix)
{
RGWUserBuckets user_buckets;
RGWBucketInfo bucket_info;
real_time mtime;
- RGWSysObjectCtx obj_ctx = store->svc.sysobj->init_obj_ctx();
- int r = store->get_bucket_info(obj_ctx, user_id.tenant, bucket.name, bucket_info, &mtime, null_yield);
+ RGWSysObjectCtx obj_ctx = store->svc()->sysobj->init_obj_ctx();
+ int r = store->getRados()->get_bucket_info(obj_ctx, user_id.tenant, bucket.name, bucket_info, &mtime, null_yield);
if (r < 0) {
ldout(store->ctx(), 0) << "could not get bucket info for bucket=" << bucket << dendl;
continue;
cout << "bucket info mismatch: expected " << actual_bucket << " got " << bucket << std::endl;
if (fix) {
cout << "fixing" << std::endl;
- r = store->ctl.bucket->link_bucket(user_id, actual_bucket,
+ r = store->ctl()->bucket->link_bucket(user_id, actual_bucket,
bucket_info.creation_time,
null_yield);
if (r < 0) {
return rgw_obj_key::oid_to_key_in_ns(oid, &key, ns);
}
-int rgw_remove_object(RGWRados *store, const RGWBucketInfo& bucket_info, const rgw_bucket& bucket, rgw_obj_key& key)
+int rgw_remove_object(RGWRadosStore *store, const RGWBucketInfo& bucket_info, const rgw_bucket& bucket, rgw_obj_key& key)
{
RGWObjectCtx rctx(store);
rgw_obj obj(bucket, key);
- return store->delete_obj(rctx, bucket_info, obj, bucket_info.versioning_status());
+ return store->getRados()->delete_obj(rctx, bucket_info, obj, bucket_info.versioning_status());
}
-int rgw_remove_bucket(RGWRados *store, rgw_bucket& bucket, bool delete_children, optional_yield y)
+int rgw_remove_bucket(RGWRadosStore *store, rgw_bucket& bucket, bool delete_children, optional_yield y)
{
int ret;
map<RGWObjCategory, RGWStorageStats> stats;
std::vector<rgw_bucket_dir_entry> objs;
map<string, bool> common_prefixes;
RGWBucketInfo info;
- RGWSysObjectCtx obj_ctx = store->svc.sysobj->init_obj_ctx();
+ RGWSysObjectCtx obj_ctx = store->svc()->sysobj->init_obj_ctx();
string bucket_ver, master_ver;
- ret = store->get_bucket_info(obj_ctx, bucket.tenant, bucket.name, info, NULL, null_yield);
+ ret = store->getRados()->get_bucket_info(obj_ctx, bucket.tenant, bucket.name, info, NULL, null_yield);
if (ret < 0)
return ret;
- ret = store->get_bucket_stats(info, RGW_NO_SHARD, &bucket_ver, &master_ver, stats, NULL);
+ ret = store->getRados()->get_bucket_stats(info, RGW_NO_SHARD, &bucket_ver, &master_ver, stats, NULL);
if (ret < 0)
return ret;
- RGWRados::Bucket target(store, info);
+ RGWRados::Bucket target(store->getRados(), info);
RGWRados::Bucket::List list_op(&target);
CephContext *cct = store->ctx();
int max = 1000;
return ret;
}
- ret = store->ctl.bucket->sync_user_stats(info.owner, info);
+ ret = store->ctl()->bucket->sync_user_stats(info.owner, info);
if ( ret < 0) {
dout(1) << "WARNING: failed sync user stats before bucket delete. ret=" << ret << dendl;
}
// if we deleted children above we will force delete, as any that
// remain is detrius from a prior bug
- ret = store->delete_bucket(info, objv_tracker, null_yield, !delete_children);
+ ret = store->getRados()->delete_bucket(info, objv_tracker, null_yield, !delete_children);
if (ret < 0) {
lderr(store->ctx()) << "ERROR: could not remove bucket " <<
bucket.name << dendl;
return ret;
}
- ret = store->ctl.bucket->unlink_bucket(info.owner, bucket, null_yield, false);
+ ret = store->ctl()->bucket->unlink_bucket(info.owner, bucket, null_yield, false);
if (ret < 0) {
lderr(store->ctx()) << "ERROR: unable to remove user bucket information" << dendl;
}
return ret;
}
-int rgw_remove_bucket_bypass_gc(RGWRados *store, rgw_bucket& bucket,
+int rgw_remove_bucket_bypass_gc(RGWRadosStore *store, rgw_bucket& bucket,
int concurrent_max, bool keep_index_consistent,
optional_yield y)
{
map<string, bool> common_prefixes;
RGWBucketInfo info;
RGWObjectCtx obj_ctx(store);
- RGWSysObjectCtx sysobj_ctx = store->svc.sysobj->init_obj_ctx();
+ RGWSysObjectCtx sysobj_ctx = store->svc()->sysobj->init_obj_ctx();
CephContext *cct = store->ctx();
string bucket_ver, master_ver;
- ret = store->get_bucket_info(sysobj_ctx, bucket.tenant, bucket.name, info, NULL, null_yield);
+ ret = store->getRados()->get_bucket_info(sysobj_ctx, bucket.tenant, bucket.name, info, NULL, null_yield);
if (ret < 0)
return ret;
- ret = store->get_bucket_stats(info, RGW_NO_SHARD, &bucket_ver, &master_ver, stats, NULL);
+ ret = store->getRados()->get_bucket_stats(info, RGW_NO_SHARD, &bucket_ver, &master_ver, stats, NULL);
if (ret < 0)
return ret;
return ret;
}
- RGWRados::Bucket target(store, info);
+ RGWRados::Bucket target(store->getRados(), info);
RGWRados::Bucket::List list_op(&target);
list_op.params.list_versions = true;
RGWObjState *astate = NULL;
rgw_obj obj(bucket, (*it).key);
- ret = store->get_obj_state(&obj_ctx, info, obj, &astate, false, y);
+ ret = store->getRados()->get_obj_state(&obj_ctx, info, obj, &astate, false, y);
if (ret == -ENOENT) {
dout(1) << "WARNING: cannot find obj state for obj " << obj.get_oid() << dendl;
continue;
RGWObjManifest::obj_iterator miter = manifest.obj_begin();
rgw_obj head_obj = manifest.get_obj();
rgw_raw_obj raw_head_obj;
- store->obj_to_raw(info.placement_rule, head_obj, &raw_head_obj);
+ store->getRados()->obj_to_raw(info.placement_rule, head_obj, &raw_head_obj);
for (; miter != manifest.obj_end() && max_aio--; ++miter) {
max_aio = concurrent_max;
}
- rgw_raw_obj last_obj = miter.get_location().get_raw_obj(store);
+ rgw_raw_obj last_obj = miter.get_location().get_raw_obj(store->getRados());
if (last_obj == raw_head_obj) {
// have the head obj deleted at the end
continue;
}
- ret = store->delete_raw_obj_aio(last_obj, handles);
+ ret = store->getRados()->delete_raw_obj_aio(last_obj, handles);
if (ret < 0) {
lderr(store->ctx()) << "ERROR: delete obj aio failed with " << ret << dendl;
return ret;
}
} // for all shadow objs
- ret = store->delete_obj_aio(head_obj, info, astate, handles, keep_index_consistent, null_yield);
+ ret = store->getRados()->delete_obj_aio(head_obj, info, astate, handles, keep_index_consistent, null_yield);
if (ret < 0) {
lderr(store->ctx()) << "ERROR: delete obj aio failed with " << ret << dendl;
return ret;
return ret;
}
- ret = store->ctl.bucket->sync_user_stats(info.owner, info);
+ ret = store->ctl()->bucket->sync_user_stats(info.owner, info);
if (ret < 0) {
dout(1) << "WARNING: failed sync user stats before bucket delete. ret=" << ret << dendl;
}
// this function can only be run if caller wanted children to be
// deleted, so we can ignore the check for children as any that
// remain are detritus from a prior bug
- ret = store->delete_bucket(info, objv_tracker, y, false);
+ ret = store->getRados()->delete_bucket(info, objv_tracker, y, false);
if (ret < 0) {
lderr(store->ctx()) << "ERROR: could not remove bucket " << bucket.name << dendl;
return ret;
}
- ret = store->ctl.bucket->unlink_bucket(info.owner, bucket, null_yield, false);
+ ret = store->ctl()->bucket->unlink_bucket(info.owner, bucket, null_yield, false);
if (ret < 0) {
lderr(store->ctx()) << "ERROR: unable to remove user bucket information" << dendl;
}
*sink = msg;
}
-int RGWBucket::init(RGWRados *storage, RGWBucketAdminOpState& op_state,
+int RGWBucket::init(RGWRadosStore *storage, RGWBucketAdminOpState& op_state,
optional_yield y, std::string *err_msg,
map<string, bufferlist> *pattrs)
{
if (!bucket.name.empty()) {
ceph::real_time mtime;
- int r = store->ctl.bucket->read_bucket_info(
+ int r = store->ctl()->bucket->read_bucket_info(
bucket, &bucket_info, y,
RGWBucketCtl::BucketInstance::GetParams().set_attrs(pattrs),
&ep_objv);
}
if (!user_id.empty()) {
- int r = store->ctl.user->get_info_by_uid(user_id, &user_info, y);
+ int r = store->ctl()->user->get_info_by_uid(user_id, &user_info, y);
if (r < 0) {
set_err_msg(err_msg, "failed to fetch user info");
return r;
return -EIO;
}
- auto bucket_ctl = store->ctl.bucket;
+ auto bucket_ctl = store->ctl()->bucket;
int r = bucket_ctl->unlink_bucket(owner.get_id(), old_bucket, y, false);
if (r < 0) {
set_err_msg(err_msg, "could not unlink policy from user " + owner.get_id().to_str());
rgw_ep_info ep_data{ep, ep_attrs};
/* link to user */
- r = store->ctl.bucket->link_bucket(user_info.user_id,
+ r = store->ctl()->bucket->link_bucket(user_info.user_id,
bucket_info.bucket,
ceph::real_time(),
y, true, &ep_data);
int RGWBucket::chown(RGWBucketAdminOpState& op_state, const string& marker,
optional_yield y, std::string *err_msg)
{
- int ret = store->ctl.bucket->chown(store, bucket_info, user_info.user_id,
+ int ret = store->ctl()->bucket->chown(store, bucket_info, user_info.user_id,
user_info.display_name, marker, y);
if (ret < 0) {
set_err_msg(err_msg, "Failed to change object ownership: " + cpp_strerror(-ret));
return -EINVAL;
}
- int r = store->ctl.bucket->unlink_bucket(user_info.user_id, bucket, y);
+ int r = store->ctl()->bucket->unlink_bucket(user_info.user_id, bucket, y);
if (r < 0) {
set_err_msg(err_msg, "error unlinking bucket" + cpp_strerror(-r));
}
rgw_bucket bucket = op_state.get_bucket();
RGWBucketInfo bucket_info;
map<string, bufferlist> attrs;
- auto obj_ctx = store->svc.sysobj->init_obj_ctx();
- int r = store->get_bucket_info(obj_ctx, bucket.tenant, bucket.name, bucket_info, NULL, null_yield, &attrs);
+ auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
+ int r = store->getRados()->get_bucket_info(obj_ctx, bucket.tenant, bucket.name, bucket_info, NULL, null_yield, &attrs);
if (r < 0) {
set_err_msg(err_msg, "could not get bucket info for bucket=" + bucket.name + ": " + cpp_strerror(-r));
return r;
}
bucket_info.quota = op_state.quota;
- r = store->put_bucket_instance_info(bucket_info, false, real_time(), &attrs);
+ r = store->getRados()->put_bucket_instance_info(bucket_info, false, real_time(), &attrs);
if (r < 0) {
set_err_msg(err_msg, "ERROR: failed writing bucket instance info: " + cpp_strerror(-r));
return r;
map<rgw_obj_index_key, string> all_objs;
RGWBucketInfo bucket_info;
- auto obj_ctx = store->svc.sysobj->init_obj_ctx();
- int r = store->get_bucket_instance_info(obj_ctx, bucket, bucket_info, nullptr, nullptr, null_yield);
+ auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
+ int r = store->getRados()->get_bucket_instance_info(obj_ctx, bucket, bucket_info, nullptr, nullptr, null_yield);
if (r < 0) {
ldout(store->ctx(), 0) << "ERROR: " << __func__ << "(): get_bucket_instance_info(bucket=" << bucket << ") returned r=" << r << dendl;
return r;
}
- RGWRados::Bucket target(store, bucket_info);
+ RGWRados::Bucket target(store->getRados(), bucket_info);
RGWRados::Bucket::List list_op(&target);
list_op.params.list_versions = true;
if (objs_to_unlink.size() > max) {
if (fix_index) {
- int r = store->remove_objs_from_index(bucket_info, objs_to_unlink);
+ int r = store->getRados()->remove_objs_from_index(bucket_info, objs_to_unlink);
if (r < 0) {
set_err_msg(err_msg, "ERROR: remove_obj_from_index() returned error: " +
cpp_strerror(-r));
}
if (fix_index) {
- int r = store->remove_objs_from_index(bucket_info, objs_to_unlink);
+ int r = store->getRados()->remove_objs_from_index(bucket_info, objs_to_unlink);
if (r < 0) {
set_err_msg(err_msg, "ERROR: remove_obj_from_index() returned error: " +
cpp_strerror(-r));
return -EINVAL;
}
- store->cls_obj_set_bucket_tag_timeout(bucket_info, BUCKET_TAG_TIMEOUT);
+ store->getRados()->cls_obj_set_bucket_tag_timeout(bucket_info, BUCKET_TAG_TIMEOUT);
string prefix;
rgw_obj_index_key marker;
while (is_truncated) {
RGWRados::ent_map_t result;
- int r = store->cls_bucket_list_ordered(bucket_info, RGW_NO_SHARD,
+ int r = store->getRados()->cls_bucket_list_ordered(bucket_info, RGW_NO_SHARD,
marker, prefix, 1000, true,
result, &is_truncated, &marker,
y,
formatter->close_section();
- store->cls_obj_set_bucket_tag_timeout(bucket_info, 0);
+ store->getRados()->cls_obj_set_bucket_tag_timeout(bucket_info, 0);
return 0;
}
{
bool fix_index = op_state.will_fix_index();
- int r = store->bucket_check_index(bucket_info, &existing_stats, &calculated_stats);
+ int r = store->getRados()->bucket_check_index(bucket_info, &existing_stats, &calculated_stats);
if (r < 0) {
set_err_msg(err_msg, "failed to check index error=" + cpp_strerror(-r));
return r;
}
if (fix_index) {
- r = store->bucket_rebuild_index(bucket_info);
+ r = store->getRados()->bucket_rebuild_index(bucket_info);
if (r < 0) {
set_err_msg(err_msg, "failed to rebuild index err=" + cpp_strerror(-r));
return r;
return 0;
}
-int rgw_object_get_attr(RGWRados* store, const RGWBucketInfo& bucket_info,
+int rgw_object_get_attr(rgw::sal::RGWRadosStore* store, const RGWBucketInfo& bucket_info,
const rgw_obj& obj, const char* attr_name,
bufferlist& out_bl, optional_yield y)
{
RGWObjectCtx obj_ctx(store);
- RGWRados::Object op_target(store, bucket_info, obj_ctx, obj);
+ RGWRados::Object op_target(store->getRados(), bucket_info, obj_ctx, obj);
RGWRados::Object::Read rop(&op_target);
return rop.get_attr(attr_name, out_bl, y);
{
std::string object_name = op_state.get_object_name();
rgw_bucket bucket = op_state.get_bucket();
- auto sysobj_ctx = store->svc.sysobj->init_obj_ctx();
+ auto sysobj_ctx = store->svc()->sysobj->init_obj_ctx();
RGWBucketInfo bucket_info;
map<string, bufferlist> attrs;
- int ret = store->get_bucket_info(sysobj_ctx, bucket.tenant, bucket.name, bucket_info, NULL, null_yield, &attrs);
+ int ret = store->getRados()->get_bucket_info(sysobj_ctx, bucket.tenant, bucket.name, bucket_info, NULL, null_yield, &attrs);
if (ret < 0) {
return ret;
}
}
-int RGWBucketAdminOp::get_policy(RGWRados *store, RGWBucketAdminOpState& op_state,
+int RGWBucketAdminOp::get_policy(RGWRadosStore *store, RGWBucketAdminOpState& op_state,
RGWAccessControlPolicy& policy)
{
RGWBucket bucket;
/* Wrappers to facilitate RESTful interface */
-int RGWBucketAdminOp::get_policy(RGWRados *store, RGWBucketAdminOpState& op_state,
+int RGWBucketAdminOp::get_policy(RGWRadosStore *store, RGWBucketAdminOpState& op_state,
RGWFormatterFlusher& flusher)
{
RGWAccessControlPolicy policy(store->ctx());
return 0;
}
-int RGWBucketAdminOp::dump_s3_policy(RGWRados *store, RGWBucketAdminOpState& op_state,
+int RGWBucketAdminOp::dump_s3_policy(RGWRadosStore *store, RGWBucketAdminOpState& op_state,
ostream& os)
{
RGWAccessControlPolicy_S3 policy(store->ctx());
return 0;
}
-int RGWBucketAdminOp::unlink(RGWRados *store, RGWBucketAdminOpState& op_state)
+int RGWBucketAdminOp::unlink(RGWRadosStore *store, RGWBucketAdminOpState& op_state)
{
RGWBucket bucket;
return bucket.unlink(op_state, null_yield);
}
-int RGWBucketAdminOp::link(RGWRados *store, RGWBucketAdminOpState& op_state, string *err)
+int RGWBucketAdminOp::link(RGWRadosStore *store, RGWBucketAdminOpState& op_state, string *err)
{
RGWBucket bucket;
map<string, bufferlist> attrs;
}
-int RGWBucketAdminOp::chown(RGWRados *store, RGWBucketAdminOpState& op_state, const string& marker, string *err)
+int RGWBucketAdminOp::chown(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, const string& marker, string *err)
{
RGWBucket bucket;
map<string, bufferlist> attrs;
}
-int RGWBucketAdminOp::check_index(RGWRados *store, RGWBucketAdminOpState& op_state,
+int RGWBucketAdminOp::check_index(RGWRadosStore *store, RGWBucketAdminOpState& op_state,
RGWFormatterFlusher& flusher, optional_yield y)
{
int ret;
return 0;
}
-int RGWBucketAdminOp::remove_bucket(RGWRados *store, RGWBucketAdminOpState& op_state,
+int RGWBucketAdminOp::remove_bucket(RGWRadosStore *store, RGWBucketAdminOpState& op_state,
optional_yield y, bool bypass_gc, bool keep_index_consistent)
{
RGWBucket bucket;
return ret;
}
-int RGWBucketAdminOp::remove_object(RGWRados *store, RGWBucketAdminOpState& op_state)
+int RGWBucketAdminOp::remove_object(RGWRadosStore *store, RGWBucketAdminOpState& op_state)
{
RGWBucket bucket;
return bucket.remove_object(op_state);
}
-static int bucket_stats(RGWRados *store, const std::string& tenant_name, std::string& bucket_name, Formatter *formatter)
+static int bucket_stats(RGWRadosStore *store, const std::string& tenant_name, std::string& bucket_name, Formatter *formatter)
{
RGWBucketInfo bucket_info;
map<RGWObjCategory, RGWStorageStats> stats;
map<string, bufferlist> attrs;
real_time mtime;
- auto obj_ctx = store->svc.sysobj->init_obj_ctx();
- int r = store->get_bucket_info(obj_ctx, tenant_name, bucket_name, bucket_info, &mtime, null_yield, &attrs);
+ auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
+ int r = store->getRados()->get_bucket_info(obj_ctx, tenant_name, bucket_name, bucket_info, &mtime, null_yield, &attrs);
if (r < 0)
return r;
string bucket_ver, master_ver;
string max_marker;
- int ret = store->get_bucket_stats(bucket_info, RGW_NO_SHARD, &bucket_ver, &master_ver, stats, &max_marker);
+ int ret = store->getRados()->get_bucket_stats(bucket_info, RGW_NO_SHARD, &bucket_ver, &master_ver, stats, &max_marker);
if (ret < 0) {
cerr << "error getting bucket stats ret=" << ret << std::endl;
return ret;
return 0;
}
-int RGWBucketAdminOp::limit_check(RGWRados *store,
+int RGWBucketAdminOp::limit_check(RGWRadosStore *store,
RGWBucketAdminOpState& op_state,
const std::list<std::string>& user_ids,
RGWFormatterFlusher& flusher,
/* need info for num_shards */
RGWBucketInfo info;
- auto obj_ctx = store->svc.sysobj->init_obj_ctx();
+ auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
marker = bucket.name; /* Casey's location for marker update,
* as we may now not reach the end of
* the loop body */
- ret = store->get_bucket_info(obj_ctx, bucket.tenant, bucket.name,
+ ret = store->getRados()->get_bucket_info(obj_ctx, bucket.tenant, bucket.name,
info, nullptr, null_yield);
if (ret < 0)
continue;
/* need stats for num_entries */
string bucket_ver, master_ver;
std::map<RGWObjCategory, RGWStorageStats> stats;
- ret = store->get_bucket_stats(info, RGW_NO_SHARD, &bucket_ver,
+ ret = store->getRados()->get_bucket_stats(info, RGW_NO_SHARD, &bucket_ver,
&master_ver, stats, nullptr);
if (ret < 0)
return ret;
} /* RGWBucketAdminOp::limit_check */
-int RGWBucketAdminOp::info(RGWRados *store, RGWBucketAdminOpState& op_state,
+int RGWBucketAdminOp::info(RGWRadosStore *store, RGWBucketAdminOpState& op_state,
RGWFormatterFlusher& flusher)
{
int ret = 0;
bool truncated = true;
formatter->open_array_section("buckets");
- ret = store->ctl.meta.mgr->list_keys_init("bucket", &handle);
+ ret = store->ctl()->meta.mgr->list_keys_init("bucket", &handle);
while (ret == 0 && truncated) {
std::list<std::string> buckets;
const int max_keys = 1000;
- ret = store->ctl.meta.mgr->list_keys_next(handle, max_keys, buckets,
+ ret = store->ctl()->meta.mgr->list_keys_next(handle, max_keys, buckets,
&truncated);
for (auto& bucket_name : buckets) {
if (show_stats)
return 0;
}
-int RGWBucketAdminOp::set_quota(RGWRados *store, RGWBucketAdminOpState& op_state)
+int RGWBucketAdminOp::set_quota(RGWRadosStore *store, RGWBucketAdminOpState& op_state)
{
RGWBucket bucket;
return bucket.set_quota(op_state);
}
-static int purge_bucket_instance(RGWRados *store, const RGWBucketInfo& bucket_info)
+static int purge_bucket_instance(RGWRadosStore *store, const RGWBucketInfo& bucket_info)
{
int max_shards = (bucket_info.num_shards > 0 ? bucket_info.num_shards : 1);
for (int i = 0; i < max_shards; i++) {
- RGWRados::BucketShard bs(store);
+ RGWRados::BucketShard bs(store->getRados());
int shard_id = (bucket_info.num_shards > 0 ? i : -1);
int ret = bs.init(bucket_info.bucket, shard_id, nullptr);
if (ret < 0) {
<< "): " << cpp_strerror(-ret) << std::endl;
return ret;
}
- ret = store->bi_remove(bs);
+ ret = store->getRados()->bi_remove(bs);
if (ret < 0) {
cerr << "ERROR: failed to remove bucket index object: "
<< cpp_strerror(-ret) << std::endl;
}
using bucket_instance_ls = std::vector<RGWBucketInfo>;
-void get_stale_instances(RGWRados *store, const std::string& bucket_name,
+void get_stale_instances(RGWRadosStore *store, const std::string& bucket_name,
const vector<std::string>& lst,
bucket_instance_ls& stale_instances)
{
- auto obj_ctx = store->svc.sysobj->init_obj_ctx();
+ auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
bucket_instance_ls other_instances;
// first iterate over the entries, and pick up the done buckets; these
// are guaranteed to be stale
for (const auto& bucket_instance : lst){
RGWBucketInfo binfo;
- int r = store->get_bucket_instance_info(obj_ctx, bucket_instance,
+ int r = store->getRados()->get_bucket_instance_info(obj_ctx, bucket_instance,
binfo, nullptr,nullptr, null_yield);
if (r < 0){
// this can only happen if someone deletes us right when we're processing
// all the instances
auto [tenant, bucket] = split_tenant(bucket_name);
RGWBucketInfo cur_bucket_info;
- int r = store->get_bucket_info(obj_ctx, tenant, bucket, cur_bucket_info, nullptr, null_yield);
+ int r = store->getRados()->get_bucket_info(obj_ctx, tenant, bucket, cur_bucket_info, nullptr, null_yield);
if (r < 0) {
if (r == -ENOENT) {
// bucket doesn't exist, everything is stale then
return;
}
-static int process_stale_instances(RGWRados *store, RGWBucketAdminOpState& op_state,
+static int process_stale_instances(RGWRadosStore *store, RGWBucketAdminOpState& op_state,
RGWFormatterFlusher& flusher,
std::function<void(const bucket_instance_ls&,
Formatter *,
- RGWRados*)> process_f)
+ RGWRadosStore*)> process_f)
{
std::string marker;
void *handle;
Formatter *formatter = flusher.get_formatter();
static constexpr auto default_max_keys = 1000;
- int ret = store->ctl.meta.mgr->list_keys_init("bucket.instance", marker, &handle);
+ int ret = store->ctl()->meta.mgr->list_keys_init("bucket.instance", marker, &handle);
if (ret < 0) {
cerr << "ERROR: can't get key: " << cpp_strerror(-ret) << std::endl;
return ret;
do {
list<std::string> keys;
- ret = store->ctl.meta.mgr->list_keys_next(handle, default_max_keys, keys, &truncated);
+ ret = store->ctl()->meta.mgr->list_keys_next(handle, default_max_keys, keys, &truncated);
if (ret < 0 && ret != -ENOENT) {
cerr << "ERROR: lists_keys_next(): " << cpp_strerror(-ret) << std::endl;
return ret;
return 0;
}
-int RGWBucketAdminOp::list_stale_instances(RGWRados *store,
+int RGWBucketAdminOp::list_stale_instances(RGWRadosStore *store,
RGWBucketAdminOpState& op_state,
RGWFormatterFlusher& flusher)
{
auto process_f = [](const bucket_instance_ls& lst,
Formatter *formatter,
- RGWRados*){
+ RGWRadosStore*){
for (const auto& binfo: lst)
formatter->dump_string("key", binfo.bucket.get_key());
};
}
-int RGWBucketAdminOp::clear_stale_instances(RGWRados *store,
+int RGWBucketAdminOp::clear_stale_instances(RGWRadosStore *store,
RGWBucketAdminOpState& op_state,
RGWFormatterFlusher& flusher)
{
auto process_f = [](const bucket_instance_ls& lst,
Formatter *formatter,
- RGWRados *store){
+ RGWRadosStore *store){
for (const auto &binfo: lst) {
int ret = purge_bucket_instance(store, binfo);
if (ret == 0){
auto md_key = "bucket.instance:" + binfo.bucket.get_key();
- ret = store->ctl.meta.mgr->remove(md_key, null_yield);
+ ret = store->ctl()->meta.mgr->remove(md_key, null_yield);
}
formatter->open_object_section("delete_status");
formatter->dump_string("bucket_instance", binfo.bucket.get_key());
return process_stale_instances(store, op_state, flusher, process_f);
}
-static int fix_single_bucket_lc(RGWRados *store,
+static int fix_single_bucket_lc(rgw::sal::RGWRadosStore *store,
const std::string& tenant_name,
const std::string& bucket_name)
{
- auto obj_ctx = store->svc.sysobj->init_obj_ctx();
+ auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
RGWBucketInfo bucket_info;
map <std::string, bufferlist> bucket_attrs;
- int ret = store->get_bucket_info(obj_ctx, tenant_name, bucket_name,
+ int ret = store->getRados()->get_bucket_info(obj_ctx, tenant_name, bucket_name,
bucket_info, nullptr, null_yield, &bucket_attrs);
if (ret < 0) {
// TODO: Should we handle the case where the bucket could've been removed between
formatter->close_section(); // bucket_entry
}
-static void process_single_lc_entry(RGWRados *store, Formatter *formatter,
+static void process_single_lc_entry(rgw::sal::RGWRadosStore *store,
+ Formatter *formatter,
const std::string& tenant_name,
const std::string& bucket_name)
{
format_lc_status(formatter, tenant_name, bucket_name, -ret);
}
-int RGWBucketAdminOp::fix_lc_shards(RGWRados *store,
+int RGWBucketAdminOp::fix_lc_shards(rgw::sal::RGWRadosStore *store,
RGWBucketAdminOpState& op_state,
RGWFormatterFlusher& flusher)
{
process_single_lc_entry(store, formatter, user_id.tenant, bucket_name);
formatter->flush(cout);
} else {
- int ret = store->ctl.meta.mgr->list_keys_init("bucket", marker, &handle);
+ int ret = store->ctl()->meta.mgr->list_keys_init("bucket", marker, &handle);
if (ret < 0) {
std::cerr << "ERROR: can't get key: " << cpp_strerror(-ret) << std::endl;
return ret;
{
formatter->open_array_section("lc_fix_status");
auto sg = make_scope_guard([&store, &handle, &formatter](){
- store->ctl.meta.mgr->list_keys_complete(handle);
+ store->ctl()->meta.mgr->list_keys_complete(handle);
formatter->close_section(); // lc_fix_status
formatter->flush(cout);
});
do {
list<std::string> keys;
- ret = store->ctl.meta.mgr->list_keys_next(handle, default_max_keys, keys, &truncated);
+ ret = store->ctl()->meta.mgr->list_keys_next(handle, default_max_keys, keys, &truncated);
if (ret < 0 && ret != -ENOENT) {
std::cerr << "ERROR: lists_keys_next(): " << cpp_strerror(-ret) << std::endl;
return ret;
}
-static bool has_object_expired(RGWRados *store, const RGWBucketInfo& bucket_info,
+static bool has_object_expired(rgw::sal::RGWRadosStore *store,
+ const RGWBucketInfo& bucket_info,
const rgw_obj_key& key, utime_t& delete_at)
{
rgw_obj obj(bucket_info.bucket, key);
return false;
}
-static int fix_bucket_obj_expiry(RGWRados *store, const RGWBucketInfo& bucket_info,
+static int fix_bucket_obj_expiry(rgw::sal::RGWRadosStore *store,
+ const RGWBucketInfo& bucket_info,
RGWFormatterFlusher& flusher, bool dry_run)
{
if (bucket_info.bucket.bucket_id == bucket_info.bucket.marker) {
formatter->flush(std::cout);
});
- RGWRados::Bucket target(store, bucket_info);
+ RGWRados::Bucket target(store->getRados(), bucket_info);
RGWRados::Bucket::List list_op(&target);
list_op.params.list_versions = bucket_info.versioned();
return 0;
}
-int RGWBucketAdminOp::fix_obj_expiry(RGWRados *store, RGWBucketAdminOpState& op_state,
+int RGWBucketAdminOp::fix_obj_expiry(rgw::sal::RGWRadosStore *store,
+ RGWBucketAdminOpState& op_state,
RGWFormatterFlusher& flusher, bool dry_run)
{
RGWBucket admin_bucket;
}
// TODO: remove RGWRados dependency for bucket listing
-int RGWBucketCtl::chown(RGWRados *store, RGWBucketInfo& bucket_info,
+int RGWBucketCtl::chown(rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket_info,
const rgw_user& user_id, const std::string& display_name,
const std::string& marker, optional_yield y)
{
std::vector<rgw_bucket_dir_entry> objs;
map<string, bool> common_prefixes;
- RGWRados::Bucket target(store, bucket_info);
+ RGWRados::Bucket target(store->getRados(), bucket_info);
RGWRados::Bucket::List list_op(&target);
list_op.params.list_versions = true;
for (const auto& obj : objs) {
rgw_obj r_obj(bucket_info.bucket, obj.key);
- RGWRados::Object op_target(store, bucket_info, obj_ctx, r_obj);
+ RGWRados::Object op_target(store->getRados(), bucket_info, obj_ctx, r_obj);
RGWRados::Object::Read read_op(&op_target);
map<string, bufferlist> attrs;
encode(policy, bl);
obj_ctx.set_atomic(r_obj);
- ret = store->set_attr(&obj_ctx, bucket_info, r_obj, RGW_ATTR_ACL, bl);
+ ret = store->getRados()->set_attr(&obj_ctx, bucket_info, r_obj, RGW_ATTR_ACL, bl);
if (ret < 0) {
ldout(store->ctx(), 0) << "ERROR: modify attr failed " << cpp_strerror(-ret) << dendl;
return ret;
class RGWBucketMetadataHandler;
class RGWBucketInstanceMetadataHandler;
class RGWUserCtl;
+namespace rgw { namespace sal { class RGWRadosStore; } }
extern int rgw_bucket_parse_bucket_instance(const string& bucket_instance, string *bucket_name, string *bucket_id, int *shard_id);
extern int rgw_bucket_parse_bucket_key(CephContext *cct, const string& key,
* Get all the buckets owned by a user and fill up an RGWUserBuckets with them.
* Returns: 0 on success, -ERR# on failure.
*/
-extern int rgw_read_user_buckets(RGWRados *store,
+extern int rgw_read_user_buckets(rgw::sal::RGWRadosStore *store,
const rgw_user& user_id,
RGWUserBuckets& buckets,
const string& marker,
bool* is_truncated,
uint64_t default_amount = 1000);
-extern int rgw_remove_object(RGWRados *store, const RGWBucketInfo& bucket_info, const rgw_bucket& bucket, rgw_obj_key& key);
-extern int rgw_remove_bucket(RGWRados *store, rgw_bucket& bucket, bool delete_children, optional_yield y);
-extern int rgw_remove_bucket_bypass_gc(RGWRados *store, rgw_bucket& bucket, int concurrent_max, optional_yield y);
+extern int rgw_remove_object(rgw::sal::RGWRadosStore *store, const RGWBucketInfo& bucket_info, const rgw_bucket& bucket, rgw_obj_key& key);
+extern int rgw_remove_bucket(rgw::sal::RGWRadosStore *store, rgw_bucket& bucket, bool delete_children, optional_yield y);
+extern int rgw_remove_bucket_bypass_gc(rgw::sal::RGWRadosStore *store, rgw_bucket& bucket, int concurrent_max, bool keep_index_consistent, optional_yield y);
-extern int rgw_object_get_attr(RGWRados* store, const RGWBucketInfo& bucket_info,
+extern int rgw_object_get_attr(rgw::sal::RGWRadosStore* store, const RGWBucketInfo& bucket_info,
const rgw_obj& obj, const char* attr_name,
bufferlist& out_bl, optional_yield y);
-extern void check_bad_user_bucket_mapping(RGWRados *store, const rgw_user& user_id, bool fix);
+extern void check_bad_user_bucket_mapping(rgw::sal::RGWRadosStore *store, const rgw_user& user_id, bool fix);
struct RGWBucketAdminOpState {
rgw_user uid;
class RGWBucket
{
RGWUserBuckets buckets;
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
RGWAccessHandle handle;
RGWUserInfo user_info;
public:
RGWBucket() : store(NULL), handle(NULL), failure(false) {}
- int init(RGWRados *storage, RGWBucketAdminOpState& op_state, optional_yield y,
+ int init(rgw::sal::RGWRadosStore *storage, RGWBucketAdminOpState& op_state, optional_yield y,
std::string *err_msg = NULL, map<string, bufferlist> *pattrs = NULL);
int check_bad_index_multipart(RGWBucketAdminOpState& op_state,
class RGWBucketAdminOp
{
public:
- static int get_policy(RGWRados *store, RGWBucketAdminOpState& op_state,
+ static int get_policy(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state,
RGWFormatterFlusher& flusher);
- static int get_policy(RGWRados *store, RGWBucketAdminOpState& op_state,
+ static int get_policy(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state,
RGWAccessControlPolicy& policy);
- static int dump_s3_policy(RGWRados *store, RGWBucketAdminOpState& op_state,
+ static int dump_s3_policy(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state,
ostream& os);
- static int unlink(RGWRados *store, RGWBucketAdminOpState& op_state);
- static int link(RGWRados *store, RGWBucketAdminOpState& op_state, string *err_msg = NULL);
- static int chown(RGWRados *store, RGWBucketAdminOpState& op_state, const string& marker, string *err_msg = NULL);
+ static int unlink(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state);
+ static int link(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, string *err_msg = NULL);
+ static int chown(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, const string& marker, string *err_msg = NULL);
- static int check_index(RGWRados *store, RGWBucketAdminOpState& op_state,
+ static int check_index(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state,
RGWFormatterFlusher& flusher, optional_yield y);
- static int remove_bucket(RGWRados *store, RGWBucketAdminOpState& op_state, optional_yield y, bool bypass_gc = false, bool keep_index_consistent = true);
- static int remove_object(RGWRados *store, RGWBucketAdminOpState& op_state);
- static int info(RGWRados *store, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher);
- static int limit_check(RGWRados *store, RGWBucketAdminOpState& op_state,
+ static int remove_bucket(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, optional_yield y, bool bypass_gc = false, bool keep_index_consistent = true);
+ static int remove_object(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state);
+ static int info(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher);
+ static int limit_check(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state,
const std::list<std::string>& user_ids,
RGWFormatterFlusher& flusher,
bool warnings_only = false);
- static int set_quota(RGWRados *store, RGWBucketAdminOpState& op_state);
+ static int set_quota(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state);
- static int list_stale_instances(RGWRados *store, RGWBucketAdminOpState& op_state,
+ static int list_stale_instances(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state,
RGWFormatterFlusher& flusher);
- static int clear_stale_instances(RGWRados *store, RGWBucketAdminOpState& op_state,
+ static int clear_stale_instances(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state,
RGWFormatterFlusher& flusher);
- static int fix_lc_shards(RGWRados *store, RGWBucketAdminOpState& op_state,
+ static int fix_lc_shards(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state,
RGWFormatterFlusher& flusher);
- static int fix_obj_expiry(RGWRados *store, RGWBucketAdminOpState& op_state,
+ static int fix_obj_expiry(rgw::sal::RGWRadosStore *store, RGWBucketAdminOpState& op_state,
RGWFormatterFlusher& flusher, bool dry_run = false);
};
optional_yield y,
bool update_entrypoint = true);
- int chown(RGWRados *store, RGWBucketInfo& bucket_info,
+ int chown(rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket_info,
const rgw_user& user_id, const std::string& display_name,
const std::string& marker, optional_yield y);
&cw_client))));
RGWRestfulIO client_io(dout_context, &real_client_io);
- RGWRequest req(env.store->get_new_req_id());
+ RGWRequest req(env.store->getRados()->get_new_req_id());
int http_ret = 0;
//assert (scheduler != nullptr);
int ret = process_request(env.store, env.rest, &req, env.uri_prefix,
// vim: ts=8 sw=2 smarttab
#include "include/compat.h"
-#include "rgw_rados.h"
+#include "rgw_sal.h"
#include "rgw_zone.h"
#include "rgw_coroutine.h"
#include "rgw_cr_rados.h"
}
-RGWOmapAppend::RGWOmapAppend(RGWAsyncRadosProcessor *_async_rados, RGWRados *_store, const rgw_raw_obj& _obj,
+RGWOmapAppend::RGWOmapAppend(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RGWRadosStore *_store, const rgw_raw_obj& _obj,
uint64_t _window_size)
: RGWConsumerCR<string>(_store->ctx()), async_rados(_async_rados),
store(_store), obj(_obj), going_down(false), num_pending_entries(0), window_size(_window_size), total_entries(0)
int RGWAsyncLockSystemObj::_send_request()
{
rgw_rados_ref ref;
- int r = store->get_raw_obj_ref(obj, &ref);
+ int r = store->getRados()->get_raw_obj_ref(obj, &ref);
if (r < 0) {
lderr(store->ctx()) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
return r;
return l.lock_exclusive(&ref.pool.ioctx(), ref.obj.oid);
}
-RGWAsyncLockSystemObj::RGWAsyncLockSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWRados *_store,
+RGWAsyncLockSystemObj::RGWAsyncLockSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *_store,
RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj,
const string& _name, const string& _cookie, uint32_t _duration_secs) : RGWAsyncRadosRequest(caller, cn), store(_store),
obj(_obj),
int RGWAsyncUnlockSystemObj::_send_request()
{
rgw_rados_ref ref;
- int r = store->get_raw_obj_ref(obj, &ref);
+ int r = store->getRados()->get_raw_obj_ref(obj, &ref);
if (r < 0) {
lderr(store->ctx()) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
return r;
return l.unlock(&ref.pool.ioctx(), ref.obj.oid);
}
-RGWAsyncUnlockSystemObj::RGWAsyncUnlockSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWRados *_store,
+RGWAsyncUnlockSystemObj::RGWAsyncUnlockSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *_store,
RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj,
const string& _name, const string& _cookie) : RGWAsyncRadosRequest(caller, cn), store(_store),
obj(_obj),
{
}
-RGWRadosSetOmapKeysCR::RGWRadosSetOmapKeysCR(RGWRados *_store,
+RGWRadosSetOmapKeysCR::RGWRadosSetOmapKeysCR(rgw::sal::RGWRadosStore *_store,
const rgw_raw_obj& _obj,
map<string, bufferlist>& _entries) : RGWSimpleCoroutine(_store->ctx()),
store(_store),
int RGWRadosSetOmapKeysCR::send_request()
{
- int r = store->get_raw_obj_ref(obj, &ref);
+ int r = store->getRados()->get_raw_obj_ref(obj, &ref);
if (r < 0) {
lderr(store->ctx()) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
return r;
return r;
}
-RGWRadosGetOmapKeysCR::RGWRadosGetOmapKeysCR(RGWRados *_store,
+RGWRadosGetOmapKeysCR::RGWRadosGetOmapKeysCR(rgw::sal::RGWRadosStore *_store,
const rgw_raw_obj& _obj,
const string& _marker,
int _max_entries,
}
int RGWRadosGetOmapKeysCR::send_request() {
- int r = store->get_raw_obj_ref(obj, &result->ref);
+ int r = store->getRados()->get_raw_obj_ref(obj, &result->ref);
if (r < 0) {
lderr(store->ctx()) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
return r;
return r;
}
-RGWRadosRemoveOmapKeysCR::RGWRadosRemoveOmapKeysCR(RGWRados *_store,
+RGWRadosRemoveOmapKeysCR::RGWRadosRemoveOmapKeysCR(rgw::sal::RGWRadosStore *_store,
const rgw_raw_obj& _obj,
const set<string>& _keys) : RGWSimpleCoroutine(_store->ctx()),
store(_store),
}
int RGWRadosRemoveOmapKeysCR::send_request() {
- int r = store->get_raw_obj_ref(obj, &ref);
+ int r = store->getRados()->get_raw_obj_ref(obj, &ref);
if (r < 0) {
lderr(store->ctx()) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
return r;
return r;
}
-RGWRadosRemoveCR::RGWRadosRemoveCR(RGWRados *store, const rgw_raw_obj& obj)
+RGWRadosRemoveCR::RGWRadosRemoveCR(rgw::sal::RGWRadosStore *store, const rgw_raw_obj& obj)
: RGWSimpleCoroutine(store->ctx()), store(store), obj(obj)
{
set_description() << "remove dest=" << obj;
int RGWRadosRemoveCR::send_request()
{
- auto rados = store->get_rados_handle();
+ auto rados = store->getRados()->get_rados_handle();
int r = rados->ioctx_create(obj.pool.name.c_str(), ioctx);
if (r < 0) {
lderr(cct) << "ERROR: failed to open pool (" << obj.pool.name << ") ret=" << r << dendl;
return r;
}
-RGWSimpleRadosLockCR::RGWSimpleRadosLockCR(RGWAsyncRadosProcessor *_async_rados, RGWRados *_store,
+RGWSimpleRadosLockCR::RGWSimpleRadosLockCR(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RGWRadosStore *_store,
const rgw_raw_obj& _obj,
const string& _lock_name,
const string& _cookie,
return req->get_ret_status();
}
-RGWSimpleRadosUnlockCR::RGWSimpleRadosUnlockCR(RGWAsyncRadosProcessor *_async_rados, RGWRados *_store,
+RGWSimpleRadosUnlockCR::RGWSimpleRadosUnlockCR(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RGWRadosStore *_store,
const rgw_raw_obj& _obj,
const string& _lock_name,
const string& _cookie) : RGWSimpleCoroutine(_store->ctx()),
int RGWAsyncGetBucketInstanceInfo::_send_request()
{
- RGWSysObjectCtx obj_ctx = store->svc.sysobj->init_obj_ctx();
- int r = store->get_bucket_instance_info(obj_ctx, bucket, bucket_info, nullptr, nullptr, null_yield);
+ RGWSysObjectCtx obj_ctx = store->svc()->sysobj->init_obj_ctx();
+ int r = store->getRados()->get_bucket_instance_info(obj_ctx, bucket, bucket_info, nullptr, nullptr, null_yield);
if (r < 0) {
ldout(store->ctx(), 0) << "ERROR: failed to get bucket instance info for "
<< bucket << dendl;
return 0;
}
-RGWRadosBILogTrimCR::RGWRadosBILogTrimCR(RGWRados *store,
+RGWRadosBILogTrimCR::RGWRadosBILogTrimCR(rgw::sal::RGWRadosStore *store,
const RGWBucketInfo& bucket_info,
int shard_id,
const std::string& start_marker,
const std::string& end_marker)
- : RGWSimpleCoroutine(store->ctx()), bs(store),
+ : RGWSimpleCoroutine(store->ctx()), bs(store->getRados()),
start_marker(BucketIndexShardsManager::get_shard_marker(start_marker)),
end_marker(BucketIndexShardsManager::get_shard_marker(end_marker))
{
string user_id;
char buf[16];
- snprintf(buf, sizeof(buf), ".%lld", (long long)store->instance_id());
+ snprintf(buf, sizeof(buf), ".%lld", (long long)store->getRados()->instance_id());
map<string, bufferlist> attrs;
rgw_obj src_obj(bucket_info.bucket, key);
rgw_obj dest_obj(bucket_info.bucket, dest_key.value_or(key));
std::optional<uint64_t> bytes_transferred;
- int r = store->fetch_remote_obj(obj_ctx,
+ int r = store->getRados()->fetch_remote_obj(obj_ctx,
user_id,
NULL, /* req_info */
source_zone,
string user_id;
char buf[16];
- snprintf(buf, sizeof(buf), ".%lld", (long long)store->instance_id());
+ snprintf(buf, sizeof(buf), ".%lld", (long long)store->getRados()->instance_id());
rgw_obj src_obj(bucket_info.bucket, key);
rgw_obj dest_obj(src_obj);
- int r = store->stat_remote_obj(obj_ctx,
+ int r = store->getRados()->stat_remote_obj(obj_ctx,
user_id,
nullptr, /* req_info */
source_zone,
RGWObjState *state;
- int ret = store->get_obj_state(&obj_ctx, bucket_info, obj, &state, null_yield);
+ int ret = store->getRados()->get_obj_state(&obj_ctx, bucket_info, obj, &state, null_yield);
if (ret < 0) {
ldout(store->ctx(), 20) << __func__ << "(): get_obj_state() obj=" << obj << " returned ret=" << ret << dendl;
return ret;
}
}
- RGWRados::Object del_target(store, bucket_info, obj_ctx, obj);
+ RGWRados::Object del_target(store->getRados(), bucket_info, obj_ctx, obj);
RGWRados::Object::Delete del_op(&del_target);
del_op.params.bucket_owner = bucket_info.owner;
return 0;
}
-RGWRadosTimelogAddCR::RGWRadosTimelogAddCR(RGWRados *_store, const string& _oid,
+RGWRadosTimelogAddCR::RGWRadosTimelogAddCR(rgw::sal::RGWRadosStore *_store, const string& _oid,
const cls_log_entry& entry) : RGWSimpleCoroutine(_store->ctx()),
store(_store),
oid(_oid), cn(NULL)
set_status() << "sending request";
cn = stack->create_completion_notifier();
- return store->svc.cls->timelog.add(oid, entries, cn->completion(), true, null_yield);
+ return store->svc()->cls->timelog.add(oid, entries, cn->completion(), true, null_yield);
}
int RGWRadosTimelogAddCR::request_complete()
return r;
}
-RGWRadosTimelogTrimCR::RGWRadosTimelogTrimCR(RGWRados *store,
+RGWRadosTimelogTrimCR::RGWRadosTimelogTrimCR(rgw::sal::RGWRadosStore *store,
const std::string& oid,
const real_time& start_time,
const real_time& end_time,
set_status() << "sending request";
cn = stack->create_completion_notifier();
- return store->svc.cls->timelog.trim(oid, start_time, end_time, from_marker,
+ return store->svc()->cls->timelog.trim(oid, start_time, end_time, from_marker,
to_marker, cn->completion(),
null_yield);
}
}
-RGWSyncLogTrimCR::RGWSyncLogTrimCR(RGWRados *store, const std::string& oid,
+RGWSyncLogTrimCR::RGWSyncLogTrimCR(rgw::sal::RGWRadosStore *store, const std::string& oid,
const std::string& to_marker,
std::string *last_trim_marker)
: RGWRadosTimelogTrimCR(store, oid, real_time{}, real_time{},
int RGWAsyncStatObj::_send_request()
{
rgw_raw_obj raw_obj;
- store->obj_to_raw(bucket_info.placement_rule, obj, &raw_obj);
- return store->raw_obj_stat(raw_obj, psize, pmtime, pepoch,
+ store->getRados()->obj_to_raw(bucket_info.placement_rule, obj, &raw_obj);
+ return store->getRados()->raw_obj_stat(raw_obj, psize, pmtime, pepoch,
nullptr, nullptr, objv_tracker, null_yield);
}
-RGWStatObjCR::RGWStatObjCR(RGWAsyncRadosProcessor *async_rados, RGWRados *store,
+RGWStatObjCR::RGWStatObjCR(RGWAsyncRadosProcessor *async_rados, rgw::sal::RGWRadosStore *store,
const RGWBucketInfo& _bucket_info, const rgw_obj& obj, uint64_t *psize,
real_time* pmtime, uint64_t *pepoch,
RGWObjVersionTracker *objv_tracker)
return req->get_ret_status();
}
-RGWRadosNotifyCR::RGWRadosNotifyCR(RGWRados *store, const rgw_raw_obj& obj,
+RGWRadosNotifyCR::RGWRadosNotifyCR(rgw::sal::RGWRadosStore *store, const rgw_raw_obj& obj,
bufferlist& request, uint64_t timeout_ms,
bufferlist *response)
: RGWSimpleCoroutine(store->ctx()), store(store), obj(obj),
int RGWRadosNotifyCR::send_request()
{
- int r = store->get_raw_obj_ref(obj, &ref);
+ int r = store->getRados()->get_raw_obj_ref(obj, &ref);
if (r < 0) {
lderr(store->ctx()) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
return r;
#include <boost/intrusive_ptr.hpp>
#include "include/ceph_assert.h"
#include "rgw_coroutine.h"
-#include "rgw_rados.h"
+#include "rgw_sal.h"
#include "common/WorkQueue.h"
#include "common/Throttle.h"
template <class P>
class RGWSimpleWriteOnlyAsyncCR : public RGWSimpleCoroutine {
RGWAsyncRadosProcessor *async_rados;
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
P params;
const DoutPrefixProvider *dpp;
class Request : public RGWAsyncRadosRequest {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
P params;
const DoutPrefixProvider *dpp;
protected:
public:
Request(RGWCoroutine *caller,
RGWAioCompletionNotifier *cn,
- RGWRados *store,
+ rgw::sal::RGWRadosStore *store,
const P& _params,
const DoutPrefixProvider *dpp) : RGWAsyncRadosRequest(caller, cn),
store(store),
public:
RGWSimpleWriteOnlyAsyncCR(RGWAsyncRadosProcessor *_async_rados,
- RGWRados *_store,
+ rgw::sal::RGWRadosStore *_store,
const P& _params,
const DoutPrefixProvider *_dpp) : RGWSimpleCoroutine(_store->ctx()),
async_rados(_async_rados),
template <class P, class R>
class RGWSimpleAsyncCR : public RGWSimpleCoroutine {
RGWAsyncRadosProcessor *async_rados;
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
P params;
std::shared_ptr<R> result;
class Request : public RGWAsyncRadosRequest {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
P params;
std::shared_ptr<R> result;
protected:
public:
Request(RGWCoroutine *caller,
RGWAioCompletionNotifier *cn,
- RGWRados *_store,
+ rgw::sal::RGWRadosStore *_store,
const P& _params,
std::shared_ptr<R>& _result) : RGWAsyncRadosRequest(caller, cn),
store(_store),
public:
RGWSimpleAsyncCR(RGWAsyncRadosProcessor *_async_rados,
- RGWRados *_store,
+ rgw::sal::RGWRadosStore *_store,
const P& _params,
std::shared_ptr<R>& _result) : RGWSimpleCoroutine(_store->ctx()),
async_rados(_async_rados),
};
class RGWAsyncLockSystemObj : public RGWAsyncRadosRequest {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
rgw_raw_obj obj;
string lock_name;
string cookie;
protected:
int _send_request() override;
public:
- RGWAsyncLockSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWRados *_store,
+ RGWAsyncLockSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *_store,
RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj,
const string& _name, const string& _cookie, uint32_t _duration_secs);
};
class RGWAsyncUnlockSystemObj : public RGWAsyncRadosRequest {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
rgw_raw_obj obj;
string lock_name;
string cookie;
protected:
int _send_request() override;
public:
- RGWAsyncUnlockSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWRados *_store,
+ RGWAsyncUnlockSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *_store,
RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj,
const string& _name, const string& _cookie);
};
};
class RGWRadosSetOmapKeysCR : public RGWSimpleCoroutine {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
map<string, bufferlist> entries;
rgw_rados_ref ref;
boost::intrusive_ptr<RGWAioCompletionNotifier> cn;
public:
- RGWRadosSetOmapKeysCR(RGWRados *_store,
+ RGWRadosSetOmapKeysCR(rgw::sal::RGWRadosStore *_store,
const rgw_raw_obj& _obj,
map<string, bufferlist>& _entries);
};
using ResultPtr = std::shared_ptr<Result>;
- RGWRadosGetOmapKeysCR(RGWRados *_store, const rgw_raw_obj& _obj,
+ RGWRadosGetOmapKeysCR(rgw::sal::RGWRadosStore *_store, const rgw_raw_obj& _obj,
const string& _marker, int _max_entries,
ResultPtr result);
int request_complete() override;
private:
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
rgw_raw_obj obj;
string marker;
int max_entries;
};
class RGWRadosRemoveOmapKeysCR : public RGWSimpleCoroutine {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
rgw_rados_ref ref;
boost::intrusive_ptr<RGWAioCompletionNotifier> cn;
public:
- RGWRadosRemoveOmapKeysCR(RGWRados *_store,
+ RGWRadosRemoveOmapKeysCR(rgw::sal::RGWRadosStore *_store,
const rgw_raw_obj& _obj,
const set<string>& _keys);
};
class RGWRadosRemoveCR : public RGWSimpleCoroutine {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
librados::IoCtx ioctx;
const rgw_raw_obj obj;
boost::intrusive_ptr<RGWAioCompletionNotifier> cn;
public:
- RGWRadosRemoveCR(RGWRados *store, const rgw_raw_obj& obj);
+ RGWRadosRemoveCR(rgw::sal::RGWRadosStore *store, const rgw_raw_obj& obj);
int send_request() override;
int request_complete() override;
class RGWSimpleRadosLockCR : public RGWSimpleCoroutine {
RGWAsyncRadosProcessor *async_rados;
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
string lock_name;
string cookie;
uint32_t duration;
RGWAsyncLockSystemObj *req;
public:
- RGWSimpleRadosLockCR(RGWAsyncRadosProcessor *_async_rados, RGWRados *_store,
+ RGWSimpleRadosLockCR(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RGWRadosStore *_store,
const rgw_raw_obj& _obj,
const string& _lock_name,
const string& _cookie,
class RGWSimpleRadosUnlockCR : public RGWSimpleCoroutine {
RGWAsyncRadosProcessor *async_rados;
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
string lock_name;
string cookie;
RGWAsyncUnlockSystemObj *req;
public:
- RGWSimpleRadosUnlockCR(RGWAsyncRadosProcessor *_async_rados, RGWRados *_store,
+ RGWSimpleRadosUnlockCR(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RGWRadosStore *_store,
const rgw_raw_obj& _obj,
const string& _lock_name,
const string& _cookie);
class RGWOmapAppend : public RGWConsumerCR<string> {
RGWAsyncRadosProcessor *async_rados;
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
rgw_raw_obj obj;
uint64_t window_size;
uint64_t total_entries;
public:
- RGWOmapAppend(RGWAsyncRadosProcessor *_async_rados, RGWRados *_store,
+ RGWOmapAppend(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RGWRadosStore *_store,
const rgw_raw_obj& _obj,
uint64_t _window_size = OMAP_APPEND_MAX_ENTRIES_DEFAULT);
int operate() override;
class RGWShardedOmapCRManager {
RGWAsyncRadosProcessor *async_rados;
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
RGWCoroutine *op;
int num_shards;
vector<RGWOmapAppend *> shards;
public:
- RGWShardedOmapCRManager(RGWAsyncRadosProcessor *_async_rados, RGWRados *_store, RGWCoroutine *_op, int _num_shards, const rgw_pool& pool, const string& oid_prefix)
+ RGWShardedOmapCRManager(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RGWRadosStore *_store, RGWCoroutine *_op, int _num_shards, const rgw_pool& pool, const string& oid_prefix)
: async_rados(_async_rados),
store(_store), op(_op), num_shards(_num_shards) {
shards.reserve(num_shards);
};
class RGWAsyncGetBucketInstanceInfo : public RGWAsyncRadosRequest {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
rgw_bucket bucket;
protected:
int _send_request() override;
public:
RGWAsyncGetBucketInstanceInfo(RGWCoroutine *caller, RGWAioCompletionNotifier *cn,
- RGWRados *_store, const rgw_bucket& bucket)
+ rgw::sal::RGWRadosStore *_store, const rgw_bucket& bucket)
: RGWAsyncRadosRequest(caller, cn), store(_store), bucket(bucket) {}
RGWBucketInfo bucket_info;
class RGWGetBucketInstanceInfoCR : public RGWSimpleCoroutine {
RGWAsyncRadosProcessor *async_rados;
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
rgw_bucket bucket;
RGWBucketInfo *bucket_info;
public:
// rgw_bucket constructor
- RGWGetBucketInstanceInfoCR(RGWAsyncRadosProcessor *_async_rados, RGWRados *_store,
+ RGWGetBucketInstanceInfoCR(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RGWRadosStore *_store,
const rgw_bucket& _bucket, RGWBucketInfo *_bucket_info)
: RGWSimpleCoroutine(_store->ctx()), async_rados(_async_rados), store(_store),
bucket(_bucket), bucket_info(_bucket_info) {}
std::string end_marker;
boost::intrusive_ptr<RGWAioCompletionNotifier> cn;
public:
- RGWRadosBILogTrimCR(RGWRados *store, const RGWBucketInfo& bucket_info,
+ RGWRadosBILogTrimCR(rgw::sal::RGWRadosStore *store, const RGWBucketInfo& bucket_info,
int shard_id, const std::string& start_marker,
const std::string& end_marker);
};
class RGWAsyncFetchRemoteObj : public RGWAsyncRadosRequest {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
string source_zone;
RGWBucketInfo bucket_info;
protected:
int _send_request() override;
public:
- RGWAsyncFetchRemoteObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWRados *_store,
+ RGWAsyncFetchRemoteObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *_store,
const string& _source_zone,
RGWBucketInfo& _bucket_info,
std::optional<rgw_placement_rule> _dest_placement_rule,
class RGWFetchRemoteObjCR : public RGWSimpleCoroutine {
CephContext *cct;
RGWAsyncRadosProcessor *async_rados;
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
string source_zone;
RGWBucketInfo bucket_info;
const DoutPrefixProvider *dpp;
public:
- RGWFetchRemoteObjCR(RGWAsyncRadosProcessor *_async_rados, RGWRados *_store,
+ RGWFetchRemoteObjCR(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RGWRadosStore *_store,
const string& _source_zone,
RGWBucketInfo& _bucket_info,
std::optional<rgw_placement_rule> _dest_placement_rule,
};
class RGWAsyncStatRemoteObj : public RGWAsyncRadosRequest {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
string source_zone;
RGWBucketInfo bucket_info;
protected:
int _send_request() override;
public:
- RGWAsyncStatRemoteObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWRados *_store,
+ RGWAsyncStatRemoteObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *_store,
const string& _source_zone,
RGWBucketInfo& _bucket_info,
const rgw_obj_key& _key,
class RGWStatRemoteObjCR : public RGWSimpleCoroutine {
CephContext *cct;
RGWAsyncRadosProcessor *async_rados;
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
string source_zone;
RGWBucketInfo bucket_info;
RGWAsyncStatRemoteObj *req;
public:
- RGWStatRemoteObjCR(RGWAsyncRadosProcessor *_async_rados, RGWRados *_store,
+ RGWStatRemoteObjCR(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RGWRadosStore *_store,
const string& _source_zone,
RGWBucketInfo& _bucket_info,
const rgw_obj_key& _key,
};
class RGWAsyncRemoveObj : public RGWAsyncRadosRequest {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
string source_zone;
RGWBucketInfo bucket_info;
protected:
int _send_request() override;
public:
- RGWAsyncRemoveObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWRados *_store,
+ RGWAsyncRemoveObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *_store,
const string& _source_zone,
RGWBucketInfo& _bucket_info,
const rgw_obj_key& _key,
class RGWRemoveObjCR : public RGWSimpleCoroutine {
CephContext *cct;
RGWAsyncRadosProcessor *async_rados;
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
string source_zone;
RGWBucketInfo bucket_info;
rgw_zone_set *zones_trace;
public:
- RGWRemoveObjCR(RGWAsyncRadosProcessor *_async_rados, RGWRados *_store,
+ RGWRemoveObjCR(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RGWRadosStore *_store,
const string& _source_zone,
RGWBucketInfo& _bucket_info,
const rgw_obj_key& _key,
class RGWContinuousLeaseCR : public RGWCoroutine {
RGWAsyncRadosProcessor *async_rados;
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
const rgw_raw_obj obj;
bool aborted{false};
public:
- RGWContinuousLeaseCR(RGWAsyncRadosProcessor *_async_rados, RGWRados *_store,
+ RGWContinuousLeaseCR(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RGWRadosStore *_store,
const rgw_raw_obj& _obj,
const string& _lock_name, int _interval, RGWCoroutine *_caller)
: RGWCoroutine(_store->ctx()), async_rados(_async_rados), store(_store),
};
class RGWRadosTimelogAddCR : public RGWSimpleCoroutine {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
list<cls_log_entry> entries;
string oid;
boost::intrusive_ptr<RGWAioCompletionNotifier> cn;
public:
- RGWRadosTimelogAddCR(RGWRados *_store, const string& _oid,
+ RGWRadosTimelogAddCR(rgw::sal::RGWRadosStore *_store, const string& _oid,
const cls_log_entry& entry);
int send_request() override;
};
class RGWRadosTimelogTrimCR : public RGWSimpleCoroutine {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
boost::intrusive_ptr<RGWAioCompletionNotifier> cn;
protected:
std::string oid;
std::string to_marker;
public:
- RGWRadosTimelogTrimCR(RGWRados *store, const std::string& oid,
+ RGWRadosTimelogTrimCR(rgw::sal::RGWRadosStore *store, const std::string& oid,
const real_time& start_time, const real_time& end_time,
const std::string& from_marker,
const std::string& to_marker);
// a marker that compares greater than any timestamp-based index
static constexpr const char* max_marker = "99999999";
- RGWSyncLogTrimCR(RGWRados *store, const std::string& oid,
+ RGWSyncLogTrimCR(rgw::sal::RGWRadosStore *store, const std::string& oid,
const std::string& to_marker, std::string *last_trim_marker);
int request_complete() override;
};
class RGWAsyncStatObj : public RGWAsyncRadosRequest {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
RGWBucketInfo bucket_info;
rgw_obj obj;
uint64_t *psize;
protected:
int _send_request() override;
public:
- RGWAsyncStatObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWRados *store,
+ RGWAsyncStatObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *store,
const RGWBucketInfo& _bucket_info, const rgw_obj& obj, uint64_t *psize = nullptr,
real_time *pmtime = nullptr, uint64_t *pepoch = nullptr,
RGWObjVersionTracker *objv_tracker = nullptr)
};
class RGWStatObjCR : public RGWSimpleCoroutine {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
RGWAsyncRadosProcessor *async_rados;
RGWBucketInfo bucket_info;
rgw_obj obj;
RGWObjVersionTracker *objv_tracker;
RGWAsyncStatObj *req = nullptr;
public:
- RGWStatObjCR(RGWAsyncRadosProcessor *async_rados, RGWRados *store,
+ RGWStatObjCR(RGWAsyncRadosProcessor *async_rados, rgw::sal::RGWRadosStore *store,
const RGWBucketInfo& _bucket_info, const rgw_obj& obj, uint64_t *psize = nullptr,
real_time* pmtime = nullptr, uint64_t *pepoch = nullptr,
RGWObjVersionTracker *objv_tracker = nullptr);
/// coroutine wrapper for IoCtx::aio_notify()
class RGWRadosNotifyCR : public RGWSimpleCoroutine {
- RGWRados *const store;
+ rgw::sal::RGWRadosStore *const store;
const rgw_raw_obj obj;
bufferlist request;
const uint64_t timeout_ms;
boost::intrusive_ptr<RGWAioCompletionNotifier> cn;
public:
- RGWRadosNotifyCR(RGWRados *store, const rgw_raw_obj& obj,
+ RGWRadosNotifyCR(rgw::sal::RGWRadosStore *store, const rgw_raw_obj& obj,
bufferlist& request, uint64_t timeout_ms,
bufferlist *response);
template<>
int RGWGetUserInfoCR::Request::_send_request()
{
- return store->ctl.user->get_info_by_uid(params.user, result.get(), null_yield);
+ return store->ctl()->user->get_info_by_uid(params.user, result.get(), null_yield);
}
template<>
int RGWGetBucketInfoCR::Request::_send_request()
{
- RGWSysObjectCtx obj_ctx(store->svc.sysobj->init_obj_ctx());
- return store->get_bucket_info(obj_ctx, params.tenant, params.bucket_name,
+ RGWSysObjectCtx obj_ctx(store->svc()->sysobj->init_obj_ctx());
+ return store->getRados()->get_bucket_info(obj_ctx, params.tenant, params.bucket_name,
result->bucket_info, &result->mtime, null_yield, &result->attrs);
}
int RGWBucketCreateLocalCR::Request::_send_request()
{
CephContext *cct = store->ctx();
- auto& zone_svc = store->svc.zone;
- auto& sysobj_svc = store->svc.sysobj;
+ auto& zone_svc = store->svc()->zone;
+ auto& sysobj_svc = store->svc()->sysobj;
const auto& user_info = params.user_info.get();
const auto& user = user_info->user_id;
RGWBucketInfo bucket_info;
map<string, bufferlist> bucket_attrs;
- int ret = store->get_bucket_info(sysobj_ctx, user.tenant, bucket_name,
+ int ret = store->getRados()->get_bucket_info(sysobj_ctx, user.tenant, bucket_name,
bucket_info, nullptr, null_yield, &bucket_attrs);
if (ret < 0 && ret != -ENOENT)
return ret;
bucket_owner.set_id(user);
bucket_owner.set_name(user_info->display_name);
if (bucket_exists) {
- ret = rgw_op_get_bucket_policy_from_attr(cct, store->ctl.user, bucket_info,
+ ret = rgw_op_get_bucket_policy_from_attr(cct, store->ctl()->user, bucket_info,
bucket_attrs, &old_policy);
if (ret >= 0) {
if (old_policy.get_owner().get_id().compare(user) != 0) {
RGWBucketInfo info;
obj_version ep_objv;
- ret = store->create_bucket(*user_info, bucket, zonegroup_id,
+ ret = store->getRados()->create_bucket(*user_info, bucket, zonegroup_id,
placement_rule, bucket_info.swift_ver_location,
pquota_info, attrs,
info, nullptr, &ep_objv, creation_time,
bucket = info.bucket;
}
- ret = store->ctl.bucket->link_bucket(user, bucket, info.creation_time, null_yield, false);
+ ret = store->ctl()->bucket->link_bucket(user, bucket, info.creation_time, null_yield, false);
if (ret && !existed && ret != -EEXIST) {
/* if it exists (or previously existed), don't remove it! */
- int r = store->ctl.bucket->unlink_bucket(user, bucket, null_yield);
+ int r = store->ctl()->bucket->unlink_bucket(user, bucket, null_yield);
if (r < 0) {
ldout(cct, 0) << "WARNING: failed to unlink bucket: ret=" << r << dendl;
}
{
CephContext *cct = store->ctx();
- RGWLC *lc = store->get_lc();
+ RGWLC *lc = store->getRados()->get_lc();
if (!lc) {
lderr(cct) << "ERROR: lifecycle object is not initialized!" << dendl;
return -EIO;
#include "rgw_metadata.h"
#include "rgw_sync_counters.h"
#include "rgw_sync_module.h"
+#include "rgw_sal.h"
#include "cls/lock/cls_lock_client.h"
return false;
}
using CR = RGWSimpleRadosReadCR<rgw_data_sync_marker>;
- spawn(new CR(env->async_rados, env->store->svc.sysobj,
- rgw_raw_obj(env->store->svc.zone->get_zone_params().log_pool, RGWDataSyncStatusManager::shard_obj_name(env->source_zone, shard_id)),
+ spawn(new CR(env->async_rados, env->store->svc()->sysobj,
+ rgw_raw_obj(env->store->svc()->zone->get_zone_params().log_pool, RGWDataSyncStatusManager::shard_obj_name(env->source_zone, shard_id)),
&markers[shard_id]),
false);
shard_id++;
string error_oid = RGWDataSyncStatusManager::shard_obj_name(env->source_zone, shard_id) + ".retry";
auto& shard_keys = omapkeys[shard_id];
shard_keys = std::make_shared<RGWRadosGetOmapKeysCR::Result>();
- spawn(new RGWRadosGetOmapKeysCR(env->store, rgw_raw_obj(env->store->svc.zone->get_zone_params().log_pool, error_oid),
+ spawn(new RGWRadosGetOmapKeysCR(env->store, rgw_raw_obj(env->store->svc()->zone->get_zone_params().log_pool, error_oid),
marker, max_entries, shard_keys), false);
++shard_id;
using ReadInfoCR = RGWSimpleRadosReadCR<rgw_data_sync_info>;
yield {
bool empty_on_enoent = false; // fail on ENOENT
- call(new ReadInfoCR(sync_env->async_rados, sync_env->store->svc.sysobj,
- rgw_raw_obj(sync_env->store->svc.zone->get_zone_params().log_pool, RGWDataSyncStatusManager::sync_status_oid(sync_env->source_zone)),
+ call(new ReadInfoCR(sync_env->async_rados, sync_env->store->svc()->sysobj,
+ rgw_raw_obj(sync_env->store->svc()->zone->get_zone_params().log_pool, RGWDataSyncStatusManager::sync_status_oid(sync_env->source_zone)),
&sync_status->sync_info, empty_on_enoent));
}
if (retcode < 0) {
int send_request() override {
RGWRESTConn *conn = sync_env->conn;
- RGWRados *store = sync_env->store;
char buf[32];
snprintf(buf, sizeof(buf), "%d", shard_id);
int ret = http_op->aio_read();
if (ret < 0) {
- ldout(store->ctx(), 0) << "ERROR: failed to read from " << p << dendl;
+ ldout(sync_env->store->ctx(), 0) << "ERROR: failed to read from " << p << dendl;
log_error() << "failed to send http operation: " << http_op->to_str() << " ret=" << ret << std::endl;
http_op->put();
return ret;
class RGWInitDataSyncStatusCoroutine : public RGWCoroutine {
static constexpr uint32_t lock_duration = 30;
RGWDataSyncEnv *sync_env;
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
const rgw_pool& pool;
const uint32_t num_shards;
RGWSyncTraceNodeRef& _tn_parent,
rgw_data_sync_status *status)
: RGWCoroutine(_sync_env->cct), sync_env(_sync_env), store(sync_env->store),
- pool(store->svc.zone->get_zone_params().log_pool),
+ pool(store->svc()->zone->get_zone_params().log_pool),
num_shards(num_shards), status(status),
tn(sync_env->sync_tracer->add_node(_tn_parent, "init_data_sync_status")) {
lock_name = "sync_lock";
return set_cr_error(retcode);
}
using WriteInfoCR = RGWSimpleRadosWriteCR<rgw_data_sync_info>;
- yield call(new WriteInfoCR(sync_env->async_rados, store->svc.sysobj,
+ yield call(new WriteInfoCR(sync_env->async_rados, store->svc()->sysobj,
rgw_raw_obj{pool, sync_status_oid},
status->sync_info));
if (retcode < 0) {
/* fetch current position in logs */
yield {
- RGWRESTConn *conn = store->svc.zone->get_zone_conn_by_id(sync_env->source_zone);
+ RGWRESTConn *conn = store->svc()->zone->get_zone_conn_by_id(sync_env->source_zone);
if (!conn) {
tn->log(0, SSTR("ERROR: connection to zone " << sync_env->source_zone << " does not exist!"));
return set_cr_error(-EIO);
marker.timestamp = info.last_update;
const auto& oid = RGWDataSyncStatusManager::shard_obj_name(sync_env->source_zone, i);
using WriteMarkerCR = RGWSimpleRadosWriteCR<rgw_data_sync_marker>;
- spawn(new WriteMarkerCR(sync_env->async_rados, store->svc.sysobj,
+ spawn(new WriteMarkerCR(sync_env->async_rados, store->svc()->sysobj,
rgw_raw_obj{pool, oid}, marker), true);
}
}
}
status->sync_info.state = rgw_data_sync_info::StateBuildingFullSyncMaps;
- yield call(new WriteInfoCR(sync_env->async_rados, store->svc.sysobj,
+ yield call(new WriteInfoCR(sync_env->async_rados, store->svc()->sysobj,
rgw_raw_obj{pool, sync_status_oid},
status->sync_info));
if (retcode < 0) {
}
};
-RGWRemoteDataLog::RGWRemoteDataLog(const DoutPrefixProvider *dpp, RGWRados *_store,
+RGWRemoteDataLog::RGWRemoteDataLog(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *_store,
RGWAsyncRadosProcessor *async_rados)
- : RGWCoroutinesManager(_store->ctx(), _store->get_cr_registry()),
+ : RGWCoroutinesManager(_store->ctx(), _store->getRados()->get_cr_registry()),
dpp(dpp), store(_store), async_rados(async_rados),
http_manager(store->ctx(), completion_mgr),
data_sync_cr(NULL),
int RGWRemoteDataLog::read_sync_status(rgw_data_sync_status *sync_status)
{
// cannot run concurrently with run_sync(), so run in a separate manager
- RGWCoroutinesManager crs(store->ctx(), store->get_cr_registry());
+ RGWCoroutinesManager crs(store->ctx(), store->getRados()->get_cr_registry());
RGWHTTPManager http_manager(store->ctx(), crs.get_completion_mgr());
int ret = http_manager.start();
if (ret < 0) {
int RGWRemoteDataLog::read_recovering_shards(const int num_shards, set<int>& recovering_shards)
{
// cannot run concurrently with run_sync(), so run in a separate manager
- RGWCoroutinesManager crs(store->ctx(), store->get_cr_registry());
+ RGWCoroutinesManager crs(store->ctx(), store->getRados()->get_cr_registry());
RGWHTTPManager http_manager(store->ctx(), crs.get_completion_mgr());
int ret = http_manager.start();
if (ret < 0) {
rgw_data_sync_status sync_status;
sync_status.sync_info.num_shards = num_shards;
- RGWCoroutinesManager crs(store->ctx(), store->get_cr_registry());
+ RGWCoroutinesManager crs(store->ctx(), store->getRados()->get_cr_registry());
RGWHTTPManager http_manager(store->ctx(), crs.get_completion_mgr());
int ret = http_manager.start();
if (ret < 0) {
class RGWListBucketIndexesCR : public RGWCoroutine {
RGWDataSyncEnv *sync_env;
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
rgw_data_sync_status *sync_status;
int num_shards;
int operate() override {
reenter(this) {
entries_index = new RGWShardedOmapCRManager(sync_env->async_rados, store, this, num_shards,
- store->svc.zone->get_zone_params().log_pool,
+ store->svc()->zone->get_zone_params().log_pool,
oid_prefix);
yield; // yield so OmapAppendCRs can start
char buf[16];
snprintf(buf, sizeof(buf), ":%d", i);
s = key + buf;
- yield entries_index->append(s, store->svc.datalog_rados->get_log_shard_id(meta_info.data.get_bucket_info().bucket, i));
+ yield entries_index->append(s, store->svc()->datalog_rados->get_log_shard_id(meta_info.data.get_bucket_info().bucket, i));
}
} else {
- yield entries_index->append(key, store->svc.datalog_rados->get_log_shard_id(meta_info.data.get_bucket_info().bucket, -1));
+ yield entries_index->append(key, store->svc()->datalog_rados->get_log_shard_id(meta_info.data.get_bucket_info().bucket, -1));
}
}
truncated = result.truncated;
int shard_id = (int)iter->first;
rgw_data_sync_marker& marker = iter->second;
marker.total_entries = entries_index->get_total_entries(shard_id);
- spawn(new RGWSimpleRadosWriteCR<rgw_data_sync_marker>(sync_env->async_rados, store->svc.sysobj,
- rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, RGWDataSyncStatusManager::shard_obj_name(sync_env->source_zone, shard_id)),
+ spawn(new RGWSimpleRadosWriteCR<rgw_data_sync_marker>(sync_env->async_rados, store->svc()->sysobj,
+ rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, RGWDataSyncStatusManager::shard_obj_name(sync_env->source_zone, shard_id)),
marker),
true);
}
sync_marker.pos = index_pos;
tn->log(20, SSTR("updating marker marker_oid=" << marker_oid << " marker=" << new_marker));
- RGWRados *store = sync_env->store;
+ RGWRados *rados = sync_env->store->getRados();
- return new RGWSimpleRadosWriteCR<rgw_data_sync_marker>(sync_env->async_rados, store->svc.sysobj,
- rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, marker_oid),
+ return new RGWSimpleRadosWriteCR<rgw_data_sync_marker>(sync_env->async_rados, rados->svc.sysobj,
+ rgw_raw_obj(rados->svc.zone->get_zone_params().log_pool, marker_oid),
sync_marker);
}
if (lease_cr) {
lease_cr->abort();
}
- RGWRados *store = sync_env->store;
- lease_cr.reset(new RGWContinuousLeaseCR(sync_env->async_rados, store,
- rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, status_oid),
+ lease_cr.reset(new RGWContinuousLeaseCR(sync_env->async_rados, sync_env->store,
+ rgw_raw_obj(sync_env->store->svc()->zone->get_zone_params().log_pool, status_oid),
lock_name, lock_duration, this));
lease_stack.reset(spawn(lease_cr.get(), false));
}
sync_marker.state = rgw_data_sync_marker::IncrementalSync;
sync_marker.marker = sync_marker.next_step_marker;
sync_marker.next_step_marker.clear();
- RGWRados *store = sync_env->store;
- call(new RGWSimpleRadosWriteCR<rgw_data_sync_marker>(sync_env->async_rados, store->svc.sysobj,
- rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, status_oid),
+ RGWRados *rados = sync_env->store->getRados();
+ call(new RGWSimpleRadosWriteCR<rgw_data_sync_marker>(sync_env->async_rados, rados->svc.sysobj,
+ rgw_raw_obj(rados->svc.zone->get_zone_params().log_pool, status_oid),
sync_marker));
}
if (retcode < 0) {
}
RGWCoroutine *alloc_finisher_cr() override {
- RGWRados *store = sync_env->store;
- return new RGWSimpleRadosReadCR<rgw_data_sync_marker>(sync_env->async_rados, store->svc.sysobj,
- rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, RGWDataSyncStatusManager::shard_obj_name(sync_env->source_zone, shard_id)),
+ RGWRados *rados = sync_env->store->getRados();
+ return new RGWSimpleRadosReadCR<rgw_data_sync_marker>(sync_env->async_rados, rados->svc.sysobj,
+ rgw_raw_obj(rados->svc.zone->get_zone_params().log_pool, RGWDataSyncStatusManager::shard_obj_name(sync_env->source_zone, shard_id)),
&sync_marker);
}
tn->log(10, SSTR("spawning " << num_shards << " shards sync"));
for (map<uint32_t, rgw_data_sync_marker>::iterator iter = sync_status.sync_markers.begin();
iter != sync_status.sync_markers.end(); ++iter) {
- RGWDataSyncShardControlCR *cr = new RGWDataSyncShardControlCR(sync_env, sync_env->store->svc.zone->get_zone_params().log_pool,
+ RGWDataSyncShardControlCR *cr = new RGWDataSyncShardControlCR(sync_env, sync_env->store->svc()->zone->get_zone_params().log_pool,
iter->first, iter->second, tn);
cr->get();
shard_crs_lock.lock();
}
RGWCoroutine *set_sync_info_cr() {
- RGWRados *store = sync_env->store;
- return new RGWSimpleRadosWriteCR<rgw_data_sync_info>(sync_env->async_rados, store->svc.sysobj,
- rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, RGWDataSyncStatusManager::sync_status_oid(sync_env->source_zone)),
+ RGWRados *rados = sync_env->store->getRados();
+ return new RGWSimpleRadosWriteCR<rgw_data_sync_info>(sync_env->async_rados, rados->svc.sysobj,
+ rgw_raw_obj(rados->svc.zone->get_zone_params().log_pool, RGWDataSyncStatusManager::sync_status_oid(sync_env->source_zone)),
sync_status.sync_info);
}
(bucket_info.flags & BUCKET_VERSIONS_SUSPENDED)) {
ldout(sync_env->cct, 0) << "SYNC_ARCHIVE: sync_object: enabling object versioning for archive bucket" << dendl;
bucket_info.flags = (bucket_info.flags & ~BUCKET_VERSIONS_SUSPENDED) | BUCKET_VERSIONED;
- int op_ret = sync_env->store->put_bucket_instance_info(bucket_info, false, real_time(), NULL);
+ int op_ret = sync_env->store->getRados()->put_bucket_instance_info(bucket_info, false, real_time(), NULL);
if (op_ret < 0) {
ldout(sync_env->cct, 0) << "SYNC_ARCHIVE: sync_object: error versioning archive bucket" << dendl;
return NULL;
versioned_epoch = 0;
dest_key = key;
if (key.instance.empty()) {
- sync_env->store->gen_rand_obj_instance_name(&(*dest_key));
+ sync_env->store->getRados()->gen_rand_obj_instance_name(&(*dest_key));
}
}
{
RGWZone *zone_def;
- if (!store->svc.zone->find_zone_by_id(source_zone, &zone_def)) {
+ if (!store->svc()->zone->find_zone_by_id(source_zone, &zone_def)) {
ldpp_dout(this, 0) << "ERROR: failed to find zone config info for zone=" << source_zone << dendl;
return -EIO;
}
- if (!store->svc.sync_modules->get_manager()->supports_data_export(zone_def->tier_type)) {
+ if (!store->svc()->sync_modules->get_manager()->supports_data_export(zone_def->tier_type)) {
return -ENOTSUP;
}
- const RGWZoneParams& zone_params = store->svc.zone->get_zone_params();
+ const RGWZoneParams& zone_params = store->svc()->zone->get_zone_params();
if (sync_module == nullptr) {
- sync_module = store->get_sync_module();
+ sync_module = store->getRados()->get_sync_module();
}
- conn = store->svc.zone->get_zone_conn_by_id(source_zone);
+ conn = store->svc()->zone->get_zone_conn_by_id(source_zone);
if (!conn) {
ldpp_dout(this, 0) << "connection object to zone " << source_zone << " does not exist" << dendl;
return -EINVAL;
error_logger = new RGWSyncErrorLogger(store, RGW_SYNC_ERROR_LOG_SHARD_PREFIX, ERROR_LOGGER_SHARDS);
- int r = source_log.init(source_zone, conn, error_logger, store->get_sync_tracer(),
+ int r = source_log.init(source_zone, conn, error_logger, store->getRados()->get_sync_tracer(),
sync_module, counters);
if (r < 0) {
ldpp_dout(this, 0) << "ERROR: failed to init remote log, r=" << r << dendl;
return string(buf);
}
-RGWRemoteBucketLog::RGWRemoteBucketLog(const DoutPrefixProvider *_dpp, RGWRados *_store,
+RGWRemoteBucketLog::RGWRemoteBucketLog(const DoutPrefixProvider *_dpp,
+ rgw::sal::RGWRadosStore *_store,
RGWBucketSyncStatusManager *_sm,
RGWAsyncRadosProcessor *_async_rados,
RGWHTTPManager *_http_manager)
- : RGWCoroutinesManager(_store->ctx(), _store->get_cr_registry()),
+ : RGWCoroutinesManager(_store->ctx(), _store->getRados()->get_cr_registry()),
dpp(_dpp), store(_store), status_manager(_sm),
async_rados(_async_rados), http_manager(_http_manager)
{
}
yield {
auto store = sync_env->store;
- rgw_raw_obj obj(store->svc.zone->get_zone_params().log_pool, sync_status_oid);
+ rgw_raw_obj obj(store->svc()->zone->get_zone_params().log_pool, sync_status_oid);
if (info.syncstopped) {
call(new RGWRadosRemoveCR(store, obj));
}
map<string, bufferlist> attrs;
status.encode_all_attrs(attrs);
- call(new RGWSimpleRadosWriteAttrsCR(sync_env->async_rados, store->svc.sysobj, obj, attrs));
+ call(new RGWSimpleRadosWriteAttrsCR(sync_env->async_rados, store->svc()->sysobj, obj, attrs));
}
}
if (info.syncstopped) {
int RGWReadBucketSyncStatusCoroutine::operate()
{
reenter(this) {
- yield call(new RGWSimpleRadosReadAttrsCR(sync_env->async_rados, sync_env->store->svc.sysobj,
- rgw_raw_obj(sync_env->store->svc.zone->get_zone_params().log_pool, oid),
+ yield call(new RGWSimpleRadosReadAttrsCR(sync_env->async_rados, sync_env->store->svc()->sysobj,
+ rgw_raw_obj(sync_env->store->svc()->zone->get_zone_params().log_pool, oid),
&attrs, true));
if (retcode == -ENOENT) {
*status = rgw_bucket_shard_sync_info();
#define OMAP_READ_MAX_ENTRIES 10
class RGWReadRecoveringBucketShardsCoroutine : public RGWCoroutine {
RGWDataSyncEnv *sync_env;
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
const int shard_id;
int max_entries;
count = 0;
do {
omapkeys = std::make_shared<RGWRadosGetOmapKeysCR::Result>();
- yield call(new RGWRadosGetOmapKeysCR(store, rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, error_oid),
+ yield call(new RGWRadosGetOmapKeysCR(store, rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, error_oid),
marker, max_omap_entries, omapkeys));
if (retcode == -ENOENT) {
class RGWReadPendingBucketShardsCoroutine : public RGWCoroutine {
RGWDataSyncEnv *sync_env;
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
const int shard_id;
int max_entries;
reenter(this){
//read sync status marker
using CR = RGWSimpleRadosReadCR<rgw_data_sync_marker>;
- yield call(new CR(sync_env->async_rados, store->svc.sysobj,
- rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, status_oid),
+ yield call(new CR(sync_env->async_rados, store->svc()->sysobj,
+ rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, status_oid),
sync_marker));
if (retcode < 0) {
ldout(sync_env->cct,0) << "failed to read sync status marker with "
int RGWRemoteDataLog::read_shard_status(int shard_id, set<string>& pending_buckets, set<string>& recovering_buckets, rgw_data_sync_marker *sync_marker, const int max_entries)
{
// cannot run concurrently with run_sync(), so run in a separate manager
- RGWCoroutinesManager crs(store->ctx(), store->get_cr_registry());
+ RGWCoroutinesManager crs(store->ctx(), store->getRados()->get_cr_registry());
RGWHTTPManager http_manager(store->ctx(), crs.get_completion_mgr());
int ret = http_manager.start();
if (ret < 0) {
return new RGWReadBucketSyncStatusCoroutine(&sync_env, bs, sync_status);
}
-RGWBucketSyncStatusManager::RGWBucketSyncStatusManager(RGWRados *_store, const string& _source_zone,
- const rgw_bucket& bucket) : store(_store),
- cr_mgr(_store->ctx(), _store->get_cr_registry()),
- http_manager(store->ctx(), cr_mgr.get_completion_mgr()),
- source_zone(_source_zone),
- conn(NULL), error_logger(NULL),
- bucket(bucket),
- num_shards(0)
+RGWBucketSyncStatusManager::RGWBucketSyncStatusManager(rgw::sal::RGWRadosStore *_store, const string& _source_zone,
+ const rgw_bucket& bucket) : store(_store),
+ cr_mgr(_store->ctx(), _store->getRados()->get_cr_registry()),
+ http_manager(store->ctx(), cr_mgr.get_completion_mgr()),
+ source_zone(_source_zone),
+ conn(NULL), error_logger(NULL),
+ bucket(bucket),
+ num_shards(0)
{
}
map<string, bufferlist> attrs;
sync_marker.encode_attr(attrs);
- RGWRados *store = sync_env->store;
+ RGWRados *rados = sync_env->store->getRados();
tn->log(20, SSTR("updating marker marker_oid=" << marker_oid << " marker=" << new_marker));
- return new RGWSimpleRadosWriteAttrsCR(sync_env->async_rados, store->svc.sysobj,
- rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, marker_oid),
+ return new RGWSimpleRadosWriteAttrsCR(sync_env->async_rados, rados->svc.sysobj,
+ rgw_raw_obj(rados->svc.zone->get_zone_params().log_pool, marker_oid),
attrs);
}
map<string, bufferlist> attrs;
sync_marker.encode_attr(attrs);
- RGWRados *store = sync_env->store;
+ RGWRados *rados = sync_env->store->getRados();
tn->log(20, SSTR("updating marker marker_oid=" << marker_oid << " marker=" << new_marker));
return new RGWSimpleRadosWriteAttrsCR(sync_env->async_rados,
- store->svc.sysobj,
- rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, marker_oid),
+ rados->svc.sysobj,
+ rgw_raw_obj(rados->svc.zone->get_zone_params().log_pool, marker_oid),
attrs);
}
data_sync_module = sync_env->sync_module->get_data_handler();
zones_trace = _zones_trace;
- zones_trace.insert(sync_env->store->svc.zone->get_zone().id);
+ zones_trace.insert(sync_env->store->svc()->zone->get_zone().id);
}
int operate() override {
sync_info.state = rgw_bucket_shard_sync_info::StateIncrementalSync;
map<string, bufferlist> attrs;
sync_info.encode_state_attr(attrs);
- RGWRados *store = sync_env->store;
- call(new RGWSimpleRadosWriteAttrsCR(sync_env->async_rados, store->svc.sysobj,
- rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, status_oid),
+ RGWRados *rados = sync_env->store->getRados();
+ call(new RGWSimpleRadosWriteAttrsCR(sync_env->async_rados, rados->svc.sysobj,
+ rgw_raw_obj(rados->svc.zone->get_zone_params().log_pool, status_oid),
attrs));
}
} else {
: RGWCoroutine(_sync_env->cct), sync_env(_sync_env), bs(bs),
bucket_info(_bucket_info), lease_cr(lease_cr), sync_info(sync_info),
marker_tracker(sync_env, status_oid, sync_info.inc_marker),
- status_oid(status_oid), zone_id(_sync_env->store->svc.zone->get_zone().id),
+ status_oid(status_oid), zone_id(_sync_env->store->svc()->zone->get_zone().id),
tn(sync_env->sync_tracer->add_node(_tn_parent, "inc_sync",
SSTR(bucket_shard_str{bs})))
{
reenter(this) {
yield {
set_status("acquiring sync lock");
- auto store = sync_env->store;
- lease_cr.reset(new RGWContinuousLeaseCR(sync_env->async_rados, store,
- rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, status_oid),
+ lease_cr.reset(new RGWContinuousLeaseCR(sync_env->async_rados, sync_env->store,
+ rgw_raw_obj(sync_env->store->svc()->zone->get_zone_params().log_pool, status_oid),
"sync_lock",
cct->_conf->rgw_sync_lease_period,
this));
tn->log(10, SSTR("no local info for bucket:" << ": fetching metadata"));
string raw_key = string("bucket.instance:") + bs.bucket.get_key();
- meta_sync_env.init(sync_env->dpp, cct, sync_env->store, sync_env->store->svc.zone->get_master_conn(), sync_env->async_rados,
+ meta_sync_env.init(sync_env->dpp, cct, sync_env->store, sync_env->store->svc()->zone->get_master_conn(), sync_env->async_rados,
sync_env->http_manager, sync_env->error_logger, sync_env->sync_tracer);
call(new RGWMetaSyncSingleEntryCR(&meta_sync_env, raw_key,
int RGWBucketSyncStatusManager::init()
{
- conn = store->svc.zone->get_zone_conn_by_id(source_zone);
+ conn = store->svc()->zone->get_zone_conn_by_id(source_zone);
if (!conn) {
ldpp_dout(this, 0) << "connection object to zone " << source_zone << " does not exist" << dendl;
return -EINVAL;
int effective_num_shards = (num_shards ? num_shards : 1);
- auto async_rados = store->svc.rados->get_async_processor();
+ auto async_rados = store->svc()->rados->get_async_processor();
for (int i = 0; i < effective_num_shards; i++) {
RGWRemoteBucketLog *l = new RGWRemoteBucketLog(this, store, this, async_rados, &http_manager);
- ret = l->init(source_zone, conn, bucket, (num_shards ? i : -1), error_logger, store->get_sync_tracer(), sync_module);
+ ret = l->init(source_zone, conn, bucket, (num_shards ? i : -1), error_logger, store->getRados()->get_sync_tracer(), sync_module);
if (ret < 0) {
ldpp_dout(this, 0) << "ERROR: failed to initialize RGWRemoteBucketLog object" << dendl;
return ret;
class RGWCollectBucketSyncStatusCR : public RGWShardCollectCR {
static constexpr int max_concurrent_shards = 16;
- RGWRados *const store;
+ rgw::sal::RGWRadosStore *const store;
RGWDataSyncEnv *const env;
const int num_shards;
rgw_bucket_shard bs;
Vector::iterator i, end;
public:
- RGWCollectBucketSyncStatusCR(RGWRados *store, RGWDataSyncEnv *env,
+ RGWCollectBucketSyncStatusCR(rgw::sal::RGWRadosStore *store, RGWDataSyncEnv *env,
int num_shards, const rgw_bucket& bucket,
Vector *status)
: RGWShardCollectCR(store->ctx(), max_concurrent_shards),
}
};
-int rgw_bucket_sync_status(const DoutPrefixProvider *dpp, RGWRados *store, const std::string& source_zone,
+int rgw_bucket_sync_status(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, const std::string& source_zone,
const RGWBucketInfo& bucket_info,
std::vector<rgw_bucket_shard_sync_info> *status)
{
RGWDataSyncEnv env;
RGWSyncModuleInstanceRef module; // null sync module
- env.init(dpp, store->ctx(), store, nullptr, store->svc.rados->get_async_processor(),
+ env.init(dpp, store->ctx(), store, nullptr, store->svc()->rados->get_async_processor(),
nullptr, nullptr, nullptr, source_zone, module, nullptr);
- RGWCoroutinesManager crs(store->ctx(), store->get_cr_registry());
+ RGWCoroutinesManager crs(store->ctx(), store->getRados()->get_cr_registry());
return crs.run(new RGWCollectBucketSyncStatusCR(store, &env, num_shards,
bucket_info.bucket, status));
}
#include "rgw_coroutine.h"
#include "rgw_http_client.h"
+#include "rgw_sal.h"
#include "rgw_sync_module.h"
#include "rgw_sync_trace.h"
struct RGWDataSyncEnv {
const DoutPrefixProvider *dpp{nullptr};
CephContext *cct{nullptr};
- RGWRados *store{nullptr};
+ rgw::sal::RGWRadosStore *store{nullptr};
RGWRESTConn *conn{nullptr};
RGWAsyncRadosProcessor *async_rados{nullptr};
RGWHTTPManager *http_manager{nullptr};
RGWDataSyncEnv() {}
- void init(const DoutPrefixProvider *_dpp, CephContext *_cct, RGWRados *_store, RGWRESTConn *_conn,
+ void init(const DoutPrefixProvider *_dpp, CephContext *_cct, rgw::sal::RGWRadosStore *_store, RGWRESTConn *_conn,
RGWAsyncRadosProcessor *_async_rados, RGWHTTPManager *_http_manager,
RGWSyncErrorLogger *_error_logger, RGWSyncTraceManager *_sync_tracer,
const string& _source_zone, RGWSyncModuleInstanceRef& _sync_module,
class RGWRemoteDataLog : public RGWCoroutinesManager {
const DoutPrefixProvider *dpp;
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
RGWAsyncRadosProcessor *async_rados;
RGWHTTPManager http_manager;
bool initialized;
public:
- RGWRemoteDataLog(const DoutPrefixProvider *dpp, RGWRados *_store,
+ RGWRemoteDataLog(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *_store,
RGWAsyncRadosProcessor *async_rados);
int init(const string& _source_zone, RGWRESTConn *_conn, RGWSyncErrorLogger *_error_logger,
RGWSyncTraceManager *_sync_tracer, RGWSyncModuleInstanceRef& module,
};
class RGWDataSyncStatusManager : public DoutPrefixProvider {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
string source_zone;
RGWRESTConn *conn;
int num_shards;
public:
- RGWDataSyncStatusManager(RGWRados *_store, RGWAsyncRadosProcessor *async_rados,
+ RGWDataSyncStatusManager(rgw::sal::RGWRadosStore *_store, RGWAsyncRadosProcessor *async_rados,
const string& _source_zone, PerfCounters* counters)
: store(_store), source_zone(_source_zone), conn(NULL), error_logger(NULL),
sync_module(nullptr), counters(counters),
source_log(this, store, async_rados), num_shards(0) {}
- RGWDataSyncStatusManager(RGWRados *_store, RGWAsyncRadosProcessor *async_rados,
+ RGWDataSyncStatusManager(rgw::sal::RGWRadosStore *_store, RGWAsyncRadosProcessor *async_rados,
const string& _source_zone, PerfCounters* counters,
const RGWSyncModuleInstanceRef& _sync_module)
: store(_store), source_zone(_source_zone), conn(NULL), error_logger(NULL),
class RGWRemoteBucketLog : public RGWCoroutinesManager {
const DoutPrefixProvider *dpp;
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
RGWRESTConn *conn{nullptr};
string source_zone;
rgw_bucket_shard bs;
RGWBucketSyncCR *sync_cr{nullptr};
public:
- RGWRemoteBucketLog(const DoutPrefixProvider *_dpp, RGWRados *_store,
+ RGWRemoteBucketLog(const DoutPrefixProvider *_dpp, rgw::sal::RGWRadosStore *_store,
RGWBucketSyncStatusManager *_sm,
RGWAsyncRadosProcessor *_async_rados,
RGWHTTPManager *_http_manager);
};
class RGWBucketSyncStatusManager : public DoutPrefixProvider {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
RGWCoroutinesManager cr_mgr;
int num_shards;
public:
- RGWBucketSyncStatusManager(RGWRados *_store, const string& _source_zone,
+ RGWBucketSyncStatusManager(rgw::sal::RGWRadosStore *_store, const string& _source_zone,
const rgw_bucket& bucket);
~RGWBucketSyncStatusManager();
};
/// read the sync status of all bucket shards from the given source zone
-int rgw_bucket_sync_status(const DoutPrefixProvider *dpp, RGWRados *store, const std::string& source_zone,
+int rgw_bucket_sync_status(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, const std::string& source_zone,
const RGWBucketInfo& bucket_info,
std::vector<rgw_bucket_shard_sync_info> *status);
struct req_state* s = get_state();
auto compression_type =
- get_store()->svc.zone->get_zone_params().get_compression_type(
+ get_store()->svc()->zone->get_zone_params().get_compression_type(
s->bucket_info.placement_rule);
/* not obviously supportable */
if (!version_id.empty()) {
obj.key.set_instance(version_id);
} else {
- get_store()->gen_rand_obj_instance_name(&obj);
+ get_store()->getRados()->gen_rand_obj_instance_name(&obj);
version_id = obj.key.instance;
}
}
return -EIO;
}
- op_ret = get_store()->check_quota(s->bucket_owner.get_id(), s->bucket,
+ op_ret = get_store()->getRados()->check_quota(s->bucket_owner.get_id(), s->bucket,
user_quota, bucket_quota, real_ofs, true);
/* max_size exceed */
if (op_ret < 0)
goto done;
}
- op_ret = get_store()->check_quota(s->bucket_owner.get_id(), s->bucket,
+ op_ret = get_store()->getRados()->check_quota(s->bucket_owner.get_id(), s->bucket,
user_quota, bucket_quota, s->obj_size, true);
/* max_size exceed */
if (op_ret < 0) {
goto done;
}
- op_ret = get_store()->check_bucket_shards(s->bucket_info, s->bucket,
+ op_ret = get_store()->getRados()->check_bucket_shards(s->bucket_info, s->bucket,
bucket_quota);
if (op_ret < 0) {
goto done;
(void) fh_lru.unref(fh, cohort::lru::FLAG_NONE);
}
- int authorize(RGWRados* store) {
- int ret = store->ctl.user->get_info_by_access_key(key.id, &user, null_yield);
+ int authorize(rgw::sal::RGWRadosStore* store) {
+ int ret = store->ctl()->user->get_info_by_access_key(key.id, &user, null_yield);
if (ret == 0) {
RGWAccessKey* k = user.get_key(key.id);
if (!k || (k->key != key.key))
}
if (token.valid() && (ldh->auth(token.id, token.key) == 0)) {
/* try to store user if it doesn't already exist */
- if (store->ctl.user->get_info_by_uid(token.id, &user, null_yield) < 0) {
- int ret = store->ctl.user->store_info(user, null_yield,
+ if (store->ctl()->user->get_info_by_uid(token.id, &user, null_yield) < 0) {
+ int ret = store->ctl()->user->store_info(user, null_yield,
RGWUserCtl::PutParams()
.set_exclusive(true));
if (ret < 0) {
void update_user() {
RGWUserInfo _user = user;
- auto user_ctl = rgwlib.get_store()->ctl.user;
+ auto user_ctl = rgwlib.get_store()->ctl()->user;
int ret = user_ctl->get_info_by_access_key(key.id, &user, null_yield);
if (ret != 0)
user = _user;
virtual void join() = 0;
virtual void pause_for_new_config() = 0;
- virtual void unpause_with_new_config(RGWRados* store,
+ virtual void unpause_with_new_config(rgw::sal::RGWRadosStore* store,
rgw_auth_registry_ptr_t auth_registry) = 0;
};
env.mutex.get_write();
}
- void unpause_with_new_config(RGWRados* const store,
+ void unpause_with_new_config(rgw::sal::RGWRadosStore* const store,
rgw_auth_registry_ptr_t auth_registry) override {
env.store = store;
env.auth_registry = std::move(auth_registry);
pprocess->pause();
}
- void unpause_with_new_config(RGWRados* const store,
+ void unpause_with_new_config(rgw::sal::RGWRadosStore* const store,
rgw_auth_registry_ptr_t auth_registry) override {
env.store = store;
env.auth_registry = auth_registry;
rgw_user uid(uid_str);
RGWUserInfo user_info;
- int ret = env.store->ctl.user->get_info_by_uid(uid, &user_info, null_yield);
+ int ret = env.store->ctl()->user->get_info_by_uid(uid, &user_info, null_yield);
if (ret < 0) {
derr << "ERROR: failed reading user info: uid=" << uid << " ret="
<< ret << dendl;
if (pauser)
pauser->pause();
}
- void resume(RGWRados *store) override {
+ void resume(rgw::sal::RGWRadosStore *store) override {
/* Initialize the registry of auth strategies which will coordinate
* the dynamic reconfiguration. */
auto auth_registry = \
- rgw::auth::StrategyRegistry::create(g_ceph_context, implicit_tenants, store->pctl);
+ rgw::auth::StrategyRegistry::create(g_ceph_context, implicit_tenants, store->getRados()->pctl);
for (auto frontend : frontends)
frontend->unpause_with_new_config(store, auth_registry);
};
using namespace librados;
+using namespace rgw::sal;
bool LCRule::valid() const
{
return NULL;
}
-void RGWLC::initialize(CephContext *_cct, RGWRados *_store) {
+void RGWLC::initialize(CephContext *_cct, RGWRadosStore *_store) {
cct = _cct;
store = _store;
max_objs = cct->_conf->rgw_lc_max_objs;
#define MAX_LC_LIST_ENTRIES 100
do {
- int ret = cls_rgw_lc_list(store->lc_pool_ctx, obj_names[index], marker, MAX_LC_LIST_ENTRIES, entries);
+ int ret = cls_rgw_lc_list(store->getRados()->lc_pool_ctx, obj_names[index], marker, MAX_LC_LIST_ENTRIES, entries);
if (ret < 0)
return ret;
map<string, int>::iterator iter;
for (iter = entries.begin(); iter != entries.end(); ++iter) {
pair<string, int > entry(iter->first, lc_uninitial);
- ret = cls_rgw_lc_set_entry(store->lc_pool_ctx, obj_names[index], entry);
+ ret = cls_rgw_lc_set_entry(store->getRados()->lc_pool_ctx, obj_names[index], entry);
if (ret < 0) {
ldpp_dout(this, 0) << "RGWLC::bucket_lc_prepare() failed to set entry on "
<< obj_names[index] << dendl;
}
class LCObjsLister {
- RGWRados *store;
+ RGWRadosStore *store;
RGWBucketInfo& bucket_info;
RGWRados::Bucket target;
RGWRados::Bucket::List list_op;
int64_t delay_ms;
public:
- LCObjsLister(RGWRados *_store, RGWBucketInfo& _bucket_info) :
+ LCObjsLister(RGWRadosStore *_store, RGWBucketInfo& _bucket_info) :
store(_store), bucket_info(_bucket_info),
- target(store, bucket_info), list_op(&target) {
+ target(store->getRados(), bucket_info), list_op(&target) {
list_op.params.list_versions = bucket_info.versioned();
list_op.params.allow_unordered = true;
delay_ms = store->ctx()->_conf.get_val<int64_t>("rgw_lc_thread_delay");
struct op_env {
lc_op& op;
- RGWRados *store;
+ RGWRadosStore *store;
RGWLC *lc;
RGWBucketInfo& bucket_info;
LCObjsLister& ol;
- op_env(lc_op& _op, RGWRados *_store, RGWLC *_lc, RGWBucketInfo& _bucket_info,
+ op_env(lc_op& _op, RGWRadosStore *_store, RGWLC *_lc, RGWBucketInfo& _bucket_info,
LCObjsLister& _ol) : op(_op), store(_store), lc(_lc), bucket_info(_bucket_info), ol(_ol) {}
};
op_env& env;
rgw_bucket_dir_entry& o;
- RGWRados *store;
+ RGWRadosStore *store;
RGWBucketInfo& bucket_info;
lc_op& op;
LCObjsLister& ol;
obj_owner.set_id(rgw_user {meta.owner});
obj_owner.set_name(meta.owner_display_name);
- RGWRados::Object del_target(store, bucket_info, oc.rctx, obj);
+ RGWRados::Object del_target(store->getRados(), bucket_info, oc.rctx, obj);
RGWRados::Object::Delete del_op(&del_target);
del_op.params.bucket_owner = bucket_info.owner;
*skip = true;
bufferlist tags_bl;
- int ret = read_obj_tags(oc.store, oc.bucket_info, oc.obj, oc.rctx, tags_bl);
+ int ret = read_obj_tags(oc.store->getRados(), oc.bucket_info, oc.obj, oc.rctx, tags_bl);
if (ret < 0) {
if (ret != -ENODATA) {
ldout(oc.cct, 5) << "ERROR: read_obj_tags returned r=" << ret << dendl;
bool is_expired = obj_has_expired(oc.cct, mtime, expiration, exp_time);
ldout(oc.cct, 20) << __func__ << "(): key=" << o.key << ": is_expired=" << is_expired << dendl;
- return is_expired && pass_object_lock_check(oc.store, oc.bucket_info, oc.obj, oc.rctx);
+ return is_expired && pass_object_lock_check(oc.store->getRados(), oc.bucket_info, oc.obj, oc.rctx);
}
int process(lc_op_ctx& oc) {
target_placement.inherit_from(oc.bucket_info.placement_rule);
target_placement.storage_class = transition.storage_class;
- if (!oc.store->svc.zone->get_zone_params().valid_placement(target_placement)) {
+ if (!oc.store->svc()->zone->get_zone_params().valid_placement(target_placement)) {
ldpp_dout(oc.dpp, 0) << "ERROR: non existent dest placement: " << target_placement
<< " bucket="<< oc.bucket_info.bucket
<< " rule_id=" << oc.op.id << dendl;
return -EINVAL;
}
- int r = oc.store->transition_obj(oc.rctx, oc.bucket_info, oc.obj,
+ int r = oc.store->getRados()->transition_obj(oc.rctx, oc.bucket_info, oc.obj,
target_placement, o.meta.mtime, o.versioned_epoch, oc.dpp, null_yield);
if (r < 0) {
ldpp_dout(oc.dpp, 0) << "ERROR: failed to transition obj (r=" << r << ")" << dendl;
map<string, bufferlist> bucket_attrs;
string no_ns, list_versions;
vector<rgw_bucket_dir_entry> objs;
- auto obj_ctx = store->svc.sysobj->init_obj_ctx();
+ auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
vector<std::string> result;
boost::split(result, shard_id, boost::is_any_of(":"));
string bucket_tenant = result[0];
string bucket_name = result[1];
string bucket_marker = result[2];
- int ret = store->get_bucket_info(obj_ctx, bucket_tenant, bucket_name, bucket_info, NULL, null_yield, &bucket_attrs);
+ int ret = store->getRados()->get_bucket_info(obj_ctx, bucket_tenant, bucket_name, bucket_info, NULL, null_yield, &bucket_attrs);
if (ret < 0) {
ldpp_dout(this, 0) << "LC:get_bucket_info for " << bucket_name << " failed" << dendl;
return ret;
return -ENOENT;
}
- RGWRados::Bucket target(store, bucket_info);
+ RGWRados::Bucket target(store->getRados(), bucket_info);
map<string, bufferlist>::iterator aiter = bucket_attrs.find(RGW_ATTR_LC);
if (aiter == bucket_attrs.end())
l.set_duration(lock_duration);
do {
- int ret = l.lock_exclusive(&store->lc_pool_ctx, obj_names[index]);
+ int ret = l.lock_exclusive(&store->getRados()->lc_pool_ctx, obj_names[index]);
if (ret == -EBUSY || ret == -EEXIST) { /* already locked by another lc processor */
ldpp_dout(this, 0) << "RGWLC::bucket_lc_post() failed to acquire lock on "
<< obj_names[index] << ", sleep 5, try again" << dendl;
return 0;
ldpp_dout(this, 20) << "RGWLC::bucket_lc_post() lock " << obj_names[index] << dendl;
if (result == -ENOENT) {
- ret = cls_rgw_lc_rm_entry(store->lc_pool_ctx, obj_names[index], entry);
+ ret = cls_rgw_lc_rm_entry(store->getRados()->lc_pool_ctx, obj_names[index], entry);
if (ret < 0) {
ldpp_dout(this, 0) << "RGWLC::bucket_lc_post() failed to remove entry "
<< obj_names[index] << dendl;
entry.second = lc_complete;
}
- ret = cls_rgw_lc_set_entry(store->lc_pool_ctx, obj_names[index], entry);
+ ret = cls_rgw_lc_set_entry(store->getRados()->lc_pool_ctx, obj_names[index], entry);
if (ret < 0) {
ldpp_dout(this, 0) << "RGWLC::process() failed to set entry on "
<< obj_names[index] << dendl;
}
clean:
- l.unlock(&store->lc_pool_ctx, obj_names[index]);
+ l.unlock(&store->getRados()->lc_pool_ctx, obj_names[index]);
ldpp_dout(this, 20) << "RGWLC::bucket_lc_post() unlock " << obj_names[index] << dendl;
return 0;
} while (true);
progress_map->clear();
for(; index <max_objs; index++) {
map<string, int > entries;
- int ret = cls_rgw_lc_list(store->lc_pool_ctx, obj_names[index], marker, max_entries, entries);
+ int ret = cls_rgw_lc_list(store->getRados()->lc_pool_ctx, obj_names[index], marker, max_entries, entries);
if (ret < 0) {
if (ret == -ENOENT) {
ldpp_dout(this, 10) << __func__ << "() ignoring unfound lc object="
utime_t time(max_lock_secs, 0);
l.set_duration(time);
- int ret = l.lock_exclusive(&store->lc_pool_ctx, obj_names[index]);
+ int ret = l.lock_exclusive(&store->getRados()->lc_pool_ctx, obj_names[index]);
if (ret == -EBUSY || ret == -EEXIST) { /* already locked by another lc processor */
ldpp_dout(this, 0) << "RGWLC::process() failed to acquire lock on "
<< obj_names[index] << ", sleep 5, try again" << dendl;
return 0;
cls_rgw_lc_obj_head head;
- ret = cls_rgw_lc_get_head(store->lc_pool_ctx, obj_names[index], head);
+ ret = cls_rgw_lc_get_head(store->getRados()->lc_pool_ctx, obj_names[index], head);
if (ret < 0) {
ldpp_dout(this, 0) << "RGWLC::process() failed to get obj head "
<< obj_names[index] << ", ret=" << ret << dendl;
}
}
- ret = cls_rgw_lc_get_next_entry(store->lc_pool_ctx, obj_names[index], head.marker, entry);
+ ret = cls_rgw_lc_get_next_entry(store->getRados()->lc_pool_ctx, obj_names[index], head.marker, entry);
if (ret < 0) {
ldpp_dout(this, 0) << "RGWLC::process() failed to get obj entry "
<< obj_names[index] << dendl;
goto exit;
entry.second = lc_processing;
- ret = cls_rgw_lc_set_entry(store->lc_pool_ctx, obj_names[index], entry);
+ ret = cls_rgw_lc_set_entry(store->getRados()->lc_pool_ctx, obj_names[index], entry);
if (ret < 0) {
ldpp_dout(this, 0) << "RGWLC::process() failed to set obj entry " << obj_names[index]
<< " (" << entry.first << "," << entry.second << ")" << dendl;
}
head.marker = entry.first;
- ret = cls_rgw_lc_put_head(store->lc_pool_ctx, obj_names[index], head);
+ ret = cls_rgw_lc_put_head(store->getRados()->lc_pool_ctx, obj_names[index], head);
if (ret < 0) {
ldpp_dout(this, 0) << "RGWLC::process() failed to put head " << obj_names[index] << dendl;
goto exit;
}
- l.unlock(&store->lc_pool_ctx, obj_names[index]);
+ l.unlock(&store->getRados()->lc_pool_ctx, obj_names[index]);
ret = bucket_lc_process(entry.first);
bucket_lc_post(index, max_lock_secs, entry, ret);
}while(1);
exit:
- l.unlock(&store->lc_pool_ctx, obj_names[index]);
+ l.unlock(&store->getRados()->lc_pool_ctx, obj_names[index]);
return 0;
}
}
template<typename F>
-static int guard_lc_modify(RGWRados* store, const rgw_bucket& bucket, const string& cookie, const F& f) {
+static int guard_lc_modify(RGWRadosStore* store, const rgw_bucket& bucket, const string& cookie, const F& f) {
CephContext *cct = store->ctx();
string shard_id = get_lc_shard_name(bucket);
l.set_duration(time);
l.set_cookie(cookie);
- librados::IoCtx *ctx = store->get_lc_pool_ctx();
+ librados::IoCtx *ctx = store->getRados()->get_lc_pool_ctx();
int ret;
do {
attrs[RGW_ATTR_LC] = std::move(lc_bl);
- int ret = store->ctl.bucket->set_bucket_instance_attrs(bucket_info, attrs,
+ int ret = store->ctl()->bucket->set_bucket_instance_attrs(bucket_info, attrs,
&bucket_info.objv_tracker,
null_yield);
if (ret < 0)
{
map<string, bufferlist> attrs = bucket_attrs;
attrs.erase(RGW_ATTR_LC);
- int ret = store->ctl.bucket->set_bucket_instance_attrs(bucket_info, attrs,
+ int ret = store->ctl()->bucket->set_bucket_instance_attrs(bucket_info, attrs,
&bucket_info.objv_tracker,
null_yield);
namespace rgw::lc {
-int fix_lc_shard_entry(RGWRados* store, const RGWBucketInfo& bucket_info,
+int fix_lc_shard_entry(rgw::sal::RGWRadosStore* store, const RGWBucketInfo& bucket_info,
const map<std::string,bufferlist>& battrs)
{
if (auto aiter = battrs.find(RGW_ATTR_LC);
// 2. entry doesn't exist, which usually happens when reshard has happened prior to update and next LC process has already dropped the update
// 3. entry exists matching the current bucket id which was after a reshard (needs to be updated to the marker)
// We are not dropping the old marker here as that would be caught by the next LC process update
- auto lc_pool_ctx = store->get_lc_pool_ctx();
+ auto lc_pool_ctx = store->getRados()->get_lc_pool_ctx();
int ret = cls_rgw_lc_get_entry(*lc_pool_ctx,
lc_oid, shard_name, entry);
if (ret == 0) {
#include "rgw_rados.h"
#include "cls/rgw/cls_rgw_types.h"
#include "rgw_tag.h"
+#include "rgw_sal.h"
#include <atomic>
#include <tuple>
class RGWLC : public DoutPrefixProvider {
CephContext *cct;
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
int max_objs{0};
string *obj_names{nullptr};
std::atomic<bool> down_flag = { false };
finalize();
}
- void initialize(CephContext *_cct, RGWRados *_store);
+ void initialize(CephContext *_cct, rgw::sal::RGWRadosStore *_store);
void finalize();
int process();
namespace rgw::lc {
-int fix_lc_shard_entry(RGWRados *store, const RGWBucketInfo& bucket_info,
+int fix_lc_shard_entry(rgw::sal::RGWRadosStore *store, const RGWBucketInfo& bucket_info,
const map<std::string,bufferlist>& battrs);
std::string s3_expiration_header(
OpsLogSocket* olog;
rgw::LDAPHelper* ldh{nullptr};
RGWREST rest; // XXX needed for RGWProcessEnv
- RGWRados* store;
+ rgw::sal::RGWRadosStore* store;
boost::intrusive_ptr<CephContext> cct;
public:
{}
~RGWLib() {}
- RGWRados* get_store() { return store; }
+ rgw::sal::RGWRadosStore* get_store() { return store; }
RGWLibFrontend* get_fe() { return fe; }
return user_info;
}
- int set_uid(RGWRados* store, const rgw_user& uid);
+ int set_uid(rgw::sal::RGWRadosStore* store, const rgw_user& uid);
int write_data(const char *buf, int len);
int read_data(char *buf, int len);
inline struct req_state* get_state() { return this->RGWRequest::s; }
RGWLibRequest(CephContext* _cct, RGWUserInfo* _user)
- : RGWRequest(rgwlib.get_store()->get_new_req_id()), cct(_cct),
+ : RGWRequest(rgwlib.get_store()->getRados()->get_new_req_id()), cct(_cct),
user(_user)
{}
RGWRequest::init_state(_s);
RGWHandler::init(rados_ctx->get_store(), _s, io);
- sysobj_ctx.emplace(store->svc.sysobj);
+ sysobj_ctx.emplace(store->svc()->sysobj);
get_state()->obj_ctx = rados_ctx;
get_state()->sysobj_ctx = &(sysobj_ctx.get());
- get_state()->req_id = store->svc.zone_utils->unique_id(id);
- get_state()->trans_id = store->svc.zone_utils->unique_trans_id(id);
+ get_state()->req_id = store->svc()->zone_utils->unique_id(id);
+ get_state()->trans_id = store->svc()->zone_utils->unique_trans_id(id);
ldpp_dout(_s, 2) << "initializing for trans_id = "
<< get_state()->trans_id.c_str() << dendl;
RGWRequest::init_state(&rstate);
RGWHandler::init(rados_ctx.get_store(), &rstate, &io_ctx);
- sysobj_ctx.emplace(store->svc.sysobj);
+ sysobj_ctx.emplace(store->svc()->sysobj);
get_state()->obj_ctx = &rados_ctx;
get_state()->sysobj_ctx = &(sysobj_ctx.get());
- get_state()->req_id = store->svc.zone_utils->unique_id(id);
- get_state()->trans_id = store->svc.zone_utils->unique_trans_id(id);
+ get_state()->req_id = store->svc()->zone_utils->unique_id(id);
+ get_state()->trans_id = store->svc()->zone_utils->unique_trans_id(id);
ldpp_dout(get_state(), 2) << "initializing for trans_id = "
<< get_state()->trans_id.c_str() << dendl;
}
- inline RGWRados* get_store() { return store; }
+ inline rgw::sal::RGWRadosStore* get_store() { return store; }
virtual int execute() final { ceph_abort(); }
virtual int exec_start() = 0;
int content_length, std::atomic<bool>* fail_flag)
{
RGWLoadGenRequest* req =
- new RGWLoadGenRequest(store->get_new_req_id(), method, resource,
+ new RGWLoadGenRequest(store->getRados()->get_new_req_id(), method, resource,
content_length, fail_flag);
dout(10) << "allocated request req=" << hex << req << dec << dendl;
req_throttle.get(1);
FCGX_Init();
#endif
- RGWRados *store =
+ rgw::sal::RGWRadosStore *store =
RGWStoreManager::get_storage(g_ceph_context,
g_conf()->rgw_enable_gc_threads,
g_conf()->rgw_enable_lc_threads,
return -r;
}
- rgw_rest_init(g_ceph_context, store, store->svc.zone->get_zonegroup());
+ rgw_rest_init(g_ceph_context, store->svc()->zone->get_zonegroup());
mutex.lock();
init_timer.cancel_all_events();
init_timer.shutdown();
mutex.unlock();
- rgw_log_usage_init(g_ceph_context, store);
+ rgw_log_usage_init(g_ceph_context, store->getRados());
RGWREST rest;
const bool swift_at_root = g_conf()->rgw_swift_url_prefix == "/";
if (apis_map.count("s3") > 0 || s3website_enabled) {
if (! swift_at_root) {
- rest.register_default_mgr(set_logging(rest_filter(store, RGW_REST_S3,
+ rest.register_default_mgr(set_logging(rest_filter(store->getRados(), RGW_REST_S3,
new RGWRESTMgr_S3(s3website_enabled, sts_enabled, iam_enabled))));
} else {
derr << "Cannot have the S3 or S3 Website enabled together with "
if (! swift_at_root) {
rest.register_resource(g_conf()->rgw_swift_url_prefix,
- set_logging(rest_filter(store, RGW_REST_SWIFT,
+ set_logging(rest_filter(store->getRados(), RGW_REST_SWIFT,
swift_resource)));
} else {
- if (store->svc.zone->get_zonegroup().zones.size() > 1) {
+ if (store->svc()->zone->get_zonegroup().zones.size() > 1) {
derr << "Placing Swift API in the root of URL hierarchy while running"
<< " multi-site configuration requires another instance of RadosGW"
<< " with S3 API enabled!" << dendl;
rgw::auth::ImplicitTenants implicit_tenant_context{g_conf()};
g_conf().add_observer(&implicit_tenant_context);
auto auth_registry = \
- rgw::auth::StrategyRegistry::create(g_ceph_context, implicit_tenant_context, store->pctl);
+ rgw::auth::StrategyRegistry::create(g_ceph_context, implicit_tenant_context, store->ctl());
/* Header custom behavior */
rest.register_x_headers(g_conf()->rgw_log_http_headers);
fes.push_back(fe);
}
- r = store->register_to_service_map("rgw", service_map_meta);
+ r = store->getRados()->register_to_service_map("rgw", service_map_meta);
if (r < 0) {
derr << "ERROR: failed to register to service map: " << cpp_strerror(-r) << dendl;
RGWFrontendPauser pauser(fes, implicit_tenant_context, &pusher);
RGWRealmReloader reloader(store, service_map_meta, &pauser);
- RGWRealmWatcher realm_watcher(g_ceph_context, store->svc.zone->get_realm());
+ RGWRealmWatcher realm_watcher(g_ceph_context, store->svc()->zone->get_realm());
realm_watcher.add_watcher(RGWRealmNotify::Reload, reloader);
realm_watcher.add_watcher(RGWRealmNotify::ZonesNeedPeriod, pusher);
#include "rgw_zone.h"
#include "rgw_tools.h"
#include "rgw_mdlog.h"
+#include "rgw_sal.h"
#include "rgw_cr_rados.h"
#include <boost/asio/yield.hpp>
+using namespace rgw::sal;
+
#define dout_subsys ceph_subsys_rgw
const std::string RGWMetadataLogHistory::oid = "meta.history";
}
RGWMetadataHandler_GenericMetaBE::Put::Put(RGWMetadataHandler_GenericMetaBE *_handler,
- RGWSI_MetaBackend_Handler::Op *_op,
- string& _entry, RGWMetadataObject *_obj,
- RGWObjVersionTracker& _objv_tracker,
- optional_yield _y,
- RGWMDLogSyncType _type) :
+ RGWSI_MetaBackend_Handler::Op *_op,
+ string& _entry, RGWMetadataObject *_obj,
+ RGWObjVersionTracker& _objv_tracker,
+ optional_yield _y,
+ RGWMDLogSyncType _type):
handler(_handler), op(_op),
entry(_entry), obj(_obj),
objv_tracker(_objv_tracker),
#include "cls/log/cls_log_types.h"
#include "common/RefCountedObj.h"
#include "common/ceph_time.h"
-
#include "services/svc_meta_be.h"
-class RGWRados;
+namespace rgw { namespace sal {
+class RGWRadosStore;
+} }
class RGWCoroutine;
class JSONObj;
struct RGWObjVersionTracker;
return handler->do_get(op, entry, obj, y);
}
public:
- Put(RGWMetadataHandler_GenericMetaBE *handler, RGWSI_MetaBackend_Handler::Op *_op,
+ Put(RGWMetadataHandler_GenericMetaBE *_handler, RGWSI_MetaBackend_Handler::Op *_op,
string& _entry, RGWMetadataObject *_obj,
RGWObjVersionTracker& _objv_tracker, optional_yield _y,
RGWMDLogSyncType _type);
#include "rgw_xml.h"
#include "rgw_multi.h"
#include "rgw_op.h"
+#include "rgw_sal.h"
#include "services/svc_sys_obj.h"
#include "services/svc_tier_rados.h"
(strncmp(uid, MULTIPART_UPLOAD_ID_PREFIX_LEGACY, sizeof(MULTIPART_UPLOAD_ID_PREFIX_LEGACY) - 1) == 0);
}
-int list_multipart_parts(RGWRados *store, RGWBucketInfo& bucket_info,
+int list_multipart_parts(rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket_info,
CephContext *cct,
const string& upload_id,
const string& meta_oid, int num_parts,
obj.set_in_extra_data(true);
rgw_raw_obj raw_obj;
- store->obj_to_raw(bucket_info.placement_rule, obj, &raw_obj);
+ store->getRados()->obj_to_raw(bucket_info.placement_rule, obj, &raw_obj);
bool sorted_omap = is_v2_upload_id(upload_id) && !assume_unsorted;
parts.clear();
- auto obj_ctx = store->svc.sysobj->init_obj_ctx();
+ auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
auto sysobj = obj_ctx.get_obj(raw_obj);
int ret;
if (sorted_omap) {
return 0;
}
-int list_multipart_parts(RGWRados *store, struct req_state *s,
+int list_multipart_parts(rgw::sal::RGWRadosStore *store, struct req_state *s,
const string& upload_id,
const string& meta_oid, int num_parts,
int marker, map<uint32_t, RGWUploadPartInfo>& parts,
next_marker, truncated, assume_unsorted);
}
-int abort_multipart_upload(RGWRados *store, CephContext *cct,
+int abort_multipart_upload(rgw::sal::RGWRadosStore *store, CephContext *cct,
RGWObjectCtx *obj_ctx, RGWBucketInfo& bucket_info,
RGWMPObj& mp_obj)
{
string oid = mp_obj.get_part(obj_iter->second.num);
obj.init_ns(bucket_info.bucket, oid, RGW_OBJ_NS_MULTIPART);
obj.index_hash_source = mp_obj.get_key();
- ret = store->delete_obj(*obj_ctx, bucket_info, obj, 0);
+ ret = store->getRados()->delete_obj(*obj_ctx, bucket_info, obj, 0);
if (ret < 0 && ret != -ENOENT)
return ret;
} else {
- store->update_gc_chain(meta_obj, obj_part.manifest, &chain);
+ store->getRados()->update_gc_chain(meta_obj, obj_part.manifest, &chain);
RGWObjManifest::obj_iterator oiter = obj_part.manifest.obj_begin();
if (oiter != obj_part.manifest.obj_end()) {
rgw_obj head;
- rgw_raw_obj raw_head = oiter.get_location().get_raw_obj(store);
+ rgw_raw_obj raw_head = oiter.get_location().get_raw_obj(store->getRados());
RGWSI_Tier_RADOS::raw_obj_to_obj(bucket_info.bucket, raw_head, &head);
rgw_obj_index_key key;
} while (truncated);
/* use upload id as tag and do it asynchronously */
- ret = store->send_chain_to_gc(chain, mp_obj.get_upload_id(), false);
+ ret = store->getRados()->send_chain_to_gc(chain, mp_obj.get_upload_id(), false);
if (ret < 0) {
ldout(cct, 5) << __func__ << ": gc->send_chain() returned " << ret << dendl;
return (ret == -ENOENT) ? -ERR_NO_SUCH_UPLOAD : ret;
}
- RGWRados::Object del_target(store, bucket_info, *obj_ctx, meta_obj);
+ RGWRados::Object del_target(store->getRados(), bucket_info, *obj_ctx, meta_obj);
RGWRados::Object::Delete del_op(&del_target);
del_op.params.bucket_owner = bucket_info.owner;
del_op.params.versioning_status = 0;
return (ret == -ENOENT) ? -ERR_NO_SUCH_UPLOAD : ret;
}
-int list_bucket_multiparts(RGWRados *store, RGWBucketInfo& bucket_info,
+int list_bucket_multiparts(rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket_info,
const string& prefix, const string& marker,
const string& delim,
const int& max_uploads,
vector<rgw_bucket_dir_entry> *objs,
map<string, bool> *common_prefixes, bool *is_truncated)
{
- RGWRados::Bucket target(store, bucket_info);
+ RGWRados::Bucket target(store->getRados(), bucket_info);
RGWRados::Bucket::List list_op(&target);
MultipartMetaFilter mp_filter;
return(list_op.list_objects(max_uploads, objs, common_prefixes, is_truncated, null_yield));
}
-int abort_bucket_multiparts(RGWRados *store, CephContext *cct, RGWBucketInfo& bucket_info,
+int abort_bucket_multiparts(rgw::sal::RGWRadosStore *store, CephContext *cct, RGWBucketInfo& bucket_info,
string& prefix, string& delim)
{
constexpr int max = 1000;
#include "rgw_obj_manifest.h"
#include "rgw_compression_types.h"
+namespace rgw { namespace sal {
+class RGWRadosStore;
+} }
+
#define MULTIPART_UPLOAD_ID_PREFIX_LEGACY "2/"
#define MULTIPART_UPLOAD_ID_PREFIX "2~" // must contain a unique char that may not come up in gen_rand_alpha()
extern bool is_v2_upload_id(const string& upload_id);
-extern int list_multipart_parts(RGWRados *store, RGWBucketInfo& bucket_info,
+extern int list_multipart_parts(rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket_info,
CephContext *cct,
const string& upload_id,
const string& meta_oid, int num_parts,
int *next_marker, bool *truncated,
bool assume_unsorted = false);
-extern int list_multipart_parts(RGWRados *store, struct req_state *s,
+extern int list_multipart_parts(rgw::sal::RGWRadosStore *store, struct req_state *s,
const string& upload_id,
const string& meta_oid, int num_parts,
int marker, map<uint32_t, RGWUploadPartInfo>& parts,
int *next_marker, bool *truncated,
bool assume_unsorted = false);
-extern int abort_multipart_upload(RGWRados *store, CephContext *cct, RGWObjectCtx *obj_ctx,
+extern int abort_multipart_upload(rgw::sal::RGWRadosStore *store, CephContext *cct, RGWObjectCtx *obj_ctx,
RGWBucketInfo& bucket_info, RGWMPObj& mp_obj);
-extern int list_bucket_multiparts(RGWRados *store, RGWBucketInfo& bucket_info,
+extern int list_bucket_multiparts(rgw::sal::RGWRadosStore *store, RGWBucketInfo& bucket_info,
const string& prefix,
const string& marker,
const string& delim,
vector<rgw_bucket_dir_entry> *objs,
map<string, bool> *common_prefixes, bool *is_truncated);
-extern int abort_bucket_multiparts(RGWRados *store, CephContext *cct, RGWBucketInfo& bucket_info,
+extern int abort_bucket_multiparts(rgw::sal::RGWRadosStore *store, CephContext *cct, RGWBucketInfo& bucket_info,
string& prefix, string& delim);
#endif
class RGWSI_Zone;
struct RGWZoneGroup;
struct RGWZoneParams;
+class RGWRados;
class rgw_obj_select {
rgw_placement_rule placement_rule;
#define dout_subsys ceph_subsys_rgw
-static RGWRados *store = NULL;
+static rgw::sal::RGWRadosStore *store = nullptr;
class StoreDestructor {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
public:
- explicit StoreDestructor(RGWRados *_s) : store(_s) {}
+ explicit StoreDestructor(rgw::sal::RGWRadosStore *_s) : store(_s) {}
~StoreDestructor() {
if (store) {
RGWStoreManager::close_storage(store);
const string& bucket_id,
RGWBucketInfo& bucket_info)
{
- auto obj_ctx = store->svc.sysobj->init_obj_ctx();
+ auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
/*
* XXX Here's where it gets tricky. We went to all the trouble of
* are ephemeral, good call encoding tenant info!
*/
- return store->get_bucket_info(obj_ctx, tenant_name, bucket_name,
+ return store->getRados()->get_bucket_info(obj_ctx, tenant_name, bucket_name,
bucket_info, nullptr, null_yield, nullptr);
}
}
rgw_obj obj(bucket_info.bucket, key);
- store->set_atomic(&rctx, obj);
- ret = store->delete_obj(rctx, bucket_info, obj,
+ store->getRados()->set_atomic(&rctx, obj);
+ ret = store->getRados()->delete_obj(rctx, bucket_info, obj,
bucket_info.versioning_status(), 0, hint.exp_time);
return ret;
ldout(store->ctx(), 15) << "got removal hint for: " << iter->key_ts.sec() \
<< " - " << iter->key_ext << dendl;
- int ret = objexp_hint_parse(store->ctx(), *iter, &hint);
+ int ret = objexp_hint_parse(store->getRados()->ctx(), *iter, &hint);
if (ret < 0) {
ldout(store->ctx(), 1) << "cannot parse removal hint for " << hint.obj_key << dendl;
continue;
utime_t time(max_secs, 0);
l.set_duration(time);
- int ret = l.lock_exclusive(&store->objexp_pool_ctx, shard);
+ int ret = l.lock_exclusive(&store->getRados()->objexp_pool_ctx, shard);
if (ret == -EBUSY) { /* already locked by another processor */
dout(5) << __func__ << "(): failed to acquire lock on " << shard << dendl;
return false;
marker = out_marker;
} while (truncated);
- l.unlock(&store->objexp_pool_ctx, shard);
+ l.unlock(&store->getRados()->objexp_pool_ctx, shard);
return done;
}
#include "include/utime.h"
#include "include/str_list.h"
+#include "rgw_sal.h"
+
class CephContext;
class RGWSI_RADOS;
class RGWSI_Zone;
-class RGWRados;
class RGWBucketInfo;
class cls_timeindex_entry;
class RGWObjectExpirer {
protected:
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
RGWObjExpStore exp_store;
int init_bucket_info(const std::string& tenant_name,
std::atomic<bool> down_flag = { false };
public:
- explicit RGWObjectExpirer(RGWRados *_store)
+ explicit RGWObjectExpirer(rgw::sal::RGWRadosStore *_store)
: store(_store),
- exp_store(_store->ctx(), _store->svc.rados, _store->svc.zone),
+ exp_store(_store->getRados()->ctx(), _store->svc()->rados, _store->svc()->zone),
worker(NULL) {
}
~RGWObjectExpirer() {
static string shadow_ns = RGW_OBJ_NS_SHADOW;
static void forward_req_info(CephContext *cct, req_info& info, const std::string& bucket_name);
-static int forward_request_to_master(struct req_state *s, obj_version *objv, RGWRados *store,
+static int forward_request_to_master(struct req_state *s, obj_version *objv, rgw::sal::RGWRadosStore *store,
bufferlist& in_data, JSONParser *jp, req_info *forward_info = nullptr);
static MultipartMetaFilter mp_filter;
static int get_user_policy_from_attr(CephContext * const cct,
- RGWRados * const store,
+ rgw::sal::RGWRadosStore * const store,
map<string, bufferlist>& attrs,
RGWAccessControlPolicy& policy /* out */)
{
}
static int get_obj_policy_from_attr(CephContext *cct,
- RGWRados *store,
+ rgw::sal::RGWRadosStore *store,
RGWObjectCtx& obj_ctx,
RGWBucketInfo& bucket_info,
map<string, bufferlist>& bucket_attrs,
bufferlist bl;
int ret = 0;
- RGWRados::Object op_target(store, bucket_info, obj_ctx, obj);
+ RGWRados::Object op_target(store->getRados(), bucket_info, obj_ctx, obj);
RGWRados::Object::Read rop(&op_target);
ret = rop.get_attr(RGW_ATTR_ACL, bl, y);
/* object exists, but policy is broken */
ldout(cct, 0) << "WARNING: couldn't find acl header for object, generating default" << dendl;
RGWUserInfo uinfo;
- ret = store->ctl.user->get_info_by_uid(bucket_info.owner, &uinfo, y);
+ ret = store->ctl()->user->get_info_by_uid(bucket_info.owner, &uinfo, y);
if (ret < 0)
return ret;
}
static boost::optional<Policy> get_iam_policy_from_attr(CephContext* cct,
- RGWRados* store,
+ rgw::sal::RGWRadosStore* store,
map<string, bufferlist>& attrs,
const string& tenant) {
auto i = attrs.find(RGW_ATTR_IAM_POLICY);
}
vector<Policy> get_iam_user_policy_from_attr(CephContext* cct,
- RGWRados* store,
+ rgw::sal::RGWRadosStore* store,
map<string, bufferlist>& attrs,
const string& tenant) {
vector<Policy> policies;
return policies;
}
-static int get_obj_attrs(RGWRados *store, struct req_state *s, const rgw_obj& obj, map<string, bufferlist>& attrs, rgw_obj *target_obj = nullptr)
+static int get_obj_attrs(rgw::sal::RGWRadosStore *store, struct req_state *s, const rgw_obj& obj, map<string, bufferlist>& attrs, rgw_obj *target_obj = nullptr)
{
- RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
+ RGWRados::Object op_target(store->getRados(), s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
RGWRados::Object::Read read_op(&op_target);
read_op.params.attrs = &attrs;
return read_op.prepare(s->yield);
}
-static int get_obj_head(RGWRados *store, struct req_state *s,
+static int get_obj_head(rgw::sal::RGWRadosStore *store, struct req_state *s,
const rgw_obj& obj,
map<string, bufferlist> *attrs,
bufferlist *pbl)
{
- store->set_prefetch_data(s->obj_ctx, obj);
+ store->getRados()->set_prefetch_data(s->obj_ctx, obj);
- RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
+ RGWRados::Object op_target(store->getRados(), s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
RGWRados::Object::Read read_op(&op_target);
read_op.params.attrs = attrs;
};
WRITE_CLASS_ENCODER(multipart_upload_info)
-static int get_multipart_info(RGWRados *store, struct req_state *s,
+static int get_multipart_info(rgw::sal::RGWRadosStore *store, struct req_state *s,
const rgw_obj& obj,
RGWAccessControlPolicy *policy,
map<string, bufferlist> *attrs,
return 0;
}
-static int get_multipart_info(RGWRados *store, struct req_state *s,
+static int get_multipart_info(rgw::sal::RGWRadosStore *store, struct req_state *s,
const string& meta_oid,
RGWAccessControlPolicy *policy,
map<string, bufferlist> *attrs,
return get_multipart_info(store, s, meta_obj, policy, attrs, upload_info);
}
-static int modify_obj_attr(RGWRados *store, struct req_state *s, const rgw_obj& obj, const char* attr_name, bufferlist& attr_val)
+static int modify_obj_attr(rgw::sal::RGWRadosStore *store, struct req_state *s, const rgw_obj& obj, const char* attr_name, bufferlist& attr_val)
{
map<string, bufferlist> attrs;
- RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
+ RGWRados::Object op_target(store->getRados(), s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
RGWRados::Object::Read read_op(&op_target);
read_op.params.attrs = &attrs;
if (r < 0) {
return r;
}
- store->set_atomic(s->obj_ctx, read_op.state.obj);
+ store->getRados()->set_atomic(s->obj_ctx, read_op.state.obj);
attrs[attr_name] = attr_val;
- return store->set_attrs(s->obj_ctx, s->bucket_info, read_op.state.obj, attrs, NULL, s->yield);
+ return store->getRados()->set_attrs(s->obj_ctx, s->bucket_info, read_op.state.obj, attrs, NULL, s->yield);
}
static int read_bucket_policy(RGWUserCtl *user_ctl,
return ret;
}
-static int read_obj_policy(RGWRados *store,
+static int read_obj_policy(rgw::sal::RGWRadosStore *store,
struct req_state *s,
RGWBucketInfo& bucket_info,
map<string, bufferlist>& bucket_attrs,
/* object does not exist checking the bucket's ACL to make sure
that we send a proper error code */
RGWAccessControlPolicy bucket_policy(s->cct);
- ret = rgw_op_get_bucket_policy_from_attr(s->cct, store->ctl.user, bucket_info, bucket_attrs, &bucket_policy);
+ ret = rgw_op_get_bucket_policy_from_attr(s->cct, store->ctl()->user, bucket_info, bucket_attrs, &bucket_policy);
if (ret < 0) {
return ret;
}
* only_bucket: If true, reads the user and bucket ACLs rather than the object ACL.
* Returns: 0 on success, -ERR# otherwise.
*/
-int rgw_build_bucket_policies(RGWRados* store, struct req_state* s)
+int rgw_build_bucket_policies(rgw::sal::RGWRadosStore* store, struct req_state* s)
{
int ret = 0;
rgw_obj_key obj;
RGWUserInfo bucket_owner_info;
- auto obj_ctx = store->svc.sysobj->init_obj_ctx();
+ auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
string bi = s->info.args.get(RGW_SYS_PARAM_PREFIX "bucket-instance");
if (!bi.empty()) {
RGWBucketInfo source_info;
if (s->bucket_instance_id.empty()) {
- ret = store->get_bucket_info(obj_ctx, s->src_tenant_name, s->src_bucket_name, source_info, NULL, s->yield);
+ ret = store->getRados()->get_bucket_info(obj_ctx, s->src_tenant_name, s->src_bucket_name, source_info, NULL, s->yield);
} else {
- ret = store->get_bucket_instance_info(obj_ctx, s->bucket_instance_id, source_info, NULL, NULL, s->yield);
+ ret = store->getRados()->get_bucket_instance_info(obj_ctx, s->bucket_instance_id, source_info, NULL, NULL, s->yield);
}
if (ret == 0) {
string& zonegroup = source_info.zonegroup;
- s->local_source = store->svc.zone->get_zonegroup().equals(zonegroup);
+ s->local_source = store->svc()->zone->get_zonegroup().equals(zonegroup);
}
}
auto b = rgw_bucket(rgw_bucket_key(s->bucket_tenant, s->bucket_name, s->bucket_instance_id));
RGWObjVersionTracker ep_ot;
- ret = store->ctl.bucket->read_bucket_info(b, &s->bucket_info,
+ ret = store->ctl()->bucket->read_bucket_info(b, &s->bucket_info,
s->yield,
RGWBucketCtl::BucketInstance::GetParams()
.set_mtime(&s->bucket_mtime)
s->bucket = s->bucket_info.bucket;
if (s->bucket_exists) {
- ret = read_bucket_policy(store->ctl.user, s, s->bucket_info, s->bucket_attrs,
+ ret = read_bucket_policy(store->ctl()->user, s, s->bucket_info, s->bucket_attrs,
s->bucket_acl.get(), s->bucket);
acct_acl_user = {
s->bucket_info.owner,
s->bucket_owner = s->bucket_acl->get_owner();
RGWZoneGroup zonegroup;
- int r = store->svc.zone->get_zonegroup(s->bucket_info.zonegroup, zonegroup);
+ int r = store->svc()->zone->get_zonegroup(s->bucket_info.zonegroup, zonegroup);
if (!r) {
if (!zonegroup.endpoints.empty()) {
s->zonegroup_endpoint = zonegroup.endpoints.front();
ret = r;
}
- if (s->bucket_exists && !store->svc.zone->get_zonegroup().equals(s->bucket_info.zonegroup)) {
+ if (s->bucket_exists && !store->svc()->zone->get_zonegroup().equals(s->bucket_info.zonegroup)) {
ldpp_dout(s, 0) << "NOTICE: request for data in a different zonegroup ("
<< s->bucket_info.zonegroup << " != "
- << store->svc.zone->get_zonegroup().get_id() << ")" << dendl;
+ << store->svc()->zone->get_zonegroup().get_id() << ")" << dendl;
/* we now need to make sure that the operation actually requires copy source, that is
* it's a copy operation
*/
- if (store->svc.zone->get_zonegroup().is_master_zonegroup() && s->system_request) {
+ if (store->svc()->zone->get_zonegroup().is_master_zonegroup() && s->system_request) {
/*If this is the master, don't redirect*/
} else if (s->op_type == RGW_OP_GET_BUCKET_LOCATION ) {
/* If op is get bucket location, don't redirect */
s->dest_placement.storage_class = s->info.storage_class;
s->dest_placement.inherit_from(s->bucket_info.placement_rule);
- if (!store->svc.zone->get_zone_params().valid_placement(s->dest_placement)) {
+ if (!store->svc()->zone->get_zone_params().valid_placement(s->dest_placement)) {
ldpp_dout(s, 0) << "NOTICE: invalid dest placement: " << s->dest_placement.to_str() << dendl;
return -EINVAL;
}
/* handle user ACL only for those APIs which support it */
if (s->user_acl) {
map<string, bufferlist> uattrs;
- ret = store->ctl.user->get_attrs_by_uid(acct_acl_user.uid, &uattrs, s->yield);
+ ret = store->ctl()->user->get_attrs_by_uid(acct_acl_user.uid, &uattrs, s->yield);
if (!ret) {
ret = get_user_policy_from_attr(s->cct, store, uattrs, *s->user_acl);
}
if (! s->user->user_id.empty() && s->auth.identity->get_identity_type() != TYPE_ROLE) {
try {
map<string, bufferlist> uattrs;
- if (ret = store->ctl.user->get_attrs_by_uid(s->user->user_id, &uattrs, s->yield); ! ret) {
+ if (ret = store->ctl()->user->get_attrs_by_uid(s->user->user_id, &uattrs, s->yield); ! ret) {
if (s->iam_user_policies.empty()) {
s->iam_user_policies = get_iam_user_policy_from_attr(s->cct, store, uattrs, s->user->user_id.tenant);
} else {
ret = -EACCES;
}
- bool success = store->svc.zone->get_redirect_zone_endpoint(&s->redirect_zone_endpoint);
+ bool success = store->svc()->zone->get_redirect_zone_endpoint(&s->redirect_zone_endpoint);
if (success) {
ldpp_dout(s, 20) << "redirect_zone_endpoint=" << s->redirect_zone_endpoint << dendl;
}
* only_bucket: If true, reads the bucket ACL rather than the object ACL.
* Returns: 0 on success, -ERR# otherwise.
*/
-int rgw_build_object_policies(RGWRados *store, struct req_state *s,
+int rgw_build_object_policies(rgw::sal::RGWRadosStore *store, struct req_state *s,
bool prefetch_data)
{
int ret = 0;
s->object_acl = std::make_unique<RGWAccessControlPolicy>(s->cct);
rgw_obj obj(s->bucket, s->object);
- store->set_atomic(s->obj_ctx, obj);
+ store->getRados()->set_atomic(s->obj_ctx, obj);
if (prefetch_data) {
- store->set_prefetch_data(s->obj_ctx, obj);
+ store->getRados()->set_prefetch_data(s->obj_ctx, obj);
}
ret = read_obj_policy(store, s, s->bucket_info, s->bucket_attrs,
s->object_acl.get(), nullptr, s->iam_policy, s->bucket,
return 0;
}
-static int rgw_iam_add_existing_objtags(RGWRados* store, struct req_state* s, rgw_obj& obj, std::uint64_t action){
+static int rgw_iam_add_existing_objtags(rgw::sal::RGWRadosStore* store, struct req_state* s, rgw_obj& obj, std::uint64_t action){
map <string, bufferlist> attrs;
- store->set_atomic(s->obj_ctx, obj);
+ store->getRados()->set_atomic(s->obj_ctx, obj);
int op_ret = get_obj_attrs(store, s, obj, attrs);
if (op_ret < 0)
return op_ret;
}
}
-void rgw_build_iam_environment(RGWRados* store,
+void rgw_build_iam_environment(rgw::sal::RGWRadosStore* store,
struct req_state* s)
{
const auto& m = s->info.env->get_map();
int RGWGetObj::verify_permission()
{
obj = rgw_obj(s->bucket, s->object);
- store->set_atomic(s->obj_ctx, obj);
+ store->getRados()->set_atomic(s->obj_ctx, obj);
if (get_data) {
- store->set_prefetch_data(s->obj_ctx, obj);
+ store->getRados()->set_prefetch_data(s->obj_ctx, obj);
}
if (torrent.get_flag()) {
return -EPERM;
}
- if (!s->system_request && (required_mask & RGW_OP_TYPE_MODIFY) && !store->svc.zone->zone_is_writeable()) {
+ if (!s->system_request && (required_mask & RGW_OP_TYPE_MODIFY) && !store->svc()->zone->zone_is_writeable()) {
ldpp_dout(this, 5) << "NOTICE: modify request to a read-only zone by a "
"non-system user, permission denied" << dendl;
return -EPERM;
obj = rgw_obj(s->bucket, s->object);
- store->set_atomic(s->obj_ctx, obj);
+ store->getRados()->set_atomic(s->obj_ctx, obj);
op_ret = get_obj_attrs(store, s, obj, attrs);
if (op_ret < 0) {
rgw_obj obj;
obj = rgw_obj(s->bucket, s->object);
- store->set_atomic(s->obj_ctx, obj);
+ store->getRados()->set_atomic(s->obj_ctx, obj);
op_ret = modify_obj_attr(store, s, obj, RGW_ATTR_TAGS, tags_bl);
if (op_ret == -ECANCELED){
op_ret = -ERR_TAG_CONFLICT;
rgw_obj obj;
obj = rgw_obj(s->bucket, s->object);
- store->set_atomic(s->obj_ctx, obj);
+ store->getRados()->set_atomic(s->obj_ctx, obj);
map <string, bufferlist> attrs;
map <string, bufferlist> rmattr;
bufferlist bl;
rmattr[RGW_ATTR_TAGS] = bl;
- op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, &rmattr, s->yield);
+ op_ret = store->getRados()->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, &rmattr, s->yield);
}
int RGWGetBucketTags::verify_permission()
if (op_ret < 0)
return;
- if (!store->svc.zone->is_meta_master()) {
+ if (!store->svc()->zone->is_meta_master()) {
op_ret = forward_request_to_master(s, nullptr, store, in_data, nullptr);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
}
}
- op_ret = retry_raced_bucket_write(store, s, [this] {
+ op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
map<string, bufferlist> attrs = s->bucket_attrs;
attrs[RGW_ATTR_TAGS] = tags_bl;
- return store->ctl.bucket->set_bucket_instance_attrs(s->bucket_info, attrs, &s->bucket_info.objv_tracker, s->yield);
+ return store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs, &s->bucket_info.objv_tracker, s->yield);
});
}
void RGWDeleteBucketTags::execute()
{
- if (!store->svc.zone->is_meta_master()) {
+ if (!store->svc()->zone->is_meta_master()) {
bufferlist in_data;
op_ret = forward_request_to_master(s, nullptr, store, in_data, nullptr);
if (op_ret < 0) {
}
}
- op_ret = retry_raced_bucket_write(store, s, [this] {
+ op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
map<string, bufferlist> attrs = s->bucket_attrs;
attrs.erase(RGW_ATTR_TAGS);
- op_ret = store->ctl.bucket->set_bucket_instance_attrs(s->bucket_info, attrs, &s->bucket_info.objv_tracker, s->yield);
+ op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs, &s->bucket_info.objv_tracker, s->yield);
if (op_ret < 0) {
ldpp_dout(this, 0) << "RGWDeleteBucketTags() failed to remove RGW_ATTR_TAGS on bucket="
<< s->bucket.name
if (s->user->user_id == s->bucket_owner.get_id()) {
uinfo = s->user;
} else {
- int r = store->ctl.user->get_info_by_uid(s->bucket_info.owner, &owner_info, s->yield);
+ int r = store->ctl()->user->get_info_by_uid(s->bucket_info.owner, &owner_info, s->yield);
if (r < 0)
return r;
uinfo = &owner_info;
} else if (uinfo->bucket_quota.enabled) {
bucket_quota = uinfo->bucket_quota;
} else {
- bucket_quota = store->svc.quota->get_bucket_quota();
+ bucket_quota = store->svc()->quota->get_bucket_quota();
}
if (uinfo->user_quota.enabled) {
user_quota = uinfo->user_quota;
} else {
- user_quota = store->svc.quota->get_user_quota();
+ user_quota = store->svc()->quota->get_user_quota();
}
return 0;
<< " end=" << cur_end << dendl;
obj_ctx.set_atomic(part);
- store->set_prefetch_data(&obj_ctx, part);
+ store->getRados()->set_prefetch_data(&obj_ctx, part);
- RGWRados::Object op_target(store, s->bucket_info, obj_ctx, part);
+ RGWRados::Object op_target(store->getRados(), s->bucket_info, obj_ctx, part);
RGWRados::Object::Read read_op(&op_target);
if (!swift_slo) {
}
static int iterate_user_manifest_parts(CephContext * const cct,
- RGWRados * const store,
+ rgw::sal::RGWRadosStore * const store,
const off_t ofs,
const off_t end,
RGWBucketInfo *pbucket_info,
utime_t start_time = ceph_clock_now();
- RGWRados::Bucket target(store, *pbucket_info);
+ RGWRados::Bucket target(store->getRados(), *pbucket_info);
RGWRados::Bucket::List list_op(&target);
list_op.params.prefix = obj_prefix;
};
static int iterate_slo_parts(CephContext *cct,
- RGWRados *store,
+ rgw::sal::RGWRadosStore *store,
off_t ofs,
off_t end,
map<uint64_t, rgw_slo_part>& slo_parts,
if (bucket_name.compare(s->bucket.name) != 0) {
map<string, bufferlist> bucket_attrs;
- auto obj_ctx = store->svc.sysobj->init_obj_ctx();
- int r = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
+ auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
+ int r = store->getRados()->get_bucket_info(obj_ctx, s->user->user_id.tenant,
bucket_name, bucket_info, NULL,
s->yield, &bucket_attrs);
if (r < 0) {
bucket = bucket_info.bucket;
pbucket_info = &bucket_info;
bucket_acl = &_bucket_acl;
- r = read_bucket_policy(store->ctl.user, s, bucket_info, bucket_attrs, bucket_acl, bucket);
+ r = read_bucket_policy(store->ctl()->user, s, bucket_info, bucket_attrs, bucket_acl, bucket);
if (r < 0) {
ldpp_dout(this, 0) << "failed to read bucket policy" << dendl;
return r;
RGWBucketInfo bucket_info;
map<string, bufferlist> bucket_attrs;
- auto obj_ctx = store->svc.sysobj->init_obj_ctx();
- int r = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
+ auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
+ int r = store->getRados()->get_bucket_info(obj_ctx, s->user->user_id.tenant,
bucket_name, bucket_info, nullptr,
s->yield, &bucket_attrs);
if (r < 0) {
}
bucket = bucket_info.bucket;
bucket_acl = &_bucket_acl;
- r = read_bucket_policy(store->ctl.user, s, bucket_info, bucket_attrs, bucket_acl,
+ r = read_bucket_policy(store->ctl()->user, s, bucket_info, bucket_attrs, bucket_acl,
bucket);
if (r < 0) {
ldpp_dout(this, 0) << "failed to read bucket ACL for bucket "
/* garbage collection related handling */
utime_t start_time = ceph_clock_now();
if (start_time > gc_invalidate_time) {
- int r = store->defer_gc(s->obj_ctx, s->bucket_info, obj, s->yield);
+ int r = store->getRados()->defer_gc(s->obj_ctx, s->bucket_info, obj, s->yield);
if (r < 0) {
ldpp_dout(this, 0) << "WARNING: could not defer gc entry for obj" << dendl;
}
perfcounter->inc(l_rgw_get);
- RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
+ RGWRados::Object op_target(store->getRados(), s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
RGWRados::Object::Read read_op(&op_target);
op_ret = get_params();
}
if (supports_account_metadata()) {
- op_ret = store->ctl.user->get_attrs_by_uid(s->user->user_id, &attrs, s->yield);
+ op_ret = store->ctl()->user->get_attrs_by_uid(s->user->user_id, &attrs, s->yield);
if (op_ret < 0) {
goto send_end;
}
/* We need to have stats for all our policies - even if a given policy
* isn't actually used in a given account. In such situation its usage
* stats would be simply full of zeros. */
- for (const auto& policy : store->svc.zone->get_zonegroup().placement_targets) {
+ for (const auto& policy : store->svc()->zone->get_zonegroup().placement_targets) {
policies_stats.emplace(policy.second.name,
decltype(policies_stats)::mapped_type());
}
RGWUsageIter usage_iter;
while (is_truncated) {
- op_ret = store->read_usage(s->user->user_id, s->bucket_name, start_epoch, end_epoch, max_entries,
+ op_ret = store->getRados()->read_usage(s->user->user_id, s->bucket_name, start_epoch, end_epoch, max_entries,
&is_truncated, usage_iter, usage);
if (op_ret == -ENOENT) {
return;
}
- op_ret = store->ctl.user->read_stats(s->user->user_id, &stats);
+ op_ret = store->ctl()->user->read_stats(s->user->user_id, &stats);
if (op_ret < 0) {
ldpp_dout(this, 0) << "ERROR: can't read user header" << dendl;
return;
/* We need to have stats for all our policies - even if a given policy
* isn't actually used in a given account. In such situation its usage
* stats would be simply full of zeros. */
- for (const auto& policy : store->svc.zone->get_zonegroup().placement_targets) {
+ for (const auto& policy : store->svc()->zone->get_zonegroup().placement_targets) {
policies_stats.emplace(policy.second.name,
decltype(policies_stats)::mapped_type());
}
return;
}
- if (!store->svc.zone->is_meta_master()) {
+ if (!store->svc()->zone->is_meta_master()) {
op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
bool modified = mfa_set_status;
- op_ret = retry_raced_bucket_write(store, s, [&] {
+ op_ret = retry_raced_bucket_write(store->getRados(), s, [&] {
if (mfa_set_status) {
if (mfa_status) {
s->bucket_info.flags |= BUCKET_MFA_ENABLED;
} else {
return op_ret;
}
- return store->put_bucket_instance_info(s->bucket_info, false, real_time(),
+ return store->getRados()->put_bucket_instance_info(s->bucket_info, false, real_time(),
&s->bucket_attrs);
});
if (op_ret < 0)
return;
- if (!store->svc.zone->is_meta_master()) {
+ if (!store->svc()->zone->is_meta_master()) {
op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
if (op_ret < 0) {
ldpp_dout(this, 0) << " forward_request_to_master returned ret=" << op_ret << dendl;
}
}
- op_ret = retry_raced_bucket_write(store, s, [this] {
+ op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
s->bucket_info.has_website = true;
s->bucket_info.website_conf = website_conf;
- op_ret = store->put_bucket_instance_info(s->bucket_info, false,
+ op_ret = store->getRados()->put_bucket_instance_info(s->bucket_info, false,
real_time(), &s->bucket_attrs);
return op_ret;
});
void RGWDeleteBucketWebsite::execute()
{
- if (!store->svc.zone->is_meta_master()) {
+ if (!store->svc()->zone->is_meta_master()) {
bufferlist in_data;
op_ret = forward_request_to_master(s, nullptr, store, in_data, nullptr);
if (op_ret < 0) {
return;
}
}
- op_ret = retry_raced_bucket_write(store, s, [this] {
+ op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
s->bucket_info.has_website = false;
s->bucket_info.website_conf = RGWBucketWebsiteConf();
- op_ret = store->put_bucket_instance_info(s->bucket_info, false,
+ op_ret = store->getRados()->put_bucket_instance_info(s->bucket_info, false,
real_time(), &s->bucket_attrs);
return op_ret;
});
bucket.bucket = s->bucket;
buckets.add(bucket);
map<string, RGWBucketEnt>& m = buckets.get_buckets();
- op_ret = store->update_containers_stats(m);
+ op_ret = store->getRados()->update_containers_stats(m);
if (! op_ret)
op_ret = -EEXIST;
if (op_ret > 0) {
map<string, RGWBucketEnt> m;
m[s->bucket.name] = RGWBucketEnt();
m.begin()->second.bucket = s->bucket;
- op_ret = store->update_containers_stats(m);
+ op_ret = store->getRados()->update_containers_stats(m);
if (op_ret > 0) {
bucket = m.begin()->second;
}
}
- RGWRados::Bucket target(store, s->bucket_info);
+ RGWRados::Bucket target(store->getRados(), s->bucket_info);
if (shard_id >= 0) {
target.set_shard_id(shard_id);
}
}
static int forward_request_to_master(struct req_state *s, obj_version *objv,
- RGWRados *store, bufferlist& in_data,
+ rgw::sal::RGWRadosStore *store, bufferlist& in_data,
JSONParser *jp, req_info *forward_info)
{
- if (!store->svc.zone->get_master_conn()) {
+ if (!store->svc()->zone->get_master_conn()) {
ldpp_dout(s, 0) << "rest connection is invalid" << dendl;
return -EINVAL;
}
bufferlist response;
string uid_str = s->user->user_id.to_str();
#define MAX_REST_RESPONSE (128 * 1024) // we expect a very small response
- int ret = store->svc.zone->get_master_conn()->forward(uid_str, (forward_info ? *forward_info : s->info),
+ int ret = store->svc()->zone->get_master_conn()->forward(uid_str, (forward_info ? *forward_info : s->info),
objv, MAX_REST_RESPONSE, &in_data, &response);
if (ret < 0)
return ret;
bool existed;
string bucket_name;
rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name, bucket_name);
- rgw_raw_obj obj(store->svc.zone->get_zone_params().domain_root, bucket_name);
+ rgw_raw_obj obj(store->svc()->zone->get_zone_params().domain_root, bucket_name);
obj_version objv, *pobjv = NULL;
op_ret = get_params();
if (!relaxed_region_enforcement &&
!location_constraint.empty() &&
- !store->svc.zone->has_zonegroup_api(location_constraint)) {
+ !store->svc()->zone->has_zonegroup_api(location_constraint)) {
ldpp_dout(this, 0) << "location constraint (" << location_constraint << ")"
<< " can't be found." << dendl;
op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
return;
}
- if (!relaxed_region_enforcement && !store->svc.zone->get_zonegroup().is_master_zonegroup() && !location_constraint.empty() &&
- store->svc.zone->get_zonegroup().api_name != location_constraint) {
+ if (!relaxed_region_enforcement && !store->svc()->zone->get_zonegroup().is_master_zonegroup() && !location_constraint.empty() &&
+ store->svc()->zone->get_zonegroup().api_name != location_constraint) {
ldpp_dout(this, 0) << "location constraint (" << location_constraint << ")"
- << " doesn't match zonegroup" << " (" << store->svc.zone->get_zonegroup().api_name << ")"
+ << " doesn't match zonegroup" << " (" << store->svc()->zone->get_zonegroup().api_name << ")"
<< dendl;
op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
s->err.message = "The specified location-constraint is not valid";
return;
}
- const auto& zonegroup = store->svc.zone->get_zonegroup();
+ const auto& zonegroup = store->svc()->zone->get_zonegroup();
if (!placement_rule.name.empty() &&
!zonegroup.placement_targets.count(placement_rule.name)) {
ldpp_dout(this, 0) << "placement target (" << placement_rule.name << ")"
<< " doesn't exist in the placement targets of zonegroup"
- << " (" << store->svc.zone->get_zonegroup().api_name << ")" << dendl;
+ << " (" << store->svc()->zone->get_zonegroup().api_name << ")" << dendl;
op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
s->err.message = "The specified placement target does not exist";
return;
/* we need to make sure we read bucket info, it's not read before for this
* specific request */
- op_ret = store->get_bucket_info(*s->sysobj_ctx, s->bucket_tenant, s->bucket_name,
+ op_ret = store->getRados()->get_bucket_info(*s->sysobj_ctx, s->bucket_tenant, s->bucket_name,
s->bucket_info, nullptr, s->yield, &s->bucket_attrs);
if (op_ret < 0 && op_ret != -ENOENT)
return;
s->bucket_owner.set_id(s->user->user_id);
s->bucket_owner.set_name(s->user->display_name);
if (s->bucket_exists) {
- int r = rgw_op_get_bucket_policy_from_attr(s->cct, store->ctl.user, s->bucket_info,
+ int r = rgw_op_get_bucket_policy_from_attr(s->cct, store->ctl()->user, s->bucket_info,
s->bucket_attrs, &old_policy);
if (r >= 0) {
if (old_policy.get_owner().get_id().compare(s->user->user_id) != 0) {
uint32_t *pmaster_num_shards;
real_time creation_time;
- if (!store->svc.zone->is_meta_master()) {
+ if (!store->svc()->zone->is_meta_master()) {
JSONParser jp;
op_ret = forward_request_to_master(s, NULL, store, in_data, &jp);
if (op_ret < 0) {
if (s->system_request) {
zonegroup_id = s->info.args.get(RGW_SYS_PARAM_PREFIX "zonegroup");
if (zonegroup_id.empty()) {
- zonegroup_id = store->svc.zone->get_zonegroup().get_id();
+ zonegroup_id = store->svc()->zone->get_zonegroup().get_id();
}
} else {
- zonegroup_id = store->svc.zone->get_zonegroup().get_id();
+ zonegroup_id = store->svc()->zone->get_zonegroup().get_id();
}
if (s->bucket_exists) {
rgw_bucket bucket;
bucket.tenant = s->bucket_tenant;
bucket.name = s->bucket_name;
- op_ret = store->svc.zone->select_bucket_placement(*(s->user), zonegroup_id,
+ op_ret = store->svc()->zone->select_bucket_placement(*(s->user), zonegroup_id,
placement_rule,
&selected_placement_rule, nullptr);
if (selected_placement_rule != s->bucket_info.placement_rule) {
}
- op_ret = store->create_bucket(*(s->user), s->bucket, zonegroup_id,
+ op_ret = store->getRados()->create_bucket(*(s->user), s->bucket, zonegroup_id,
placement_rule, s->bucket_info.swift_ver_location,
pquota_info, attrs,
info, pobjv, &ep_objv, creation_time,
s->bucket = info.bucket;
}
- op_ret = store->ctl.bucket->link_bucket(s->user->user_id, s->bucket,
+ op_ret = store->ctl()->bucket->link_bucket(s->user->user_id, s->bucket,
info.creation_time, s->yield, false);
if (op_ret && !existed && op_ret != -EEXIST) {
/* if it exists (or previously existed), don't remove it! */
- op_ret = store->ctl.bucket->unlink_bucket(s->user->user_id, s->bucket, s->yield);
+ op_ret = store->ctl()->bucket->unlink_bucket(s->user->user_id, s->bucket, s->yield);
if (op_ret < 0) {
ldpp_dout(this, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret
<< dendl;
RGWBucketInfo binfo;
map<string, bufferlist> battrs;
- op_ret = store->get_bucket_info(*s->sysobj_ctx, s->bucket_tenant, s->bucket_name,
+ op_ret = store->getRados()->get_bucket_info(*s->sysobj_ctx, s->bucket_tenant, s->bucket_name,
binfo, nullptr, s->yield, &battrs);
if (op_ret < 0) {
return;
s->bucket_info.has_website = !s->bucket_info.website_conf.is_empty();
/* This will also set the quota on the bucket. */
- op_ret = store->ctl.bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
+ op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
&s->bucket_info.objv_tracker,
s->yield);
} while (op_ret == -ECANCELED && tries++ < 20);
}
}
- op_ret = store->ctl.bucket->sync_user_stats(s->user->user_id, s->bucket_info);
+ op_ret = store->ctl()->bucket->sync_user_stats(s->user->user_id, s->bucket_info);
if ( op_ret < 0) {
ldpp_dout(this, 1) << "WARNING: failed to sync user stats before bucket delete: op_ret= " << op_ret << dendl;
}
- op_ret = store->check_bucket_empty(s->bucket_info, s->yield);
+ op_ret = store->getRados()->check_bucket_empty(s->bucket_info, s->yield);
if (op_ret < 0) {
return;
}
- if (!store->svc.zone->is_meta_master()) {
+ if (!store->svc()->zone->is_meta_master()) {
bufferlist in_data;
op_ret = forward_request_to_master(s, &ot.read_version, store, in_data,
NULL);
return;
}
- op_ret = store->delete_bucket(s->bucket_info, ot, s->yield, false);
+ op_ret = store->getRados()->delete_bucket(s->bucket_info, ot, s->yield, false);
if (op_ret == -ECANCELED) {
// lost a race, either with mdlog sync or another delete bucket operation.
}
if (op_ret == 0) {
- op_ret = store->ctl.bucket->unlink_bucket(s->bucket_info.owner,
+ op_ret = store->ctl()->bucket->unlink_bucket(s->bucket_info.owner,
s->bucket, s->yield, false);
if (op_ret < 0) {
ldpp_dout(this, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret
rgw_obj_key cs_object(copy_source_object_name, copy_source_version_id);
rgw_obj obj(cs_bucket, cs_object);
- store->set_atomic(s->obj_ctx, obj);
- store->set_prefetch_data(s->obj_ctx, obj);
+ store->getRados()->set_atomic(s->obj_ctx, obj);
+ store->getRados()->set_prefetch_data(s->obj_ctx, obj);
/* check source object permissions */
if (read_obj_policy(store, s, copy_source_bucket_info, cs_attrs, &cs_acl, nullptr,
rgw_obj_key obj_key(copy_source_object_name, copy_source_version_id);
rgw_obj obj(copy_source_bucket_info.bucket, obj_key);
- RGWRados::Object op_target(store, copy_source_bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
+ RGWRados::Object op_target(store->getRados(), copy_source_bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
RGWRados::Object::Read read_op(&op_target);
read_op.params.obj_size = &obj_size;
read_op.params.attrs = &attrs;
if (!chunked_upload) { /* with chunked upload we don't know how big is the upload.
we also check sizes at the end anyway */
- op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
+ op_ret = store->getRados()->check_quota(s->bucket_owner.get_id(), s->bucket,
user_quota, bucket_quota, s->content_length);
if (op_ret < 0) {
ldpp_dout(this, 20) << "check_quota() returned ret=" << op_ret << dendl;
return;
}
- op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
+ op_ret = store->getRados()->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
if (op_ret < 0) {
ldpp_dout(this, 20) << "check_bucket_shards() returned ret=" << op_ret << dendl;
return;
/* Handle object versioning of Swift API. */
if (! multipart) {
- op_ret = store->swift_versioning_copy(obj_ctx,
+ op_ret = store->getRados()->swift_versioning_copy(obj_ctx,
s->bucket_owner.get_id(),
s->bucket_info,
obj,
if (!version_id.empty()) {
obj.key.set_instance(version_id);
} else {
- store->gen_rand_obj_instance_name(&obj);
+ store->getRados()->gen_rand_obj_instance_name(&obj);
version_id = obj.key.instance;
}
}
rgw_obj obj(copy_source_bucket_info.bucket, obj_key.name);
RGWObjState *astate;
- op_ret = store->get_obj_state(&obj_ctx, copy_source_bucket_info, obj,
+ op_ret = store->getRados()->get_obj_state(&obj_ctx, copy_source_bucket_info, obj,
&astate, true, s->yield, false);
if (op_ret < 0) {
ldpp_dout(this, 0) << "ERROR: get copy source obj state returned with error" << op_ret << dendl;
// no filters by default
DataProcessor *filter = processor.get();
- const auto& compression_type = store->svc.zone->get_zone_params().get_compression_type(*pdest_placement);
+ const auto& compression_type = store->svc()->zone->get_zone_params().get_compression_type(*pdest_placement);
CompressorRef plugin;
boost::optional<RGWPutObj_Compress> compressor;
return;
}
- op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
+ op_ret = store->getRados()->check_quota(s->bucket_owner.get_id(), s->bucket,
user_quota, bucket_quota, s->obj_size);
if (op_ret < 0) {
ldpp_dout(this, 20) << "second check_quota() returned op_ret=" << op_ret << dendl;
return;
}
- op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
+ op_ret = store->getRados()->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
if (op_ret < 0) {
ldpp_dout(this, 20) << "check_bucket_shards() returned ret=" << op_ret << dendl;
return;
ceph::buffer::list bl, aclbl;
int len = 0;
- op_ret = store->check_quota(s->bucket_owner.get_id(),
+ op_ret = store->getRados()->check_quota(s->bucket_owner.get_id(),
s->bucket,
user_quota,
bucket_quota,
return;
}
- op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
+ op_ret = store->getRados()->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
if (op_ret < 0) {
return;
}
rgw_obj obj(s->bucket, get_current_filename());
if (s->bucket_info.versioning_enabled()) {
- store->gen_rand_obj_instance_name(&obj);
+ store->getRados()->gen_rand_obj_instance_name(&obj);
}
auto aio = rgw::make_throttle(s->cct->_conf->rgw_put_obj_min_window_size,
if (encrypt != nullptr) {
filter = encrypt.get();
} else {
- const auto& compression_type = store->svc.zone->get_zone_params().get_compression_type(
+ const auto& compression_type = store->svc()->zone->get_zone_params().get_compression_type(
s->dest_placement);
if (compression_type != "none") {
plugin = Compressor::create(s->cct, compression_type);
s->obj_size = ofs;
- op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
+ op_ret = store->getRados()->check_quota(s->bucket_owner.get_id(), s->bucket,
user_quota, bucket_quota, s->obj_size);
if (op_ret < 0) {
return;
}
- op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
+ op_ret = store->getRados()->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
if (op_ret < 0) {
return;
}
return op_ret;
}
- op_ret = store->ctl.user->get_attrs_by_uid(s->user->user_id, &orig_attrs,
+ op_ret = store->ctl()->user->get_attrs_by_uid(s->user->user_id, &orig_attrs,
s->yield,
&acct_op_tracker);
if (op_ret < 0) {
{
/* Params have been extracted earlier. See init_processing(). */
RGWUserInfo new_uinfo;
- op_ret = store->ctl.user->get_info_by_uid(s->user->user_id, &new_uinfo, s->yield,
+ op_ret = store->ctl()->user->get_info_by_uid(s->user->user_id, &new_uinfo, s->yield,
RGWUserCtl::GetParams()
.set_objv_tracker(&acct_op_tracker));
if (op_ret < 0) {
/* We are passing here the current (old) user info to allow the function
* optimize-out some operations. */
- op_ret = store->ctl.user->store_info(new_uinfo, s->yield,
+ op_ret = store->ctl()->user->store_info(new_uinfo, s->yield,
RGWUserCtl::PutParams()
.set_old_info(s->user)
.set_objv_tracker(&acct_op_tracker)
return;
}
- op_ret = retry_raced_bucket_write(store, s, [this] {
+ op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
/* Encode special metadata first as we're using std::map::emplace under
* the hood. This method will add the new items only if the map doesn't
* contain such keys yet. */
/* Setting attributes also stores the provided bucket info. Due
* to this fact, the new quota settings can be serialized with
* the same call. */
- op_ret = store->ctl.bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
+ op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
&s->bucket_info.objv_tracker,
s->yield);
return op_ret;
rgw_obj target_obj;
map<string, bufferlist> attrs, orig_attrs, rmattrs;
- store->set_atomic(s->obj_ctx, obj);
+ store->getRados()->set_atomic(s->obj_ctx, obj);
op_ret = get_params();
if (op_ret < 0) {
}
}
- op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, target_obj,
+ op_ret = store->getRados()->set_attrs(s->obj_ctx, s->bucket_info, target_obj,
attrs, &rmattrs, s->yield);
}
obj_ctx->set_atomic(obj);
bool ver_restored = false;
- op_ret = store->swift_versioning_restore(*s->sysobj_ctx, *obj_ctx, s->bucket_owner.get_id(),
+ op_ret = store->getRados()->swift_versioning_restore(*s->sysobj_ctx, *obj_ctx, s->bucket_owner.get_id(),
s->bucket_info, obj, ver_restored, this);
if (op_ret < 0) {
return;
/* Swift's versioning mechanism hasn't found any previous version of
* the object that could be restored. This means we should proceed
* with the regular delete path. */
- RGWRados::Object del_target(store, s->bucket_info, *obj_ctx, obj);
+ RGWRados::Object del_target(store->getRados(), s->bucket_info, *obj_ctx, obj);
RGWRados::Object::Delete del_op(&del_target);
op_ret = get_system_versioning_params(s, &del_op.params.olh_epoch,
map<string, bufferlist> src_attrs;
if (s->bucket_instance_id.empty()) {
- op_ret = store->get_bucket_info(*s->sysobj_ctx, src_tenant_name, src_bucket_name, src_bucket_info, NULL, s->yield, &src_attrs);
+ op_ret = store->getRados()->get_bucket_info(*s->sysobj_ctx, src_tenant_name, src_bucket_name, src_bucket_info, NULL, s->yield, &src_attrs);
} else {
/* will only happen in intra region sync where the source and dest bucket is the same */
rgw_bucket b(rgw_bucket_key(src_tenant_name, src_bucket_name, s->bucket_instance_id));
- op_ret = store->get_bucket_instance_info(*s->sysobj_ctx, b, src_bucket_info, NULL, &src_attrs, s->yield);
+ op_ret = store->getRados()->get_bucket_instance_info(*s->sysobj_ctx, b, src_bucket_info, NULL, &src_attrs, s->yield);
}
if (op_ret < 0) {
if (op_ret == -ENOENT) {
/* get buckets info (source and dest) */
if (s->local_source && source_zone.empty()) {
rgw_obj src_obj(src_bucket, src_object);
- store->set_atomic(s->obj_ctx, src_obj);
- store->set_prefetch_data(s->obj_ctx, src_obj);
+ store->getRados()->set_atomic(s->obj_ctx, src_obj);
+ store->getRados()->set_prefetch_data(s->obj_ctx, src_obj);
rgw_placement_rule src_placement;
dest_bucket_info = src_bucket_info;
dest_attrs = src_attrs;
} else {
- op_ret = store->get_bucket_info(*s->sysobj_ctx, dest_tenant_name, dest_bucket_name,
+ op_ret = store->getRados()->get_bucket_info(*s->sysobj_ctx, dest_tenant_name, dest_bucket_name,
dest_bucket_info, nullptr, s->yield, &dest_attrs);
if (op_ret < 0) {
if (op_ret == -ENOENT) {
dest_bucket = dest_bucket_info.bucket;
rgw_obj dest_obj(dest_bucket, dest_object);
- store->set_atomic(s->obj_ctx, dest_obj);
+ store->getRados()->set_atomic(s->obj_ctx, dest_obj);
/* check dest bucket permissions */
- op_ret = read_bucket_policy(store->ctl.user, s, dest_bucket_info, dest_attrs,
+ op_ret = read_bucket_policy(store->ctl()->user, s, dest_bucket_info, dest_attrs,
&dest_bucket_policy, dest_bucket);
if (op_ret < 0) {
return op_ret;
if ( ! version_id.empty()) {
dst_obj.key.set_instance(version_id);
} else if (dest_bucket_info.versioning_enabled()) {
- store->gen_rand_obj_instance_name(&dst_obj);
+ store->getRados()->gen_rand_obj_instance_name(&dst_obj);
}
obj_ctx.set_atomic(src_obj);
/* Handle object versioning of Swift API. In case of copying to remote this
* should fail gently (op_ret == 0) as the dst_obj will not exist here. */
- op_ret = store->swift_versioning_copy(obj_ctx,
+ op_ret = store->getRados()->swift_versioning_copy(obj_ctx,
dest_bucket_info.owner,
dest_bucket_info,
dst_obj,
return;
}
- op_ret = store->copy_obj(obj_ctx,
+ op_ret = store->getRados()->copy_obj(obj_ctx,
s->user->user_id,
&s->info,
source_zone,
}
// forward bucket acl requests to meta master zone
- if (s->object.empty() && !store->svc.zone->is_meta_master()) {
+ if (s->object.empty() && !store->svc()->zone->is_meta_master()) {
bufferlist in_data;
// include acl data unless it was generated from a canned_acl
if (s->canned_acl.empty()) {
*_dout << dendl;
}
- op_ret = policy->rebuild(store->ctl.user, &owner, new_policy);
+ op_ret = policy->rebuild(store->ctl()->user, &owner, new_policy);
if (op_ret < 0)
return;
if (!s->object.empty()) {
obj = rgw_obj(s->bucket, s->object);
- store->set_atomic(s->obj_ctx, obj);
+ store->getRados()->set_atomic(s->obj_ctx, obj);
//if instance is empty, we should modify the latest object
op_ret = modify_obj_attr(store, s, obj, RGW_ATTR_ACL, bl);
} else {
attrs = s->bucket_attrs;
attrs[RGW_ATTR_ACL] = bl;
- op_ret = store->ctl.bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
+ op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
&s->bucket_info.objv_tracker,
s->yield);
}
return;
}
- op_ret = config.rebuild(store, new_config);
+ op_ret = config.rebuild(store->getRados(), new_config);
if (op_ret < 0)
return;
ldpp_dout(this, 15) << "New LifecycleConfiguration:" << ss.str() << dendl;
}
- if (!store->svc.zone->is_meta_master()) {
+ if (!store->svc()->zone->is_meta_master()) {
op_ret = forward_request_to_master(s, nullptr, store, data, nullptr);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
}
}
- op_ret = store->get_lc()->set_bucket_config(s->bucket_info, s->bucket_attrs, &new_config);
+ op_ret = store->getRados()->get_lc()->set_bucket_config(s->bucket_info, s->bucket_attrs, &new_config);
if (op_ret < 0) {
return;
}
void RGWDeleteLC::execute()
{
- if (!store->svc.zone->is_meta_master()) {
+ if (!store->svc()->zone->is_meta_master()) {
bufferlist data;
op_ret = forward_request_to_master(s, nullptr, store, data, nullptr);
if (op_ret < 0) {
}
map<string, bufferlist> attrs = s->bucket_attrs;
attrs.erase(RGW_ATTR_LC);
- op_ret = store->ctl.bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
+ op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
&s->bucket_info.objv_tracker,
s->yield);
if (op_ret < 0) {
return;
}
- op_ret = store->get_lc()->remove_bucket_config(s->bucket_info, s->bucket_attrs);
+ op_ret = store->getRados()->get_lc()->remove_bucket_config(s->bucket_info, s->bucket_attrs);
if (op_ret < 0) {
return;
}
if (op_ret < 0)
return;
- if (!store->svc.zone->is_meta_master()) {
+ if (!store->svc()->zone->is_meta_master()) {
op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
}
}
- op_ret = retry_raced_bucket_write(store, s, [this] {
+ op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
map<string, bufferlist> attrs = s->bucket_attrs;
attrs[RGW_ATTR_CORS] = cors_bl;
- return store->ctl.bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
+ return store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
&s->bucket_info.objv_tracker,
s->yield);
});
void RGWDeleteCORS::execute()
{
- if (!store->svc.zone->is_meta_master()) {
+ if (!store->svc()->zone->is_meta_master()) {
bufferlist data;
op_ret = forward_request_to_master(s, nullptr, store, data, nullptr);
if (op_ret < 0) {
}
}
- op_ret = retry_raced_bucket_write(store, s, [this] {
+ op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
op_ret = read_bucket_cors();
if (op_ret < 0)
return op_ret;
map<string, bufferlist> attrs = s->bucket_attrs;
attrs.erase(RGW_ATTR_CORS);
- op_ret = store->ctl.bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
+ op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
&s->bucket_info.objv_tracker,
s->yield);
if (op_ret < 0) {
void RGWSetRequestPayment::execute()
{
- if (!store->svc.zone->is_meta_master()) {
+ if (!store->svc()->zone->is_meta_master()) {
op_ret = forward_request_to_master(s, nullptr, store, in_data, nullptr);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
s->bucket_info.requester_pays = requester_pays;
- op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(),
+ op_ret = store->getRados()->put_bucket_instance_info(s->bucket_info, false, real_time(),
&s->bucket_attrs);
if (op_ret < 0) {
ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
obj.set_in_extra_data(true);
obj.index_hash_source = s->object.name;
- RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
+ RGWRados::Object op_target(store->getRados(), s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
op_target.set_versioning_disabled(true); /* no versioning for multipart meta */
RGWRados::Object::Write obj_op(&op_target);
s->cct->_conf.get_val<int64_t>("rgw_mp_lock_max_time");
utime_t dur(max_lock_secs_mp, 0);
- store->obj_to_raw((s->bucket_info).placement_rule, meta_obj, &raw_obj);
- store->get_obj_data_pool((s->bucket_info).placement_rule,
+ store->getRados()->obj_to_raw((s->bucket_info).placement_rule, meta_obj, &raw_obj);
+ store->getRados()->get_obj_data_pool((s->bucket_info).placement_rule,
meta_obj,&meta_pool);
- store->open_pool_ctx(meta_pool, serializer.ioctx, true);
+ store->getRados()->open_pool_ctx(meta_pool, serializer.ioctx, true);
op_ret = serializer.try_lock(raw_obj.oid, dur);
if (op_ret < 0) {
op_ret = -ERR_INVALID_PART;
return;
} else {
- manifest.append(obj_part.manifest, store->svc.zone);
+ manifest.append(obj_part.manifest, store->svc()->zone);
}
bool part_compressed = (obj_part.cs_info.compression_type != "none");
if (!version_id.empty()) {
target_obj.key.set_instance(version_id);
} else {
- store->gen_rand_obj_instance_name(&target_obj);
+ store->getRados()->gen_rand_obj_instance_name(&target_obj);
version_id = target_obj.key.get_instance();
}
}
obj_ctx.set_atomic(target_obj);
- RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), target_obj);
+ RGWRados::Object op_target(store->getRados(), s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), target_obj);
RGWRados::Object::Write obj_op(&op_target);
obj_op.meta.manifest = &manifest;
return;
// remove the upload obj
- int r = store->delete_obj(*static_cast<RGWObjectCtx *>(s->obj_ctx),
+ int r = store->getRados()->delete_obj(*static_cast<RGWObjectCtx *>(s->obj_ctx),
s->bucket_info, meta_obj, 0);
if (r >= 0) {
/* serializer's exclusive lock is released */
obj_ctx->set_atomic(obj);
- RGWRados::Object del_target(store, s->bucket_info, *obj_ctx, obj);
+ RGWRados::Object del_target(store->getRados(), s->bucket_info, *obj_ctx, obj);
RGWRados::Object::Delete del_op(&del_target);
del_op.params.bucket_owner = s->bucket_owner.get_id();
ACLOwner& bucket_owner /* out */)
{
RGWAccessControlPolicy bacl(store->ctx());
- int ret = read_bucket_policy(store->ctl.user, s, binfo, battrs, &bacl, binfo.bucket);
+ int ret = read_bucket_policy(store->ctl()->user, s, binfo, battrs, &bacl, binfo.bucket);
if (ret < 0) {
return false;
}
rgw_bucket b(rgw_bucket_key(s->user->user_id.tenant, path.bucket_name));
- int ret = store->ctl.bucket->read_bucket_info(b, &binfo, s->yield,
+ int ret = store->ctl()->bucket->read_bucket_info(b, &binfo, s->yield,
RGWBucketCtl::BucketInstance::GetParams()
.set_attrs(&battrs),
&ot);
rgw_obj obj(binfo.bucket, path.obj_key);
obj_ctx.set_atomic(obj);
- RGWRados::Object del_target(store, binfo, obj_ctx, obj);
+ RGWRados::Object del_target(store->getRados(), binfo, obj_ctx, obj);
RGWRados::Object::Delete del_op(&del_target);
del_op.params.bucket_owner = binfo.owner;
goto delop_fail;
}
} else {
- ret = store->delete_bucket(binfo, ot, s->yield);
+ ret = store->getRados()->delete_bucket(binfo, ot, s->yield);
if (0 == ret) {
- ret = store->ctl.bucket->unlink_bucket(binfo.owner, binfo.bucket, s->yield, false);
+ ret = store->ctl()->bucket->unlink_bucket(binfo.owner, binfo.bucket, s->yield, false);
if (ret < 0) {
ldpp_dout(s, 0) << "WARNING: failed to unlink bucket: ret=" << ret << dendl;
}
goto delop_fail;
}
- if (!store->svc.zone->is_meta_master()) {
+ if (!store->svc()->zone->is_meta_master()) {
bufferlist in_data;
ret = forward_request_to_master(s, &ot.read_version, store, in_data,
nullptr);
info.effective_uri = "/" + bucket_name;
}
-void RGWBulkUploadOp::init(RGWRados* const store,
+void RGWBulkUploadOp::init(rgw::sal::RGWRadosStore* const store,
struct req_state* const s,
RGWHandler* const h)
{
RGWOp::init(store, s, h);
- dir_ctx.emplace(store->svc.sysobj->init_obj_ctx());
+ dir_ctx.emplace(store->svc()->sysobj->init_obj_ctx());
}
int RGWBulkUploadOp::handle_dir(const boost::string_ref path)
rgw_obj_key object_junk;
std::tie(bucket_name, object_junk) = *parse_path(path);
- rgw_raw_obj obj(store->svc.zone->get_zone_params().domain_root,
+ rgw_raw_obj obj(store->svc()->zone->get_zone_params().domain_root,
rgw_make_bucket_entry_name(s->bucket_tenant, bucket_name));
/* we need to make sure we read bucket info, it's not read before for this
* specific request */
RGWBucketInfo binfo;
std::map<std::string, ceph::bufferlist> battrs;
- op_ret = store->get_bucket_info(*dir_ctx, s->bucket_tenant, bucket_name,
+ op_ret = store->getRados()->get_bucket_info(*dir_ctx, s->bucket_tenant, bucket_name,
binfo, nullptr, s->yield, &battrs);
if (op_ret < 0 && op_ret != -ENOENT) {
return op_ret;
if (bucket_exists) {
RGWAccessControlPolicy old_policy(s->cct);
- int r = rgw_op_get_bucket_policy_from_attr(s->cct, store->ctl.user, binfo,
+ int r = rgw_op_get_bucket_policy_from_attr(s->cct, store->ctl()->user, binfo,
battrs, &old_policy);
if (r >= 0) {
if (old_policy.get_owner().get_id().compare(s->user->user_id) != 0) {
real_time creation_time;
obj_version objv, ep_objv, *pobjv = nullptr;
- if (! store->svc.zone->is_meta_master()) {
+ if (! store->svc()->zone->is_meta_master()) {
JSONParser jp;
ceph::bufferlist in_data;
req_info info = s->info;
rgw_bucket bucket;
bucket.tenant = s->bucket_tenant;
bucket.name = s->bucket_name;
- op_ret = store->svc.zone->select_bucket_placement(*(s->user),
- store->svc.zone->get_zonegroup().get_id(),
+ op_ret = store->svc()->zone->select_bucket_placement(*(s->user),
+ store->svc()->zone->get_zonegroup().get_id(),
placement_rule,
&selected_placement_rule,
nullptr);
RGWBucketInfo out_info;
- op_ret = store->create_bucket(*(s->user),
+ op_ret = store->getRados()->create_bucket(*(s->user),
bucket,
- store->svc.zone->get_zonegroup().get_id(),
+ store->svc()->zone->get_zonegroup().get_id(),
placement_rule, binfo.swift_ver_location,
pquota_info, attrs,
out_info, pobjv, &ep_objv, creation_time,
bucket = out_info.bucket;
}
- op_ret = store->ctl.bucket->link_bucket(s->user->user_id, bucket,
+ op_ret = store->ctl()->bucket->link_bucket(s->user->user_id, bucket,
out_info.creation_time,
s->yield, false);
if (op_ret && !existed && op_ret != -EEXIST) {
/* if it exists (or previously existed), don't remove it! */
- op_ret = store->ctl.bucket->unlink_bucket(s->user->user_id, bucket, s->yield);
+ op_ret = store->ctl()->bucket->unlink_bucket(s->user->user_id, bucket, s->yield);
if (op_ret < 0) {
ldpp_dout(this, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret << dendl;
}
ACLOwner& bucket_owner /* out */)
{
RGWAccessControlPolicy bacl(store->ctx());
- op_ret = read_bucket_policy(store->ctl.user, s, binfo, battrs, &bacl, binfo.bucket);
+ op_ret = read_bucket_policy(store->ctl()->user, s, binfo, battrs, &bacl, binfo.bucket);
if (op_ret < 0) {
ldpp_dout(this, 20) << "cannot read_policy() for bucket" << dendl;
return false;
RGWBucketInfo binfo;
std::map<std::string, ceph::bufferlist> battrs;
ACLOwner bowner;
- op_ret = store->get_bucket_info(*s->sysobj_ctx, s->user->user_id.tenant,
+ op_ret = store->getRados()->get_bucket_info(*s->sysobj_ctx, s->user->user_id.tenant,
bucket_name, binfo, nullptr, s->yield, &battrs);
if (op_ret == -ENOENT) {
ldpp_dout(this, 20) << "non existent directory=" << bucket_name << dendl;
return op_ret;
}
- op_ret = store->check_quota(bowner.get_id(), binfo.bucket,
+ op_ret = store->getRados()->check_quota(bowner.get_id(), binfo.bucket,
user_quota, bucket_quota, size);
if (op_ret < 0) {
return op_ret;
}
- op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
+ op_ret = store->getRados()->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
if (op_ret < 0) {
return op_ret;
}
rgw_obj obj(binfo.bucket, object);
if (s->bucket_info.versioning_enabled()) {
- store->gen_rand_obj_instance_name(&obj);
+ store->getRados()->gen_rand_obj_instance_name(&obj);
}
rgw_placement_rule dest_placement = s->dest_placement;
/* No filters by default. */
DataProcessor *filter = &processor;
- const auto& compression_type = store->svc.zone->get_zone_params().get_compression_type(
+ const auto& compression_type = store->svc()->zone->get_zone_params().get_compression_type(
dest_placement);
CompressorRef plugin;
boost::optional<RGWPutObj_Compress> compressor;
return op_ret;
}
- op_ret = store->check_quota(bowner.get_id(), binfo.bucket,
+ op_ret = store->getRados()->check_quota(bowner.get_id(), binfo.bucket,
user_quota, bucket_quota, size);
if (op_ret < 0) {
ldpp_dout(this, 20) << "quota exceeded for path=" << path << dendl;
return op_ret;
}
- op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
+ op_ret = store->getRados()->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
if (op_ret < 0) {
return op_ret;
}
rgw_obj obj(s->bucket, s->object);
if (!s->object.empty()) {
- store->set_atomic(s->obj_ctx, obj);
- op_ret = store->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, nullptr, s->yield);
+ store->getRados()->set_atomic(s->obj_ctx, obj);
+ op_ret = store->getRados()->set_attrs(s->obj_ctx, s->bucket_info, obj, attrs, nullptr, s->yield);
} else {
for (auto& iter : attrs) {
s->bucket_attrs[iter.first] = std::move(iter.second);
}
- op_ret = store->ctl.bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
+ op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
&s->bucket_info.objv_tracker,
s->yield);
}
void RGWGetObjLayout::execute()
{
rgw_obj obj(s->bucket, s->object);
- RGWRados::Object target(store,
+ RGWRados::Object target(store->getRados(),
s->bucket_info,
*static_cast<RGWObjectCtx *>(s->obj_ctx),
rgw_obj(s->bucket, s->object));
s->bucket_info.mdsearch_config = mdsearch_config;
- op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
+ op_ret = store->getRados()->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
if (op_ret < 0) {
ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
<< " returned err=" << op_ret << dendl;
{
s->bucket_info.mdsearch_config.clear();
- op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
+ op_ret = store->getRados()->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
if (op_ret < 0) {
ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
<< " returned err=" << op_ret << dendl;
{
}
-int RGWHandler::init(RGWRados *_store,
+int RGWHandler::init(rgw::sal::RGWRadosStore *_store,
struct req_state *_s,
rgw::io::BasicClient *cio)
{
return;
}
- if (!store->svc.zone->is_meta_master()) {
+ if (!store->svc()->zone->is_meta_master()) {
op_ret = forward_request_to_master(s, NULL, store, data, nullptr);
if (op_ret < 0) {
ldpp_dout(this, 20) << "forward_request_to_master returned ret=" << op_ret << dendl;
try {
const Policy p(s->cct, s->bucket_tenant, data);
- op_ret = retry_raced_bucket_write(store, s, [&p, this] {
+ op_ret = retry_raced_bucket_write(store->getRados(), s, [&p, this] {
auto attrs = s->bucket_attrs;
attrs[RGW_ATTR_IAM_POLICY].clear();
attrs[RGW_ATTR_IAM_POLICY].append(p.text);
- op_ret = store->ctl.bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
+ op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
&s->bucket_info.objv_tracker,
s->yield);
return op_ret;
void RGWDeleteBucketPolicy::execute()
{
- op_ret = retry_raced_bucket_write(store, s, [this] {
+ op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
auto attrs = s->bucket_attrs;
attrs.erase(RGW_ATTR_IAM_POLICY);
- op_ret = store->ctl.bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
+ op_ret = store->ctl()->bucket->set_bucket_instance_attrs(s->bucket_info, attrs,
&s->bucket_info.objv_tracker,
s->yield);
return op_ret;
return;
}
- if (!store->svc.zone->is_meta_master()) {
+ if (!store->svc()->zone->is_meta_master()) {
op_ret = forward_request_to_master(s, NULL, store, data, nullptr);
if (op_ret < 0) {
ldout(s->cct, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl;
}
}
- op_ret = retry_raced_bucket_write(store, s, [this] {
+ op_ret = retry_raced_bucket_write(store->getRados(), s, [this] {
s->bucket_info.obj_lock = obj_lock;
- op_ret = store->put_bucket_instance_info(s->bucket_info, false,
+ op_ret = store->getRados()->put_bucket_instance_info(s->bucket_info, false,
real_time(), &s->bucket_attrs);
return op_ret;
});
void RGWGetClusterStat::execute()
{
- op_ret = this->store->get_rados_handle()->cluster_stat(stats_op);
+ op_ret = this->store->getRados()->get_rados_handle()->cluster_stat(stats_op);
}
class RGWHandler {
protected:
- RGWRados *store{nullptr};
+ rgw::sal::RGWRadosStore* store{nullptr};
struct req_state *s{nullptr};
int do_init_permissions();
RGWHandler() {}
virtual ~RGWHandler();
- virtual int init(RGWRados *store,
+ virtual int init(rgw::sal::RGWRadosStore* store,
struct req_state* _s,
rgw::io::BasicClient* cio);
protected:
struct req_state *s;
RGWHandler *dialect_handler;
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
RGWCORSConfiguration bucket_cors;
bool cors_exist;
RGWQuotaInfo bucket_quota;
return 0;
}
- virtual void init(RGWRados *store, struct req_state *s, RGWHandler *dialect_handler) {
+ virtual void init(rgw::sal::RGWRadosStore *store, struct req_state *s, RGWHandler *dialect_handler) {
this->store = store;
this->s = s;
this->dialect_handler = dialect_handler;
unsigned int num_unfound;
std::list<fail_desc_t> failures;
- RGWRados * const store;
+ rgw::sal::RGWRadosStore * const store;
req_state * const s;
public:
- Deleter(const DoutPrefixProvider* dpp, RGWRados * const str, req_state * const s)
+ Deleter(const DoutPrefixProvider* dpp, rgw::sal::RGWRadosStore * const str, req_state * const s)
: dpp(dpp),
num_deleted(0),
num_unfound(0),
: num_created(0) {
}
- void init(RGWRados* const store,
+ void init(rgw::sal::RGWRadosStore* const store,
struct req_state* const s,
RGWHandler* const h) override;
int verify_permission() override;
void pre_exec() override;
void execute() override;
- void init(RGWRados *store, struct req_state *s, RGWHandler *h) override {
+ void init(rgw::sal::RGWRadosStore *store, struct req_state *s, RGWHandler *h) override {
RGWOp::init(store, s, h);
policy.set_ctx(s->cct);
relaxed_region_enforcement =
delete obj_legal_hold;
}
- void init(RGWRados *store, struct req_state *s, RGWHandler *h) override {
+ void init(rgw::sal::RGWRadosStore *store, struct req_state *s, RGWHandler *h) override {
RGWOp::init(store, s, h);
policy.set_ctx(s->cct);
}
attrs.emplace(std::move(key), std::move(bl)); /* key and bl are r-value refs */
}
- void init(RGWRados *store, struct req_state *s, RGWHandler *h) override {
+ void init(rgw::sal::RGWRadosStore *store, struct req_state *s, RGWHandler *h) override {
RGWOp::init(store, s, h);
policy.set_ctx(s->cct);
}
has_policy(false) {
}
- void init(RGWRados *store, struct req_state *s, RGWHandler *h) override {
+ void init(rgw::sal::RGWRadosStore *store, struct req_state *s, RGWHandler *h) override {
RGWOp::init(store, s, h);
policy.set_ctx(s->cct);
}
attrs.emplace(std::move(key), std::move(bl)); /* key and bl are r-value refs */
}
- void init(RGWRados *store, struct req_state *s, RGWHandler *h) override {
+ void init(rgw::sal::RGWRadosStore *store, struct req_state *s, RGWHandler *h) override {
RGWOp::init(store, s, h);
policy.set_ctx(s->cct);
}
: dlo_manifest(NULL)
{}
- void init(RGWRados *store, struct req_state *s, RGWHandler *h) override {
+ void init(rgw::sal::RGWRadosStore *store, struct req_state *s, RGWHandler *h) override {
RGWOp::init(store, s, h);
policy.set_ctx(s->cct);
}
attrs.emplace(std::move(key), std::move(bl));
}
- void init(RGWRados *store, struct req_state *s, RGWHandler *h) override {
+ void init(rgw::sal::RGWRadosStore *store, struct req_state *s, RGWHandler *h) override {
RGWOp::init(store, s, h);
dest_policy.set_ctx(s->cct);
}
void pre_exec() override;
void execute() override;
- virtual int get_policy_from_state(RGWRados *store, struct req_state *s, stringstream& ss) { return 0; }
+ virtual int get_policy_from_state(rgw::sal::RGWRadosStore *store, struct req_state *s, stringstream& ss) { return 0; }
virtual int get_params() = 0;
void send_response() override = 0;
const char* name() const override { return "put_acls"; }
}
~RGWPutLC() override {}
- void init(RGWRados *store, struct req_state *s, RGWHandler *dialect_handler) override {
+ void init(rgw::sal::RGWRadosStore *store, struct req_state *s, RGWHandler *dialect_handler) override {
#define COOKIE_LEN 16
char buf[COOKIE_LEN + 1];
public:
RGWInitMultipart() {}
- void init(RGWRados *store, struct req_state *s, RGWHandler *h) override {
+ void init(rgw::sal::RGWRadosStore *store, struct req_state *s, RGWHandler *h) override {
RGWOp::init(store, s, h);
policy.set_ctx(s->cct);
}
truncated = false;
}
- void init(RGWRados *store, struct req_state *s, RGWHandler *h) override {
+ void init(rgw::sal::RGWRadosStore *store, struct req_state *s, RGWHandler *h) override {
RGWOp::init(store, s, h);
policy = RGWAccessControlPolicy(s->cct);
}
default_max = 0;
}
- void init(RGWRados *store, struct req_state *s, RGWHandler *h) override {
+ void init(rgw::sal::RGWRadosStore *store, struct req_state *s, RGWHandler *h) override {
RGWOp::init(store, s, h);
max_uploads = default_max;
}
uint32_t op_mask() override { return RGW_OP_TYPE_READ; }
};
-extern int rgw_build_bucket_policies(RGWRados* store, struct req_state* s);
-extern int rgw_build_object_policies(RGWRados *store, struct req_state *s,
+extern int rgw_build_bucket_policies(rgw::sal::RGWRadosStore* store, struct req_state* s);
+extern int rgw_build_object_policies(rgw::sal::RGWRadosStore *store, struct req_state *s,
bool prefetch_data);
-extern void rgw_build_iam_environment(RGWRados* store,
+extern void rgw_build_iam_environment(rgw::sal::RGWRadosStore* store,
struct req_state* s);
extern vector<rgw::IAM::Policy> get_iam_user_policy_from_attr(CephContext* cct,
- RGWRados* store,
+ rgw::sal::RGWRadosStore* store,
map<string, bufferlist>& attrs,
const string& tenant);
public:
RGWGetClusterStat() {}
- void init(RGWRados *store, struct req_state *s, RGWHandler *h) override {
+ void init(rgw::sal::RGWRadosStore *store, struct req_state *s, RGWHandler *h) override {
RGWOp::init(store, s, h);
}
int verify_permission() override {return 0;}
int RGWOrphanStore::init()
{
- const rgw_pool& log_pool = store->svc.zone->get_zone_params().log_pool;
- int r = rgw_init_ioctx(store->get_rados_handle(), log_pool, ioctx);
+ const rgw_pool& log_pool = store->svc()->zone->get_zone_params().log_pool;
+ int r = rgw_init_ioctx(store->getRados()->get_rados_handle(), log_pool, ioctx);
if (r < 0) {
cerr << "ERROR: failed to open log pool (" << log_pool << " ret=" << r << std::endl;
return r;
{
librados::IoCtx ioctx;
- int ret = rgw_init_ioctx(store->get_rados_handle(), search_info.pool, ioctx);
+ int ret = rgw_init_ioctx(store->getRados()->get_rados_handle(), search_info.pool, ioctx);
if (ret < 0) {
lderr(store->ctx()) << __func__ << ": rgw_init_ioctx() returned ret=" << ret << dendl;
return ret;
void *handle;
int max = 1000;
string section = "bucket.instance";
- int ret = store->ctl.meta.mgr->list_keys_init(section, &handle);
+ int ret = store->ctl()->meta.mgr->list_keys_init(section, &handle);
if (ret < 0) {
lderr(store->ctx()) << "ERROR: can't get key: " << cpp_strerror(-ret) << dendl;
return -ret;
do {
list<string> keys;
- ret = store->ctl.meta.mgr->list_keys_next(handle, max, keys, &truncated);
+ ret = store->ctl()->meta.mgr->list_keys_next(handle, max, keys, &truncated);
if (ret < 0) {
lderr(store->ctx()) << "ERROR: lists_keys_next(): " << cpp_strerror(-ret) << dendl;
return -ret;
lderr(store->ctx()) << __func__ << ": ERROR: log_oids() returned ret=" << ret << dendl;
return ret;
}
- store->ctl.meta.mgr->list_keys_complete(handle);
+ store->ctl()->meta.mgr->list_keys_complete(handle);
return 0;
}
RGWObjManifest::obj_iterator miter;
for (miter = manifest.obj_begin(); miter != manifest.obj_end(); ++miter) {
- const rgw_raw_obj& loc = miter.get_location().get_raw_obj(store);
+ const rgw_raw_obj& loc = miter.get_location().get_raw_obj(store->getRados());
string s = loc.oid;
obj_oids.insert(obj_fingerprint(s));
}
int RGWOrphanSearch::build_linked_oids_for_bucket(const string& bucket_instance_id, map<int, list<string> >& oids)
{
RGWObjectCtx obj_ctx(store);
- auto sysobj_ctx = store->svc.sysobj->init_obj_ctx();
+ auto sysobj_ctx = store->svc()->sysobj->init_obj_ctx();
rgw_bucket orphan_bucket;
int shard_id;
}
RGWBucketInfo cur_bucket_info;
- ret = store->get_bucket_info(sysobj_ctx, orphan_bucket.tenant,
+ ret = store->getRados()->get_bucket_info(sysobj_ctx, orphan_bucket.tenant,
orphan_bucket.name, cur_bucket_info, nullptr, null_yield);
if (ret < 0) {
if (ret == -ENOENT) {
}
RGWBucketInfo bucket_info;
- ret = store->get_bucket_instance_info(sysobj_ctx, bucket_instance_id, bucket_info, nullptr, nullptr, null_yield);
+ ret = store->getRados()->get_bucket_instance_info(sysobj_ctx, bucket_instance_id, bucket_info, nullptr, nullptr, null_yield);
if (ret < 0) {
if (ret == -ENOENT) {
/* probably raced with bucket removal */
}
ldout(store->ctx(), 10) << "building linked oids for bucket instance: " << bucket_instance_id << dendl;
- RGWRados::Bucket target(store, bucket_info);
+ RGWRados::Bucket target(store->getRados(), bucket_info);
RGWRados::Bucket::List list_op(&target);
string marker;
rgw_obj obj(bucket_info.bucket, entry.key);
- RGWRados::Object op_target(store, bucket_info, obj_ctx, obj);
+ RGWRados::Object op_target(store->getRados(), bucket_info, obj_ctx, obj);
stat_ops.push_back(RGWRados::Object::Stat(&op_target));
RGWRados::Object::Stat& op = stat_ops.back();
librados::IoCtx data_ioctx;
- int ret = rgw_init_ioctx(store->get_rados_handle(), search_info.pool, data_ioctx);
+ int ret = rgw_init_ioctx(store->getRados()->get_rados_handle(), search_info.pool, data_ioctx);
if (ret < 0) {
lderr(store->ctx()) << __func__ << ": rgw_init_ioctx() returned ret=" << ret << dendl;
return ret;
#include "common/Formatter.h"
#include "common/errno.h"
-#include "rgw_rados.h"
+#include "rgw_sal.h"
#define dout_subsys ceph_subsys_rgw
WRITE_CLASS_ENCODER(RGWOrphanSearchState)
class RGWOrphanStore {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
librados::IoCtx ioctx;
string oid;
public:
- explicit RGWOrphanStore(RGWRados *_store) : store(_store), oid(RGW_ORPHAN_INDEX_OID) {}
+ explicit RGWOrphanStore(rgw::sal::RGWRadosStore *_store) : store(_store), oid(RGW_ORPHAN_INDEX_OID) {}
librados::IoCtx& get_ioctx() { return ioctx; }
class RGWOrphanSearch {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
RGWOrphanStore orphan_store;
int remove_index(map<int, string>& index);
public:
- RGWOrphanSearch(RGWRados *_store, int _max_ios, uint64_t _stale_secs) : store(_store), orphan_store(store), max_concurrent_ios(_max_ios), stale_secs(_stale_secs) {}
+ RGWOrphanSearch(rgw::sal::RGWRadosStore *_store, int _max_ios, uint64_t _stale_secs) : store(_store), orphan_store(store), max_concurrent_ios(_max_ios), stale_secs(_stale_secs) {}
int save_state() {
RGWOrphanSearchState state;
#ifndef CEPH_RGW_OTP_H
#define CEPH_RGW_OTP_H
+namespace rgw { namespace sal {
+class RGWRadosStore;
+} }
+
#include "cls/otp/cls_otp_types.h"
#include "services/svc_meta_be_otp.h"
#include "rgw_period_pusher.h"
#include "rgw_cr_rest.h"
#include "rgw_zone.h"
+#include "rgw_sal.h"
#include "services/svc_zone.h"
};
-RGWPeriodPusher::RGWPeriodPusher(RGWRados* store)
+RGWPeriodPusher::RGWPeriodPusher(rgw::sal::RGWRadosStore* store)
: cct(store->ctx()), store(store)
{
- const auto& realm = store->svc.zone->get_realm();
+ const auto& realm = store->svc()->zone->get_realm();
auto& realm_id = realm.get_id();
if (realm_id.empty()) // no realm configuration
return;
// always send out the current period on startup
RGWPeriod period;
- int r = period.init(cct, store->svc.sysobj, realm_id, realm.get_name());
+ int r = period.init(cct, store->svc()->sysobj, realm_id, realm.get_name());
if (r < 0) {
lderr(cct) << "failed to load period for realm " << realm_id << dendl;
return;
// find our zonegroup in the new period
auto& zonegroups = period.get_map().zonegroups;
- auto i = zonegroups.find(store->svc.zone->get_zonegroup().get_id());
+ auto i = zonegroups.find(store->svc()->zone->get_zonegroup().get_id());
if (i == zonegroups.end()) {
lderr(cct) << "The new period does not contain my zonegroup!" << dendl;
return;
auto& my_zonegroup = i->second;
// if we're not a master zone, we're not responsible for pushing any updates
- if (my_zonegroup.master_zone != store->svc.zone->get_zone_params().get_id())
+ if (my_zonegroup.master_zone != store->svc()->zone->get_zone_params().get_id())
return;
// construct a map of the zones that need this period. the map uses the same
auto hint = conns.end();
// are we the master zonegroup in this period?
- if (period.get_map().master_zonegroup == store->svc.zone->get_zonegroup().get_id()) {
+ if (period.get_map().master_zonegroup == store->svc()->zone->get_zonegroup().get_id()) {
// update other zonegroup endpoints
for (auto& zg : zonegroups) {
auto& zonegroup = zg.second;
- if (zonegroup.get_id() == store->svc.zone->get_zonegroup().get_id())
+ if (zonegroup.get_id() == store->svc()->zone->get_zonegroup().get_id())
continue;
if (zonegroup.endpoints.empty())
continue;
hint = conns.emplace_hint(
hint, std::piecewise_construct,
std::forward_as_tuple(zonegroup.get_id()),
- std::forward_as_tuple(cct, store->svc.zone, zonegroup.get_id(), zonegroup.endpoints));
+ std::forward_as_tuple(cct, store->svc()->zone, zonegroup.get_id(), zonegroup.endpoints));
}
}
// update other zone endpoints
for (auto& z : my_zonegroup.zones) {
auto& zone = z.second;
- if (zone.id == store->svc.zone->get_zone_params().get_id())
+ if (zone.id == store->svc()->zone->get_zone_params().get_id())
continue;
if (zone.endpoints.empty())
continue;
hint = conns.emplace_hint(
hint, std::piecewise_construct,
std::forward_as_tuple(zone.id),
- std::forward_as_tuple(cct, store->svc.zone, zone.id, zone.endpoints));
+ std::forward_as_tuple(cct, store->svc()->zone, zone.id, zone.endpoints));
}
if (conns.empty()) {
store = nullptr;
}
-void RGWPeriodPusher::resume(RGWRados* store)
+void RGWPeriodPusher::resume(rgw::sal::RGWRadosStore* store)
{
std::lock_guard<std::mutex> lock(mutex);
this->store = store;
#include "rgw_realm_reloader.h"
-class RGWRados;
+namespace rgw {
+namespace sal {
+class RGWRadosStore;
+}
+}
+
class RGWPeriod;
// RGWRealmNotify payload for push coordination
class RGWPeriodPusher final : public RGWRealmWatcher::Watcher,
public RGWRealmReloader::Pauser {
public:
- explicit RGWPeriodPusher(RGWRados* store);
+ explicit RGWPeriodPusher(rgw::sal::RGWRadosStore* store);
~RGWPeriodPusher() override;
/// respond to realm notifications by pushing new periods to other zones
void pause() override;
/// continue processing notifications with a new RGWRados instance
- void resume(RGWRados* store) override;
+ void resume(rgw::sal::RGWRadosStore* store) override;
private:
void handle_notify(RGWZonesNeedPeriod&& period);
CephContext *const cct;
- RGWRados* store;
+ rgw::sal::RGWRadosStore* store;
std::mutex mutex;
epoch_t realm_epoch{0}; //< the current realm epoch being sent
return 0;
}
-int process_request(RGWRados* const store,
+int process_request(rgw::sal::RGWRadosStore* const store,
RGWREST* const rest,
RGWRequest* const req,
const std::string& frontend_prefix,
RGWObjectCtx rados_ctx(store, s);
s->obj_ctx = &rados_ctx;
- auto sysobj_ctx = store->svc.sysobj->init_obj_ctx();
+ auto sysobj_ctx = store->svc()->sysobj->init_obj_ctx();
s->sysobj_ctx = &sysobj_ctx;
if (ret < 0) {
return ret;
}
- s->req_id = store->svc.zone_utils->unique_id(req->id);
- s->trans_id = store->svc.zone_utils->unique_trans_id(req->id);
- s->host_id = store->host_id;
+ s->req_id = store->svc()->zone_utils->unique_id(req->id);
+ s->trans_id = store->svc()->zone_utils->unique_trans_id(req->id);
+ s->host_id = store->getRados()->host_id;
s->yield = yield;
ldpp_dout(s, 2) << "initializing for trans_id = " << s->trans_id << dendl;
}
if (should_log) {
- rgw_log_op(store, rest, s, (op ? op->name() : "unknown"), olog);
+ rgw_log_op(store->getRados(), rest, s, (op ? op->name() : "unknown"), olog);
}
if (http_ret != nullptr) {
}
struct RGWProcessEnv {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
RGWREST *rest;
OpsLogSocket *olog;
int port;
deque<RGWRequest*> m_req_queue;
protected:
CephContext *cct;
- RGWRados* store;
+ rgw::sal::RGWRadosStore* store;
rgw_auth_registry_ptr_t auth_registry;
OpsLogSocket* olog;
ThreadPool m_tp;
m_tp.pause();
}
- void unpause_with_new_config(RGWRados* const store,
+ void unpause_with_new_config(rgw::sal::RGWRadosStore* const store,
rgw_auth_registry_ptr_t auth_registry) {
this->store = store;
this->auth_registry = std::move(auth_registry);
void set_access_key(RGWAccessKey& key) { access_key = key; }
};
/* process stream request */
-extern int process_request(RGWRados* store,
+extern int process_request(rgw::sal::RGWRadosStore* store,
RGWREST* rest,
RGWRequest* req,
const std::string& frontend_prefix,
// vim: ts=8 sw=2 smarttab
#include "rgw_b64.h"
-#include "rgw_rados.h"
+#include "rgw_sal.h"
#include "rgw_pubsub.h"
#include "rgw_tools.h"
#include "rgw_xml.h"
encode_json("s3_id", s3_id, f);
}
-RGWUserPubSub::RGWUserPubSub(RGWRados *_store, const rgw_user& _user) : store(_store),
+RGWUserPubSub::RGWUserPubSub(rgw::sal::RGWRadosStore *_store, const rgw_user& _user) : store(_store),
user(_user),
- obj_ctx(store->svc.sysobj->init_obj_ctx())
+ obj_ctx(store->svc()->sysobj->init_obj_ctx())
{
get_user_meta_obj(&user_meta_obj);
}
void RGWUserPubSub::get_user_meta_obj(rgw_raw_obj *obj) const {
- *obj = rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, user_meta_oid());
+ *obj = rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, user_meta_oid());
}
void RGWUserPubSub::get_bucket_meta_obj(const rgw_bucket& bucket, rgw_raw_obj *obj) const {
- *obj = rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, bucket_meta_oid(bucket));
+ *obj = rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, bucket_meta_oid(bucket));
}
void RGWUserPubSub::get_sub_meta_obj(const string& name, rgw_raw_obj *obj) const {
- *obj = rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, sub_meta_oid(name));
+ *obj = rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sub_meta_oid(name));
}
int RGWUserPubSub::remove(const rgw_raw_obj& obj, RGWObjVersionTracker *objv_tracker)
{
- int ret = rgw_delete_system_obj(store->svc.sysobj, obj.pool, obj.oid, objv_tracker);
+ int ret = rgw_delete_system_obj(store->svc()->sysobj, obj.pool, obj.oid, objv_tracker);
if (ret < 0) {
return ret;
}
int RGWUserPubSub::Bucket::create_notification(const string& topic_name, const EventTypeList& events)
{
rgw_pubsub_topic_subs user_topic_info;
- RGWRados *store = ps->store;
+ rgw::sal::RGWRadosStore *store = ps->store;
int ret = ps->get_topic(topic_name, &user_topic_info);
if (ret < 0) {
int RGWUserPubSub::Bucket::remove_notification(const string& topic_name)
{
rgw_pubsub_topic_subs user_topic_info;
- RGWRados *store = ps->store;
+ rgw::sal::RGWRadosStore *store = ps->store;
int ret = ps->get_topic(topic_name, &user_topic_info);
if (ret < 0) {
{
RGWObjVersionTracker user_objv_tracker;
rgw_pubsub_user_topics topics;
- RGWRados *store = ps->store;
+ rgw::sal::RGWRadosStore *store = ps->store;
int ret = ps->read_user_topics(&topics, &user_objv_tracker);
if (ret < 0) {
{
string topic = _topic;
RGWObjVersionTracker sobjv_tracker;
- RGWRados *store = ps->store;
+ rgw::sal::RGWRadosStore *store = ps->store;
if (topic.empty()) {
rgw_pubsub_sub_config sub_conf;
template<typename EventType>
int RGWUserPubSub::SubWithEvents<EventType>::list_events(const string& marker, int max_events)
{
- RGWRados *store = ps->store;
+ RGWRados *store = ps->store->getRados();
rgw_pubsub_sub_config sub_conf;
int ret = get_conf(&sub_conf);
if (ret < 0) {
template<typename EventType>
int RGWUserPubSub::SubWithEvents<EventType>::remove_event(const string& event_id)
{
- RGWRados *store = ps->store;
+ rgw::sal::RGWRadosStore *store = ps->store;
rgw_pubsub_sub_config sub_conf;
int ret = get_conf(&sub_conf);
if (ret < 0) {
RGWBucketInfo bucket_info;
string tenant;
- RGWSysObjectCtx sysobj_ctx(store->svc.sysobj->init_obj_ctx());
- ret = store->get_bucket_info(sysobj_ctx, tenant, sub_conf.dest.bucket_name, bucket_info, nullptr, null_yield, nullptr);
+ RGWSysObjectCtx sysobj_ctx(store->svc()->sysobj->init_obj_ctx());
+ ret = store->getRados()->get_bucket_info(sysobj_ctx, tenant, sub_conf.dest.bucket_name, bucket_info, nullptr, null_yield, nullptr);
if (ret < 0) {
ldout(store->ctx(), 1) << "ERROR: failed to read bucket info for events bucket: bucket=" << sub_conf.dest.bucket_name << " ret=" << ret << dendl;
return ret;
obj_ctx.set_atomic(obj);
- RGWRados::Object del_target(store, bucket_info, obj_ctx, obj);
+ RGWRados::Object del_target(store->getRados(), bucket_info, obj_ctx, obj);
RGWRados::Object::Delete del_op(&del_target);
del_op.params.bucket_owner = bucket_info.owner;
{
friend class Bucket;
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
rgw_user user;
RGWSysObjectCtx obj_ctx;
int write_user_topics(const rgw_pubsub_user_topics& topics, RGWObjVersionTracker *objv_tracker);
public:
- RGWUserPubSub(RGWRados *_store, const rgw_user& _user);
+ RGWUserPubSub(rgw::sal::RGWRadosStore *_store, const rgw_user& _user);
class Bucket {
friend class RGWUserPubSub;
bufferlist bl;
encode(info, bl);
- auto obj_ctx = store->svc.sysobj->init_obj_ctx();
+ auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
int ret = rgw_put_system_obj(obj_ctx, obj.pool, obj.oid,
bl, false, objv_tracker,
real_time());
int RadosWriter::set_stripe_obj(const rgw_raw_obj& raw_obj)
{
- stripe_obj = store->svc.rados->obj(raw_obj);
+ stripe_obj = store->svc()->rados->obj(raw_obj);
return stripe_obj.open();
}
std::optional<rgw_raw_obj> raw_head;
if (!head_obj.empty()) {
raw_head.emplace();
- store->obj_to_raw(bucket_info.placement_rule, head_obj, &*raw_head);
+ store->getRados()->obj_to_raw(bucket_info.placement_rule, head_obj, &*raw_head);
}
/**
continue;
}
- int r = store->delete_raw_obj(obj);
+ int r = store->getRados()->delete_raw_obj(obj);
if (r < 0 && r != -ENOENT) {
ldpp_dout(dpp, 5) << "WARNING: failed to remove obj (" << obj << "), leaked" << dendl;
}
if (need_to_remove_head) {
ldpp_dout(dpp, 5) << "NOTE: we are going to process the head obj (" << *raw_head << ")" << dendl;
- int r = store->delete_obj(obj_ctx, bucket_info, head_obj, 0, 0);
+ int r = store->getRados()->delete_obj(obj_ctx, bucket_info, head_obj, 0, 0);
if (r < 0 && r != -ENOENT) {
ldpp_dout(dpp, 0) << "WARNING: failed to remove obj (" << *raw_head << "), leaked" << dendl;
}
return r;
}
- rgw_raw_obj stripe_obj = manifest_gen.get_cur_obj(store);
+ rgw_raw_obj stripe_obj = manifest_gen.get_cur_obj(store->getRados());
uint64_t chunk_size = 0;
- r = store->get_max_chunk_size(stripe_obj.pool, &chunk_size);
+ r = store->getRados()->get_max_chunk_size(stripe_obj.pool, &chunk_size);
if (r < 0) {
return r;
}
uint64_t alignment;
rgw_pool head_pool;
- if (!store->get_obj_data_pool(bucket_info.placement_rule, head_obj, &head_pool)) {
+ if (!store->getRados()->get_obj_data_pool(bucket_info.placement_rule, head_obj, &head_pool)) {
return -EIO;
}
- int r = store->get_max_chunk_size(head_pool, &max_head_chunk_size, &alignment);
+ int r = store->getRados()->get_max_chunk_size(head_pool, &max_head_chunk_size, &alignment);
if (r < 0) {
return r;
}
if (bucket_info.placement_rule != tail_placement_rule) {
rgw_pool tail_pool;
- if (!store->get_obj_data_pool(tail_placement_rule, head_obj, &tail_pool)) {
+ if (!store->getRados()->get_obj_data_pool(tail_placement_rule, head_obj, &tail_pool)) {
return -EIO;
}
if (tail_pool != head_pool) {
same_pool = false;
- r = store->get_max_chunk_size(tail_pool, &chunk_size);
+ r = store->getRados()->get_max_chunk_size(tail_pool, &chunk_size);
if (r < 0) {
return r;
}
uint64_t stripe_size;
const uint64_t default_stripe_size = store->ctx()->_conf->rgw_obj_stripe_size;
- store->get_max_aligned_size(default_stripe_size, alignment, &stripe_size);
+ store->getRados()->get_max_aligned_size(default_stripe_size, alignment, &stripe_size);
manifest.set_trivial_rule(head_max_size, stripe_size);
return r;
}
- rgw_raw_obj stripe_obj = manifest_gen.get_cur_obj(store);
+ rgw_raw_obj stripe_obj = manifest_gen.get_cur_obj(store->getRados());
r = writer.set_stripe_obj(stripe_obj);
if (r < 0) {
obj_ctx.set_atomic(head_obj);
- RGWRados::Object op_target(store, bucket_info, obj_ctx, head_obj);
+ RGWRados::Object op_target(store->getRados(), bucket_info, obj_ctx, head_obj);
/* some object types shouldn't be versioned, e.g., multipart parts */
op_target.set_versioning_disabled(!bucket_info.versioning_enabled());
uint64_t stripe_size;
uint64_t alignment;
- int r = store->get_max_chunk_size(tail_placement_rule, target_obj, &chunk_size, &alignment);
+ int r = store->getRados()->get_max_chunk_size(tail_placement_rule, target_obj, &chunk_size, &alignment);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: unexpected: get_max_chunk_size(): placement_rule=" << tail_placement_rule.to_str() << " obj=" << target_obj << " returned r=" << r << dendl;
return r;
}
- store->get_max_aligned_size(default_stripe_size, alignment, &stripe_size);
+ store->getRados()->get_max_aligned_size(default_stripe_size, alignment, &stripe_size);
manifest.set_multipart_part_rule(stripe_size, part_num);
return r;
}
- rgw_raw_obj stripe_obj = manifest_gen.get_cur_obj(store);
+ rgw_raw_obj stripe_obj = manifest_gen.get_cur_obj(store->getRados());
RGWSI_Tier_RADOS::raw_obj_to_obj(head_obj.bucket, stripe_obj, &head_obj);
head_obj.index_hash_source = target_obj.key.name;
return r;
}
- RGWRados::Object op_target(store, bucket_info, obj_ctx, head_obj);
+ RGWRados::Object op_target(store->getRados(), bucket_info, obj_ctx, head_obj);
op_target.set_versioning_disabled(true);
RGWRados::Object::Write obj_op(&op_target);
rgw_raw_obj raw_meta_obj;
- store->obj_to_raw(bucket_info.placement_rule, meta_obj, &raw_meta_obj);
+ store->getRados()->obj_to_raw(bucket_info.placement_rule, meta_obj, &raw_meta_obj);
- auto obj_ctx = store->svc.sysobj->init_obj_ctx();
+ auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
auto sysobj = obj_ctx.get_obj(raw_meta_obj);
r = sysobj.omap()
int AppendObjectProcessor::prepare(optional_yield y)
{
RGWObjState *astate;
- int r = store->get_obj_state(&obj_ctx, bucket_info, head_obj, &astate, y);
+ int r = store->getRados()->get_obj_state(&obj_ctx, bucket_info, head_obj, &astate, y);
if (r < 0) {
return r;
}
if (r < 0) {
return r;
}
- rgw_raw_obj stripe_obj = manifest_gen.get_cur_obj(store);
+ rgw_raw_obj stripe_obj = manifest_gen.get_cur_obj(store->getRados());
uint64_t chunk_size = 0;
- r = store->get_max_chunk_size(stripe_obj.pool, &chunk_size);
+ r = store->getRados()->get_max_chunk_size(stripe_obj.pool, &chunk_size);
if (r < 0) {
return r;
}
return r;
}
obj_ctx.set_atomic(head_obj);
- RGWRados::Object op_target(store, bucket_info, obj_ctx, head_obj);
+ RGWRados::Object op_target(store->getRados(), bucket_info, obj_ctx, head_obj);
//For Append obj, disable versioning
op_target.set_versioning_disabled(true);
RGWRados::Object::Write obj_op(&op_target);
if (cur_manifest) {
- cur_manifest->append(manifest, store->svc.zone);
+ cur_manifest->append(manifest, store->svc()->zone);
obj_op.meta.manifest = cur_manifest;
} else {
obj_op.meta.manifest = &manifest;
// a data sink that writes to rados objects and deletes them on cancelation
class RadosWriter : public DataProcessor {
Aio *const aio;
- RGWRados *const store;
+ rgw::sal::RGWRadosStore *const store;
const RGWBucketInfo& bucket_info;
RGWObjectCtx& obj_ctx;
const rgw_obj head_obj;
optional_yield y;
public:
- RadosWriter(Aio *aio, RGWRados *store, const RGWBucketInfo& bucket_info,
+ RadosWriter(Aio *aio, rgw::sal::RGWRadosStore *store,
+ const RGWBucketInfo& bucket_info,
RGWObjectCtx& obj_ctx, const rgw_obj& head_obj,
const DoutPrefixProvider *dpp, optional_yield y)
: aio(aio), store(store), bucket_info(bucket_info),
class ManifestObjectProcessor : public HeadObjectProcessor,
public StripeGenerator {
protected:
- RGWRados *const store;
+ rgw::sal::RGWRadosStore *const store;
const RGWBucketInfo& bucket_info;
rgw_placement_rule tail_placement_rule;
const rgw_user& owner;
int next(uint64_t offset, uint64_t *stripe_size) override;
public:
- ManifestObjectProcessor(Aio *aio, RGWRados *store,
+ ManifestObjectProcessor(Aio *aio, rgw::sal::RGWRadosStore *store,
const RGWBucketInfo& bucket_info,
const rgw_placement_rule *ptail_placement_rule,
const rgw_user& owner, RGWObjectCtx& obj_ctx,
int process_first_chunk(bufferlist&& data, DataProcessor **processor) override;
public:
- AtomicObjectProcessor(Aio *aio, RGWRados *store,
+ AtomicObjectProcessor(Aio *aio, rgw::sal::RGWRadosStore *store,
const RGWBucketInfo& bucket_info,
const rgw_placement_rule *ptail_placement_rule,
const rgw_user& owner,
// prepare the head stripe and manifest
int prepare_head();
public:
- MultipartObjectProcessor(Aio *aio, RGWRados *store,
+ MultipartObjectProcessor(Aio *aio, rgw::sal::RGWRadosStore *store,
const RGWBucketInfo& bucket_info,
const rgw_placement_rule *ptail_placement_rule,
const rgw_user& owner, RGWObjectCtx& obj_ctx,
int process_first_chunk(bufferlist&& data, DataProcessor **processor) override;
public:
- AppendObjectProcessor(Aio *aio, RGWRados *store, const RGWBucketInfo& bucket_info,
+ AppendObjectProcessor(Aio *aio, rgw::sal::RGWRadosStore *store, const RGWBucketInfo& bucket_info,
const rgw_placement_rule *ptail_placement_rule,
const rgw_user& owner, RGWObjectCtx& obj_ctx,const rgw_obj& head_obj,
const std::string& unique_tag, uint64_t position,
#include "common/ceph_mutex.h"
#include "rgw_common.h"
-#include "rgw_rados.h"
+#include "rgw_sal.h"
#include "rgw_quota.h"
#include "rgw_bucket.h"
#include "rgw_user.h"
template<class T>
class RGWQuotaCache {
protected:
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
lru_map<T, RGWQuotaCacheStats> stats_map;
RefCountedWaitObject *async_refcount;
virtual void data_modified(const rgw_user& user, rgw_bucket& bucket) {}
public:
- RGWQuotaCache(RGWRados *_store, int size) : store(_store), stats_map(size) {
+ RGWQuotaCache(rgw::sal::RGWRadosStore *_store, int size) : store(_store), stats_map(size) {
async_refcount = new RefCountedWaitObject;
}
virtual ~RGWQuotaCache() {
class AsyncRefreshHandler {
protected:
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
RGWQuotaCache<T> *cache;
public:
- AsyncRefreshHandler(RGWRados *_store, RGWQuotaCache<T> *_cache) : store(_store), cache(_cache) {}
+ AsyncRefreshHandler(rgw::sal::RGWRadosStore *_store, RGWQuotaCache<T> *_cache) : store(_store), cache(_cache) {}
virtual ~AsyncRefreshHandler() {}
virtual int init_fetch() = 0;
public RGWGetBucketStats_CB {
rgw_user user;
public:
- BucketAsyncRefreshHandler(RGWRados *_store, RGWQuotaCache<rgw_bucket> *_cache,
+ BucketAsyncRefreshHandler(rgw::sal::RGWRadosStore *_store, RGWQuotaCache<rgw_bucket> *_cache,
const rgw_user& _user, const rgw_bucket& _bucket) :
RGWQuotaCache<rgw_bucket>::AsyncRefreshHandler(_store, _cache),
RGWGetBucketStats_CB(_bucket), user(_user) {}
{
RGWBucketInfo bucket_info;
- auto obj_ctx = store->svc.sysobj->init_obj_ctx();
+ auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
- int r = store->get_bucket_instance_info(obj_ctx, bucket, bucket_info, NULL, NULL, null_yield);
+ int r = store->getRados()->get_bucket_instance_info(obj_ctx, bucket, bucket_info, NULL, NULL, null_yield);
if (r < 0) {
ldout(store->ctx(), 0) << "could not get bucket info for bucket=" << bucket << " r=" << r << dendl;
return r;
ldout(store->ctx(), 20) << "initiating async quota refresh for bucket=" << bucket << dendl;
- r = store->get_bucket_stats_async(bucket_info, RGW_NO_SHARD, this);
+ r = store->getRados()->get_bucket_stats_async(bucket_info, RGW_NO_SHARD, this);
if (r < 0) {
ldout(store->ctx(), 0) << "could not get bucket info for bucket=" << bucket.name << dendl;
int fetch_stats_from_storage(const rgw_user& user, const rgw_bucket& bucket, RGWStorageStats& stats) override;
public:
- explicit RGWBucketStatsCache(RGWRados *_store) : RGWQuotaCache<rgw_bucket>(_store, _store->ctx()->_conf->rgw_bucket_quota_cache_size) {
+ explicit RGWBucketStatsCache(rgw::sal::RGWRadosStore *_store) : RGWQuotaCache<rgw_bucket>(_store, _store->ctx()->_conf->rgw_bucket_quota_cache_size) {
}
AsyncRefreshHandler *allocate_refresh_handler(const rgw_user& user, const rgw_bucket& bucket) override {
{
RGWBucketInfo bucket_info;
- RGWSysObjectCtx obj_ctx = store->svc.sysobj->init_obj_ctx();
+ RGWSysObjectCtx obj_ctx = store->svc()->sysobj->init_obj_ctx();
- int r = store->get_bucket_instance_info(obj_ctx, bucket, bucket_info, NULL, NULL, null_yield);
+ int r = store->getRados()->get_bucket_instance_info(obj_ctx, bucket, bucket_info, NULL, NULL, null_yield);
if (r < 0) {
ldout(store->ctx(), 0) << "could not get bucket info for bucket=" << bucket << " r=" << r << dendl;
return r;
string master_ver;
map<RGWObjCategory, RGWStorageStats> bucket_stats;
- r = store->get_bucket_stats(bucket_info, RGW_NO_SHARD, &bucket_ver,
+ r = store->getRados()->get_bucket_stats(bucket_info, RGW_NO_SHARD, &bucket_ver,
&master_ver, bucket_stats, nullptr);
if (r < 0) {
ldout(store->ctx(), 0) << "could not get bucket stats for bucket="
public RGWGetUserStats_CB {
rgw_bucket bucket;
public:
- UserAsyncRefreshHandler(RGWRados *_store, RGWQuotaCache<rgw_user> *_cache,
+ UserAsyncRefreshHandler(rgw::sal::RGWRadosStore *_store, RGWQuotaCache<rgw_user> *_cache,
const rgw_user& _user, const rgw_bucket& _bucket) :
RGWQuotaCache<rgw_user>::AsyncRefreshHandler(_store, _cache),
RGWGetUserStats_CB(_user),
int UserAsyncRefreshHandler::init_fetch()
{
ldout(store->ctx(), 20) << "initiating async quota refresh for user=" << user << dendl;
- int r = store->ctl.user->read_stats_async(user, this);
+ int r = store->ctl()->user->read_stats_async(user, this);
if (r < 0) {
ldout(store->ctx(), 0) << "could not get bucket info for user=" << user << dendl;
}
public:
- RGWUserStatsCache(RGWRados *_store, bool quota_threads) : RGWQuotaCache<rgw_user>(_store, _store->ctx()->_conf->rgw_bucket_quota_cache_size),
+ RGWUserStatsCache(rgw::sal::RGWRadosStore *_store, bool quota_threads) : RGWQuotaCache<rgw_user>(_store, _store->ctx()->_conf->rgw_bucket_quota_cache_size),
rwlock("RGWUserStatsCache::rwlock") {
if (quota_threads) {
buckets_sync_thread = new BucketsSyncThread(store->ctx(), this);
int RGWUserStatsCache::fetch_stats_from_storage(const rgw_user& user, const rgw_bucket& bucket, RGWStorageStats& stats)
{
- int r = store->ctl.user->read_stats(user, &stats);
+ int r = store->ctl()->user->read_stats(user, &stats);
if (r < 0) {
ldout(store->ctx(), 0) << "could not get user stats for user=" << user << dendl;
return r;
{
RGWBucketInfo bucket_info;
- int r = store->ctl.bucket->read_bucket_instance_info(bucket, &bucket_info, null_yield);
+ int r = store->ctl()->bucket->read_bucket_instance_info(bucket, &bucket_info, null_yield);
if (r < 0) {
ldout(store->ctx(), 0) << "could not get bucket info for bucket=" << bucket << " r=" << r << dendl;
return r;
}
- r = store->ctl.bucket->sync_user_stats(user, bucket_info);
+ r = store->ctl()->bucket->sync_user_stats(user, bucket_info);
if (r < 0) {
ldout(store->ctx(), 0) << "ERROR: sync_user_stats() for user=" << user << ", bucket=" << bucket << " returned " << r << dendl;
return r;
ceph::real_time last_stats_sync;
ceph::real_time last_stats_update;
- int ret = store->ctl.user->read_stats(user_str, &stats, &last_stats_sync, &last_stats_update);
+ int ret = store->ctl()->user->read_stats(user_str, &stats, &last_stats_sync, &last_stats_update);
if (ret < 0) {
ldout(store->ctx(), 5) << "ERROR: can't read user header: ret=" << ret << dendl;
return ret;
string key = "user";
void *handle;
- int ret = store->ctl.meta.mgr->list_keys_init(key, &handle);
+ int ret = store->ctl()->meta.mgr->list_keys_init(key, &handle);
if (ret < 0) {
ldout(store->ctx(), 10) << "ERROR: can't get key: ret=" << ret << dendl;
return ret;
do {
list<string> keys;
- ret = store->ctl.meta.mgr->list_keys_next(handle, max, keys, &truncated);
+ ret = store->ctl()->meta.mgr->list_keys_next(handle, max, keys, &truncated);
if (ret < 0) {
ldout(store->ctx(), 0) << "ERROR: lists_keys_next(): ret=" << ret << dendl;
goto done;
ret = 0;
done:
- store->ctl.meta.mgr->list_keys_complete(handle);
+ store->ctl()->meta.mgr->list_keys_complete(handle);
return ret;
}
class RGWQuotaHandlerImpl : public RGWQuotaHandler {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
RGWBucketStatsCache bucket_stats_cache;
RGWUserStatsCache user_stats_cache;
return 0;
}
public:
- RGWQuotaHandlerImpl(RGWRados *_store, bool quota_threads) : store(_store),
+ RGWQuotaHandlerImpl(rgw::sal::RGWRadosStore *_store, bool quota_threads) : store(_store),
bucket_stats_cache(_store),
user_stats_cache(_store, quota_threads) {}
};
-RGWQuotaHandler *RGWQuotaHandler::generate_handler(RGWRados *store, bool quota_threads)
+RGWQuotaHandler *RGWQuotaHandler::generate_handler(rgw::sal::RGWRadosStore *store, bool quota_threads)
{
return new RGWQuotaHandlerImpl(store, quota_threads);
}
return (bytes + 1023) / 1024;
}
-class RGWRados;
class JSONObj;
+namespace rgw { namespace sal {
+ class RGWRadosStore;
+} }
struct RGWQuotaInfo {
template<class T> friend class RGWQuotaCache;
virtual void update_stats(const rgw_user& bucket_owner, rgw_bucket& bucket, int obj_delta, uint64_t added_bytes, uint64_t removed_bytes) = 0;
- static RGWQuotaHandler *generate_handler(RGWRados *store, bool quota_threads);
+ static RGWQuotaHandler *generate_handler(rgw::sal::RGWRadosStore *store, bool quota_threads);
static void free_handler(RGWQuotaHandler *handler);
};
#include "common/Formatter.h"
#include "common/Throttle.h"
-#include "rgw_rados.h"
+#include "rgw_sal.h"
#include "rgw_zone.h"
#include "rgw_cache.h"
#include "rgw_acl.h"
sync.stop();
}
public:
- RGWMetaSyncProcessorThread(RGWRados *_store, RGWAsyncRadosProcessor *async_rados)
- : RGWSyncProcessorThread(_store, "meta-sync"), sync(_store, async_rados) {}
+ RGWMetaSyncProcessorThread(rgw::sal::RGWRadosStore *_store, RGWAsyncRadosProcessor *async_rados)
+ : RGWSyncProcessorThread(_store->getRados(), "meta-sync"), sync(_store, async_rados) {}
void wakeup_sync_shards(set<int>& shard_ids) {
for (set<int>::iterator iter = shard_ids.begin(); iter != shard_ids.end(); ++iter) {
sync.stop();
}
public:
- RGWDataSyncProcessorThread(RGWRados *_store, RGWAsyncRadosProcessor *async_rados,
+ RGWDataSyncProcessorThread(rgw::sal::RGWRadosStore *_store, RGWAsyncRadosProcessor *async_rados,
const RGWZone* source_zone)
- : RGWSyncProcessorThread(_store, "data-sync"),
+ : RGWSyncProcessorThread(_store->getRados(), "data-sync"),
counters(sync_counters::build(store->ctx(), std::string("data-sync-from-") + source_zone->name)),
sync(_store, async_rados, source_zone->id, counters.get()),
initialized(false) {}
class RGWSyncLogTrimThread : public RGWSyncProcessorThread, DoutPrefixProvider
{
RGWCoroutinesManager crs;
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
rgw::BucketTrimManager *bucket_trim;
RGWHTTPManager http;
const utime_t trim_interval;
uint64_t interval_msec() override { return 0; }
void stop_process() override { crs.stop(); }
public:
- RGWSyncLogTrimThread(RGWRados *store, rgw::BucketTrimManager *bucket_trim,
+ RGWSyncLogTrimThread(rgw::sal::RGWRadosStore *store, rgw::BucketTrimManager *bucket_trim,
int interval)
- : RGWSyncProcessorThread(store, "sync-log-trim"),
- crs(store->ctx(), store->get_cr_registry()), store(store),
+ : RGWSyncProcessorThread(store->getRados(), "sync-log-trim"),
+ crs(store->ctx(), store->getRados()->get_cr_registry()), store(store),
bucket_trim(bucket_trim),
http(store->ctx(), crs.get_completion_mgr()),
trim_interval(interval, 0)
gc = new RGWGC();
gc->initialize(cct, this);
- obj_expirer = new RGWObjectExpirer(this);
+ obj_expirer = new RGWObjectExpirer(this->store);
if (use_gc_thread) {
gc->start_processor();
}
auto async_processor = svc.rados->get_async_processor();
std::lock_guard l{meta_sync_thread_lock};
- meta_sync_processor_thread = new RGWMetaSyncProcessorThread(this, async_processor);
+ meta_sync_processor_thread = new RGWMetaSyncProcessorThread(this->store, async_processor);
ret = meta_sync_processor_thread->init();
if (ret < 0) {
ldout(cct, 0) << "ERROR: failed to initialize meta sync thread" << dendl;
rgw::BucketTrimConfig config;
rgw::configure_bucket_trim(cct, config);
- bucket_trim.emplace(this, config);
+ bucket_trim.emplace(this->store, config);
ret = bucket_trim->init();
if (ret < 0) {
ldout(cct, 0) << "ERROR: failed to start bucket trim manager" << dendl;
std::lock_guard dl{data_sync_thread_lock};
for (auto source_zone : svc.zone->get_data_sync_source_zones()) {
ldout(cct, 5) << "starting data sync thread for zone " << source_zone->name << dendl;
- auto *thread = new RGWDataSyncProcessorThread(this, svc.rados->get_async_processor(), source_zone);
+ auto *thread = new RGWDataSyncProcessorThread(this->store, svc.rados->get_async_processor(), source_zone);
ret = thread->init();
if (ret < 0) {
ldout(cct, 0) << "ERROR: failed to initialize data sync thread" << dendl;
}
auto interval = cct->_conf->rgw_sync_log_trim_interval;
if (interval > 0) {
- sync_log_trimmer = new RGWSyncLogTrimThread(this, &*bucket_trim, interval);
+ sync_log_trimmer = new RGWSyncLogTrimThread(this->store, &*bucket_trim, interval);
ret = sync_log_trimmer->init();
if (ret < 0) {
ldout(cct, 0) << "ERROR: failed to initialize sync log trim thread" << dendl;
data_notifier->start();
lc = new RGWLC();
- lc->initialize(cct, this);
+ lc->initialize(cct, this->store);
if (use_lc_thread)
lc->start_processor();
- quota_handler = RGWQuotaHandler::generate_handler(this, quota_threads);
+ quota_handler = RGWQuotaHandler::generate_handler(this->store, quota_threads);
bucket_index_max_shards = (cct->_conf->rgw_override_bucket_index_max_shards ? cct->_conf->rgw_override_bucket_index_max_shards :
zone.bucket_index_max_shards);
reshard_wait = std::make_shared<RGWReshardWait>();
- reshard = new RGWReshard(this);
+ reshard = new RGWReshard(this->store);
/* only the master zone in the zonegroup reshards buckets */
run_reshard_thread = run_reshard_thread && (zonegroup.master_zone == zone.id);
}
RGWObjState *astate = NULL;
- RGWObjectCtx rctx(this);
+ RGWObjectCtx rctx(this->store);
r = get_obj_state(&rctx, bucket_info, obj, &astate, false, y);
if (r < 0)
return r;
real_time mtime;
uint64_t obj_size;
- RGWObjectCtx rctx(this);
+ RGWObjectCtx rctx(this->store);
RGWRados::Object op_target(this, dest_bucket_info, rctx, obj);
RGWRados::Object::Read read_op(&op_target);
rgw::BlockingAioThrottle aio(cct->_conf->rgw_put_obj_min_window_size);
using namespace rgw::putobj;
const rgw_placement_rule *ptail_rule = (dest_placement_rule ? &(*dest_placement_rule) : nullptr);
- AtomicObjectProcessor processor(&aio, this, dest_bucket_info, ptail_rule, user_id,
+ AtomicObjectProcessor processor(&aio, this->store, dest_bucket_info, ptail_rule, user_id,
obj_ctx, dest_obj, olh_epoch, tag, dpp, null_yield);
RGWRESTConn *conn;
auto& zone_conn_map = svc.zone->get_zone_conn_map();
using namespace rgw::putobj;
// do not change the null_yield in the initialization of this AtomicObjectProcessor
// it causes crashes in the ragweed tests
- AtomicObjectProcessor processor(&aio, this, dest_bucket_info, &dest_placement,
+ AtomicObjectProcessor processor(&aio, this->store, dest_bucket_info, &dest_placement,
dest_bucket_info.owner, obj_ctx,
dest_obj, olh_epoch, tag, dpp, null_yield);
int ret = processor.prepare(y);
if (!op.size())
return 0;
- RGWObjectCtx obj_ctx(this);
+ RGWObjectCtx obj_ctx(this->store);
bufferlist bl;
RGWRados::Bucket bop(this, bucket_info);
// since we expect to do this rarely, we'll do our work in a
// block and erase our work after each try
- RGWObjectCtx obj_ctx(this);
+ RGWObjectCtx obj_ctx(this->store);
const rgw_bucket& b = bs->bucket;
std::string bucket_id = b.get_key();
- RGWBucketReshardLock reshard_lock(this, bucket_info, true);
+ RGWBucketReshardLock reshard_lock(this->store, bucket_info, true);
ret = reshard_lock.lock();
if (ret < 0) {
ldout(cct, 20) << __func__ <<
ldout(cct, 10) << __func__ <<
" INFO: was able to take reshard lock for bucket " <<
bucket_id << dendl;
- ret = RGWBucketReshard::clear_resharding(this, bucket_info);
+ ret = RGWBucketReshard::clear_resharding(this->store, bucket_info);
if (ret < 0) {
reshard_lock.unlock();
ldout(cct, 0) << __func__ <<
io_ctx.locator_set_key(list_state.locator);
RGWObjState *astate = NULL;
- RGWObjectCtx rctx(this);
+ RGWObjectCtx rctx(this->store);
int r = get_obj_state(&rctx, bucket_info, obj, &astate, false, y);
if (r < 0)
return r;
int RGWRados::add_bucket_to_reshard(const RGWBucketInfo& bucket_info, uint32_t new_num_shards)
{
- RGWReshard reshard(this);
+ RGWReshard reshard(this->store);
uint32_t num_source_shards = (bucket_info.num_shards > 0 ? bucket_info.num_shards : 1);
return ++max_bucket_id;
}
-RGWRados *RGWStoreManager::init_storage_provider(CephContext *cct, bool use_gc_thread, bool use_lc_thread,
- bool quota_threads, bool run_sync_thread, bool run_reshard_thread, bool use_cache)
-{
- RGWRados *store = new RGWRados;
-
- if ((*store).set_use_cache(use_cache)
- .set_run_gc_thread(use_gc_thread)
- .set_run_lc_thread(use_lc_thread)
- .set_run_quota_threads(quota_threads)
- .set_run_sync_thread(run_sync_thread)
- .set_run_reshard_thread(run_reshard_thread)
- .initialize(cct) < 0) {
- delete store;
- return NULL;
- }
-
- return store;
-}
-
-RGWRados *RGWStoreManager::init_raw_storage_provider(CephContext *cct)
-{
- RGWRados *store = NULL;
- store = new RGWRados;
-
- store->set_context(cct);
-
- int ret = store->init_svc(true);
- if (ret < 0) {
- ldout(cct, 0) << "ERROR: failed to init services (ret=" << cpp_strerror(-ret) << ")" << dendl;
- return nullptr;
- }
-
- if (store->init_rados() < 0) {
- delete store;
- return nullptr;
- }
-
- return store;
-}
-
-void RGWStoreManager::close_storage(RGWRados *store)
-{
- if (!store)
- return;
-
- store->finalize();
-
- delete store;
-}
-
librados::Rados* RGWRados::get_rados_handle()
{
return &rados;
};
class RGWObjectCtx {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
ceph::shared_mutex lock = ceph::make_shared_mutex("RGWObjectCtx");
void *s{nullptr};
std::map<rgw_obj, RGWObjState> objs_state;
public:
- explicit RGWObjectCtx(RGWRados *_store) : store(_store) {}
- explicit RGWObjectCtx(RGWRados *_store, void *_s) : store(_store), s(_s) {}
+ explicit RGWObjectCtx(rgw::sal::RGWRadosStore *_store) : store(_store) {}
+ explicit RGWObjectCtx(rgw::sal::RGWRadosStore *_store, void *_s) : store(_store), s(_s) {}
void *get_private() {
return s;
}
- RGWRados *get_store() {
+ rgw::sal::RGWRadosStore *get_store() {
return store;
}
class RGWGetDirHeader_CB;
class RGWGetUserHeader_CB;
+namespace rgw { namespace sal { class RGWRadosStore; } }
class RGWAsyncRadosProcessor;
ceph::mutex lock = ceph::make_mutex("rados_timer_lock");
SafeTimer *timer;
+ rgw::sal::RGWRadosStore *store;
RGWGC *gc;
RGWLC *lc;
RGWObjectExpirer *obj_expirer;
void set_context(CephContext *_cct) {
cct = _cct;
}
+ void set_store(rgw::sal::RGWRadosStore *_store) {
+ store = _store;
+ }
RGWServices svc;
RGWCtl ctl;
uint64_t next_bucket_id();
};
-class RGWStoreManager {
-public:
- RGWStoreManager() {}
- static RGWRados *get_storage(CephContext *cct, bool use_gc_thread, bool use_lc_thread, bool quota_threads,
- bool run_sync_thread, bool run_reshard_thread, bool use_cache = true) {
- RGWRados *store = init_storage_provider(cct, use_gc_thread, use_lc_thread, quota_threads, run_sync_thread,
- run_reshard_thread, use_cache);
- return store;
- }
- static RGWRados *get_raw_storage(CephContext *cct) {
- RGWRados *store = init_raw_storage_provider(cct);
- return store;
- }
- static RGWRados *init_storage_provider(CephContext *cct, bool use_gc_thread, bool use_lc_thread, bool quota_threads, bool run_sync_thread, bool run_reshard_thread, bool use_metadata_cache);
- static RGWRados *init_raw_storage_provider(CephContext *cct);
- static void close_storage(RGWRados *store);
-
-};
-
#endif
#include "rgw_log.h"
#include "rgw_rest.h"
#include "rgw_user.h"
+#include "rgw_sal.h"
#include "services/svc_zone.h"
static constexpr bool USE_SAFE_TIMER_CALLBACKS = false;
-RGWRealmReloader::RGWRealmReloader(RGWRados*& store, std::map<std::string, std::string>& service_map_meta,
+RGWRealmReloader::RGWRealmReloader(rgw::sal::RGWRadosStore*& store, std::map<std::string, std::string>& service_map_meta,
Pauser* frontends)
: store(store),
service_map_meta(service_map_meta),
ldout(cct, 1) << "Creating new store" << dendl;
- RGWRados* store_cleanup = nullptr;
+ rgw::sal::RGWRadosStore* store_cleanup = nullptr;
{
std::unique_lock lock{mutex};
}
}
- int r = store->register_to_service_map("rgw", service_map_meta);
+ int r = store->getRados()->register_to_service_map("rgw", service_map_meta);
if (r < 0) {
lderr(cct) << "ERROR: failed to register to service map: " << cpp_strerror(-r) << dendl;
ldout(cct, 1) << "Finishing initialization of new store" << dendl;
// finish initializing the new store
ldout(cct, 1) << " - REST subsystem init" << dendl;
- rgw_rest_init(cct, store, store->svc.zone->get_zonegroup());
+ rgw_rest_init(cct, store->svc()->zone->get_zonegroup());
ldout(cct, 1) << " - usage subsystem init" << dendl;
- rgw_log_usage_init(cct, store);
+ rgw_log_usage_init(cct, store->getRados());
ldout(cct, 1) << "Resuming frontends with new realm configuration." << dendl;
#include "rgw_realm_watcher.h"
#include "common/Cond.h"
-class RGWRados;
+namespace rgw {
+namespace sal {
+class RGWRadosStore;
+}
+}
/**
* RGWRealmReloader responds to new period notifications by recreating RGWRados
/// pause all frontends while realm reconfiguration is in progress
virtual void pause() = 0;
/// resume all frontends with the given RGWRadosStore instance
- virtual void resume(RGWRados* store) = 0;
+ virtual void resume(rgw::sal::RGWRadosStore* store) = 0;
};
- RGWRealmReloader(RGWRados*& store, std::map<std::string, std::string>& service_map_meta,
+ RGWRealmReloader(rgw::sal::RGWRadosStore*& store, std::map<std::string, std::string>& service_map_meta,
Pauser* frontends);
~RGWRealmReloader() override;
class C_Reload; //< Context that calls reload()
- /// main()'s RGWRados pointer as a reference, modified by reload()
- RGWRados*& store;
+ /// main()'s RGWRadosStore pointer as a reference, modified by reload()
+ rgw::sal::RGWRadosStore*& store;
std::map<std::string, std::string>& service_map_meta;
Pauser *const frontends;
#include "rgw_zone.h"
#include "rgw_bucket.h"
#include "rgw_reshard.h"
+#include "rgw_sal.h"
#include "cls/rgw/cls_rgw_client.h"
#include "cls/lock/cls_lock_client.h"
#include "common/errno.h"
class BucketReshardShard {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
const RGWBucketInfo& bucket_info;
int num_shard;
RGWRados::BucketShard bs;
}
public:
- BucketReshardShard(RGWRados *_store, const RGWBucketInfo& _bucket_info,
+ BucketReshardShard(rgw::sal::RGWRadosStore *_store, const RGWBucketInfo& _bucket_info,
int _num_shard,
deque<librados::AioCompletion *>& _completions) :
- store(_store), bucket_info(_bucket_info), bs(store),
+ store(_store), bucket_info(_bucket_info), bs(store->getRados()),
aio_completions(_completions)
{
num_shard = (bucket_info.num_shards > 0 ? _num_shard : -1);
librados::ObjectWriteOperation op;
for (auto& entry : entries) {
- store->bi_put(op, bs, entry);
+ store->getRados()->bi_put(op, bs, entry);
}
cls_rgw_bucket_update_stats(op, false, stats);
class BucketReshardManager {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
const RGWBucketInfo& target_bucket_info;
deque<librados::AioCompletion *> completions;
int num_target_shards;
vector<BucketReshardShard *> target_shards;
public:
- BucketReshardManager(RGWRados *_store,
+ BucketReshardManager(rgw::sal::RGWRadosStore *_store,
const RGWBucketInfo& _target_bucket_info,
int _num_target_shards) :
store(_store), target_bucket_info(_target_bucket_info),
}
}; // class BucketReshardManager
-RGWBucketReshard::RGWBucketReshard(RGWRados *_store,
+RGWBucketReshard::RGWBucketReshard(rgw::sal::RGWRadosStore *_store,
const RGWBucketInfo& _bucket_info,
const map<string, bufferlist>& _bucket_attrs,
RGWBucketReshardLock* _outer_reshard_lock) :
outer_reshard_lock(_outer_reshard_lock)
{ }
-int RGWBucketReshard::set_resharding_status(RGWRados* store,
+int RGWBucketReshard::set_resharding_status(rgw::sal::RGWRadosStore* store,
const RGWBucketInfo& bucket_info,
const string& new_instance_id,
int32_t num_shards,
cls_rgw_bucket_instance_entry instance_entry;
instance_entry.set_status(new_instance_id, num_shards, status);
- int ret = store->bucket_set_reshard(bucket_info, instance_entry);
+ int ret = store->getRados()->bucket_set_reshard(bucket_info, instance_entry);
if (ret < 0) {
ldout(store->ctx(), 0) << "RGWReshard::" << __func__ << " ERROR: error setting bucket resharding flag on bucket index: "
<< cpp_strerror(-ret) << dendl;
}
// reshard lock assumes lock is held
-int RGWBucketReshard::clear_resharding(RGWRados* store,
+int RGWBucketReshard::clear_resharding(rgw::sal::RGWRadosStore* store,
const RGWBucketInfo& bucket_info)
{
int ret = clear_index_shard_reshard_status(store, bucket_info);
}
cls_rgw_bucket_instance_entry instance_entry;
- ret = store->bucket_set_reshard(bucket_info, instance_entry);
+ ret = store->getRados()->bucket_set_reshard(bucket_info, instance_entry);
if (ret < 0) {
ldout(store->ctx(), 0) << "RGWReshard::" << __func__ <<
" ERROR: error setting bucket resharding flag on bucket index: " <<
return 0;
}
-int RGWBucketReshard::clear_index_shard_reshard_status(RGWRados* store,
+int RGWBucketReshard::clear_index_shard_reshard_status(rgw::sal::RGWRadosStore* store,
const RGWBucketInfo& bucket_info)
{
uint32_t num_shards = bucket_info.num_shards;
return 0;
}
-static int create_new_bucket_instance(RGWRados *store,
+static int create_new_bucket_instance(rgw::sal::RGWRadosStore *store,
int new_num_shards,
const RGWBucketInfo& bucket_info,
map<string, bufferlist>& attrs,
{
new_bucket_info = bucket_info;
- store->create_bucket_id(&new_bucket_info.bucket.bucket_id);
+ store->getRados()->create_bucket_id(&new_bucket_info.bucket.bucket_id);
new_bucket_info.num_shards = new_num_shards;
new_bucket_info.objv_tracker.clear();
new_bucket_info.new_bucket_instance_id.clear();
new_bucket_info.reshard_status = 0;
- int ret = store->svc.bi->init_index(new_bucket_info);
+ int ret = store->svc()->bi->init_index(new_bucket_info);
if (ret < 0) {
cerr << "ERROR: failed to init new bucket indexes: " << cpp_strerror(-ret) << std::endl;
return ret;
}
- ret = store->put_bucket_instance_info(new_bucket_info, true, real_time(), &attrs);
+ ret = store->getRados()->put_bucket_instance_info(new_bucket_info, true, real_time(), &attrs);
if (ret < 0) {
cerr << "ERROR: failed to store new bucket instance info: " << cpp_strerror(-ret) << std::endl;
return ret;
class BucketInfoReshardUpdate
{
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
RGWBucketInfo bucket_info;
std::map<string, bufferlist> bucket_attrs;
int set_status(cls_rgw_reshard_status s) {
bucket_info.reshard_status = s;
- int ret = store->put_bucket_instance_info(bucket_info, false, real_time(), &bucket_attrs);
+ int ret = store->getRados()->put_bucket_instance_info(bucket_info, false, real_time(), &bucket_attrs);
if (ret < 0) {
ldout(store->ctx(), 0) << "ERROR: failed to write bucket info, ret=" << ret << dendl;
return ret;
}
public:
- BucketInfoReshardUpdate(RGWRados *_store,
+ BucketInfoReshardUpdate(rgw::sal::RGWRadosStore *_store,
RGWBucketInfo& _bucket_info,
map<string, bufferlist>& _bucket_attrs,
const string& new_bucket_id) :
};
-RGWBucketReshardLock::RGWBucketReshardLock(RGWRados* _store,
+RGWBucketReshardLock::RGWBucketReshardLock(rgw::sal::RGWRadosStore* _store,
const std::string& reshard_lock_oid,
bool _ephemeral) :
store(_store),
internal_lock.set_must_renew(false);
int ret;
if (ephemeral) {
- ret = internal_lock.lock_exclusive_ephemeral(&store->reshard_pool_ctx,
+ ret = internal_lock.lock_exclusive_ephemeral(&store->getRados()->reshard_pool_ctx,
lock_oid);
} else {
- ret = internal_lock.lock_exclusive(&store->reshard_pool_ctx, lock_oid);
+ ret = internal_lock.lock_exclusive(&store->getRados()->reshard_pool_ctx, lock_oid);
}
if (ret < 0) {
ldout(store->ctx(), 0) << "RGWReshardLock::" << __func__ <<
}
void RGWBucketReshardLock::unlock() {
- int ret = internal_lock.unlock(&store->reshard_pool_ctx, lock_oid);
+ int ret = internal_lock.unlock(&store->getRados()->reshard_pool_ctx, lock_oid);
if (ret < 0) {
ldout(store->ctx(), 0) << "WARNING: RGWBucketReshardLock::" << __func__ <<
" failed to drop lock on " << lock_oid << " ret=" << ret << dendl;
internal_lock.set_must_renew(true);
int ret;
if (ephemeral) {
- ret = internal_lock.lock_exclusive_ephemeral(&store->reshard_pool_ctx,
+ ret = internal_lock.lock_exclusive_ephemeral(&store->getRados()->reshard_pool_ctx,
lock_oid);
} else {
- ret = internal_lock.lock_exclusive(&store->reshard_pool_ctx, lock_oid);
+ ret = internal_lock.lock_exclusive(&store->getRados()->reshard_pool_ctx, lock_oid);
}
if (ret < 0) { /* expired or already locked by another processor */
std::stringstream error_s;
marker.clear();
while (is_truncated) {
entries.clear();
- ret = store->bi_list(bucket, i, string(), marker, max_entries, &entries, &is_truncated);
+ ret = store->getRados()->bi_list(bucket, i, string(), marker, max_entries, &entries, &is_truncated);
if (ret < 0 && ret != -ENOENT) {
derr << "ERROR: bi_list(): " << cpp_strerror(-ret) << dendl;
return ret;
bool account = entry.get_info(&cls_key, &category, &stats);
rgw_obj_key key(cls_key);
rgw_obj obj(new_bucket_info.bucket, key);
- int ret = store->get_target_shard_id(new_bucket_info, obj.get_hash_object(), &target_shard_id);
+ int ret = store->getRados()->get_target_shard_id(new_bucket_info, obj.get_hash_object(), &target_shard_id);
if (ret < 0) {
lderr(store->ctx()) << "ERROR: get_target_shard_id() returned ret=" << ret << dendl;
return ret;
return -EIO;
}
- ret = store->ctl.bucket->link_bucket(new_bucket_info.owner, new_bucket_info.bucket, bucket_info.creation_time, null_yield);
+ ret = store->ctl()->bucket->link_bucket(new_bucket_info.owner, new_bucket_info.bucket, bucket_info.creation_time, null_yield);
if (ret < 0) {
lderr(store->ctx()) << "failed to link new bucket instance (bucket_id=" << new_bucket_info.bucket.bucket_id << ": " << cpp_strerror(-ret) << ")" << dendl;
return ret;
int RGWBucketReshard::get_status(list<cls_rgw_bucket_instance_entry> *status)
{
- return store->svc.bi_rados->get_reshard_status(bucket_info, status);
+ return store->svc()->bi_rados->get_reshard_status(bucket_info, status);
}
// best effort and don't report out an error; the lock isn't needed
// at this point since all we're using is a best effort to remove old
// shard objects
- ret = store->svc.bi->clean_index(bucket_info);
+ ret = store->svc()->bi->clean_index(bucket_info);
if (ret < 0) {
lderr(store->ctx()) << "Error: " << __func__ <<
" failed to clean up old shards; " <<
"RGWRados::clean_bucket_index returned " << ret << dendl;
}
- ret = store->ctl.bucket->remove_bucket_instance_info(bucket_info.bucket,
+ ret = store->ctl()->bucket->remove_bucket_instance_info(bucket_info.bucket,
bucket_info, null_yield);
if (ret < 0) {
lderr(store->ctx()) << "Error: " << __func__ <<
// since the real problem is the issue that led to this error code
// path, we won't touch ret and instead use another variable to
// temporarily hold error codes
- int ret2 = store->svc.bi->clean_index(new_bucket_info);
+ int ret2 = store->svc()->bi->clean_index(new_bucket_info);
if (ret2 < 0) {
lderr(store->ctx()) << "Error: " << __func__ <<
" failed to clean up shards from failed incomplete resharding; " <<
"RGWRados::clean_bucket_index returned " << ret2 << dendl;
}
- ret2 = store->ctl.bucket->remove_bucket_instance_info(new_bucket_info.bucket,
+ ret2 = store->ctl()->bucket->remove_bucket_instance_info(new_bucket_info.bucket,
new_bucket_info,
null_yield);
if (ret2 < 0) {
} // execute
-RGWReshard::RGWReshard(RGWRados* _store, bool _verbose, ostream *_out,
+RGWReshard::RGWReshard(rgw::sal::RGWRadosStore* _store, bool _verbose, ostream *_out,
Formatter *_formatter) :
store(_store), instance_lock(bucket_instance_lock_name),
verbose(_verbose), out(_out), formatter(_formatter)
int RGWReshard::add(cls_rgw_reshard_entry& entry)
{
- if (!store->svc.zone->can_reshard()) {
+ if (!store->svc()->zone->can_reshard()) {
ldout(store->ctx(), 20) << __func__ << " Resharding is disabled" << dendl;
return 0;
}
librados::ObjectWriteOperation op;
cls_rgw_reshard_add(op, entry);
- int ret = store->reshard_pool_ctx.operate(logshard_oid, &op);
+ int ret = store->getRados()->reshard_pool_ctx.operate(logshard_oid, &op);
if (ret < 0) {
lderr(store->ctx()) << "ERROR: failed to add entry to reshard log, oid=" << logshard_oid << " tenant=" << entry.tenant << " bucket=" << entry.bucket_name << dendl;
return ret;
get_logshard_oid(logshard_num, &logshard_oid);
- int ret = cls_rgw_reshard_list(store->reshard_pool_ctx, logshard_oid, marker, max, entries, is_truncated);
+ int ret = cls_rgw_reshard_list(store->getRados()->reshard_pool_ctx, logshard_oid, marker, max, entries, is_truncated);
if (ret < 0) {
if (ret == -ENOENT) {
}
lderr(store->ctx()) << "ERROR: failed to list reshard log entries, oid=" << logshard_oid << dendl;
if (ret == -EACCES) {
- lderr(store->ctx()) << "access denied to pool " << store->svc.zone->get_zone_params().reshard_pool
+ lderr(store->ctx()) << "access denied to pool " << store->svc()->zone->get_zone_params().reshard_pool
<< ". Fix the pool access permissions of your client" << dendl;
}
}
get_bucket_logshard_oid(entry.tenant, entry.bucket_name, &logshard_oid);
- int ret = cls_rgw_reshard_get(store->reshard_pool_ctx, logshard_oid, entry);
+ int ret = cls_rgw_reshard_get(store->getRados()->reshard_pool_ctx, logshard_oid, entry);
if (ret < 0) {
if (ret != -ENOENT) {
lderr(store->ctx()) << "ERROR: failed to get entry from reshard log, oid=" << logshard_oid << " tenant=" << entry.tenant <<
librados::ObjectWriteOperation op;
cls_rgw_reshard_remove(op, entry);
- int ret = store->reshard_pool_ctx.operate(logshard_oid, &op);
+ int ret = store->getRados()->reshard_pool_ctx.operate(logshard_oid, &op);
if (ret < 0) {
lderr(store->ctx()) << "ERROR: failed to remove entry from reshard log, oid=" << logshard_oid << " tenant=" << entry.tenant << " bucket=" << entry.bucket_name << dendl;
return ret;
int RGWReshard::clear_bucket_resharding(const string& bucket_instance_oid, cls_rgw_reshard_entry& entry)
{
- int ret = cls_rgw_clear_bucket_resharding(store->reshard_pool_ctx, bucket_instance_oid);
+ int ret = cls_rgw_clear_bucket_resharding(store->getRados()->reshard_pool_ctx, bucket_instance_oid);
if (ret < 0) {
lderr(store->ctx()) << "ERROR: failed to clear bucket resharding, bucket_instance_oid=" << bucket_instance_oid << dendl;
return ret;
ldout(store->ctx(), 20) << __func__ << " resharding " <<
entry.bucket_name << dendl;
- auto obj_ctx = store->svc.sysobj->init_obj_ctx();
+ auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
rgw_bucket bucket;
RGWBucketInfo bucket_info;
map<string, bufferlist> attrs;
- ret = store->get_bucket_info(obj_ctx, entry.tenant, entry.bucket_name,
+ ret = store->getRados()->get_bucket_info(obj_ctx, entry.tenant, entry.bucket_name,
bucket_info, nullptr, null_yield, &attrs);
if (ret < 0) {
ldout(cct, 0) << __func__ << ": Error in get_bucket_info: " <<
int RGWReshard::process_all_logshards()
{
- if (!store->svc.zone->can_reshard()) {
+ if (!store->svc()->zone->can_reshard()) {
ldout(store->ctx(), 20) << __func__ << " Resharding is disabled" << dendl;
return 0;
}
class CephContext;
-class RGWRados;
class RGWReshard;
+namespace rgw { namespace sal {
+ class RGWRadosStore;
+} }
class RGWBucketReshardLock {
using Clock = ceph::coarse_mono_clock;
- RGWRados* store;
+ rgw::sal::RGWRadosStore* store;
const std::string lock_oid;
const bool ephemeral;
rados::cls::lock::Lock internal_lock;
}
public:
- RGWBucketReshardLock(RGWRados* _store,
+ RGWBucketReshardLock(rgw::sal::RGWRadosStore* _store,
const std::string& reshard_lock_oid,
bool _ephemeral);
- RGWBucketReshardLock(RGWRados* _store,
+ RGWBucketReshardLock(rgw::sal::RGWRadosStore* _store,
const RGWBucketInfo& bucket_info,
bool _ephemeral) :
RGWBucketReshardLock(_store, bucket_info.bucket.get_key(':'), _ephemeral)
private:
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
RGWBucketInfo bucket_info;
std::map<string, bufferlist> bucket_attrs;
// pass nullptr for the final parameter if no outer reshard lock to
// manage
- RGWBucketReshard(RGWRados *_store, const RGWBucketInfo& _bucket_info,
+ RGWBucketReshard(rgw::sal::RGWRadosStore *_store, const RGWBucketInfo& _bucket_info,
const std::map<string, bufferlist>& _bucket_attrs,
RGWBucketReshardLock* _outer_reshard_lock);
int execute(int num_shards, int max_op_entries,
RGWReshard *reshard_log = nullptr);
int get_status(std::list<cls_rgw_bucket_instance_entry> *status);
int cancel();
- static int clear_resharding(RGWRados* store,
+ static int clear_resharding(rgw::sal::RGWRadosStore* store,
const RGWBucketInfo& bucket_info);
int clear_resharding() {
return clear_resharding(store, bucket_info);
}
- static int clear_index_shard_reshard_status(RGWRados* store,
+ static int clear_index_shard_reshard_status(rgw::sal::RGWRadosStore* store,
const RGWBucketInfo& bucket_info);
int clear_index_shard_reshard_status() {
return clear_index_shard_reshard_status(store, bucket_info);
}
- static int set_resharding_status(RGWRados* store,
+ static int set_resharding_status(rgw::sal::RGWRadosStore* store,
const RGWBucketInfo& bucket_info,
const string& new_instance_id,
int32_t num_shards,
using Clock = ceph::coarse_mono_clock;
private:
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
string lock_name;
rados::cls::lock::Lock instance_lock;
int num_logshards;
void get_bucket_logshard_oid(const string& tenant, const string& bucket_name, string *oid);
public:
- RGWReshard(RGWRados* _store, bool _verbose = false, ostream *_out = nullptr, Formatter *_formatter = nullptr);
+ RGWReshard(rgw::sal::RGWRadosStore* _store, bool _verbose = false, ostream *_out = nullptr, Formatter *_formatter = nullptr);
int add(cls_rgw_reshard_entry& entry);
int update(const RGWBucketInfo& bucket_info, const RGWBucketInfo& new_bucket_info);
int get(cls_rgw_reshard_entry& entry);
static set<string> hostnames_set;
static set<string> hostnames_s3website_set;
-void rgw_rest_init(CephContext *cct, RGWRados *store, const RGWZoneGroup& zone_group)
+void rgw_rest_init(CephContext *cct, const RGWZoneGroup& zone_group)
{
for (const auto& rgw2http : base_rgw_to_http_attrs) {
rgw_to_http_attrs[rgw2http.rgw_attr] = rgw2http.http_attr;
return check_caps(s->user->caps);
}
-RGWOp* RGWHandler_REST::get_op(RGWRados* store)
+RGWOp* RGWHandler_REST::get_op(rgw::sal::RGWRadosStore* store)
{
RGWOp *op;
switch (s->op) {
if (! s->user->user_id.empty() && s->auth.identity->get_identity_type() != TYPE_ROLE) {
try {
map<string, bufferlist> uattrs;
- if (auto ret = store->ctl.user->get_attrs_by_uid(s->user->user_id, &uattrs, null_yield); ! ret) {
+ if (auto ret = store->ctl()->user->get_attrs_by_uid(s->user->user_id, &uattrs, null_yield); ! ret) {
if (s->iam_user_policies.empty()) {
s->iam_user_policies = get_iam_user_policy_from_attr(s->cct, store, uattrs, s->user->user_id.tenant);
} else {
}
RGWHandler_REST* RGWREST::get_handler(
- RGWRados * const store,
+ rgw::sal::RGWRadosStore * const store,
struct req_state* const s,
const rgw::auth::StrategyRegistry& auth_registry,
const std::string& frontend_prefix,
extern std::map<std::string, std::string> rgw_to_http_attrs;
-extern void rgw_rest_init(CephContext *cct, RGWRados *store, const RGWZoneGroup& zone_group);
+extern void rgw_rest_init(CephContext *cct, const RGWZoneGroup& zone_group);
extern void rgw_flush_formatter_and_reset(struct req_state *s,
ceph::Formatter *formatter);
public:
RGWGetObj_ObjStore() : sent_header(false) {}
- void init(RGWRados *store, struct req_state *s, RGWHandler *h) override {
+ void init(rgw::sal::RGWRadosStore *store, struct req_state *s, RGWHandler *h) override {
RGWGetObj::init(store, s, h);
sent_header = false;
}
RGWRESTFlusher flusher;
public:
RGWRESTOp() : http_ret(0) {}
- void init(RGWRados *store, struct req_state *s,
+ void init(rgw::sal::RGWRadosStore *store, struct req_state *s,
RGWHandler *dialect_handler) override {
RGWOp::init(store, s, dialect_handler);
flusher.init(s, this);
int init_permissions(RGWOp* op) override;
int read_permissions(RGWOp* op) override;
- virtual RGWOp* get_op(RGWRados* store);
+ virtual RGWOp* get_op(rgw::sal::RGWRadosStore* store);
virtual void put_op(RGWOp* op);
};
static int preprocess(struct req_state *s, rgw::io::BasicClient* rio);
public:
RGWREST() {}
- RGWHandler_REST *get_handler(RGWRados *store,
+ RGWHandler_REST *get_handler(rgw::sal::RGWRadosStore *store,
struct req_state *s,
const rgw::auth::StrategyRegistry& auth_registry,
const std::string& frontend_prefix,
if (use_http_params) {
RGWBucketInfo bucket_info;
map<string, bufferlist> attrs;
- auto obj_ctx = store->svc.sysobj->init_obj_ctx();
- http_ret = store->get_bucket_info(obj_ctx, uid.tenant, bucket, bucket_info, NULL, s->yield, &attrs);
+ auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
+ http_ret = store->getRados()->get_bucket_info(obj_ctx, uid.tenant, bucket, bucket_info, NULL, s->yield, &attrs);
if (http_ret < 0) {
return;
}
#define dout_subsys ceph_subsys_rgw
void RGWOp_ZoneGroupMap_Get::execute() {
- http_ret = zonegroup_map.read(g_ceph_context, store->svc.sysobj);
+ http_ret = zonegroup_map.read(g_ceph_context, store->svc()->sysobj);
if (http_ret < 0) {
dout(5) << "failed to read zone_group map" << dendl;
}
}
void RGWOp_ZoneConfig_Get::send_response() {
- const RGWZoneParams& zone_params = store->svc.zone->get_zone_params();
+ const RGWZoneParams& zone_params = store->svc()->zone->get_zone_params();
set_req_state_err(s, http_ret);
dump_errno(s);
return nullptr;
}
-int RGWHandler_REST_IAM::init(RGWRados *store,
+int RGWHandler_REST_IAM::init(rgw::sal::RGWRadosStore *store,
struct req_state *s,
rgw::io::BasicClient *cio)
{
post_body(post_body) {}
~RGWHandler_REST_IAM() override = default;
- int init(RGWRados *store,
+ int init(rgw::sal::RGWRadosStore *store,
struct req_state *s,
rgw::io::BasicClient *cio) override;
int authorize(const DoutPrefixProvider* dpp) override;
if (period.empty()) {
ldout(s->cct, 5) << "Missing period id trying to use current" << dendl;
- period = store->svc.zone->get_current_period_id();
+ period = store->svc()->zone->get_current_period_id();
if (period.empty()) {
ldout(s->cct, 5) << "Missing period id" << dendl;
http_ret = -EINVAL;
}
}
- RGWMetadataLog meta_log{s->cct, store->svc.zone, store->svc.cls, period};
+ RGWMetadataLog meta_log{s->cct, store->svc()->zone, store->svc()->cls, period};
meta_log.init_list_entries(shard_id, ut_st, ut_et, marker, &handle);
for (list<cls_log_entry>::iterator iter = entries.begin();
iter != entries.end(); ++iter) {
cls_log_entry& entry = *iter;
- store->ctl.meta.mgr->dump_log_entry(entry, s->formatter);
+ store->ctl()->meta.mgr->dump_log_entry(entry, s->formatter);
flusher.flush();
}
s->formatter->close_section();
void RGWOp_MDLog_Info::execute() {
num_objects = s->cct->_conf->rgw_md_log_max_shards;
- period = store->svc.mdlog->read_oldest_log_period();
+ period = store->svc()->mdlog->read_oldest_log_period();
http_ret = period.get_error();
}
if (period.empty()) {
ldout(s->cct, 5) << "Missing period id trying to use current" << dendl;
- period = store->svc.zone->get_current_period_id();
+ period = store->svc()->zone->get_current_period_id();
if (period.empty()) {
ldout(s->cct, 5) << "Missing period id" << dendl;
return;
}
}
- RGWMetadataLog meta_log{s->cct, store->svc.zone, store->svc.cls, period};
+ RGWMetadataLog meta_log{s->cct, store->svc()->zone, store->svc()->cls, period};
http_ret = meta_log.get_info(shard_id, &info);
}
if (period.empty()) {
ldout(s->cct, 5) << "Missing period id trying to use current" << dendl;
- period = store->svc.zone->get_current_period_id();
+ period = store->svc()->zone->get_current_period_id();
if (period.empty()) {
ldout(s->cct, 5) << "Missing period id" << dendl;
return;
}
}
- RGWMetadataLog meta_log{s->cct, store->svc.zone, store->svc.cls, period};
+ RGWMetadataLog meta_log{s->cct, store->svc()->zone, store->svc()->cls, period};
http_ret = meta_log.trim(shard_id, ut_st, ut_et, start_marker, end_marker);
}
if (period.empty()) {
ldout(s->cct, 5) << "Missing period id trying to use current" << dendl;
- period = store->svc.zone->get_current_period_id();
+ period = store->svc()->zone->get_current_period_id();
}
if (period.empty() ||
return;
}
- RGWMetadataLog meta_log{s->cct, store->svc.zone, store->svc.cls, period};
+ RGWMetadataLog meta_log{s->cct, store->svc()->zone, store->svc()->cls, period};
unsigned dur;
dur = (unsigned)strict_strtol(duration_str.c_str(), 10, &err);
if (!err.empty() || dur <= 0) {
if (period.empty()) {
ldout(s->cct, 5) << "Missing period id trying to use current" << dendl;
- period = store->svc.zone->get_current_period_id();
+ period = store->svc()->zone->get_current_period_id();
}
if (period.empty() ||
return;
}
- RGWMetadataLog meta_log{s->cct, store->svc.zone, store->svc.cls, period};
+ RGWMetadataLog meta_log{s->cct, store->svc()->zone, store->svc()->cls, period};
http_ret = meta_log.unlock(shard_id, zone_id, locker_id);
}
}
}
- store->wakeup_meta_sync_shards(updated_shards);
+ store->getRados()->wakeup_meta_sync_shards(updated_shards);
http_ret = 0;
}
if (!bucket_instance.empty()) {
rgw_bucket b(rgw_bucket_key(tenant_name, bn, bucket_instance));
- http_ret = store->get_bucket_instance_info(*s->sysobj_ctx, b, bucket_info, NULL, NULL, s->yield);
+ http_ret = store->getRados()->get_bucket_instance_info(*s->sysobj_ctx, b, bucket_info, NULL, NULL, s->yield);
if (http_ret < 0) {
ldpp_dout(s, 5) << "could not get bucket instance info for bucket instance id=" << bucket_instance << dendl;
return;
}
} else { /* !bucket_name.empty() */
- http_ret = store->get_bucket_info(*s->sysobj_ctx, tenant_name, bucket_name, bucket_info, NULL, s->yield, NULL);
+ http_ret = store->getRados()->get_bucket_info(*s->sysobj_ctx, tenant_name, bucket_name, bucket_info, NULL, s->yield, NULL);
if (http_ret < 0) {
ldpp_dout(s, 5) << "could not get bucket info for bucket=" << bucket_name << dendl;
return;
send_response();
do {
list<rgw_bi_log_entry> entries;
- int ret = store->svc.bilog_rados->log_list(bucket_info, shard_id,
+ int ret = store->svc()->bilog_rados->log_list(bucket_info, shard_id,
marker, max_entries - count,
entries, &truncated);
if (ret < 0) {
if (!bucket_instance.empty()) {
rgw_bucket b(rgw_bucket_key(tenant_name, bn, bucket_instance));
- http_ret = store->get_bucket_instance_info(*s->sysobj_ctx, b, bucket_info, NULL, NULL, s->yield);
+ http_ret = store->getRados()->get_bucket_instance_info(*s->sysobj_ctx, b, bucket_info, NULL, NULL, s->yield);
if (http_ret < 0) {
ldpp_dout(s, 5) << "could not get bucket instance info for bucket instance id=" << bucket_instance << dendl;
return;
}
} else { /* !bucket_name.empty() */
- http_ret = store->get_bucket_info(*s->sysobj_ctx, tenant_name, bucket_name, bucket_info, NULL, s->yield, NULL);
+ http_ret = store->getRados()->get_bucket_info(*s->sysobj_ctx, tenant_name, bucket_name, bucket_info, NULL, s->yield, NULL);
if (http_ret < 0) {
ldpp_dout(s, 5) << "could not get bucket info for bucket=" << bucket_name << dendl;
return;
}
}
map<RGWObjCategory, RGWStorageStats> stats;
- int ret = store->get_bucket_stats(bucket_info, shard_id, &bucket_ver, &master_ver, stats, &max_marker, &syncstopped);
+ int ret = store->getRados()->get_bucket_stats(bucket_info, shard_id, &bucket_ver, &master_ver, stats, &max_marker, &syncstopped);
if (ret < 0 && ret != -ENOENT) {
http_ret = ret;
return;
if (!bucket_instance.empty()) {
rgw_bucket b(rgw_bucket_key(tenant_name, bn, bucket_instance));
- http_ret = store->get_bucket_instance_info(*s->sysobj_ctx, b, bucket_info, NULL, NULL, s->yield);
+ http_ret = store->getRados()->get_bucket_instance_info(*s->sysobj_ctx, b, bucket_info, NULL, NULL, s->yield);
if (http_ret < 0) {
ldpp_dout(s, 5) << "could not get bucket instance info for bucket instance id=" << bucket_instance << dendl;
return;
}
} else { /* !bucket_name.empty() */
- http_ret = store->get_bucket_info(*s->sysobj_ctx, tenant_name, bucket_name, bucket_info, NULL, s->yield, NULL);
+ http_ret = store->getRados()->get_bucket_info(*s->sysobj_ctx, tenant_name, bucket_name, bucket_info, NULL, s->yield, NULL);
if (http_ret < 0) {
ldpp_dout(s, 5) << "could not get bucket info for bucket=" << bucket_name << dendl;
return;
}
}
- http_ret = store->svc.bilog_rados->log_trim(bucket_info, shard_id, start_marker, end_marker);
+ http_ret = store->svc()->bilog_rados->log_trim(bucket_info, shard_id, start_marker, end_marker);
if (http_ret < 0) {
ldpp_dout(s, 5) << "ERROR: trim_bi_log_entries() " << dendl;
}
// Note that last_marker is updated to be the marker of the last
// entry listed
- http_ret = store->svc.datalog_rados->list_entries(shard_id, ut_st, ut_et,
+ http_ret = store->svc()->datalog_rados->list_entries(shard_id, ut_st, ut_et,
max_entries, entries, marker,
&last_marker, &truncated);
}
return;
}
- http_ret = store->svc.datalog_rados->get_info(shard_id, &info);
+ http_ret = store->svc()->datalog_rados->get_info(shard_id, &info);
}
void RGWOp_DATALog_ShardInfo::send_response() {
}
}
- store->wakeup_data_sync_shards(source_zone, updated_shards);
+ store->getRados()->wakeup_data_sync_shards(source_zone, updated_shards);
http_ret = 0;
}
return;
}
- http_ret = store->svc.datalog_rados->trim_entries(shard_id, ut_st, ut_et, start_marker, end_marker);
+ http_ret = store->svc()->datalog_rados->trim_entries(shard_id, ut_st, ut_et, start_marker, end_marker);
}
// not in header to avoid pulling in rgw_sync.h
void RGWOp_MDLog_Status::execute()
{
- auto sync = store->get_meta_sync_manager();
+ auto sync = store->getRados()->get_meta_sync_manager();
if (sync == nullptr) {
ldout(s->cct, 1) << "no sync manager" << dendl;
http_ret = -ENOENT;
}
// read the bucket instance info for num_shards
- auto ctx = store->svc.sysobj->init_obj_ctx();
+ auto ctx = store->svc()->sysobj->init_obj_ctx();
RGWBucketInfo info;
- http_ret = store->get_bucket_instance_info(ctx, bucket, info, nullptr, nullptr, s->yield);
+ http_ret = store->getRados()->get_bucket_instance_info(ctx, bucket, info, nullptr, nullptr, s->yield);
if (http_ret < 0) {
ldpp_dout(s, 4) << "failed to read bucket info: " << cpp_strerror(http_ret) << dendl;
return;
void RGWOp_DATALog_Status::execute()
{
const auto source_zone = s->info.args.get("source-zone");
- auto sync = store->get_data_sync_manager(source_zone);
+ auto sync = store->getRados()->get_data_sync_manager(source_zone);
if (sync == nullptr) {
ldout(s->cct, 1) << "no sync manager for source-zone " << source_zone << dendl;
http_ret = -ENOENT;
frame_metadata_key(s, metadata_key);
- auto meta_mgr = store->ctl.meta.mgr;
+ auto meta_mgr = store->ctl()->meta.mgr;
/* Get keys */
http_ret = meta_mgr->get(metadata_key, s->formatter, s->yield);
marker = "3:bf885d8f:root::sorry_janefonda_665:head";
*/
- http_ret = store->ctl.meta.mgr->list_keys_init(metadata_key, marker, &handle);
+ http_ret = store->ctl()->meta.mgr->list_keys_init(metadata_key, marker, &handle);
if (http_ret < 0) {
dout(5) << "ERROR: can't get key: " << cpp_strerror(http_ret) << dendl;
return;
s->formatter->open_array_section("keys");
- auto meta_mgr = store->ctl.meta.mgr;
+ auto meta_mgr = store->ctl()->meta.mgr;
uint64_t left;
do {
}
}
- http_ret = store->ctl.meta.mgr->put(metadata_key, bl, s->yield, sync_type,
+ http_ret = store->ctl()->meta.mgr->put(metadata_key, bl, s->yield, sync_type,
&ondisk_version);
if (http_ret < 0) {
dout(5) << "ERROR: can't put key: " << cpp_strerror(http_ret) << dendl;
string metadata_key;
frame_metadata_key(s, metadata_key);
- http_ret = store->ctl.meta.mgr->remove(metadata_key, s->yield);
+ http_ret = store->ctl()->meta.mgr->remove(metadata_key, s->yield);
if (http_ret < 0) {
dout(5) << "ERROR: can't remove key: " << cpp_strerror(http_ret) << dendl;
return;
period.set_id(period_id);
period.set_epoch(epoch);
- http_ret = period.init(store->ctx(), store->svc.sysobj, realm_id, realm_name);
+ http_ret = period.init(store->ctx(), store->svc()->sysobj, realm_id, realm_name);
if (http_ret < 0)
ldout(store->ctx(), 5) << "failed to read period" << dendl;
}
auto cct = store->ctx();
// initialize the period without reading from rados
- period.init(cct, store->svc.sysobj, false);
+ period.init(cct, store->svc()->sysobj, false);
// decode the period from input
const auto max_size = cct->_conf->rgw_max_put_param_size;
}
// require period.realm_id to match our realm
- if (period.get_realm() != store->svc.zone->get_realm().get_id()) {
+ if (period.get_realm() != store->svc()->zone->get_realm().get_id()) {
error_stream << "period with realm id " << period.get_realm()
- << " doesn't match current realm " << store->svc.zone->get_realm().get_id() << std::endl;
+ << " doesn't match current realm " << store->svc()->zone->get_realm().get_id() << std::endl;
http_ret = -EINVAL;
return;
}
// period that we haven't restarted with yet. we also don't want to modify
// the objects in use by RGWRados
RGWRealm realm(period.get_realm());
- http_ret = realm.init(cct, store->svc.sysobj);
+ http_ret = realm.init(cct, store->svc()->sysobj);
if (http_ret < 0) {
lderr(cct) << "failed to read current realm: "
<< cpp_strerror(-http_ret) << dendl;
}
RGWPeriod current_period;
- http_ret = current_period.init(cct, store->svc.sysobj, realm.get_id());
+ http_ret = current_period.init(cct, store->svc()->sysobj, realm.get_id());
if (http_ret < 0) {
lderr(cct) << "failed to read current period: "
<< cpp_strerror(-http_ret) << dendl;
}
// if it's not period commit, nobody is allowed to push to the master zone
- if (period.get_master_zone() == store->svc.zone->get_zone_params().get_id()) {
+ if (period.get_master_zone() == store->svc()->zone->get_zone_params().get_id()) {
ldout(cct, 10) << "master zone rejecting period id="
<< period.get_id() << " epoch=" << period.get_epoch() << dendl;
http_ret = -EINVAL; // XXX: error code
return;
}
- auto period_history = store->svc.mdlog->get_period_history();
+ auto period_history = store->svc()->mdlog->get_period_history();
// decide whether we can set_current_period() or set_latest_epoch()
if (period.get_id() != current_period.get_id()) {
// read realm
realm.reset(new RGWRealm(id, name));
- http_ret = realm->init(g_ceph_context, store->svc.sysobj);
+ http_ret = realm->init(g_ceph_context, store->svc()->sysobj);
if (http_ret < 0)
lderr(store->ctx()) << "failed to read realm id=" << id
<< " name=" << name << dendl;
{
{
// read default realm
- RGWRealm realm(store->ctx(), store->svc.sysobj);
+ RGWRealm realm(store->ctx(), store->svc()->sysobj);
[[maybe_unused]] int ret = realm.read_default_id(default_id);
}
- http_ret = store->svc.zone->list_realms(realms);
+ http_ret = store->svc()->zone->list_realms(realms);
if (http_ret < 0)
lderr(store->ctx()) << "failed to list realms" << dendl;
}
}
string role_name = s->info.args.get("RoleName");
- RGWRole role(s->cct, store->pctl, role_name, s->user->user_id.tenant);
+ RGWRole role(s->cct, store->getRados()->pctl, role_name, s->user->user_id.tenant);
if (op_ret = role.get(); op_ret < 0) {
if (op_ret == -ENOENT) {
op_ret = -ERR_NO_ROLE_FOUND;
if (op_ret < 0) {
return;
}
- RGWRole role(s->cct, store->pctl, role_name, role_path, trust_policy,
+ RGWRole role(s->cct, store->getRados()->pctl, role_name, role_path, trust_policy,
s->user->user_id.tenant, max_session_duration);
op_ret = role.create(true);
if (op_ret < 0) {
return;
}
- RGWRole role(s->cct, store->pctl, role_name, s->user->user_id.tenant);
+ RGWRole role(s->cct, store->getRados()->pctl, role_name, s->user->user_id.tenant);
op_ret = role.get();
if (op_ret == -ENOENT) {
return;
}
vector<RGWRole> result;
- op_ret = RGWRole::get_roles_by_path_prefix(store, s->cct, path_prefix, s->user->user_id.tenant, result);
+ op_ret = RGWRole::get_roles_by_path_prefix(store->getRados(), s->cct, path_prefix, s->user->user_id.tenant, result);
if (op_ret == 0) {
s->formatter->open_array_section("ListRolesResponse");
ldout(s->cct, 20) << "Read " << obj_tags.count() << "tags" << dendl;
// forward bucket tags requests to meta master zone
- if (!store->svc.zone->is_meta_master()) {
+ if (!store->svc()->zone->is_meta_master()) {
/* only need to keep this data around if we're not meta master */
in_data = std::move(data);
}
RGWZoneGroup zonegroup;
string api_name;
- int ret = store->svc.zone->get_zonegroup(s->bucket_info.zonegroup, zonegroup);
+ int ret = store->svc()->zone->get_zonegroup(s->bucket_info.zonegroup, zonegroup);
if (ret >= 0) {
api_name = zonegroup.api_name;
} else {
return -EINVAL;
}
- if (!store->svc.zone->is_meta_master()) {
+ if (!store->svc()->zone->is_meta_master()) {
/* only need to keep this data around if we're not meta master */
in_data.append(data);
}
dump_start(s);
}
-static int create_s3_policy(struct req_state *s, RGWRados *store,
+static int create_s3_policy(struct req_state *s, rgw::sal::RGWRadosStore *store,
RGWAccessControlPolicy_S3& s3policy,
ACLOwner& owner)
{
if (!s->canned_acl.empty())
return -ERR_INVALID_REQUEST;
- return s3policy.create_from_headers(store->ctl.user, s->info.env, owner);
+ return s3policy.create_from_headers(store->ctl()->user, s->info.env, owner);
}
return s3policy.create_canned(owner, s->bucket_owner, s->canned_acl);
return ret;
}
}
- ret = store->get_bucket_info(*s->sysobj_ctx,
+ ret = store->getRados()->get_bucket_info(*s->sysobj_ctx,
copy_source_tenant_name,
copy_source_bucket_name,
copy_source_bucket_info,
end_header(s, this);
}
-static inline int get_obj_attrs(RGWRados *store, struct req_state *s, rgw_obj& obj, map<string, bufferlist>& attrs)
+static inline int get_obj_attrs(rgw::sal::RGWRadosStore *store, struct req_state *s, rgw_obj& obj, map<string, bufferlist>& attrs)
{
- RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
+ RGWRados::Object op_target(store->getRados(), s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
RGWRados::Object::Read read_op(&op_target);
read_op.params.attrs = &attrs;
return ret;
}
-int RGWPutACLs_ObjStore_S3::get_policy_from_state(RGWRados *store,
+int RGWPutACLs_ObjStore_S3::get_policy_from_state(rgw::sal::RGWRadosStore *store,
struct req_state *s,
stringstream& ss)
{
}
// forward bucket cors requests to meta master zone
- if (!store->svc.zone->is_meta_master()) {
+ if (!store->svc()->zone->is_meta_master()) {
/* only need to keep this data around if we're not meta master */
in_data.append(data);
}
f.open_array_section("data_location");
for (auto miter = manifest->obj_begin(); miter != manifest->obj_end(); ++miter) {
f.open_object_section("obj");
- rgw_raw_obj raw_loc = miter.get_location().get_raw_obj(store);
+ rgw_raw_obj raw_loc = miter.get_location().get_raw_obj(store->getRados());
uint64_t ofs = miter.get_ofs();
uint64_t left = manifest->get_obj_size() - ofs;
::encode_json("ofs", miter.get_ofs(), &f);
return 0;
}
-static int verify_mfa(RGWRados *store, RGWUserInfo *user, const string& mfa_str, bool *verified, const DoutPrefixProvider *dpp)
+static int verify_mfa(rgw::sal::RGWRadosStore *store, RGWUserInfo *user, const string& mfa_str, bool *verified, const DoutPrefixProvider *dpp)
{
vector<string> params;
get_str_vec(mfa_str, " ", params);
return -EACCES;
}
- int ret = store->svc.cls->mfa.check_mfa(user->user_id, serial, pin, null_yield);
+ int ret = store->svc()->cls->mfa.check_mfa(user->user_id, serial, pin, null_yield);
if (ret < 0) {
ldpp_dout(dpp, 20) << "NOTICE: failed to check MFA, serial=" << serial << dendl;
return -EACCES;
return 0;
}
-int RGWHandler_REST_S3::init(RGWRados *store, struct req_state *s,
+int RGWHandler_REST_S3::init(rgw::sal::RGWRadosStore *store, struct req_state *s,
rgw::io::BasicClient *cio)
{
int ret;
* it tries AWS v4 before AWS v2
*/
int RGW_Auth_S3::authorize(const DoutPrefixProvider *dpp,
- RGWRados* const store,
+ rgw::sal::RGWRadosStore* const store,
const rgw::auth::StrategyRegistry& auth_registry,
struct req_state* const s)
{
return ret;
}
-int RGWHandler_Auth_S3::init(RGWRados *store, struct req_state *state,
+int RGWHandler_Auth_S3::init(rgw::sal::RGWRadosStore *store, struct req_state *state,
rgw::io::BasicClient *cio)
{
int ret = RGWHandler_REST_S3::init_from_header(state, RGW_FORMAT_JSON,
obj_ctx.set_prefetch_data(obj);
RGWObjState* state = nullptr;
- if (store->get_obj_state(&obj_ctx, s->bucket_info, obj, &state, false, s->yield) < 0) {
+ if (store->getRados()->get_obj_state(&obj_ctx, s->bucket_info, obj, &state, false, s->yield) < 0) {
return false;
}
if (! state->exists) {
return state->exists;
}
-int RGWHandler_REST_S3Website::init(RGWRados *store, req_state *s,
+int RGWHandler_REST_S3Website::init(rgw::sal::RGWRadosStore *store, req_state *s,
rgw::io::BasicClient* cio)
{
// save the original object name before retarget() replaces it with the
if (!(s->prot_flags & RGW_REST_WEBSITE))
return 0;
- int ret = store->get_bucket_info(*s->sysobj_ctx, s->bucket_tenant,
+ int ret = store->getRados()->get_bucket_info(*s->sysobj_ctx, s->bucket_tenant,
s->bucket_name, s->bucket_info, NULL,
s->yield, &s->bucket_attrs);
if (ret < 0) {
#include "rgw_auth.h"
#include "rgw_auth_filters.h"
#include "rgw_sts.h"
+#include "rgw_sal.h"
struct rgw_http_error {
int http_ret;
RGWPutACLs_ObjStore_S3() {}
~RGWPutACLs_ObjStore_S3() override {}
- int get_policy_from_state(RGWRados *store, struct req_state *s, stringstream& ss) override;
+ int get_policy_from_state(rgw::sal::RGWRadosStore *store, struct req_state *s, stringstream& ss) override;
void send_response() override;
int get_params() override;
};
class RGW_Auth_S3 {
public:
static int authorize(const DoutPrefixProvider *dpp,
- RGWRados *store,
+ rgw::sal::RGWRadosStore *store,
const rgw::auth::StrategyRegistry& auth_registry,
struct req_state *s);
};
static int validate_bucket_name(const string& bucket);
static int validate_object_name(const string& bucket);
- int init(RGWRados *store,
+ int init(rgw::sal::RGWRadosStore *store,
struct req_state *s,
rgw::io::BasicClient *cio) override;
int authorize(const DoutPrefixProvider *dpp) override {
}
~RGWHandler_REST_S3() override = default;
- int init(RGWRados *store,
+ int init(rgw::sal::RGWRadosStore *store,
struct req_state *s,
rgw::io::BasicClient *cio) override;
int authorize(const DoutPrefixProvider *dpp) override;
using RGWHandler_REST_S3::RGWHandler_REST_S3;
~RGWHandler_REST_S3Website() override = default;
- int init(RGWRados *store, req_state *s, rgw::io::BasicClient* cio) override;
+ int init(rgw::sal::RGWRadosStore *store, req_state *s, rgw::io::BasicClient* cio) override;
int error_handler(int err_no, string *error_content) override;
};
}
int RGW_Auth_STS::authorize(const DoutPrefixProvider *dpp,
- RGWRados *store,
+ rgw::sal::RGWRadosStore *store,
const rgw::auth::StrategyRegistry& auth_registry,
struct req_state *s)
{
return nullptr;
}
-int RGWHandler_REST_STS::init(RGWRados *store,
+int RGWHandler_REST_STS::init(rgw::sal::RGWRadosStore *store,
struct req_state *s,
rgw::io::BasicClient *cio)
{
class RGW_Auth_STS {
public:
static int authorize(const DoutPrefixProvider *dpp,
- RGWRados *store,
+ rgw::sal::RGWRadosStore *store,
const rgw::auth::StrategyRegistry& auth_registry,
struct req_state *s);
};
post_body(post_body) {}
~RGWHandler_REST_STS() override = default;
- int init(RGWRados *store,
+ int init(rgw::sal::RGWRadosStore *store,
struct req_state *s,
rgw::io::BasicClient *cio) override;
int authorize(const DoutPrefixProvider* dpp) override;
void RGWStatAccount_ObjStore_SWIFT::execute()
{
RGWStatAccount_ObjStore::execute();
- op_ret = store->ctl.user->get_attrs_by_uid(s->user->user_id, &attrs, s->yield);
+ op_ret = store->ctl()->user->get_attrs_by_uid(s->user->user_id, &attrs, s->yield);
}
void RGWStatAccount_ObjStore_SWIFT::send_response()
}
static int get_swift_container_settings(req_state * const s,
- RGWRados * const store,
+ rgw::sal::RGWRadosStore * const store,
RGWAccessControlPolicy * const policy,
bool * const has_policy,
uint32_t * rw_mask,
if (read_list || write_list) {
RGWAccessControlPolicy_SWIFT swift_policy(s->cct);
- const auto r = swift_policy.create(store->ctl.user,
+ const auto r = swift_policy.create(store->ctl()->user,
s->user->user_id,
s->user->display_name,
read_list,
policy.create_default(s->user->user_id, s->user->display_name);
}
- location_constraint = store->svc.zone->get_zonegroup().api_name;
+ location_constraint = store->svc()->zone->get_zonegroup().api_name;
get_rmattrs_from_headers(s, CONT_PUT_ATTR_PREFIX,
CONT_REMOVE_ATTR_PREFIX, rmattr_names);
placement_rule.init(s->info.env->get("HTTP_X_STORAGE_POLICY", ""), s->info.storage_class);
if (bucket_name.compare(s->bucket.name) != 0) {
RGWBucketInfo bucket_info;
map<string, bufferlist> bucket_attrs;
- auto obj_ctx = store->svc.sysobj->init_obj_ctx();
- r = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
+ auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
+ r = store->getRados()->get_bucket_info(obj_ctx, s->user->user_id.tenant,
bucket_name, bucket_info, nullptr,
s->yield, &bucket_attrs);
if (r < 0) {
RGWObjectCtx obj_ctx(store);
obj_ctx.set_atomic(slo_seg);
- RGWRados::Object op_target(store, s->bucket_info, obj_ctx, slo_seg);
+ RGWRados::Object op_target(store->getRados(), s->bucket_info, obj_ctx, slo_seg);
RGWRados::Object::Read read_op(&op_target);
bool compressed;
}
static int get_swift_account_settings(req_state * const s,
- RGWRados * const store,
+ rgw::sal::RGWRadosStore * const store,
RGWAccessControlPolicy_SWIFTAcct * const policy,
bool * const has_policy)
{
const char * const acl_attr = s->info.env->get("HTTP_X_ACCOUNT_ACCESS_CONTROL");
if (acl_attr) {
RGWAccessControlPolicy_SWIFTAcct swift_acct_policy(s->cct);
- const bool r = swift_acct_policy.create(store->ctl.user,
+ const bool r = swift_acct_policy.create(store->ctl()->user,
s->user->user_id,
s->user->display_name,
string(acl_attr));
s->formatter->close_section();
}
else {
- pair.second.list_data(*(s->formatter), s->cct->_conf, *store);
+ pair.second.list_data(*(s->formatter), s->cct->_conf, *store->getRados());
}
}
}
-void RGWFormPost::init(RGWRados* const store,
+void RGWFormPost::init(rgw::sal::RGWRadosStore* const store,
req_state* const s,
RGWHandler* const dialect_handler)
{
* now. It will be initialized in RGWHandler_REST_SWIFT::postauth_init(). */
const string& bucket_name = s->init_state.url_bucket;
- auto user_ctl = store->ctl.user;
+ auto user_ctl = store->ctl()->user;
/* TempURL in Formpost only requires that bucket name is specified. */
if (bucket_name.empty()) {
/* Need to get user info of bucket owner. */
RGWBucketInfo bucket_info;
- int ret = store->get_bucket_info(*s->sysobj_ctx,
+ int ret = store->getRados()->get_bucket_info(*s->sysobj_ctx,
bucket_tenant, bucket_name,
bucket_info, nullptr, s->yield);
if (ret < 0) {
class RGWGetErrorPage : public RGWGetObj_ObjStore_SWIFT {
public:
- RGWGetErrorPage(RGWRados* const store,
+ RGWGetErrorPage(rgw::sal::RGWRadosStore* const store,
RGWHandler_REST* const handler,
req_state* const s,
const int http_ret) {
obj_ctx.set_prefetch_data(obj);
RGWObjState* state = nullptr;
- if (store->get_obj_state(&obj_ctx, s->bucket_info, obj, &state, false, s->yield) < 0) {
+ if (store->getRados()->get_obj_state(&obj_ctx, s->bucket_info, obj, &state, false, s->yield) < 0) {
return false;
}
obj_ctx.set_prefetch_data(obj);
RGWObjState* state = nullptr;
- if (store->get_obj_state(&obj_ctx, s->bucket_info, obj, &state, false, s->yield) < 0) {
+ if (store->getRados()->get_obj_state(&obj_ctx, s->bucket_info, obj, &state, false, s->yield) < 0) {
return false;
}
return 0;
}
-int RGWHandler_REST_SWIFT::init(RGWRados* store, struct req_state* s,
+int RGWHandler_REST_SWIFT::init(rgw::sal::RGWRadosStore* store, struct req_state* s,
rgw::io::BasicClient *cio)
{
struct req_init_state *t = &s->init_state;
RGWFormPost() = default;
~RGWFormPost() = default;
- void init(RGWRados* store,
+ void init(rgw::sal::RGWRadosStore* store,
req_state* s,
RGWHandler* dialect_handler) override;
class RGWSwiftWebsiteHandler {
- RGWRados* const store;
+ rgw::sal::RGWRadosStore* const store;
req_state* const s;
RGWHandler_REST* const handler;
RGWOp* get_ws_index_op();
RGWOp* get_ws_listing_op();
public:
- RGWSwiftWebsiteHandler(RGWRados* const store,
+ RGWSwiftWebsiteHandler(rgw::sal::RGWRadosStore* const store,
req_state* const s,
RGWHandler_REST* const handler)
: store(store),
int validate_bucket_name(const string& bucket);
- int init(RGWRados *store, struct req_state *s, rgw::io::BasicClient *cio) override;
+ int init(rgw::sal::RGWRadosStore *store, struct req_state *s, rgw::io::BasicClient *cio) override;
int authorize(const DoutPrefixProvider *dpp) override;
int postauth_init() override;
return website_handler->retarget_bucket(op, new_op);
}
- int init(RGWRados* const store,
+ int init(rgw::sal::RGWRadosStore* const store,
struct req_state* const s,
rgw::io::BasicClient* const cio) override {
website_handler = boost::in_place<RGWSwiftWebsiteHandler>(store, s, this);
return website_handler->retarget_object(op, new_op);
}
- int init(RGWRados* const store,
+ int init(rgw::sal::RGWRadosStore* const store,
struct req_state* const s,
rgw::io::BasicClient* const cio) override {
website_handler = boost::in_place<RGWSwiftWebsiteHandler>(store, s, this);
return new RGWGetCrossDomainPolicy_ObjStore_SWIFT();
}
- int init(RGWRados* const store,
+ int init(rgw::sal::RGWRadosStore* const store,
struct req_state* const state,
rgw::io::BasicClient* const cio) override {
state->dialect = "swift";
return new RGWGetHealthCheck_ObjStore_SWIFT();
}
- int init(RGWRados* const store,
+ int init(rgw::sal::RGWRadosStore* const store,
struct req_state* const state,
rgw::io::BasicClient* const cio) override {
state->dialect = "swift";
return new RGWInfo_ObjStore_SWIFT();
}
- int init(RGWRados* const store,
+ int init(rgw::sal::RGWRadosStore* const store,
struct req_state* const state,
rgw::io::BasicClient* const cio) override {
state->dialect = "swift";
}
}
- http_ret = RGWUsage::show(store, uid, bucket_name, start, end, show_entries, show_summary, &categories, flusher);
+ http_ret = RGWUsage::show(store->getRados(), uid, bucket_name, start, end, show_entries, show_summary, &categories, flusher);
}
class RGWOp_Usage_Delete : public RGWRESTOp {
}
}
- http_ret = RGWUsage::trim(store, uid, bucket_name, start, end);
+ http_ret = RGWUsage::trim(store->getRados(), uid, bucket_name, start, end);
}
RGWOp *RGWHandler_Usage::op_get()
RGWUserInfo info;
rgw_user user_id(user_name);
- op_ret = store->ctl.user->get_info_by_uid(user_id, &info, s->yield);
+ op_ret = store->ctl()->user->get_info_by_uid(user_id, &info, s->yield);
if (op_ret < 0) {
op_ret = -ERR_NO_SUCH_ENTITY;
return;
}
map<string, bufferlist> uattrs;
- op_ret = store->ctl.user->get_attrs_by_uid(user_id, &uattrs, s->yield);
+ op_ret = store->ctl()->user->get_attrs_by_uid(user_id, &uattrs, s->yield);
if (op_ret == -ENOENT) {
op_ret = -ERR_NO_SUCH_ENTITY;
return;
uattrs[RGW_ATTR_USER_POLICY] = in_bl;
RGWObjVersionTracker objv_tracker;
- op_ret = store->ctl.user->store_info(info, s->yield,
+ op_ret = store->ctl()->user->store_info(info, s->yield,
RGWUserCtl::PutParams()
.set_objv_tracker(&objv_tracker)
.set_attrs(&uattrs));
rgw_user user_id(user_name);
map<string, bufferlist> uattrs;
- op_ret = store->ctl.user->get_attrs_by_uid(user_id, &uattrs, s->yield);
+ op_ret = store->ctl()->user->get_attrs_by_uid(user_id, &uattrs, s->yield);
if (op_ret == -ENOENT) {
ldout(s->cct, 0) << "ERROR: attrs not found for user" << user_name << dendl;
op_ret = -ERR_NO_SUCH_ENTITY;
rgw_user user_id(user_name);
map<string, bufferlist> uattrs;
- op_ret = store->ctl.user->get_attrs_by_uid(user_id, &uattrs, s->yield);
+ op_ret = store->ctl()->user->get_attrs_by_uid(user_id, &uattrs, s->yield);
if (op_ret == -ENOENT) {
ldout(s->cct, 0) << "ERROR: attrs not found for user" << user_name << dendl;
op_ret = -ERR_NO_SUCH_ENTITY;
RGWUserInfo info;
map<string, bufferlist> uattrs;
rgw_user user_id(user_name);
- op_ret = store->ctl.user->get_info_by_uid(user_id, &info, s->yield,
+ op_ret = store->ctl()->user->get_info_by_uid(user_id, &info, s->yield,
RGWUserCtl::GetParams()
.set_attrs(&uattrs));
if (op_ret < 0) {
uattrs[RGW_ATTR_USER_POLICY] = in_bl;
RGWObjVersionTracker objv_tracker;
- op_ret = store->ctl.user->store_info(info, s->yield,
+ op_ret = store->ctl()->user->store_info(info, s->yield,
RGWUserCtl::PutParams()
.set_old_info(&info)
.set_objv_tracker(&objv_tracker)
--- /dev/null
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright (C) 2019 Red Hat, Inc.
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#include <errno.h>
+#include <stdlib.h>
+#include <system_error>
+#include <unistd.h>
+#include <sstream>
+
+#include "common/errno.h"
+
+#include "rgw_sal.h"
+
+#define dout_subsys ceph_subsys_rgw
+
+namespace rgw::sal {
+
+// Return this bucket's object handle, lazily allocating an
+// RGWRadosObject for `key` on first call.
+// NOTE(review): the bucket caches a single object; if one was already
+// created, `key` is ignored and the cached handle is returned -- confirm
+// this single-slot design is intentional in this initial SAL version.
+// The allocation is never freed by the bucket; ownership/lifetime of the
+// returned pointer needs clarifying (TODO).
+RGWObject *RGWRadosBucket::create_object(const rgw_obj_key &key)
+{
+  if (!object) {
+    object = new RGWRadosObject(store, key);
+  }
+
+  return object;
+}
+
+// Return the store's user handle, lazily allocating an RGWRadosUser
+// for `u` on first call.
+// NOTE(review): single-slot cache -- after the first call, `u` is
+// ignored and the previously created user is returned.  The handle is
+// deleted in the RGWRadosStore destructor.
+RGWUser *RGWRadosStore::get_user(const rgw_user &u)
+{
+  if (!user) {
+    user = new RGWRadosUser(this, u);
+  }
+
+  return user;
+}
+
+// Return the store's bucket handle, lazily allocating an RGWRadosBucket
+// bound to user `u` and cls bucket `b` on first call.
+// NOTE(review): single-slot cache -- after the first call, `u` and `b`
+// are ignored and the previously created bucket is returned.  The handle
+// is deleted in the RGWRadosStore destructor.
+RGWSalBucket *RGWRadosStore::create_bucket(RGWUser &u, const cls_user_bucket &b)
+{
+  if (!bucket) {
+    bucket = new RGWRadosBucket(this, u, b);
+  }
+
+  return bucket;
+}
+
+// Shut down the wrapped RGWRados instance, if one was attached via
+// setRados().  Does not delete it; deallocation happens in the
+// RGWRadosStore destructor (see close_storage()).
+void RGWRadosStore::finalize(void) {
+  if (rados)
+    rados->finalize();
+}
+
+} // namespace rgw::sal
+
+/* Construct a fully initialized RGWRadosStore wrapping a fresh RGWRados.
+ * The boolean flags select which RGWRados background threads to run and
+ * whether the metadata cache is enabled.  Returns nullptr if RGWRados
+ * initialization fails; deleting the store also deletes the embedded
+ * RGWRados instance (see ~RGWRadosStore). */
+rgw::sal::RGWRadosStore *RGWStoreManager::init_storage_provider(CephContext *cct, bool use_gc_thread, bool use_lc_thread, bool quota_threads, bool run_sync_thread, bool run_reshard_thread, bool use_cache)
+{
+  RGWRados *rados = new RGWRados;
+  rgw::sal::RGWRadosStore *store = new rgw::sal::RGWRadosStore();
+
+  /* cross-link store and rados so each can reach the other */
+  store->setRados(rados);
+  rados->set_store(store);
+
+  if ((*rados).set_use_cache(use_cache)
+              .set_run_gc_thread(use_gc_thread)
+              .set_run_lc_thread(use_lc_thread)
+              .set_run_quota_threads(quota_threads)
+              .set_run_sync_thread(run_sync_thread)
+              .set_run_reshard_thread(run_reshard_thread)
+              .initialize(cct) < 0) {
+    delete store; /* also deletes rados */
+    return nullptr; /* nullptr, for consistency with init_raw_storage_provider */
+  }
+
+  return store;
+}
+
+/* Construct a "raw" RGWRadosStore: services and the RADOS connection are
+ * initialized, but no background threads or caches are started.  Returns
+ * nullptr on failure; deleting the store also deletes the embedded
+ * RGWRados instance. */
+rgw::sal::RGWRadosStore *RGWStoreManager::init_raw_storage_provider(CephContext *cct)
+{
+  RGWRados *rados = new RGWRados;
+  rgw::sal::RGWRadosStore *store = new rgw::sal::RGWRadosStore();
+
+  /* cross-link store and rados so each can reach the other */
+  store->setRados(rados);
+  rados->set_store(store);
+
+  rados->set_context(cct);
+
+  /* raw mode: init services only (init_svc(true)) */
+  int ret = rados->init_svc(true);
+  if (ret < 0) {
+    ldout(cct, 0) << "ERROR: failed to init services (ret=" << cpp_strerror(-ret) << ")" << dendl;
+    delete store;
+    return nullptr;
+  }
+
+  if (rados->init_rados() < 0) {
+    delete store;
+    return nullptr;
+  }
+
+  return store;
+}
+
+/* Tear down a store created by init_storage_provider() /
+ * init_raw_storage_provider(): finalize the backend, then free the
+ * store (which also frees the embedded RGWRados).  Safe to call with
+ * nullptr. */
+void RGWStoreManager::close_storage(rgw::sal::RGWRadosStore *store)
+{
+  if (!store)
+    return;
+
+  store->finalize();
+
+  delete store;
+}
--- /dev/null
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab ft=cpp
+
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright (C) 2019 Red Hat, Inc.
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#pragma once
+
+#include "rgw_rados.h"
+#include "rgw_user.h"
+
+namespace rgw { namespace sal {
+
+#define RGW_SAL_VERSION 1
+
+class RGWUser;
+class RGWSalBucket;
+class RGWObject;
+
+typedef std::vector<RGWSalBucket> RGWBucketList;
+typedef std::map<string, string> RGWAttrs;
+
+/* Abstract top-level interface of the Store Abstraction Layer (SAL).
+ * A concrete backend (e.g. RGWRadosStore below) provides user, bucket
+ * and object handles on top of it.  Ownership of the returned pointers
+ * is backend-defined in this initial version -- see the concrete
+ * implementations. */
+class RGWStore {
+ public:
+ RGWStore() {}
+ virtual ~RGWStore() = default;
+
+ virtual RGWUser* get_user(const rgw_user &u) = 0;
+ virtual RGWSalBucket* get_bucket(RGWUser &u, const cls_user_bucket &b) = 0;
+ virtual RGWSalBucket* create_bucket(RGWUser &u, const cls_user_bucket &b) = 0;
+ virtual RGWBucketList* list_buckets(void) = 0;
+
+ /* release backend resources; called before destruction */
+ virtual void finalize(void)=0;
+
+ virtual CephContext *ctx(void)=0;
+};
+
+/* Abstract SAL user: wraps an rgw_user identity.
+ * NOTE(review): the single-argument constructor is not explicit, so
+ * rgw_user converts implicitly to RGWUser -- confirm that is intended. */
+class RGWUser {
+ protected:
+ rgw_user user;
+
+ public:
+ RGWUser() : user() {}
+ RGWUser(const rgw_user &_u) : user(_u) {}
+ virtual ~RGWUser() = default;
+
+ virtual RGWBucketList* list_buckets(void) = 0;
+};
+
+/* Abstract SAL bucket: wraps a cls_user_bucket and exposes object
+ * creation/lookup, attribute and ACL access, and deletion.  Named
+ * RGWSalBucket (rather than RGWBucket) to avoid clashing with the
+ * pre-existing RGWBucket type elsewhere in RGW. */
+class RGWSalBucket {
+ protected:
+ cls_user_bucket ub;
+
+ public:
+ RGWSalBucket() : ub() {}
+ RGWSalBucket(const cls_user_bucket &_b) : ub(_b) {}
+ virtual ~RGWSalBucket() = default;
+
+ virtual RGWObject* get_object(const rgw_obj_key &key) = 0;
+ virtual RGWBucketList* list(void) = 0;
+ virtual RGWObject* create_object(const rgw_obj_key &key /* Attributes */) = 0;
+ virtual RGWAttrs& get_attrs(void) = 0;
+ virtual int set_attrs(RGWAttrs &attrs) = 0;
+ virtual int delete_bucket(void) = 0;
+ virtual RGWAccessControlPolicy& get_acl(void) = 0;
+ virtual int set_acl(const RGWAccessControlPolicy &acl) = 0;
+};
+
+/* Abstract SAL object: wraps an rgw_obj_key and exposes stream I/O,
+ * attribute and ACL access, and deletion. */
+class RGWObject {
+ protected:
+ rgw_obj_key key;
+
+ public:
+ RGWObject() : key() {}
+ RGWObject(const rgw_obj_key &_k) : key(_k) {}
+ virtual ~RGWObject() = default;
+
+ virtual int read(off_t offset, off_t length, std::iostream &stream) = 0;
+ virtual int write(off_t offset, off_t length, std::iostream &stream) = 0;
+ virtual RGWAttrs& get_attrs(void) = 0;
+ virtual int set_attrs(RGWAttrs &attrs) = 0;
+ virtual int delete_object(void) = 0;
+ virtual RGWAccessControlPolicy& get_acl(void) = 0;
+ virtual int set_acl(const RGWAccessControlPolicy &acl) = 0;
+};
+
+
+class RGWRadosStore;
+
+/* RADOS-backed SAL user.  Holds a non-owning pointer back to its
+ * RGWRadosStore.  list_buckets() is a stub in this initial version. */
+class RGWRadosUser : public RGWUser {
+  private:
+    RGWRadosStore *store;
+
+  public:
+    RGWRadosUser(RGWRadosStore *_st, const rgw_user &_u) : RGWUser(_u), store(_st) { }
+    /* default-construct with no store attached (was left uninitialized) */
+    RGWRadosUser() : store(nullptr) {}
+
+    /* stub: caller owns the returned (empty) list */
+    RGWBucketList* list_buckets(void) override { return new RGWBucketList(); }
+};
+
+/* RADOS-backed SAL object.  read/write/delete are stubs in this
+ * initial version; attributes and ACLs are held locally in the handle.
+ * `store` is a non-owning back-pointer to the RGWRadosStore. */
+class RGWRadosObject : public RGWObject {
+  private:
+    RGWRadosStore *store;
+    RGWAttrs attrs;
+    RGWAccessControlPolicy acls;
+
+  public:
+    /* default-construct with no store attached (was left uninitialized) */
+    RGWRadosObject()
+      : store(nullptr),
+        attrs(),
+        acls() {
+    }
+
+    RGWRadosObject(RGWRadosStore *_st, const rgw_obj_key &_k)
+      : RGWObject(_k),
+        store(_st),
+        attrs(),
+        acls() {
+    }
+
+    /* stub I/O: reports the requested length without touching RADOS */
+    int read(off_t offset, off_t length, std::iostream &stream) override { return length; }
+    int write(off_t offset, off_t length, std::iostream &stream) override { return length; }
+    RGWAttrs& get_attrs(void) override { return attrs; }
+    int set_attrs(RGWAttrs &a) override { attrs = a; return 0; }
+    int delete_object(void) override { return 0; }
+    RGWAccessControlPolicy& get_acl(void) override { return acls; }
+    int set_acl(const RGWAccessControlPolicy &acl) override { acls = acl; return 0; }
+};
+
+/* RADOS-backed SAL bucket.  Most operations are stubs in this initial
+ * version; attributes and ACLs are held locally.  `store` is a
+ * non-owning back-pointer.  `object` is a lazily created single-slot
+ * object handle (see create_object() in rgw_sal.cc).
+ * TODO(review): `object` is heap-allocated in create_object() but never
+ * freed here -- clarify ownership or delete it in a destructor. */
+class RGWRadosBucket : public RGWSalBucket {
+  private:
+    RGWRadosStore *store;
+    RGWRadosObject *object;
+    RGWAttrs attrs;
+    RGWAccessControlPolicy acls;
+    RGWRadosUser user;
+
+  public:
+    /* default-construct with no store attached (was left uninitialized) */
+    RGWRadosBucket()
+      : store(nullptr),
+        object(nullptr),
+        attrs(),
+        acls(),
+        user() {
+    }
+
+    /* NOTE: _u must actually be an RGWRadosUser; dynamic_cast throws
+     * std::bad_cast otherwise */
+    RGWRadosBucket(RGWRadosStore *_st, RGWUser &_u, const cls_user_bucket &_b)
+      : RGWSalBucket(_b),
+        store(_st),
+        object(nullptr),
+        attrs(),
+        acls(),
+        user(dynamic_cast<RGWRadosUser&>(_u)) {
+    }
+
+    /* returns the cached handle (nullptr until create_object() is called);
+     * `key` is currently unused */
+    RGWObject* get_object(const rgw_obj_key &key) override { return object; }
+    /* stub: caller owns the returned (empty) list */
+    RGWBucketList* list(void) override { return new RGWBucketList(); }
+    RGWObject* create_object(const rgw_obj_key &key /* Attributes */) override;
+    RGWAttrs& get_attrs(void) override { return attrs; }
+    int set_attrs(RGWAttrs &a) override { attrs = a; return 0; }
+    int delete_bucket(void) override { return 0; }
+    RGWAccessControlPolicy& get_acl(void) override { return acls; }
+    int set_acl(const RGWAccessControlPolicy &acl) override { acls = acl; return 0; }
+};
+
+/* RADOS-backed SAL store: wraps (and owns) an RGWRados instance, plus
+ * single-slot cached user and bucket handles.  svc()/ctl() expose the
+ * wrapped RGWRados services/controllers so callers can migrate off
+ * direct RGWRados access incrementally. */
+class RGWRadosStore : public RGWStore {
+  private:
+    RGWRados *rados;
+    RGWRadosUser *user;
+    RGWRadosBucket *bucket;
+
+  public:
+    RGWRadosStore()
+      : rados(nullptr),
+        user(nullptr),
+        bucket(nullptr) {
+    }
+    /* owns rados and the cached handles; delete on nullptr is a no-op */
+    ~RGWRadosStore() {
+      delete bucket;
+      delete user;
+      delete rados;
+    }
+
+    RGWUser* get_user(const rgw_user &u) override;
+    /* returns the cached handle (nullptr until create_bucket() is
+     * called); `u` and `b` are currently unused */
+    RGWSalBucket* get_bucket(RGWUser &u, const cls_user_bucket &b) override { return bucket; }
+    RGWSalBucket* create_bucket(RGWUser &u, const cls_user_bucket &b) override;
+    /* stub: caller owns the returned (empty) list */
+    RGWBucketList* list_buckets(void) override { return new RGWBucketList(); }
+
+    /* takes ownership of st */
+    void setRados(RGWRados * st) { rados = st; }
+    RGWRados *getRados(void) { return rados; }
+
+    /* transitional accessors into the wrapped RGWRados */
+    RGWServices *svc() { return &rados->svc; }
+    RGWCtl *ctl() { return &rados->ctl; }
+
+    void finalize(void) override;
+
+    CephContext *ctx(void) override { return rados->ctx(); }
+};
+
+} } // namespace rgw::sal
+
+
+/* Factory for rgw::sal::RGWRadosStore instances.  get_storage() builds a
+ * fully initialized store (background threads per the flags);
+ * get_raw_storage() builds a minimal store (services + RADOS connection
+ * only).  Both return nullptr on failure; release with close_storage().
+ * Implementations live in rgw_sal.cc. */
+class RGWStoreManager {
+public:
+ RGWStoreManager() {}
+ static rgw::sal::RGWRadosStore *get_storage(CephContext *cct, bool use_gc_thread, bool use_lc_thread, bool quota_threads,
+ bool run_sync_thread, bool run_reshard_thread, bool use_cache = true) {
+ rgw::sal::RGWRadosStore *store = init_storage_provider(cct, use_gc_thread, use_lc_thread,
+ quota_threads, run_sync_thread, run_reshard_thread, use_cache);
+ return store;
+ }
+ static rgw::sal::RGWRadosStore *get_raw_storage(CephContext *cct) {
+ rgw::sal::RGWRadosStore *rados = init_raw_storage_provider(cct);
+ return rados;
+ }
+ static rgw::sal::RGWRadosStore *init_storage_provider(CephContext *cct, bool use_gc_thread, bool use_lc_thread, bool quota_threads, bool run_sync_thread, bool run_reshard_thread, bool use_metadata_cache);
+ static rgw::sal::RGWRadosStore *init_raw_storage_provider(CephContext *cct);
+ static void close_storage(rgw::sal::RGWRadosStore *store);
+
+};
#include "rgw_user.h"
#include "rgw_iam_policy.h"
#include "rgw_sts.h"
+#include "rgw_sal.h"
#define dout_subsys ceph_subsys_rgw
}
int AssumedRoleUser::generateAssumedRoleUser(CephContext* cct,
- RGWRados *store,
+ rgw::sal::RGWRadosStore *store,
const string& roleId,
const rgw::ARN& roleArn,
const string& roleSessionName)
if (auto r_arn = rgw::ARN::parse(arn); r_arn) {
auto pos = r_arn->resource.find_last_of('/');
string roleName = r_arn->resource.substr(pos + 1);
- RGWRole role(cct, store->pctl, roleName, r_arn->account);
+ RGWRole role(cct, store->getRados()->pctl, roleName, r_arn->account);
if (int ret = role.get(); ret < 0) {
if (ret == -ENOENT) {
ret = -ERR_NO_ROLE_FOUND;
{
int ret = 0;
RGWUserInfo info;
- if (ret = rgw_get_user_info_by_uid(store->ctl.user, user_id, info); ret < 0) {
+ if (ret = rgw_get_user_info_by_uid(store->ctl()->user, user_id, info); ret < 0) {
return -ERR_NO_SUCH_ENTITY;
}
info.assumed_role_arn = arn;
RGWObjVersionTracker objv_tracker;
- if (ret = rgw_store_user_info(store->ctl.user, info, &info, &objv_tracker, real_time(),
+ if (ret = rgw_store_user_info(store->ctl()->user, info, &info, &objv_tracker, real_time(),
false); ret < 0) {
return -ERR_INTERNAL_ERROR;
}
string assumeRoleId;
public:
int generateAssumedRoleUser( CephContext* cct,
- RGWRados *store,
+ rgw::sal::RGWRadosStore *store,
const string& roleId,
const rgw::ARN& roleArn,
const string& roleSessionName);
class STSService {
CephContext* cct;
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
rgw_user user_id;
RGWRole role;
rgw::auth::Identity* identity;
int storeARN(string& arn);
public:
STSService() = default;
- STSService(CephContext* cct, RGWRados *store, rgw_user user_id, rgw::auth::Identity* identity) : cct(cct), store(store), user_id(user_id), identity(identity) {}
+ STSService(CephContext* cct, rgw::sal::RGWRadosStore *store, rgw_user user_id, rgw::auth::Identity* identity) : cct(cct), store(store), user_id(user_id), identity(identity) {}
std::tuple<int, RGWRole> getRoleInfo(const string& arn);
AssumeRoleResponse assumeRole(AssumeRoleRequest& req);
GetSessionTokenResponse getSessionToken(GetSessionTokenRequest& req);
user_str = user;
- if ((ret = store->ctl.user->get_info_by_swift(user_str, &info, s->yield)) < 0)
+ if ((ret = store->ctl()->user->get_info_by_swift(user_str, &info, s->yield)) < 0)
{
ret = -EACCES;
goto done;
end_header(s);
}
-int RGWHandler_SWIFT_Auth::init(RGWRados *store, struct req_state *state,
+int RGWHandler_SWIFT_Auth::init(rgw::sal::RGWRadosStore *store, struct req_state *state,
rgw::io::BasicClient *cio)
{
state->dialect = "swift-auth";
#include "rgw_auth.h"
#include "rgw_auth_keystone.h"
#include "rgw_auth_filters.h"
+#include "rgw_sal.h"
#define RGW_SWIFT_TOKEN_EXPIRATION (15 * 60)
~RGWHandler_SWIFT_Auth() override {}
RGWOp *op_get() override;
- int init(RGWRados *store, struct req_state *state, rgw::io::BasicClient *cio) override;
+ int init(rgw::sal::RGWRadosStore *store, struct req_state *state, rgw::io::BasicClient *cio) override;
int authorize(const DoutPrefixProvider *dpp) override;
int postauth_init() override { return 0; }
int read_permissions(RGWOp *op) override { return 0; }
static string mdlog_sync_status_shard_prefix = "mdlog.sync-status.shard";
static string mdlog_sync_full_sync_index_prefix = "meta.full-sync.index";
-RGWSyncErrorLogger::RGWSyncErrorLogger(RGWRados *_store, const string &oid_prefix, int _num_shards) : store(_store), num_shards(_num_shards) {
+RGWSyncErrorLogger::RGWSyncErrorLogger(rgw::sal::RGWRadosStore *_store, const string &oid_prefix, int _num_shards) : store(_store), num_shards(_num_shards) {
for (int i = 0; i < num_shards; i++) {
oids.push_back(get_shard_oid(oid_prefix, i));
}
rgw_sync_error_info info(source_zone, error_code, message);
bufferlist bl;
encode(info, bl);
- store->svc.cls->timelog.prepare_entry(entry, real_clock::now(), section, name, bl);
+ store->svc()->cls->timelog.prepare_entry(entry, real_clock::now(), section, name, bl);
uint32_t shard_id = ++counter % num_shards;
int RGWRemoteMetaLog::read_master_log_shards_info(const string &master_period, map<int, RGWMetadataLogInfo> *shards_info)
{
- if (store->svc.zone->is_meta_master()) {
+ if (store->svc()->zone->is_meta_master()) {
return 0;
}
int RGWRemoteMetaLog::read_master_log_shards_next(const string& period, map<int, string> shard_markers, map<int, rgw_mdlog_shard_data> *result)
{
- if (store->svc.zone->is_meta_master()) {
+ if (store->svc()->zone->is_meta_master()) {
return 0;
}
int RGWRemoteMetaLog::init()
{
- conn = store->svc.zone->get_master_conn();
+ conn = store->svc()->zone->get_master_conn();
int ret = http_manager.start();
if (ret < 0) {
int RGWMetaSyncStatusManager::init()
{
- if (store->svc.zone->is_meta_master()) {
+ if (store->svc()->zone->is_meta_master()) {
return 0;
}
- if (!store->svc.zone->get_master_conn()) {
+ if (!store->svc()->zone->get_master_conn()) {
lderr(store->ctx()) << "no REST connection to master zone" << dendl;
return -EIO;
}
- int r = rgw_init_ioctx(store->get_rados_handle(), store->svc.zone->get_zone_params().log_pool, ioctx, true);
+ int r = rgw_init_ioctx(store->getRados()->get_rados_handle(), store->svc()->zone->get_zone_params().log_pool, ioctx, true);
if (r < 0) {
- lderr(store->ctx()) << "ERROR: failed to open log pool (" << store->svc.zone->get_zone_params().log_pool << " ret=" << r << dendl;
+ lderr(store->ctx()) << "ERROR: failed to open log pool (" << store->svc()->zone->get_zone_params().log_pool << " ret=" << r << dendl;
return r;
}
int num_shards = sync_status.sync_info.num_shards;
for (int i = 0; i < num_shards; i++) {
- shard_objs[i] = rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, sync_env.shard_obj_name(i));
+ shard_objs[i] = rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env.shard_obj_name(i));
}
std::unique_lock wl{ts_to_shard_lock};
return out << "meta sync: ";
}
-void RGWMetaSyncEnv::init(const DoutPrefixProvider *_dpp, CephContext *_cct, RGWRados *_store, RGWRESTConn *_conn,
+void RGWMetaSyncEnv::init(const DoutPrefixProvider *_dpp, CephContext *_cct, rgw::sal::RGWRadosStore *_store, RGWRESTConn *_conn,
RGWAsyncRadosProcessor *_async_rados, RGWHTTPManager *_http_manager,
RGWSyncErrorLogger *_error_logger, RGWSyncTraceManager *_sync_tracer) {
dpp = _dpp;
}
class RGWAsyncReadMDLogEntries : public RGWAsyncRadosRequest {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
RGWMetadataLog *mdlog;
int shard_id;
string *marker;
return ret;
}
public:
- RGWAsyncReadMDLogEntries(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWRados *_store,
+ RGWAsyncReadMDLogEntries(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *_store,
RGWMetadataLog* mdlog, int _shard_id,
string* _marker, int _max_entries,
list<cls_log_entry> *_entries, bool *_truncated)
int operate() override {
auto store = env->store;
- RGWRESTConn *conn = store->svc.zone->get_master_conn();
+ RGWRESTConn *conn = store->svc()->zone->get_master_conn();
reenter(this) {
yield {
char buf[16];
set_status("acquiring sync lock");
uint32_t lock_duration = cct->_conf->rgw_sync_lease_period;
string lock_name = "sync_lock";
- RGWRados *store = sync_env->store;
+ rgw::sal::RGWRadosStore *store = sync_env->store;
lease_cr.reset(new RGWContinuousLeaseCR(sync_env->async_rados, store,
- rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, sync_env->status_oid()),
+ rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env->status_oid()),
lock_name, lock_duration, this));
lease_stack.reset(spawn(lease_cr.get(), false));
}
}
yield {
set_status("writing sync status");
- RGWRados *store = sync_env->store;
- call(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(sync_env->async_rados, store->svc.sysobj,
- rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, sync_env->status_oid()),
+ rgw::sal::RGWRadosStore *store = sync_env->store;
+ call(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(sync_env->async_rados, store->svc()->sysobj,
+ rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env->status_oid()),
status));
}
RGWMetadataLogInfo& info = shards_info[i];
marker.next_step_marker = info.marker;
marker.timestamp = info.last_update;
- RGWRados *store = sync_env->store;
+ rgw::sal::RGWRadosStore *store = sync_env->store;
spawn(new RGWSimpleRadosWriteCR<rgw_meta_sync_marker>(sync_env->async_rados,
- store->svc.sysobj,
- rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, sync_env->shard_obj_name(i)),
+ store->svc()->sysobj,
+ rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env->shard_obj_name(i)),
marker), true);
}
}
yield {
set_status("changing sync state: build full sync maps");
status.state = rgw_meta_sync_info::StateBuildingFullSyncMaps;
- RGWRados *store = sync_env->store;
- call(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(sync_env->async_rados, store->svc.sysobj,
- rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, sync_env->status_oid()),
+ rgw::sal::RGWRadosStore *store = sync_env->store;
+ call(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(sync_env->async_rados, store->svc()->sysobj,
+ rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env->status_oid()),
status));
}
set_status("drop lock lease");
return false;
}
using CR = RGWSimpleRadosReadCR<rgw_meta_sync_marker>;
- rgw_raw_obj obj{env->store->svc.zone->get_zone_params().log_pool,
+ rgw_raw_obj obj{env->store->svc()->zone->get_zone_params().log_pool,
env->shard_obj_name(shard_id)};
- spawn(new CR(env->async_rados, env->store->svc.sysobj, obj, &markers[shard_id]), false);
+ spawn(new CR(env->async_rados, env->store->svc()->sysobj, obj, &markers[shard_id]), false);
shard_id++;
return true;
}
using ReadInfoCR = RGWSimpleRadosReadCR<rgw_meta_sync_info>;
yield {
bool empty_on_enoent = false; // fail on ENOENT
- rgw_raw_obj obj{sync_env->store->svc.zone->get_zone_params().log_pool,
+ rgw_raw_obj obj{sync_env->store->svc()->zone->get_zone_params().log_pool,
sync_env->status_oid()};
- call(new ReadInfoCR(sync_env->async_rados, sync_env->store->svc.sysobj, obj,
+ call(new ReadInfoCR(sync_env->async_rados, sync_env->store->svc()->sysobj, obj,
&sync_status->sync_info, empty_on_enoent));
}
if (retcode < 0) {
string lock_name = "sync_lock";
lease_cr.reset(new RGWContinuousLeaseCR(sync_env->async_rados,
sync_env->store,
- rgw_raw_obj(sync_env->store->svc.zone->get_zone_params().log_pool, sync_env->status_oid()),
+ rgw_raw_obj(sync_env->store->svc()->zone->get_zone_params().log_pool, sync_env->status_oid()),
lock_name, lock_duration, this));
lease_stack.reset(spawn(lease_cr.get(), false));
}
yield;
}
entries_index.reset(new RGWShardedOmapCRManager(sync_env->async_rados, sync_env->store, this, num_shards,
- sync_env->store->svc.zone->get_zone_params().log_pool,
+ sync_env->store->svc()->zone->get_zone_params().log_pool,
mdlog_sync_full_sync_index_prefix));
yield {
call(new RGWReadRESTResourceCR<list<string> >(cct, conn, sync_env->http_manager,
tn->log(20, SSTR("list metadata: section=" << *sections_iter << " key=" << *iter));
string s = *sections_iter + ":" + *iter;
int shard_id;
- RGWRados *store = sync_env->store;
- int ret = store->ctl.meta.mgr->get_shard_id(*sections_iter, *iter, &shard_id);
+ rgw::sal::RGWRadosStore *store = sync_env->store;
+ int ret = store->ctl()->meta.mgr->get_shard_id(*sections_iter, *iter, &shard_id);
if (ret < 0) {
tn->log(0, SSTR("ERROR: could not determine shard id for " << *sections_iter << ":" << *iter));
ret_status = ret;
int shard_id = (int)iter->first;
rgw_meta_sync_marker& marker = iter->second;
marker.total_entries = entries_index->get_total_entries(shard_id);
- spawn(new RGWSimpleRadosWriteCR<rgw_meta_sync_marker>(sync_env->async_rados, sync_env->store->svc.sysobj,
- rgw_raw_obj(sync_env->store->svc.zone->get_zone_params().log_pool, sync_env->shard_obj_name(shard_id)),
+ spawn(new RGWSimpleRadosWriteCR<rgw_meta_sync_marker>(sync_env->async_rados, sync_env->store->svc()->sysobj,
+ rgw_raw_obj(sync_env->store->svc()->zone->get_zone_params().log_pool, sync_env->shard_obj_name(shard_id)),
marker), true);
}
}
};
class RGWAsyncMetaStoreEntry : public RGWAsyncRadosRequest {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
string raw_key;
bufferlist bl;
protected:
int _send_request() override {
- int ret = store->ctl.meta.mgr->put(raw_key, bl, null_yield, RGWMDLogSyncType::APPLY_ALWAYS);
+ int ret = store->ctl()->meta.mgr->put(raw_key, bl, null_yield, RGWMDLogSyncType::APPLY_ALWAYS);
if (ret < 0) {
ldout(store->ctx(), 0) << "ERROR: can't store key: " << raw_key << " ret=" << ret << dendl;
return ret;
return 0;
}
public:
- RGWAsyncMetaStoreEntry(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWRados *_store,
+ RGWAsyncMetaStoreEntry(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *_store,
const string& _raw_key,
bufferlist& _bl) : RGWAsyncRadosRequest(caller, cn), store(_store),
raw_key(_raw_key), bl(_bl) {}
};
class RGWAsyncMetaRemoveEntry : public RGWAsyncRadosRequest {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
string raw_key;
protected:
int _send_request() override {
- int ret = store->ctl.meta.mgr->remove(raw_key, null_yield);
+ int ret = store->ctl()->meta.mgr->remove(raw_key, null_yield);
if (ret < 0) {
ldout(store->ctx(), 0) << "ERROR: can't remove key: " << raw_key << " ret=" << ret << dendl;
return ret;
return 0;
}
public:
- RGWAsyncMetaRemoveEntry(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWRados *_store,
+ RGWAsyncMetaRemoveEntry(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RGWRadosStore *_store,
const string& _raw_key) : RGWAsyncRadosRequest(caller, cn), store(_store),
raw_key(_raw_key) {}
};
ldpp_dout(sync_env->dpp, 20) << __func__ << "(): updating marker marker_oid=" << marker_oid << " marker=" << new_marker << " realm_epoch=" << sync_marker.realm_epoch << dendl;
tn->log(20, SSTR("new marker=" << new_marker));
- RGWRados *store = sync_env->store;
+ rgw::sal::RGWRadosStore *store = sync_env->store;
return new RGWSimpleRadosWriteCR<rgw_meta_sync_marker>(sync_env->async_rados,
- store->svc.sysobj,
- rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, marker_oid),
+ store->svc()->sysobj,
+ rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, marker_oid),
sync_marker);
}
yield {
uint32_t lock_duration = cct->_conf->rgw_sync_lease_period;
string lock_name = "sync_lock";
- RGWRados *store = sync_env->store;
+ rgw::sal::RGWRadosStore *store = sync_env->store;
lease_cr.reset(new RGWContinuousLeaseCR(sync_env->async_rados, store,
rgw_raw_obj(pool, sync_env->shard_obj_name(shard_id)),
lock_name, lock_duration, this));
ldpp_dout(sync_env->dpp, 4) << *this << ": saving marker pos=" << temp_marker->marker << " realm_epoch=" << realm_epoch << dendl;
using WriteMarkerCR = RGWSimpleRadosWriteCR<rgw_meta_sync_marker>;
- yield call(new WriteMarkerCR(sync_env->async_rados, sync_env->store->svc.sysobj,
+ yield call(new WriteMarkerCR(sync_env->async_rados, sync_env->store->svc()->sysobj,
rgw_raw_obj(pool, sync_env->shard_obj_name(shard_id)),
*temp_marker));
}
yield {
uint32_t lock_duration = cct->_conf->rgw_sync_lease_period;
string lock_name = "sync_lock";
- RGWRados *store = sync_env->store;
+ rgw::sal::RGWRadosStore *store = sync_env->store;
lease_cr.reset( new RGWContinuousLeaseCR(sync_env->async_rados, store,
rgw_raw_obj(pool, sync_env->shard_obj_name(shard_id)),
lock_name, lock_duration, this));
}
RGWCoroutine *alloc_finisher_cr() override {
- RGWRados *store = sync_env->store;
- return new RGWSimpleRadosReadCR<rgw_meta_sync_marker>(sync_env->async_rados, store->svc.sysobj,
+ rgw::sal::RGWRadosStore *store = sync_env->store;
+ return new RGWSimpleRadosReadCR<rgw_meta_sync_marker>(sync_env->async_rados, store->svc()->sysobj,
rgw_raw_obj(pool, sync_env->shard_obj_name(shard_id)),
&sync_marker);
}
RGWMetaSyncCR(RGWMetaSyncEnv *_sync_env, const RGWPeriodHistory::Cursor &cursor,
const rgw_meta_sync_status& _sync_status, RGWSyncTraceNodeRef& _tn)
: RGWCoroutine(_sync_env->cct), sync_env(_sync_env),
- pool(sync_env->store->svc.zone->get_zone_params().log_pool),
+ pool(sync_env->store->svc()->zone->get_zone_params().log_pool),
cursor(cursor), sync_status(_sync_status), tn(_tn) {}
~RGWMetaSyncCR() {
// loop through one period at a time
tn->log(1, "start");
for (;;) {
- if (cursor == sync_env->store->svc.mdlog->get_period_history()->get_current()) {
+ if (cursor == sync_env->store->svc()->mdlog->get_period_history()->get_current()) {
next = RGWPeriodHistory::Cursor{};
if (cursor) {
ldpp_dout(sync_env->dpp, 10) << "RGWMetaSyncCR on current period="
// get the mdlog for the current period (may be empty)
auto& period_id = sync_status.sync_info.period;
auto realm_epoch = sync_status.sync_info.realm_epoch;
- auto mdlog = sync_env->store->svc.mdlog->get_log(period_id);
+ auto mdlog = sync_env->store->svc()->mdlog->get_log(period_id);
tn->log(1, SSTR("realm epoch=" << realm_epoch << " period id=" << period_id));
sync_status.sync_info.period = cursor.get_period().get_id();
sync_status.sync_info.realm_epoch = cursor.get_epoch();
yield call(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(sync_env->async_rados,
- sync_env->store->svc.sysobj,
+ sync_env->store->svc()->sysobj,
rgw_raw_obj(pool, sync_env->status_oid()),
sync_status.sync_info));
}
env->async_rados = async_rados;
env->http_manager = &http_manager;
env->error_logger = error_logger;
- env->sync_tracer = store->get_sync_tracer();
+ env->sync_tracer = store->getRados()->get_sync_tracer();
}
int RGWRemoteMetaLog::read_sync_status(rgw_meta_sync_status *sync_status)
{
- if (store->svc.zone->is_meta_master()) {
+ if (store->svc()->zone->is_meta_master()) {
return 0;
}
// cannot run concurrently with run_sync(), so run in a separate manager
- RGWCoroutinesManager crs(store->ctx(), store->get_cr_registry());
+ RGWCoroutinesManager crs(store->ctx(), store->getRados()->get_cr_registry());
RGWHTTPManager http_manager(store->ctx(), crs.get_completion_mgr());
int ret = http_manager.start();
if (ret < 0) {
int RGWRemoteMetaLog::init_sync_status()
{
- if (store->svc.zone->is_meta_master()) {
+ if (store->svc()->zone->is_meta_master()) {
return 0;
}
rgw_meta_sync_info sync_info;
sync_info.num_shards = mdlog_info.num_shards;
- auto cursor = store->svc.mdlog->get_period_history()->get_current();
+ auto cursor = store->svc()->mdlog->get_period_history()->get_current();
if (cursor) {
sync_info.period = cursor.get_period().get_id();
sync_info.realm_epoch = cursor.get_epoch();
int RGWRemoteMetaLog::store_sync_info(const rgw_meta_sync_info& sync_info)
{
tn->log(20, "store sync info");
- return run(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(async_rados, store->svc.sysobj,
- rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, sync_env.status_oid()),
+ return run(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(async_rados, store->svc()->sysobj,
+ rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env.status_oid()),
sync_info));
}
// return a cursor to the period at our sync position
-static RGWPeriodHistory::Cursor get_period_at(RGWRados* store,
+static RGWPeriodHistory::Cursor get_period_at(rgw::sal::RGWRadosStore* store,
const rgw_meta_sync_info& info)
{
if (info.period.empty()) {
}
// look for an existing period in our history
- auto cursor = store->svc.mdlog->get_period_history()->lookup(info.realm_epoch);
+ auto cursor = store->svc()->mdlog->get_period_history()->lookup(info.realm_epoch);
if (cursor) {
// verify that the period ids match
auto& existing = cursor.get_period().get_id();
// read the period from rados or pull it from the master
RGWPeriod period;
- int r = store->svc.mdlog->pull_period(info.period, period);
+ int r = store->svc()->mdlog->pull_period(info.period, period);
if (r < 0) {
lderr(store->ctx()) << "ERROR: failed to read period id "
<< info.period << ": " << cpp_strerror(r) << dendl;
return RGWPeriodHistory::Cursor{r};
}
// attach the period to our history
- cursor = store->svc.mdlog->get_period_history()->attach(std::move(period));
+ cursor = store->svc()->mdlog->get_period_history()->attach(std::move(period));
if (!cursor) {
r = cursor.get_error();
lderr(store->ctx()) << "ERROR: failed to read period history back to "
int RGWRemoteMetaLog::run_sync()
{
- if (store->svc.zone->is_meta_master()) {
+ if (store->svc()->zone->is_meta_master()) {
return 0;
}
if (sync_status.sync_info.state == rgw_meta_sync_info::StateInit) {
ldpp_dout(dpp, 20) << __func__ << "(): init" << dendl;
sync_status.sync_info.num_shards = mdlog_info.num_shards;
- auto cursor = store->svc.mdlog->get_period_history()->get_current();
+ auto cursor = store->svc()->mdlog->get_period_history()->get_current();
if (cursor) {
// run full sync, then start incremental from the current period/epoch
sync_status.sync_info.period = cursor.get_period().get_id();
#include "rgw_http_client.h"
#include "rgw_metadata.h"
#include "rgw_meta_sync_status.h"
-#include "rgw_rados.h"
+#include "rgw_sal.h"
#include "rgw_sync_trace.h"
#include "rgw_mdlog.h"
+namespace rgw { namespace sal {
+ class RGWRadosStore;
+} }
#define ERROR_LOGGER_SHARDS 32
#define RGW_SYNC_ERROR_LOG_SHARD_PREFIX "sync.error-log"
class RGWSyncTraceManager;
class RGWSyncErrorLogger {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
vector<string> oids;
int num_shards;
std::atomic<int64_t> counter = { 0 };
public:
- RGWSyncErrorLogger(RGWRados *_store, const string &oid_prefix, int _num_shards);
+ RGWSyncErrorLogger(rgw::sal::RGWRadosStore *_store, const string &oid_prefix, int _num_shards);
RGWCoroutine *log_error_cr(const string& source_zone, const string& section, const string& name, uint32_t error_code, const string& message);
static string get_shard_oid(const string& oid_prefix, int shard_id);
struct RGWMetaSyncEnv {
const DoutPrefixProvider *dpp;
CephContext *cct{nullptr};
- RGWRados *store{nullptr};
+ rgw::sal::RGWRadosStore *store{nullptr};
RGWRESTConn *conn{nullptr};
RGWAsyncRadosProcessor *async_rados{nullptr};
RGWHTTPManager *http_manager{nullptr};
RGWMetaSyncEnv() {}
- void init(const DoutPrefixProvider *_dpp, CephContext *_cct, RGWRados *_store, RGWRESTConn *_conn,
+ void init(const DoutPrefixProvider *_dpp, CephContext *_cct, rgw::sal::RGWRadosStore *_store, RGWRESTConn *_conn,
RGWAsyncRadosProcessor *_async_rados, RGWHTTPManager *_http_manager,
RGWSyncErrorLogger *_error_logger, RGWSyncTraceManager *_sync_tracer);
class RGWRemoteMetaLog : public RGWCoroutinesManager {
const DoutPrefixProvider *dpp;
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
RGWRESTConn *conn;
RGWAsyncRadosProcessor *async_rados;
RGWSyncTraceNodeRef tn;
public:
- RGWRemoteMetaLog(const DoutPrefixProvider *dpp, RGWRados *_store,
+ RGWRemoteMetaLog(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *_store,
RGWAsyncRadosProcessor *async_rados,
RGWMetaSyncStatusManager *_sm)
- : RGWCoroutinesManager(_store->ctx(), _store->get_cr_registry()),
+ : RGWCoroutinesManager(_store->ctx(), _store->getRados()->get_cr_registry()),
dpp(dpp), store(_store), conn(NULL), async_rados(async_rados),
http_manager(store->ctx(), completion_mgr),
status_manager(_sm) {}
};
class RGWMetaSyncStatusManager : public DoutPrefixProvider {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
librados::IoCtx ioctx;
RGWRemoteMetaLog master_log;
vector<string> clone_markers;
public:
- RGWMetaSyncStatusManager(RGWRados *_store, RGWAsyncRadosProcessor *async_rados)
+ RGWMetaSyncStatusManager(rgw::sal::RGWRadosStore *_store, RGWAsyncRadosProcessor *async_rados)
: store(_store), master_log(this, store, async_rados, this)
{}
int init();
void expand_target(RGWDataSyncEnv *sync_env, const string& sid, const string& path, string *dest) {
apply_meta_param(path, "sid", sid, dest);
- const RGWZoneGroup& zg = sync_env->store->svc.zone->get_zonegroup();
+ const RGWZoneGroup& zg = sync_env->store->svc()->zone->get_zonegroup();
apply_meta_param(path, "zonegroup", zg.get_name(), dest);
apply_meta_param(path, "zonegroup_id", zg.get_id(), dest);
- const RGWZone& zone = sync_env->store->svc.zone->get_zone();
+ const RGWZone& zone = sync_env->store->svc()->zone->get_zone();
apply_meta_param(path, "zone", zone.name, dest);
apply_meta_param(path, "zone_id", zone.id, dest);
}
auto& root_conf = root_profile->conn_conf;
root_profile->conn.reset(new S3RESTConn(sync_env->cct,
- sync_env->store->svc.zone,
+ sync_env->store->svc()->zone,
id,
{ root_conf->endpoint },
root_conf->key,
auto& c = i.second;
c->conn.reset(new S3RESTConn(sync_env->cct,
- sync_env->store->svc.zone,
+ sync_env->store->svc()->zone,
id,
{ c->conn_conf->endpoint },
c->conn_conf->key,
obj_size(_obj_size),
src_properties(_src_properties),
rest_obj(_rest_obj),
- status_obj(sync_env->store->svc.zone->get_zone_params().log_pool,
+ status_obj(sync_env->store->svc()->zone->get_zone_params().log_pool,
RGWBucketSyncStatusManager::obj_status_oid(sync_env->source_zone, src_obj)) {
}
int operate() override {
reenter(this) {
- yield call(new RGWSimpleRadosReadCR<rgw_sync_aws_multipart_upload_info>(sync_env->async_rados, sync_env->store->svc.sysobj,
+ yield call(new RGWSimpleRadosReadCR<rgw_sync_aws_multipart_upload_info>(sync_env->async_rados, sync_env->store->svc()->sysobj,
status_obj, &status, false));
if (retcode < 0 && retcode != -ENOENT) {
return set_cr_error(ret_err);
}
- yield call(new RGWSimpleRadosWriteCR<rgw_sync_aws_multipart_upload_info>(sync_env->async_rados, sync_env->store->svc.sysobj, status_obj, status));
+ yield call(new RGWSimpleRadosWriteCR<rgw_sync_aws_multipart_upload_info>(sync_env->async_rados, sync_env->store->svc()->sysobj, status_obj, status));
if (retcode < 0) {
ldout(sync_env->cct, 0) << "ERROR: failed to store multipart upload state, retcode=" << retcode << dendl;
/* continue with upload anyway */
<< " zone_short_id=" << src_zone_short_id << " pg_ver=" << src_pg_ver
<< dendl;
-
- source_conn = sync_env->store->svc.zone->get_zone_conn_by_id(sync_env->source_zone);
+ source_conn = sync_env->store->svc()->zone->get_zone_conn_by_id(sync_env->source_zone);
if (!source_conn) {
ldout(sync_env->cct, 0) << "ERROR: cannot find http connection to zone " << sync_env->source_zone << dendl;
return set_cr_error(-EINVAL);
~RGWElasticDataSyncModule() override {}
void init(RGWDataSyncEnv *sync_env, uint64_t instance_id) override {
- conf->init_instance(sync_env->store->svc.zone->get_realm(), instance_id);
+ conf->init_instance(sync_env->store->svc()->zone->get_realm(), instance_id);
}
RGWCoroutine *init_sync(RGWDataSyncEnv *sync_env) override {
protected:
RGWOp *op_get() override {
if (s->info.args.exists("query")) {
- return new RGWMetadataSearch_ObjStore_S3(store->get_sync_module());
+ return new RGWMetadataSearch_ObjStore_S3(store->getRados()->get_sync_module());
}
if (!s->init_state.url_bucket.empty() &&
s->info.args.exists("mdsearch")) {
rgw_raw_obj obj;
ups.get_sub_meta_obj(sub_name, &obj);
bool empty_on_enoent = false;
- call(new ReadInfoCR(sync_env->async_rados, sync_env->store->svc.sysobj,
+ call(new ReadInfoCR(sync_env->async_rados, sync_env->store->svc()->sysobj,
obj,
&user_sub_conf, empty_on_enoent));
}
using ReadInfoCR = RGWSimpleRadosReadCR<rgw_pubsub_bucket_topics>;
yield {
bool empty_on_enoent = true;
- call(new ReadInfoCR(sync_env->async_rados, sync_env->store->svc.sysobj,
+ call(new ReadInfoCR(sync_env->async_rados, sync_env->store->svc()->sysobj,
bucket_obj,
&bucket_topics, empty_on_enoent));
}
using ReadUserTopicsInfoCR = RGWSimpleRadosReadCR<rgw_pubsub_user_topics>;
yield {
bool empty_on_enoent = true;
- call(new ReadUserTopicsInfoCR(sync_env->async_rados, sync_env->store->svc.sysobj,
+ call(new ReadUserTopicsInfoCR(sync_env->async_rados, sync_env->store->svc()->sysobj,
user_obj,
&user_topics, empty_on_enoent));
}
void init(RGWDataSyncEnv *sync_env, uint64_t instance_id) override {
PSManagerRef mgr = PSManager::get_shared(sync_env, env);
- env->init_instance(sync_env->store->svc.zone->get_realm(), instance_id, mgr);
+ env->init_instance(sync_env->store->svc()->zone->get_realm(), instance_id, mgr);
}
RGWCoroutine *start_sync(RGWDataSyncEnv *sync_env) override {
dest.arn_topic = topic_name;
// the topic ARN will be sent in the reply
const rgw::ARN arn(rgw::Partition::aws, rgw::Service::sns,
- store->svc.zone->get_zonegroup().get_name(),
+ store->svc()->zone->get_zonegroup().get_name(),
s->user->user_id.tenant, topic_name);
topic_arn = arn.to_string();
return 0;
return -EINVAL;
}
- const auto psmodule = static_cast<RGWPSSyncModuleInstance*>(store->get_sync_module().get());
+ const auto psmodule = static_cast<RGWPSSyncModuleInstance*>(store->getRados()->get_sync_module().get());
const auto& conf = psmodule->get_effective_conf();
dest.push_endpoint = s->info.args.get("push-endpoint");
const auto& id = s->owner.get_id();
- ret = store->get_bucket_info(*s->sysobj_ctx, id.tenant, bucket_name,
+ ret = store->getRados()->get_bucket_info(*s->sysobj_ctx, id.tenant, bucket_name,
bucket_info, nullptr, null_yield, nullptr);
if (ret < 0) {
ldout(s->cct, 1) << "failed to get bucket info, cannot verify ownership" << dendl;
ups = make_unique<RGWUserPubSub>(store, s->owner.get_id());
auto b = ups->get_bucket(bucket_info.bucket);
ceph_assert(b);
- const auto psmodule = static_cast<RGWPSSyncModuleInstance*>(store->get_sync_module().get());
+ const auto psmodule = static_cast<RGWPSSyncModuleInstance*>(store->getRados()->get_sync_module().get());
const auto& conf = psmodule->get_effective_conf();
for (const auto& c : configurations.list) {
return ret;
}
- ret = store->get_bucket_info(*s->sysobj_ctx, s->owner.get_id().tenant, bucket_name,
+ ret = store->getRados()->get_bucket_info(*s->sysobj_ctx, s->owner.get_id().tenant, bucket_name,
bucket_info, nullptr, null_yield, nullptr);
if (ret < 0) {
return ret;
return ret;
}
- ret = store->get_bucket_info(*s->sysobj_ctx, s->owner.get_id().tenant, bucket_name,
+ ret = store->getRados()->get_bucket_info(*s->sysobj_ctx, s->owner.get_id().tenant, bucket_name,
bucket_info, nullptr, null_yield, nullptr);
if (ret < 0) {
return ret;
}
}
-RGWDataAccess::RGWDataAccess(RGWRados *_store) : store(_store)
+RGWDataAccess::RGWDataAccess(rgw::sal::RGWRadosStore *_store) : store(_store)
{
- sysobj_ctx = std::make_unique<RGWSysObjectCtx>(store->svc.sysobj->init_obj_ctx());
+ sysobj_ctx = std::make_unique<RGWSysObjectCtx>(store->svc()->sysobj->init_obj_ctx());
}
int RGWDataAccess::Bucket::init()
{
- int ret = sd->store->get_bucket_info(*sd->sysobj_ctx,
+ int ret = sd->store->getRados()->get_bucket_info(*sd->sysobj_ctx,
tenant, name,
bucket_info,
&mtime,
const DoutPrefixProvider *dpp,
optional_yield y)
{
- RGWRados *store = sd->store;
+ rgw::sal::RGWRadosStore *store = sd->store;
CephContext *cct = store->ctx();
string tag;
auto& owner = bucket->policy.get_owner();
- string req_id = store->svc.zone_utils->unique_id(store->get_new_req_id());
+ string req_id = store->svc()->zone_utils->unique_id(store->getRados()->get_new_req_id());
using namespace rgw::putobj;
AtomicObjectProcessor processor(&aio, store, bucket_info, nullptr,
CompressorRef plugin;
boost::optional<RGWPutObj_Compress> compressor;
- const auto& compression_type = store->svc.zone->get_zone_params().get_compression_type(bucket_info.placement_rule);
+ const auto& compression_type = store->svc()->zone->get_zone_params().get_compression_type(bucket_info.placement_rule);
if (compression_type != "none") {
plugin = Compressor::create(store->ctx(), compression_type);
if (!plugin) {
class RGWSysObjectCtx;
struct RGWObjVersionTracker;
class optional_yield;
+namespace rgw { namespace sal {
+ class RGWRadosStore;
+} }
struct obj_version;
class RGWDataAccess
{
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
std::unique_ptr<RGWSysObjectCtx> sysobj_ctx;
public:
- RGWDataAccess(RGWRados *_store);
+ RGWDataAccess(rgw::sal::RGWRadosStore *_store);
class Object;
class Bucket;
#include <sstream>
#include "rgw_torrent.h"
+#include "rgw_sal.h"
#include "include/str_list.h"
#include "include/rados/librados.hpp"
store = NULL;
}
-void seed::init(struct req_state *p_req, RGWRados *p_store)
+void seed::init(struct req_state *p_req, rgw::sal::RGWRadosStore *p_store)
{
s = p_req;
store = p_store;
rgw_obj obj(s->bucket, s->object.name);
rgw_raw_obj raw_obj;
- store->obj_to_raw(s->bucket_info.placement_rule, obj, &raw_obj);
+ store->getRados()->obj_to_raw(s->bucket_info.placement_rule, obj, &raw_obj);
- auto obj_ctx = store->svc.sysobj->init_obj_ctx();
+ auto obj_ctx = store->svc()->sysobj->init_obj_ctx();
auto sysobj = obj_ctx.get_obj(raw_obj);
op_ret = sysobj.omap().set(key, bl, null_yield);
bufferlist bl; // bufflist ready to send
struct req_state *s{nullptr};
- RGWRados *store{nullptr};
+ rgw::sal::RGWRadosStore *store{nullptr};
SHA1 h;
TorrentBencode dencode;
~seed();
int get_params();
- void init(struct req_state *p_req, RGWRados *p_store);
+ void init(struct req_state *p_req, rgw::sal::RGWRadosStore *p_store);
int get_torrent_file(RGWRados::Object::Read &read_op,
uint64_t &total_len,
ceph::bufferlist &bl_data,
#include "rgw_cr_rest.h"
#include "rgw_data_sync.h"
#include "rgw_metadata.h"
-#include "rgw_rados.h"
+#include "rgw_sal.h"
#include "rgw_zone.h"
#include "rgw_sync.h"
#include "rgw_bucket.h"
/// rados watcher for bucket trim notifications
class BucketTrimWatcher : public librados::WatchCtx2 {
- RGWRados *const store;
+ rgw::sal::RGWRadosStore *const store;
const rgw_raw_obj& obj;
rgw_rados_ref ref;
uint64_t handle{0};
boost::container::flat_map<TrimNotifyType, HandlerPtr> handlers;
public:
- BucketTrimWatcher(RGWRados *store, const rgw_raw_obj& obj,
+ BucketTrimWatcher(rgw::sal::RGWRadosStore *store, const rgw_raw_obj& obj,
TrimCounters::Server *counters)
: store(store), obj(obj) {
handlers.emplace(NotifyTrimCounters, new TrimCounters::Handler(counters));
}
int start() {
- int r = store->get_raw_obj_ref(obj, &ref);
+ int r = store->getRados()->get_raw_obj_ref(obj, &ref);
if (r < 0) {
return r;
}
/// concurrent requests
class BucketTrimShardCollectCR : public RGWShardCollectCR {
static constexpr int MAX_CONCURRENT_SHARDS = 16;
- RGWRados *const store;
+ rgw::sal::RGWRadosStore *const store;
const RGWBucketInfo& bucket_info;
const std::vector<std::string>& markers; //< shard markers to trim
size_t i{0}; //< index of current shard marker
public:
- BucketTrimShardCollectCR(RGWRados *store, const RGWBucketInfo& bucket_info,
+ BucketTrimShardCollectCR(rgw::sal::RGWRadosStore *store, const RGWBucketInfo& bucket_info,
const std::vector<std::string>& markers)
: RGWShardCollectCR(store->ctx(), MAX_CONCURRENT_SHARDS),
store(store), bucket_info(bucket_info), markers(markers)
/// trim the bilog of all of the given bucket instance's shards
class BucketTrimInstanceCR : public RGWCoroutine {
- RGWRados *const store;
+ rgw::sal::RGWRadosStore *const store;
RGWHTTPManager *const http;
BucketTrimObserver *const observer;
std::string bucket_instance;
std::vector<std::string> min_markers; //< min marker per shard
public:
- BucketTrimInstanceCR(RGWRados *store, RGWHTTPManager *http,
+ BucketTrimInstanceCR(rgw::sal::RGWRadosStore *store, RGWHTTPManager *http,
BucketTrimObserver *observer,
const std::string& bucket_instance)
: RGWCoroutine(store->ctx()), store(store),
http(http), observer(observer),
bucket_instance(bucket_instance),
- zone_id(store->svc.zone->get_zone().id),
- peer_status(store->svc.zone->get_zone_data_notify_to_map().size()) {
+ zone_id(store->svc()->zone->get_zone().id),
+ peer_status(store->svc()->zone->get_zone_data_notify_to_map().size()) {
rgw_bucket_parse_bucket_key(cct, bucket_instance, &bucket, nullptr);
}
};
auto p = peer_status.begin();
- for (auto& c : store->svc.zone->get_zone_data_notify_to_map()) {
+ for (auto& c : store->svc()->zone->get_zone_data_notify_to_map()) {
using StatusCR = RGWReadRESTResourceCR<StatusShards>;
spawn(new StatusCR(cct, c.second, http, "/admin/log/", params, &*p),
false);
++p;
}
// in parallel, read the local bucket instance info
- spawn(new RGWGetBucketInstanceInfoCR(store->svc.rados->get_async_processor(), store,
+ spawn(new RGWGetBucketInstanceInfoCR(store->svc()->rados->get_async_processor(), store,
bucket, &bucket_info),
false);
}
/// trim each bucket instance while limiting the number of concurrent operations
class BucketTrimInstanceCollectCR : public RGWShardCollectCR {
- RGWRados *const store;
+ rgw::sal::RGWRadosStore *const store;
RGWHTTPManager *const http;
BucketTrimObserver *const observer;
std::vector<std::string>::const_iterator bucket;
std::vector<std::string>::const_iterator end;
public:
- BucketTrimInstanceCollectCR(RGWRados *store, RGWHTTPManager *http,
+ BucketTrimInstanceCollectCR(rgw::sal::RGWRadosStore *store, RGWHTTPManager *http,
BucketTrimObserver *observer,
const std::vector<std::string>& buckets,
int max_concurrent)
};
class BucketTrimCR : public RGWCoroutine {
- RGWRados *const store;
+ rgw::sal::RGWRadosStore *const store;
RGWHTTPManager *const http;
const BucketTrimConfig& config;
BucketTrimObserver *const observer;
static const std::string section; //< metadata section for bucket instances
public:
- BucketTrimCR(RGWRados *store, RGWHTTPManager *http,
+ BucketTrimCR(rgw::sal::RGWRadosStore *store, RGWHTTPManager *http,
const BucketTrimConfig& config, BucketTrimObserver *observer,
const rgw_raw_obj& obj)
: RGWCoroutine(store->ctx()), store(store), http(http), config(config),
// read BucketTrimStatus for marker position
set_status("reading trim status");
using ReadStatus = RGWSimpleRadosReadCR<BucketTrimStatus>;
- yield call(new ReadStatus(store->svc.rados->get_async_processor(), store->svc.sysobj, obj,
+ yield call(new ReadStatus(store->svc()->rados->get_async_processor(), store->svc()->sysobj, obj,
&status, true, &objv));
if (retcode < 0) {
ldout(cct, 10) << "failed to read bilog trim status: "
return buckets.size() < config.buckets_per_interval;
};
- call(new MetadataListCR(cct, store->svc.rados->get_async_processor(),
- store->ctl.meta.mgr,
+ call(new MetadataListCR(cct, store->svc()->rados->get_async_processor(),
+ store->ctl()->meta.mgr,
section, status.marker, cb));
}
if (retcode < 0) {
status.marker = std::move(last_cold_marker);
ldout(cct, 20) << "writing bucket trim marker=" << status.marker << dendl;
using WriteStatus = RGWSimpleRadosWriteCR<BucketTrimStatus>;
- yield call(new WriteStatus(store->svc.rados->get_async_processor(), store->svc.sysobj, obj,
+ yield call(new WriteStatus(store->svc()->rados->get_async_processor(), store->svc()->sysobj, obj,
status, &objv));
if (retcode < 0) {
ldout(cct, 4) << "failed to write updated trim status: "
}
class BucketTrimPollCR : public RGWCoroutine {
- RGWRados *const store;
+ rgw::sal::RGWRadosStore *const store;
RGWHTTPManager *const http;
const BucketTrimConfig& config;
BucketTrimObserver *const observer;
const std::string cookie;
public:
- BucketTrimPollCR(RGWRados *store, RGWHTTPManager *http,
+ BucketTrimPollCR(rgw::sal::RGWRadosStore *store, RGWHTTPManager *http,
const BucketTrimConfig& config,
BucketTrimObserver *observer, const rgw_raw_obj& obj)
: RGWCoroutine(store->ctx()), store(store), http(http),
// prevent others from trimming for our entire wait interval
set_status("acquiring trim lock");
- yield call(new RGWSimpleRadosLockCR(store->svc.rados->get_async_processor(), store,
+ yield call(new RGWSimpleRadosLockCR(store->svc()->rados->get_async_processor(), store,
obj, name, cookie,
config.trim_interval_sec));
if (retcode < 0) {
if (retcode < 0) {
// on errors, unlock so other gateways can try
set_status("unlocking");
- yield call(new RGWSimpleRadosUnlockCR(store->svc.rados->get_async_processor(), store,
+ yield call(new RGWSimpleRadosUnlockCR(store->svc()->rados->get_async_processor(), store,
obj, name, cookie));
}
}
class BucketTrimManager::Impl : public TrimCounters::Server,
public BucketTrimObserver {
public:
- RGWRados *const store;
+ rgw::sal::RGWRadosStore *const store;
const BucketTrimConfig config;
const rgw_raw_obj status_obj;
/// protect data shared between data sync, trim, and watch/notify threads
std::mutex mutex;
- Impl(RGWRados *store, const BucketTrimConfig& config)
+ Impl(rgw::sal::RGWRadosStore *store, const BucketTrimConfig& config)
: store(store), config(config),
- status_obj(store->svc.zone->get_zone_params().log_pool, BucketTrimStatus::oid),
+ status_obj(store->svc()->zone->get_zone_params().log_pool, BucketTrimStatus::oid),
counter(config.counter_size),
trimmed(config.recent_size, config.recent_duration),
watcher(store, status_obj, this)
}
};
-BucketTrimManager::BucketTrimManager(RGWRados *store,
+BucketTrimManager::BucketTrimManager(rgw::sal::RGWRadosStore *store,
const BucketTrimConfig& config)
: impl(new Impl(store, config))
{
class CephContext;
class RGWCoroutine;
class RGWHTTPManager;
-class RGWRados;
namespace rgw {
+namespace sal {
+ class RGWRadosStore;
+}
+
/// Interface to inform the trim process about which buckets are most active
struct BucketChangeObserver {
virtual ~BucketChangeObserver() = default;
class Impl;
std::unique_ptr<Impl> impl;
public:
- BucketTrimManager(RGWRados *store, const BucketTrimConfig& config);
+ BucketTrimManager(sal::RGWRadosStore *store, const BucketTrimConfig& config);
~BucketTrimManager();
int init();
class DataLogTrimCR : public RGWCoroutine {
using TrimCR = RGWSyncLogTrimCR;
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
RGWHTTPManager *http;
const int num_shards;
const std::string& zone_id; //< my zone id
int ret{0};
public:
- DataLogTrimCR(RGWRados *store, RGWHTTPManager *http,
+ DataLogTrimCR(rgw::sal::RGWRadosStore *store, RGWHTTPManager *http,
int num_shards, std::vector<std::string>& last_trim)
: RGWCoroutine(store->ctx()), store(store), http(http),
num_shards(num_shards),
- zone_id(store->svc.zone->get_zone().id),
- peer_status(store->svc.zone->get_zone_data_notify_to_map().size()),
+ zone_id(store->svc()->zone->get_zone().id),
+ peer_status(store->svc()->zone->get_zone_data_notify_to_map().size()),
min_shard_markers(num_shards, TrimCR::max_marker),
last_trim(last_trim)
{}
};
auto p = peer_status.begin();
- for (auto& c : store->svc.zone->get_zone_data_notify_to_map()) {
+ for (auto& c : store->svc()->zone->get_zone_data_notify_to_map()) {
ldout(cct, 20) << "query sync status from " << c.first << dendl;
using StatusCR = RGWReadRESTResourceCR<rgw_data_sync_status>;
spawn(new StatusCR(cct, c.second, http, "/admin/log/", params, &*p),
ldout(cct, 10) << "trimming log shard " << i
<< " at marker=" << m
<< " last_trim=" << last_trim[i] << dendl;
- spawn(new TrimCR(store, store->svc.datalog_rados->get_oid(i),
+ spawn(new TrimCR(store, store->svc()->datalog_rados->get_oid(i),
m, &last_trim[i]),
true);
}
return 0;
}
-RGWCoroutine* create_admin_data_log_trim_cr(RGWRados *store,
+RGWCoroutine* create_admin_data_log_trim_cr(rgw::sal::RGWRadosStore *store,
RGWHTTPManager *http,
int num_shards,
std::vector<std::string>& markers)
}
class DataLogTrimPollCR : public RGWCoroutine {
- RGWRados *store;
+ rgw::sal::RGWRadosStore *store;
RGWHTTPManager *http;
const int num_shards;
const utime_t interval; //< polling interval
std::vector<std::string> last_trim; //< last trimmed marker per shard
public:
- DataLogTrimPollCR(RGWRados *store, RGWHTTPManager *http,
+ DataLogTrimPollCR(rgw::sal::RGWRadosStore *store, RGWHTTPManager *http,
int num_shards, utime_t interval)
: RGWCoroutine(store->ctx()), store(store), http(http),
num_shards(num_shards), interval(interval),
- lock_oid(store->svc.datalog_rados->get_oid(0)),
+ lock_oid(store->svc()->datalog_rados->get_oid(0)),
lock_cookie(RGWSimpleRadosLockCR::gen_random_cookie(cct)),
last_trim(num_shards)
{}
// request a 'data_trim' lock that covers the entire wait interval to
// prevent other gateways from attempting to trim for the duration
set_status("acquiring trim lock");
- yield call(new RGWSimpleRadosLockCR(store->svc.rados->get_async_processor(), store,
- rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, lock_oid),
+ yield call(new RGWSimpleRadosLockCR(store->svc()->rados->get_async_processor(), store,
+ rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, lock_oid),
"data_trim", lock_cookie,
interval.sec()));
if (retcode < 0) {
return 0;
}
-RGWCoroutine* create_data_log_trim_cr(RGWRados *store,
+RGWCoroutine* create_data_log_trim_cr(rgw::sal::RGWRadosStore *store,
RGWHTTPManager *http,
int num_shards, utime_t interval)
{
class RGWRados;
class RGWHTTPManager;
class utime_t;
+namespace rgw { namespace sal {
+ class RGWRadosStore;
+} }
// DataLogTrimCR factory function
-extern RGWCoroutine* create_data_log_trim_cr(RGWRados *store,
+extern RGWCoroutine* create_data_log_trim_cr(rgw::sal::RGWRadosStore *store,
RGWHTTPManager *http,
int num_shards, utime_t interval);
// factory function for datalog trim via radosgw-admin
-RGWCoroutine* create_admin_data_log_trim_cr(RGWRados *store,
+RGWCoroutine* create_admin_data_log_trim_cr(rgw::sal::RGWRadosStore *store,
RGWHTTPManager *http,
int num_shards,
std::vector<std::string>& markers);
/// purge all log shards for the given mdlog
class PurgeLogShardsCR : public RGWShardCollectCR {
- RGWRados *const store;
+ rgw::sal::RGWRadosStore *const store;
const RGWMetadataLog* mdlog;
const int num_shards;
rgw_raw_obj obj;
static constexpr int max_concurrent = 16;
public:
- PurgeLogShardsCR(RGWRados *store, const RGWMetadataLog* mdlog,
+ PurgeLogShardsCR(rgw::sal::RGWRadosStore *store, const RGWMetadataLog* mdlog,
const rgw_pool& pool, int num_shards)
: RGWShardCollectCR(store->ctx(), max_concurrent),
store(store), mdlog(mdlog), num_shards(num_shards), obj(pool, "")
RGWSI_Zone *zone;
RGWSI_MDLog *mdlog;
} svc;
- RGWRados *const store;
+ rgw::sal::RGWRadosStore *const store;
RGWMetadataManager *const metadata;
RGWObjVersionTracker objv;
Cursor cursor;
epoch_t *last_trim_epoch; //< update last trim on success
public:
- PurgePeriodLogsCR(RGWRados *store, epoch_t realm_epoch, epoch_t *last_trim)
- : RGWCoroutine(store->ctx()), store(store), metadata(store->ctl.meta.mgr),
+ PurgePeriodLogsCR(rgw::sal::RGWRadosStore *store, epoch_t realm_epoch, epoch_t *last_trim)
+ : RGWCoroutine(store->ctx()), store(store), metadata(store->ctl()->meta.mgr),
realm_epoch(realm_epoch), last_trim_epoch(last_trim) {
- svc.zone = store->svc.zone;
- svc.mdlog = store->svc.mdlog;
+ svc.zone = store->svc()->zone;
+ svc.mdlog = store->svc()->mdlog;
}
int operate() override;
/// construct a RGWRESTConn for each zone in the realm
template <typename Zonegroups>
-connection_map make_peer_connections(RGWRados *store,
+connection_map make_peer_connections(rgw::sal::RGWRadosStore *store,
const Zonegroups& zonegroups)
{
connection_map connections;
for (auto& g : zonegroups) {
for (auto& z : g.second.zones) {
std::unique_ptr<RGWRESTConn> conn{
- new RGWRESTConn(store->ctx(), store->svc.zone, z.first, z.second.endpoints)};
+ new RGWRESTConn(store->ctx(), store->svc()->zone, z.first, z.second.endpoints)};
connections.emplace(z.first, std::move(conn));
}
}
struct TrimEnv {
const DoutPrefixProvider *dpp;
- RGWRados *const store;
+ rgw::sal::RGWRadosStore *const store;
RGWHTTPManager *const http;
int num_shards;
const std::string& zone;
Cursor current; //< cursor to current period
epoch_t last_trim_epoch{0}; //< epoch of last mdlog that was purged
- TrimEnv(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http, int num_shards)
+ TrimEnv(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, RGWHTTPManager *http, int num_shards)
: dpp(dpp), store(store), http(http), num_shards(num_shards),
- zone(store->svc.zone->get_zone_params().get_id()),
- current(store->svc.mdlog->get_period_history()->get_current())
+ zone(store->svc()->zone->get_zone_params().get_id()),
+ current(store->svc()->mdlog->get_period_history()->get_current())
{}
};
/// last trim marker for each shard, only applies to current period's mdlog
std::vector<std::string> last_trim_markers;
- MasterTrimEnv(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http, int num_shards)
+ MasterTrimEnv(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, RGWHTTPManager *http, int num_shards)
: TrimEnv(dpp, store, http, num_shards),
last_trim_markers(num_shards)
{
/// last trim timestamp for each shard, only applies to current period's mdlog
std::vector<ceph::real_time> last_trim_timestamps;
- PeerTrimEnv(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http, int num_shards)
+ PeerTrimEnv(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, RGWHTTPManager *http, int num_shards)
: TrimEnv(dpp, store, http, num_shards),
last_trim_timestamps(num_shards)
{}
// if realm_epoch == current, trim mdlog based on markers
if (epoch == env.current.get_epoch()) {
- auto mdlog = store->svc.mdlog->get_log(env.current.get_period().get_id());
+ auto mdlog = store->svc()->mdlog->get_log(env.current.get_period().get_id());
spawn(new MetaMasterTrimShardCollectCR(env, mdlog, min_status), true);
}
}
: RGWShardCollectCR(env.store->ctx(), MAX_CONCURRENT_SHARDS),
env(env), mdlog(mdlog), period_id(env.current.get_period().get_id())
{
- meta_env.init(env.dpp, cct, env.store, env.store->svc.zone->get_master_conn(),
- env.store->svc.rados->get_async_processor(), env.http, nullptr,
- env.store->get_sync_tracer());
+ meta_env.init(env.dpp, cct, env.store, env.store->svc()->zone->get_master_conn(),
+ env.store->svc()->rados->get_async_processor(), env.http, nullptr,
+ env.store->getRados()->get_sync_tracer());
}
bool spawn_next() override;
};
using LogInfoCR = RGWReadRESTResourceCR<rgw_mdlog_info>;
- call(new LogInfoCR(cct, env.store->svc.zone->get_master_conn(), env.http,
+ call(new LogInfoCR(cct, env.store->svc()->zone->get_master_conn(), env.http,
"/admin/log/", params, &mdlog_info));
}
if (retcode < 0) {
// if realm_epoch == current, trim mdlog based on master's markers
if (mdlog_info.realm_epoch == env.current.get_epoch()) {
yield {
- auto mdlog = env.store->svc.mdlog->get_log(env.current.get_period().get_id());
+ auto mdlog = env.store->svc()->mdlog->get_log(env.current.get_period().get_id());
call(new MetaPeerTrimShardCollectCR(env, mdlog));
// ignore any errors during purge/trim because we want to hold the lock open
}
}
class MetaTrimPollCR : public RGWCoroutine {
- RGWRados *const store;
+ rgw::sal::RGWRadosStore *const store;
const utime_t interval; //< polling interval
const rgw_raw_obj obj;
const std::string name{"meta_trim"}; //< lock name
virtual RGWCoroutine* alloc_cr() = 0;
public:
- MetaTrimPollCR(RGWRados *store, utime_t interval)
+ MetaTrimPollCR(rgw::sal::RGWRadosStore *store, utime_t interval)
: RGWCoroutine(store->ctx()), store(store), interval(interval),
- obj(store->svc.zone->get_zone_params().log_pool, RGWMetadataLogHistory::oid),
+ obj(store->svc()->zone->get_zone_params().log_pool, RGWMetadataLogHistory::oid),
cookie(RGWSimpleRadosLockCR::gen_random_cookie(cct))
{}
// prevent others from trimming for our entire wait interval
set_status("acquiring trim lock");
- yield call(new RGWSimpleRadosLockCR(store->svc.rados->get_async_processor(), store,
+ yield call(new RGWSimpleRadosLockCR(store->svc()->rados->get_async_processor(), store,
obj, name, cookie, interval.sec()));
if (retcode < 0) {
ldout(cct, 4) << "failed to lock: " << cpp_strerror(retcode) << dendl;
if (retcode < 0) {
// on errors, unlock so other gateways can try
set_status("unlocking");
- yield call(new RGWSimpleRadosUnlockCR(store->svc.rados->get_async_processor(), store,
+ yield call(new RGWSimpleRadosUnlockCR(store->svc()->rados->get_async_processor(), store,
obj, name, cookie));
}
}
return new MetaMasterTrimCR(env);
}
public:
- MetaMasterTrimPollCR(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http,
+ MetaMasterTrimPollCR(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, RGWHTTPManager *http,
int num_shards, utime_t interval)
: MetaTrimPollCR(store, interval),
env(dpp, store, http, num_shards)
return new MetaPeerTrimCR(env);
}
public:
- MetaPeerTrimPollCR(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http,
+ MetaPeerTrimPollCR(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, RGWHTTPManager *http,
int num_shards, utime_t interval)
: MetaTrimPollCR(store, interval),
env(dpp, store, http, num_shards)
{}
};
-RGWCoroutine* create_meta_log_trim_cr(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http,
+RGWCoroutine* create_meta_log_trim_cr(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, RGWHTTPManager *http,
int num_shards, utime_t interval)
{
- if (store->svc.zone->is_meta_master()) {
+ if (store->svc()->zone->is_meta_master()) {
return new MetaMasterTrimPollCR(dpp, store, http, num_shards, interval);
}
return new MetaPeerTrimPollCR(dpp, store, http, num_shards, interval);
struct MetaMasterAdminTrimCR : private MasterTrimEnv, public MetaMasterTrimCR {
- MetaMasterAdminTrimCR(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http, int num_shards)
+ MetaMasterAdminTrimCR(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, RGWHTTPManager *http, int num_shards)
: MasterTrimEnv(dpp, store, http, num_shards),
MetaMasterTrimCR(*static_cast<MasterTrimEnv*>(this))
{}
};
struct MetaPeerAdminTrimCR : private PeerTrimEnv, public MetaPeerTrimCR {
- MetaPeerAdminTrimCR(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http, int num_shards)
+ MetaPeerAdminTrimCR(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store, RGWHTTPManager *http, int num_shards)
: PeerTrimEnv(dpp, store, http, num_shards),
MetaPeerTrimCR(*static_cast<PeerTrimEnv*>(this))
{}
};
-RGWCoroutine* create_admin_meta_log_trim_cr(const DoutPrefixProvider *dpp, RGWRados *store,
+RGWCoroutine* create_admin_meta_log_trim_cr(const DoutPrefixProvider *dpp, rgw::sal::RGWRadosStore *store,
RGWHTTPManager *http,
int num_shards)
{
- if (store->svc.zone->is_meta_master()) {
+ if (store->svc()->zone->is_meta_master()) {
return new MetaMasterAdminTrimCR(dpp, store, http, num_shards);
}
return new MetaPeerAdminTrimCR(dpp, store, http, num_shards);
class RGWRados;
class RGWHTTPManager;
class utime_t;
+namespace rgw { namespace sal {
+ class RGWRadosStore;
+} }
// MetaLogTrimCR factory function
RGWCoroutine* create_meta_log_trim_cr(const DoutPrefixProvider *dpp,
- RGWRados *store,
+ rgw::sal::RGWRadosStore *store,
RGWHTTPManager *http,
int num_shards, utime_t interval);
// factory function for mdlog trim via radosgw-admin
RGWCoroutine* create_admin_meta_log_trim_cr(const DoutPrefixProvider *dpp,
- RGWRados *store,
+ rgw::sal::RGWRadosStore *store,
RGWHTTPManager *http,
int num_shards);
#include "common/Formatter.h"
#include "common/ceph_json.h"
#include "common/RWLock.h"
-#include "rgw_rados.h"
+#include "rgw_sal.h"
#include "rgw_zone.h"
#include "rgw_acl.h"
info.access_keys.clear();
}
-int rgw_user_sync_all_stats(RGWRados *store, const rgw_user& user_id)
+int rgw_user_sync_all_stats(rgw::sal::RGWRadosStore *store, const rgw_user& user_id)
{
CephContext *cct = store->ctx();
size_t max_entries = cct->_conf->rgw_list_buckets_max_chunk;
bool is_truncated = false;
string marker;
int ret;
- RGWSysObjectCtx obj_ctx = store->svc.sysobj->init_obj_ctx();
+ RGWSysObjectCtx obj_ctx = store->svc()->sysobj->init_obj_ctx();
do {
RGWUserBuckets user_buckets;
- ret = store->ctl.user->list_buckets(user_id,
+ ret = store->ctl()->user->list_buckets(user_id,
marker, string(),
max_entries,
false,
bucket.tenant = user_id.tenant;
bucket.name = bucket_ent.bucket.name;
- ret = store->get_bucket_info(obj_ctx, user_id.tenant, bucket_ent.bucket.name,
+ ret = store->getRados()->get_bucket_info(obj_ctx, user_id.tenant, bucket_ent.bucket.name,
bucket_info, nullptr, null_yield, nullptr);
if (ret < 0) {
ldout(cct, 0) << "ERROR: could not read bucket info: bucket=" << bucket_ent.bucket << " ret=" << ret << dendl;
continue;
}
- ret = store->ctl.bucket->sync_user_stats(user_id, bucket_info);
+ ret = store->ctl()->bucket->sync_user_stats(user_id, bucket_info);
if (ret < 0) {
ldout(cct, 0) << "ERROR: could not sync bucket stats: ret=" << ret << dendl;
return ret;
}
RGWQuotaInfo bucket_quota;
- ret = store->check_bucket_shards(bucket_info, bucket_info.bucket, bucket_quota);
+ ret = store->getRados()->check_bucket_shards(bucket_info, bucket_info.bucket, bucket_quota);
if (ret < 0) {
ldout(cct, 0) << "ERROR in check_bucket_shards: " << cpp_strerror(-ret)<< dendl;
}
}
} while (is_truncated);
- ret = store->ctl.user->complete_flush_stats(user_id);
+ ret = store->ctl()->user->complete_flush_stats(user_id);
if (ret < 0) {
cerr << "ERROR: failed to complete syncing user stats: ret=" << ret << std::endl;
return ret;
return 0;
}
-int rgw_user_get_all_buckets_stats(RGWRados *store, const rgw_user& user_id, map<string, cls_user_bucket_entry>& buckets_usage_map)
+int rgw_user_get_all_buckets_stats(rgw::sal::RGWRadosStore *store, const rgw_user& user_id, map<string, cls_user_bucket_entry>& buckets_usage_map)
{
CephContext *cct = store->ctx();
size_t max_entries = cct->_conf->rgw_list_buckets_max_chunk;
const RGWBucketEnt& bucket_ent = i.second;
RGWBucketEnt stats;
- ret = store->ctl.bucket->read_bucket_stats(bucket_ent.bucket, &stats, null_yield);
+ ret = store->ctl()->bucket->read_bucket_stats(bucket_ent.bucket, &stats, null_yield);
if (ret < 0) {
ldout(cct, 0) << "ERROR: could not get bucket stats: ret=" << ret << dendl;
return ret;
init_default();
}
-int RGWUser::init(RGWRados *storage, RGWUserAdminOpState& op_state)
+int RGWUser::init(rgw::sal::RGWRadosStore *storage, RGWUserAdminOpState& op_state)
{
init_default();
int ret = init_storage(storage);
clear_populated();
}
-int RGWUser::init_storage(RGWRados *storage)
+int RGWUser::init_storage(rgw::sal::RGWRadosStore *storage)
{
if (!storage) {
return -EINVAL;
}
store = storage;
- user_ctl = store->ctl.user;
+ user_ctl = store->ctl()->user;
clear_populated();
string obj_marker;
CephContext *cct = store->ctx();
size_t max_buckets = cct->_conf->rgw_list_buckets_max_chunk;
- RGWBucketCtl* bucket_ctl = store->ctl.bucket;
+ RGWBucketCtl* bucket_ctl = store->ctl()->bucket;
do {
RGWUserBuckets buckets;
marker = iter->first;
}
- ret = store->set_buckets_enabled(bucket_names, !suspended);
+ ret = store->getRados()->set_buckets_enabled(bucket_names, !suspended);
if (ret < 0) {
set_err_msg(err_msg, "failed to modify bucket");
return ret;
op_state.max_entries = 1000;
}
- auto meta_mgr = store->ctl.meta.mgr;
+ auto meta_mgr = store->ctl()->meta.mgr;
int ret = meta_mgr->list_keys_init(metadata_key, op_state.marker, &handle);
if (ret < 0) {
return 0;
}
-int RGWUserAdminOp_User::list(RGWRados *store, RGWUserAdminOpState& op_state,
+int RGWUserAdminOp_User::list(rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher)
{
RGWUser user;
return 0;
}
-int RGWUserAdminOp_User::info(RGWRados *store, RGWUserAdminOpState& op_state,
+int RGWUserAdminOp_User::info(rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher)
{
RGWUserInfo info;
RGWStorageStats stats;
RGWStorageStats *arg_stats = NULL;
if (op_state.fetch_stats) {
- int ret = store->ctl.user->read_stats(info.user_id, &stats);
+ int ret = store->ctl()->user->read_stats(info.user_id, &stats);
if (ret < 0 && ret != -ENOENT) {
return ret;
}
return 0;
}
-int RGWUserAdminOp_User::create(RGWRados *store, RGWUserAdminOpState& op_state,
+int RGWUserAdminOp_User::create(rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher)
{
RGWUserInfo info;
return 0;
}
-int RGWUserAdminOp_User::modify(RGWRados *store, RGWUserAdminOpState& op_state,
+int RGWUserAdminOp_User::modify(rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher)
{
RGWUserInfo info;
return 0;
}
-int RGWUserAdminOp_User::remove(RGWRados *store, RGWUserAdminOpState& op_state,
+int RGWUserAdminOp_User::remove(rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher, optional_yield y)
{
RGWUserInfo info;
return ret;
}
-int RGWUserAdminOp_Subuser::create(RGWRados *store, RGWUserAdminOpState& op_state,
+int RGWUserAdminOp_Subuser::create(rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher)
{
RGWUserInfo info;
return 0;
}
-int RGWUserAdminOp_Subuser::modify(RGWRados *store, RGWUserAdminOpState& op_state,
+int RGWUserAdminOp_Subuser::modify(rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher)
{
RGWUserInfo info;
return 0;
}
-int RGWUserAdminOp_Subuser::remove(RGWRados *store, RGWUserAdminOpState& op_state,
+int RGWUserAdminOp_Subuser::remove(rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher)
{
RGWUserInfo info;
return 0;
}
-int RGWUserAdminOp_Key::create(RGWRados *store, RGWUserAdminOpState& op_state,
+int RGWUserAdminOp_Key::create(rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher)
{
RGWUserInfo info;
return 0;
}
-int RGWUserAdminOp_Key::remove(RGWRados *store, RGWUserAdminOpState& op_state,
+int RGWUserAdminOp_Key::remove(rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher)
{
RGWUserInfo info;
return 0;
}
-int RGWUserAdminOp_Caps::add(RGWRados *store, RGWUserAdminOpState& op_state,
+int RGWUserAdminOp_Caps::add(rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher)
{
RGWUserInfo info;
}
-int RGWUserAdminOp_Caps::remove(RGWRados *store, RGWUserAdminOpState& op_state,
+int RGWUserAdminOp_Caps::remove(rgw::sal::RGWRadosStore *store, RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher)
{
RGWUserInfo info;
#define XMLNS_AWS_S3 "http://s3.amazonaws.com/doc/2006-03-01/"
-class RGWRados;
class RGWUserCtl;
class RGWBucketCtl;
class RGWUserBuckets;
class RGWGetUserStats_CB;
+namespace rgw { namespace sal {
+class RGWRadosStore;
+} }
/**
* A string wrapper that includes encode/decode functions
};
WRITE_CLASS_ENCODER(RGWUID)
-extern int rgw_user_sync_all_stats(RGWRados *store, const rgw_user& user_id);
-extern int rgw_user_get_all_buckets_stats(RGWRados *store, const rgw_user& user_id, map<string, cls_user_bucket_entry>&buckets_usage_map);
+extern int rgw_user_sync_all_stats(rgw::sal::RGWRadosStore *store, const rgw_user& user_id);
+extern int rgw_user_get_all_buckets_stats(rgw::sal::RGWRadosStore *store, const rgw_user& user_id, map<string, cls_user_bucket_entry>&buckets_usage_map);
/**
* Get the anonymous (ie, unauthenticated) user info.
std::map<std::string, int, ltstr_nocase> key_type_map;
rgw_user user_id;
- RGWRados *store{nullptr};
+ rgw::sal::RGWRadosStore *store{nullptr};
RGWUserCtl *user_ctl{nullptr};
map<std::string, RGWAccessKey> *swift_keys{nullptr};
RGWUser *user{nullptr};
rgw_user user_id;
- RGWRados *store{nullptr};
+ rgw::sal::RGWRadosStore *store{nullptr};
RGWUserCtl *user_ctl{nullptr};
bool subusers_allowed{false};
private:
RGWUserInfo old_info;
- RGWRados *store{nullptr};
+ rgw::sal::RGWRadosStore *store{nullptr};
RGWUserCtl *user_ctl{nullptr};
rgw_user user_id;
public:
RGWUser();
- int init(RGWRados *storage, RGWUserAdminOpState& op_state);
+ int init(rgw::sal::RGWRadosStore *storage, RGWUserAdminOpState& op_state);
- int init_storage(RGWRados *storage);
+ int init_storage(rgw::sal::RGWRadosStore *storage);
int init(RGWUserAdminOpState& op_state);
int init_members(RGWUserAdminOpState& op_state);
- RGWRados *get_store() { return store; }
+ rgw::sal::RGWRadosStore *get_store() { return store; }
RGWUserCtl *get_user_ctl() { return user_ctl; }
/* API Contracted Members */
class RGWUserAdminOp_User
{
public:
- static int list(RGWRados *store,
+ static int list(rgw::sal::RGWRadosStore *store,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher);
- static int info(RGWRados *store,
+ static int info(rgw::sal::RGWRadosStore *store,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher);
- static int create(RGWRados *store,
+ static int create(rgw::sal::RGWRadosStore *store,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher);
- static int modify(RGWRados *store,
+ static int modify(rgw::sal::RGWRadosStore *store,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher);
- static int remove(RGWRados *store,
+ static int remove(rgw::sal::RGWRadosStore *store,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y);
};
class RGWUserAdminOp_Subuser
{
public:
- static int create(RGWRados *store,
+ static int create(rgw::sal::RGWRadosStore *store,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher);
- static int modify(RGWRados *store,
+ static int modify(rgw::sal::RGWRadosStore *store,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher);
- static int remove(RGWRados *store,
+ static int remove(rgw::sal::RGWRadosStore *store,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher);
};
class RGWUserAdminOp_Key
{
public:
- static int create(RGWRados *store,
+ static int create(rgw::sal::RGWRadosStore *store,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher);
- static int remove(RGWRados *store,
+ static int remove(rgw::sal::RGWRadosStore *store,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher);
};
class RGWUserAdminOp_Caps
{
public:
- static int add(RGWRados *store,
+ static int add(rgw::sal::RGWRadosStore *store,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher);
- static int remove(RGWRados *store,
+ static int remove(rgw::sal::RGWRadosStore *store,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher);
};
realm_epoch++;
}
-static int read_sync_status(RGWRados *store, rgw_meta_sync_status *sync_status)
+static int read_sync_status(rgw::sal::RGWRadosStore *store, rgw_meta_sync_status *sync_status)
{
// initialize a sync status manager to read the status
- RGWMetaSyncStatusManager mgr(store, store->svc.rados->get_async_processor());
+ RGWMetaSyncStatusManager mgr(store, store->svc()->rados->get_async_processor());
int r = mgr.init();
if (r < 0) {
return r;
return r;
}
-int RGWPeriod::update_sync_status(RGWRados *store, /* for now */
+int RGWPeriod::update_sync_status(rgw::sal::RGWRadosStore *store, /* for now */
const RGWPeriod ¤t_period,
std::ostream& error_stream,
bool force_if_stale)
return 0;
}
-int RGWPeriod::commit(RGWRados *store,
+int RGWPeriod::commit(rgw::sal::RGWRadosStore *store,
RGWRealm& realm, const RGWPeriod& current_period,
std::ostream& error_stream, bool force_if_stale)
{
const std::string get_period_oid_prefix() const;
// gather the metadata sync status for each shard; only for use on master zone
- int update_sync_status(RGWRados *store,
+ int update_sync_status(rgw::sal::RGWRadosStore *store,
const RGWPeriod ¤t_period,
std::ostream& error_stream, bool force_if_stale);
int update();
// commit a staging period; only for use on master zone
- int commit(RGWRados *store,
+ int commit(rgw::sal::RGWRadosStore *store,
RGWRealm& realm, const RGWPeriod ¤t_period,
std::ostream& error_stream, bool force_if_stale = false);
// Unfortunately RGWCivetWeb is too tightly tied to civetweb to test RGWCivetWeb::init_env.
RGWEnv rgw_env;
RGWUserInfo user;
- RGWRados rgw_rados;
+ rgw::sal::RGWRadosStore store;
rgw_env.set("REMOTE_ADDR", "192.168.1.1");
rgw_env.set("HTTP_HOST", "1.2.3.4");
req_state rgw_req_state(cct.get(), &rgw_env, &user, 0);
- rgw_build_iam_environment(&rgw_rados, &rgw_req_state);
+ rgw_build_iam_environment(&store, &rgw_req_state);
auto ip = rgw_req_state.env.find("aws:SourceIp");
ASSERT_NE(ip, rgw_req_state.env.end());
EXPECT_EQ(ip->second, "192.168.1.1");
ASSERT_EQ(cct.get()->_conf.set_val("rgw_remote_addr_param", "SOME_VAR"), 0);
EXPECT_EQ(cct.get()->_conf->rgw_remote_addr_param, "SOME_VAR");
rgw_req_state.env.clear();
- rgw_build_iam_environment(&rgw_rados, &rgw_req_state);
+ rgw_build_iam_environment(&store, &rgw_req_state);
ip = rgw_req_state.env.find("aws:SourceIp");
EXPECT_EQ(ip, rgw_req_state.env.end());
rgw_env.set("SOME_VAR", "192.168.1.2");
rgw_req_state.env.clear();
- rgw_build_iam_environment(&rgw_rados, &rgw_req_state);
+ rgw_build_iam_environment(&store, &rgw_req_state);
ip = rgw_req_state.env.find("aws:SourceIp");
ASSERT_NE(ip, rgw_req_state.env.end());
EXPECT_EQ(ip->second, "192.168.1.2");
ASSERT_EQ(cct.get()->_conf.set_val("rgw_remote_addr_param", "HTTP_X_FORWARDED_FOR"), 0);
rgw_env.set("HTTP_X_FORWARDED_FOR", "192.168.1.3");
rgw_req_state.env.clear();
- rgw_build_iam_environment(&rgw_rados, &rgw_req_state);
+ rgw_build_iam_environment(&store, &rgw_req_state);
ip = rgw_req_state.env.find("aws:SourceIp");
ASSERT_NE(ip, rgw_req_state.env.end());
EXPECT_EQ(ip->second, "192.168.1.3");
rgw_env.set("HTTP_X_FORWARDED_FOR", "192.168.1.4, 4.3.2.1, 2001:db8:85a3:8d3:1319:8a2e:370:7348");
rgw_req_state.env.clear();
- rgw_build_iam_environment(&rgw_rados, &rgw_req_state);
+ rgw_build_iam_environment(&store, &rgw_req_state);
ip = rgw_req_state.env.find("aws:SourceIp");
ASSERT_NE(ip, rgw_req_state.env.end());
EXPECT_EQ(ip->second, "192.168.1.4");