return 0;
}
-int SQLiteConfigStore::read_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y, std::string_view period_id,
- uint32_t& epoch, RGWObjVersionTracker* objv, RGWPeriod& info)
+int SQLiteConfigStore::update_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y,
+ std::string_view period_id, uint32_t epoch)
{
-Prefix prefix{*dpp, "dbconfig:sqlite:read_latest_epoch "}; dpp = &prefix;
+Prefix prefix{*dpp, "dbconfig:sqlite:update_latest_epoch "}; dpp = &prefix;
-
- if (period_id.empty()) {
- ldpp_dout(dpp, 0) << "requires a period id" << dendl;
- return -EINVAL;
- }
-
- try {
- auto conn = impl->get(dpp);
- period_select_epoch(dpp, *conn, period_id, epoch, info);
- } catch (const buffer::error& e) {
- ldpp_dout(dpp, 20) << "period decode failed: " << e.what() << dendl;
- return -EIO;
- } catch (const sqlite::error& e) {
- ldpp_dout(dpp, 20) << "period select failed: " << e.what() << dendl;
- if (e.code() == sqlite::errc::done) {
- return -ENOENT;
- } else if (e.code() == sqlite::errc::busy) {
- return -EBUSY;
- }
- return -EIO;
- }
- return 0;
-}
-
-int SQLiteConfigStore::write_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive,
- std::string_view period_id, uint32_t epoch, RGWObjVersionTracker* objv,
- const RGWPeriod& info)
-{
- Prefix prefix{*dpp, "dbconfig:sqlite:write_latest_epoch "}; dpp = &prefix;
-
- if (info.id.empty()) {
- ldpp_dout(dpp, 0) << "period cannot have an empty id" << dendl;
- return -EINVAL;
- }
-
- bufferlist bl;
- encode(info, bl);
- const auto data = std::string_view{bl.c_str(), bl.length()};
-
- try {
- auto conn = impl->get(dpp);
- sqlite::stmt_ptr* stmt = nullptr;
- if (exclusive) {
- stmt = &conn->statements["period_ins"];
- if (!*stmt) {
- const std::string sql = fmt::format(schema::period_insert4,
- P1, P2, P3, P4);
- *stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql);
- }
- } else {
- stmt = &conn->statements["period_ups"];
- if (!*stmt) {
- const std::string sql = fmt::format(schema::period_upsert4,
- P1, P2, P3, P4);
- *stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql);
- }
- }
- auto binding = sqlite::stmt_binding{stmt->get()};
- sqlite::bind_text(dpp, binding, P1, info.id);
- sqlite::bind_int(dpp, binding, P2, info.epoch);
- sqlite::bind_text(dpp, binding, P3, info.realm_id);
- sqlite::bind_text(dpp, binding, P4, data);
-
- auto reset = sqlite::stmt_execution{stmt->get()};
- sqlite::eval0(dpp, reset);
- } catch (const sqlite::error& e) {
- ldpp_dout(dpp, 20) << "period insert failed: " << e.what() << dendl;
- if (e.code() == sqlite::errc::foreign_key_constraint) {
- return -EINVAL; // refers to nonexistent RealmID
- } else if (e.code() == sqlite::errc::busy) {
- return -EBUSY;
- }
- return -EIO;
- }
+ // TODO: implement latest-epoch tracking for the sqlite backend;
+ // currently a no-op that reports success so callers are not blocked
return 0;
}
optional_yield y, const std::string& marker,
std::span<std::string> entries,
sal::ListResult<std::string>& result) override;
- int read_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y, std::string_view period_id,
- uint32_t& epoch, RGWObjVersionTracker* objv, RGWPeriod& info) override;
- int write_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, std::string_view period_id,
- uint32_t epoch, RGWObjVersionTracker* objv, const RGWPeriod& info) override;
+ int update_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y,
+ std::string_view period_id, uint32_t epoch) override;
int write_default_zonegroup_id(const DoutPrefixProvider* dpp,
optional_yield y, bool exclusive,
return 0;
}
-int ImmutableConfigStore::read_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y, std::string_view period_id,
- uint32_t& epoch, RGWObjVersionTracker* objv, RGWPeriod& info)
-{
- return -ENOENT;
-}
-
-int ImmutableConfigStore::write_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive,
- std::string_view period_id, uint32_t epoch, RGWObjVersionTracker* objv,
- const RGWPeriod& info)
+int ImmutableConfigStore::update_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y,
+ std::string_view period_id, uint32_t epoch)
{
return -EROFS;
}
optional_yield y, const std::string& marker,
std::span<std::string> entries,
ListResult<std::string>& result) override;
- virtual int read_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y, std::string_view period_id,
- uint32_t& epoch, RGWObjVersionTracker* objv, RGWPeriod& info) override;
- virtual int write_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, std::string_view period_id,
- uint32_t epoch, RGWObjVersionTracker* objv, const RGWPeriod& info) override;
+ virtual int update_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y,
+ std::string_view period_id, uint32_t epoch) override;
// ZoneGroup
virtual int write_default_zonegroup_id(const DoutPrefixProvider* dpp,
period_latest_epoch_info_oid));
}
-int RadosConfigStore::read_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y, std::string_view period_id,
- uint32_t& epoch, RGWObjVersionTracker* objv, RGWPeriod& info)
+static int read_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y,
+ ConfigImpl* impl, std::string_view period_id,
+ uint32_t& epoch, RGWObjVersionTracker* objv)
{
const auto& pool = impl->period_pool;
const auto latest_oid = latest_epoch_oid(dpp->get_cct()->_conf, period_id);
return r;
}
-int RadosConfigStore::write_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive,
- std::string_view period_id, uint32_t epoch, RGWObjVersionTracker* objv,
- const RGWPeriod& info)
+static int write_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y,
+ ConfigImpl* impl, bool exclusive,
+ std::string_view period_id, uint32_t epoch,
+ RGWObjVersionTracker* objv)
{
const auto& pool = impl->period_pool;
const auto latest_oid = latest_epoch_oid(dpp->get_cct()->_conf, period_id);
return impl->remove(dpp, y, pool, latest_oid, objv);
}
-static int update_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y,
- RadosConfigStore& rados_config_store, std::string_view period_id,
- uint32_t epoch, RGWPeriod& info)
+int RadosConfigStore::update_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y,
+ std::string_view period_id, uint32_t epoch)
{
static constexpr int MAX_RETRIES = 20;
bool exclusive = false;
// read existing epoch
- int r = rados_config_store.read_latest_epoch(dpp, y, period_id, existing_epoch, &objv, info);
+ int r = read_latest_epoch(dpp, y, impl.get(), period_id, existing_epoch, &objv);
if (r == -ENOENT) {
// use an exclusive create to set the epoch atomically
exclusive = true;
<< " -> " << epoch << " on period=" << period_id << dendl;
}
- r = rados_config_store.write_latest_epoch(dpp, y, exclusive, period_id, epoch, &objv, info);
+ r = write_latest_epoch(dpp, y, impl.get(), exclusive, period_id, epoch, &objv);
if (r == -EEXIST) {
continue; // exclusive create raced with another update, retry
} else if (r == -ECANCELED) {
}
// non const RGWPeriod
- RGWPeriod info_copy = info;
- (void) update_latest_epoch(dpp, y, *this, info.get_id(), info.get_epoch(), info_copy);
+ (void) this->update_latest_epoch(dpp, y, info.get_id(), info.get_epoch());
return 0;
}
int r = 0;
if (!epoch) {
epoch = 0;
- r = read_latest_epoch(dpp, y, period_id, *epoch, nullptr, info);
+ r = read_latest_epoch(dpp, y, impl.get(), period_id, *epoch, nullptr);
if (r < 0) {
return r;
}
// read the latest_epoch
uint32_t latest_epoch = 0;
RGWObjVersionTracker latest_objv;
- RGWPeriod period; // not used in RadosConfigStore, but needed in the API
- int r = read_latest_epoch(dpp, y, period_id, latest_epoch, &latest_objv, period);
+ int r = read_latest_epoch(dpp, y, impl.get(), period_id, latest_epoch, &latest_objv);
if (r < 0 && r != -ENOENT) { // just delete epoch=0 on ENOENT
ldpp_dout(dpp, 0) << "failed to read latest epoch for period "
<< period_id << ": " << cpp_strerror(r) << dendl;
optional_yield y, const std::string& marker,
std::span<std::string> entries,
sal::ListResult<std::string>& result) override;
- virtual int read_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y, std::string_view period_id,
- uint32_t& epoch, RGWObjVersionTracker* objv, RGWPeriod& info) override;
- virtual int write_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, std::string_view period_id,
- uint32_t epoch, RGWObjVersionTracker* objv, const RGWPeriod& info)override;
+ virtual int update_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y,
+ std::string_view period_id, uint32_t epoch) override;
// ZoneGroup
virtual int write_default_zonegroup_id(const DoutPrefixProvider* dpp,
data_sync_cr->wakeup(shard_id, entries);
}
-int RGWRemoteDataLog::run_sync(const DoutPrefixProvider *dpp, int num_shards)
+int RGWRemoteDataLog::run_sync(const DoutPrefixProvider *dpp, int num_shards, rgw::sal::ConfigStore* cfgstore)
{
// construct and start bid manager for data sync fairness
const auto& control_pool = sc.env->driver->svc()->zone->get_zone_params().control_pool;
int read_recovering_shards(const DoutPrefixProvider *dpp, const int num_shards, std::set<int>& recovering_shards);
int read_shard_status(const DoutPrefixProvider *dpp, int shard_id, std::set<std::string>& lagging_buckets,std::set<std::string>& recovering_buckets, rgw_data_sync_marker* sync_marker, const int max_entries);
int init_sync_status(const DoutPrefixProvider *dpp, int num_shards);
- int run_sync(const DoutPrefixProvider *dpp, int num_shards);
+ int run_sync(const DoutPrefixProvider *dpp, int num_shards, rgw::sal::ConfigStore* cfgstore);
void wakeup(int shard_id, bc::flat_set<rgw_data_notify_entry>& entries);
};
return source_log.read_source_log_shards_next(dpp, shard_markers, result);
}
- int run(const DoutPrefixProvider *dpp) { return source_log.run_sync(dpp, num_shards); }
+ int run(const DoutPrefixProvider *dpp, rgw::sal::ConfigStore* cfgstore) { return source_log.run_sync(dpp, num_shards, cfgstore); }
void wakeup(int shard_id, bc::flat_set<rgw_data_notify_entry>& entries) { return source_log.wakeup(shard_id, entries); }
#include "services/svc_zone.h"
-#define FIRST_EPOCH 1
-
#define dout_subsys ceph_subsys_rgw
using namespace std;
return -ENOENT;
}
-int RGWPeriod::get_latest_epoch(const DoutPrefixProvider *dpp, epoch_t& latest_epoch, optional_yield y)
-{
- RGWPeriodLatestEpochInfo info;
-
- auto config_store_type = g_conf().get_val<std::string>("rgw_config_store");
- auto cfgstore = DriverManager::create_config_store(dpp, config_store_type);
- int ret = cfgstore->read_latest_epoch(dpp, y, id, info.epoch, nullptr, *this);
- if (ret < 0) {
- return ret;
- }
-
- latest_epoch = info.epoch;
-
- return 0;
-}
-
-void RGWPeriod::fork()
-{
- ldout(cct, 20) << __func__ << " realm " << realm_id << " period " << id << dendl;
- predecessor_uuid = id;
- id = get_staging_id(realm_id);
- period_map.reset();
- realm_epoch++;
-}
-
static int read_sync_status(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, rgw_meta_sync_status *sync_status)
{
rgw::sal::RadosStore* rados_store = static_cast<rgw::sal::RadosStore*>(driver);
return 0;
}
-int RGWPeriod::commit(const DoutPrefixProvider *dpp,
- rgw::sal::Driver* driver,
- RGWRealm& realm, const RGWPeriod& current_period,
- std::ostream& error_stream, optional_yield y,
- bool force_if_stale)
-{
- ldpp_dout(dpp, 20) << __func__ << " realm " << realm.get_id() << " period " << current_period.get_id() << dendl;
- // gateway must be in the master zone to commit
- if (!driver->get_zone()->get_zonegroup().is_master_zonegroup()) {
- error_stream << "Cannot commit period on zone "
- << driver->get_zone()->get_id() << ", it must be sent to "
- "the period's master zone " << master_zone << '.' << std::endl;
- return -EINVAL;
- }
- // period predecessor must match current period
- if (predecessor_uuid != current_period.get_id()) {
- error_stream << "Period predecessor " << predecessor_uuid
- << " does not match current period " << current_period.get_id()
- << ". Use 'period pull' to get the latest period from the master, "
- "reapply your changes, and try again." << std::endl;
- return -EINVAL;
- }
- // realm epoch must be 1 greater than current period
- if (realm_epoch != current_period.get_realm_epoch() + 1) {
- error_stream << "Period's realm epoch " << realm_epoch
- << " does not come directly after current realm epoch "
- << current_period.get_realm_epoch() << ". Use 'realm pull' to get the "
- "latest realm and period from the master zone, reapply your changes, "
- "and try again." << std::endl;
- return -EINVAL;
- }
-
- auto config_store_type = g_conf().get_val<std::string>("rgw_config_store");
- auto cfgstore = DriverManager::create_config_store(dpp, config_store_type);
- // did the master zone change?
- if (master_zone != current_period.get_master_zone()) {
- // store the current metadata sync status in the period
- int r = update_sync_status(dpp, driver, current_period, error_stream, force_if_stale);
- if (r < 0) {
- ldpp_dout(dpp, 0) << "failed to update metadata sync status: "
- << cpp_strerror(-r) << dendl;
- return r;
- }
- // create an object with a new period id
- period_map.id = id = rgw::gen_random_uuid();
- epoch = FIRST_EPOCH;
-
- constexpr bool exclusive = true;
- r = cfgstore->create_period(dpp, y, exclusive, *this);
- if (r < 0) {
- ldpp_dout(dpp, 0) << "failed to create new period: " << cpp_strerror(-r) << dendl;
- return r;
- }
- // set as current period
- r = realm.set_current_period(dpp, *this, y);
- if (r < 0) {
- ldpp_dout(dpp, 0) << "failed to update realm's current period: "
- << cpp_strerror(-r) << dendl;
- return r;
- }
- ldpp_dout(dpp, 4) << "Promoted to master zone and committed new period "
- << id << dendl;
- return 0;
- }
- // period must be based on current epoch
- if (epoch != current_period.get_epoch()) {
- error_stream << "Period epoch " << epoch << " does not match "
- "predecessor epoch " << current_period.get_epoch()
- << ". Use 'period pull' to get the latest epoch from the master zone, "
- "reapply your changes, and try again." << std::endl;
- return -EINVAL;
- }
- // set period as next epoch
- set_id(current_period.get_id());
- set_epoch(current_period.get_epoch() + 1);
- set_predecessor(current_period.get_predecessor());
- realm_epoch = current_period.get_realm_epoch();
- // write the period to rados
- int r = cfgstore->create_period(dpp, y, false, *this);
- if (r < 0) {
- ldpp_dout(dpp, 0) << "failed to store period: " << cpp_strerror(-r) << dendl;
- return r;
- }
- // set as latest epoch
- r = update_latest_epoch(dpp, epoch, y);
- if (r < 0) {
- ldpp_dout(dpp, 0) << "failed to set latest epoch: " << cpp_strerror(-r) << dendl;
- return r;
- }
- r = rgw::reflect_period(dpp, y, cfgstore.get(), *this);
- if (r < 0) {
- ldpp_dout(dpp, 0) << "failed to update local objects: " << cpp_strerror(-r) << dendl;
- return r;
- }
- ldpp_dout(dpp, 4) << "Committed new epoch " << epoch
- << " for period " << id << dendl;
- return 0;
-}
-
void RGWPeriod::generate_test_instances(list<RGWPeriod*> &o)
{
RGWPeriod *z = new RGWPeriod;
do {
auto start = ceph::real_clock::now();
+
int r = processor->process(this);
if (r < 0) {
ldpp_dout(this, 0) << "ERROR: processor->process() returned error r=" << r << dendl;
class RGWMetaSyncProcessorThread : public RGWSyncProcessorThread
{
RGWMetaSyncStatusManager sync;
+ rgw::sal::ConfigStore* cfgstore;
uint64_t interval_msec() override {
return 0; /* no interval associated, it'll run once until stopped */
}
int process(const DoutPrefixProvider *dpp) override {
- sync.run(dpp, null_yield);
+ sync.run(dpp, null_yield, cfgstore);
return 0;
}
};
PerfCountersRef counters;
RGWDataSyncStatusManager sync;
bool initialized;
+ rgw::sal::ConfigStore *cfgstore{nullptr};
uint64_t interval_msec() override {
if (initialized) {
/* we'll be back! */
return 0;
}
- sync.run(dpp);
+ sync.run(dpp, cfgstore);
return 0;
}
};
}
int RGWRados::init_svc(bool raw, const DoutPrefixProvider *dpp,
- bool background_tasks, // Ignored when `raw`
- const rgw::SiteConfig& site)
+ bool background_tasks, // Ignored when `raw`
+ const rgw::SiteConfig& site, rgw::sal::ConfigStore* cfgstore)
{
if (raw) {
- return svc.init_raw(cct, driver, use_cache, null_yield, dpp, site);
+ return svc.init_raw(cct, driver, use_cache, null_yield, dpp, site, cfgstore);
}
- return svc.init(cct, driver, use_cache, run_sync_thread, background_tasks, null_yield, dpp, site);
+ return svc.init(cct, driver, use_cache, run_sync_thread, background_tasks, null_yield, dpp, site, cfgstore);
}
/**
*/
int RGWRados::init_begin(CephContext* _cct, const DoutPrefixProvider *dpp,
bool background_tasks,
- const rgw::SiteConfig& site)
+ const rgw::SiteConfig& site, rgw::sal::ConfigStore* cfgstore)
{
set_context(_cct);
int ret = driver->init_neorados(dpp);
return ret;
}
- ret = init_svc(false, dpp, background_tasks, site);
+ ret = init_svc(false, dpp, background_tasks, site, cfgstore);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to init services (ret=" << cpp_strerror(-ret) << ")" << dendl;
return ret;
CephContext *ctx() { return cct; }
/** do all necessary setup of the storage device */
int init_begin(CephContext *_cct, const DoutPrefixProvider *dpp,
- bool background_tasks, const rgw::SiteConfig& site);
+ bool background_tasks, const rgw::SiteConfig& site, rgw::sal::ConfigStore* cfgstore);
/** Initialize the RADOS instance and prepare to do other ops */
- int init_svc(bool raw, const DoutPrefixProvider *dpp, bool background_tasks, const rgw::SiteConfig& site);
+ int init_svc(bool raw, const DoutPrefixProvider *dpp, bool background_tasks, const rgw::SiteConfig& site, rgw::sal::ConfigStore* cfgstore);
virtual int init_rados();
int init_complete(const DoutPrefixProvider *dpp, optional_yield y);
void finalize();
#include "rgw_zone.h"
#include "rgw_sal_rados.h"
#include "rgw_sal_config.h"
+#include "rgw_process_env.h"
#include "services/svc_zone.h"
#include "services/svc_mdlog.h"
period.set_epoch(epoch);
- auto config_store_type = g_conf().get_val<std::string>("rgw_config_store");
- auto cfgstore = DriverManager::create_config_store(this, config_store_type);
- op_ret = cfgstore->read_period(this, y, period_id, epoch, period);
+ op_ret = s->penv.cfgstore->read_period(this, y, period_id, epoch, period);
if (op_ret < 0)
ldpp_dout(this, 5) << "failed to read period" << dendl;
}
auto cct = driver->ctx();
// initialize the period without reading from rados
- auto config_store_type = g_conf().get_val<std::string>("rgw_config_store");
- auto cfgstore = DriverManager::create_config_store(this, config_store_type);
- cfgstore->read_period(this, y,driver->get_zone()->get_current_period_id(), std::nullopt, period);
+ s->penv.cfgstore->read_period(this, y,driver->get_zone()->get_current_period_id(), std::nullopt, period);
// decode the period from input
const auto max_size = cct->_conf->rgw_max_put_param_size;
// load the realm and current period from rados; there may be a more recent
// period that we haven't restarted with yet. we also don't want to modify
// the objects in use by RGWRados
- RGWRealm realm(period.get_realm());
- op_ret = rgw::read_realm(this, y, cfgstore.get(), realm.get_id(), realm.get_name(), realm);
+ std::string_view realm_id = period.get_realm();
+ constexpr std::string_view realm_name; // empty, look up by id only
+ RGWRealm realm;
+ std::unique_ptr<rgw::sal::RealmWriter> realm_writer;
+ op_ret = rgw::read_realm(this, y, s->penv.cfgstore, realm_id, realm_name, realm, &realm_writer);
if (op_ret < 0) {
ldpp_dout(this, -1) << "failed to read current realm: "
<< cpp_strerror(-op_ret) << dendl;
}
RGWPeriod current_period;
- op_ret = cfgstore->read_period(this, y, driver->get_zone()->get_current_period_id(), std::nullopt, current_period);
+ op_ret = s->penv.cfgstore->read_period(this, y, realm.current_period, std::nullopt, current_period);
if (op_ret < 0) {
ldpp_dout(this, -1) << "failed to read current period: "
<< cpp_strerror(-op_ret) << dendl;
// if period id is empty, handle as 'period commit'
if (period.get_id().empty()) {
-// op_ret = period.commit(this, driver, realm, current_period, error_stream, y);
- std::unique_ptr<rgw::sal::RealmWriter> realm_writer;
- op_ret = rgw::read_realm(this, null_yield, cfgstore.get(),
- period.realm_id, period.get_realm(),
- realm, &realm_writer);
if (op_ret < 0) {
cerr << "Error initializing realm: " << cpp_strerror(-op_ret) << std::endl;
return;
}
- op_ret = rgw::commit_period(this, y, cfgstore.get(), driver, realm, *realm_writer, current_period, period, error_stream, false);
+ op_ret = rgw::commit_period(this, y, s->penv.cfgstore, driver, realm, *realm_writer, current_period, period, error_stream, false);
if (op_ret == -EEXIST) {
op_ret = 0; // succeed on retries so the op is idempotent
return;
}
// write the period to rados
- op_ret = cfgstore->create_period(this, y, false, period);
+ op_ret = s->penv.cfgstore->create_period(this, y, false, period);
if (op_ret < 0) {
ldpp_dout(this, -1) << "failed to store period " << period.get_id() << dendl;
return;
}
// set as latest epoch
- op_ret = period.update_latest_epoch(this, period.get_epoch(), y);
+ op_ret = s->penv.cfgstore->update_latest_epoch(this, y, period.get_id(), period.get_epoch());
if (op_ret == -EEXIST) {
// already have this epoch (or a more recent one)
ldpp_dout(this, 4) << "already have epoch >= " << period.get_epoch()
return;
}
// attach a copy of the period into the period history
- auto cursor = period_history->attach(this, RGWPeriod{period}, y);
+ auto cursor = period_history->attach(this, RGWPeriod{period}, y, s->penv.cfgstore);
if (!cursor) {
// we're missing some history between the new period and current_period
op_ret = cursor.get_error();
return;
}
// set as current period
- op_ret = realm.set_current_period(this, period, y);
+ op_ret = rgw::realm_set_current_period(this, y, s->penv.cfgstore, *realm_writer, realm, period);
if (op_ret < 0) {
ldpp_dout(this, -1) << "failed to update realm's current period" << dendl;
return;
return;
}
// reflect the period into our local objects
- op_ret = rgw::reflect_period(this, y, cfgstore.get(), period);
+ op_ret = rgw::reflect_period(this, y, s->penv.cfgstore, period);
if (op_ret < 0) {
ldpp_dout(this, -1) << "failed to update local objects: "
<< cpp_strerror(-op_ret) << dendl;
if (notify_realm) {
// trigger realm reload after sending the response, because reload may
// race to close this connection
- auto config_store_type = g_conf().get_val<std::string>("rgw_config_store");
- auto cfgstore = DriverManager::create_config_store(this, config_store_type);
- (void) cfgstore->realm_notify_new_period(this, null_yield, period);
+ (void) s->penv.cfgstore->realm_notify_new_period(this, s->yield, period);
}
}
// read realm
realm.reset(new RGWRealm(id, name));
auto config_store_type = g_conf().get_val<std::string>("rgw_config_store");
- auto cfgstore = DriverManager::create_config_store(this, config_store_type);
- op_ret = rgw::read_realm(this, y, cfgstore.get(), realm->get_id(), realm->get_name(), *realm);
+ op_ret = rgw::read_realm(this, y, s->penv.cfgstore, realm->get_id(), realm->get_name(), *realm);
if (op_ret < 0)
ldpp_dout(this, -1) << "failed to read realm id=" << id
<< " name=" << name << dendl;
void RGWOp_Realm_List::execute(optional_yield y)
{
- {
- // read default realm
- RGWRealm realm(driver->ctx());
- default_id = realm.get_default_oid();
- }
+ s->penv.cfgstore->read_default_realm_id(this, y, default_id);
op_ret = static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->list_realms(this, realms);
if (op_ret < 0)
ldpp_dout(this, -1) << "failed to list realms" << dendl;
bool run_sync,
bool background_tasks,
optional_yield y,
- const DoutPrefixProvider *dpp)
+ const DoutPrefixProvider *dpp,
+ rgw::sal::ConfigStore* cfgstore)
{
finisher = std::make_unique<RGWSI_Finisher>(cct);
bucket_sobj = std::make_unique<RGWSI_Bucket_SObj>(cct);
has_shutdown = true;
}
-int RGWServices::do_init(CephContext *_cct, rgw::sal::RadosStore* driver, bool have_cache, bool raw, bool run_sync, bool background_tasks, optional_yield y, const DoutPrefixProvider *dpp, const rgw::SiteConfig& _site)
+int RGWServices::do_init(CephContext *_cct, rgw::sal::RadosStore* driver, bool have_cache, bool raw, bool run_sync, bool background_tasks, optional_yield y, const DoutPrefixProvider *dpp, const rgw::SiteConfig& _site, rgw::sal::ConfigStore* cfgstore)
{
cct = _cct;
site = &_site;
- int r = _svc.init(cct, driver, have_cache, raw, run_sync, background_tasks, y, dpp);
+ int r = _svc.init(cct, driver, have_cache, raw, run_sync, background_tasks, y, dpp, cfgstore);
if (r < 0) {
return r;
}
int init(CephContext *cct, rgw::sal::RadosStore* store, bool have_cache,
bool raw_storage, bool run_sync, bool background_tasks,
- optional_yield y, const DoutPrefixProvider *dpp);
+ optional_yield y, const DoutPrefixProvider *dpp, rgw::sal::ConfigStore* cfgstore);
void shutdown();
};
int do_init(CephContext *cct, rgw::sal::RadosStore* store, bool have_cache,
bool raw_storage, bool run_sync, bool background_tasks, optional_yield y,
- const DoutPrefixProvider *dpp, const rgw::SiteConfig& site);
+ const DoutPrefixProvider *dpp, const rgw::SiteConfig& site, rgw::sal::ConfigStore* cfgstore);
int init(CephContext *cct, rgw::sal::RadosStore* store, bool have_cache,
bool run_sync, bool background_tasks, optional_yield y, const DoutPrefixProvider *dpp,
- const rgw::SiteConfig& site) {
- return do_init(cct, store, have_cache, false, run_sync, background_tasks, y, dpp, site);
+ const rgw::SiteConfig& site, rgw::sal::ConfigStore* cfgstore) {
+ return do_init(cct, store, have_cache, false, run_sync, background_tasks, y, dpp, site, cfgstore);
}
int init_raw(CephContext *cct, rgw::sal::RadosStore* store,
bool have_cache, optional_yield y,
const DoutPrefixProvider *dpp,
- const rgw::SiteConfig& site) {
- return do_init(cct, store, have_cache, true, false, false, y, dpp, site);
+ const rgw::SiteConfig& site, rgw::sal::ConfigStore* cfgstore) {
+ return do_init(cct, store, have_cache, true, false, false, y, dpp, site, cfgstore);
}
void shutdown() {
_svc.shutdown();
static RGWPeriodHistory::Cursor get_period_at(const DoutPrefixProvider *dpp,
rgw::sal::RadosStore* store,
const rgw_meta_sync_info& info,
- optional_yield y)
+ optional_yield y, rgw::sal::ConfigStore* cfgstore)
{
if (info.period.empty()) {
// return an empty cursor with error=0
// read the period from rados or pull it from the master
RGWPeriod period;
- int r = store->svc()->mdlog->pull_period(dpp, info.period, period, y);
+ int r = store->svc()->mdlog->pull_period(dpp, info.period, period, y, cfgstore);
if (r < 0) {
ldpp_dout(dpp, -1) << "ERROR: failed to read period id "
<< info.period << ": " << cpp_strerror(r) << dendl;
return RGWPeriodHistory::Cursor{r};
}
// attach the period to our history
- cursor = store->svc()->mdlog->get_period_history()->attach(dpp, std::move(period), y);
+ cursor = store->svc()->mdlog->get_period_history()->attach(dpp, std::move(period), y, cfgstore);
if (!cursor) {
r = cursor.get_error();
ldpp_dout(dpp, -1) << "ERROR: failed to read period history back to "
return cursor;
}
-int RGWRemoteMetaLog::run_sync(const DoutPrefixProvider *dpp, optional_yield y)
+int RGWRemoteMetaLog::run_sync(const DoutPrefixProvider *dpp, optional_yield y, rgw::sal::ConfigStore* cfgstore)
{
if (store->svc()->zone->is_meta_master()) {
return 0;
case rgw_meta_sync_info::StateSync:
tn->log(20, "sync");
// find our position in the period history (if any)
- cursor = get_period_at(dpp, store, sync_status.sync_info, y);
+ cursor = get_period_at(dpp, store, sync_status.sync_info, y, cfgstore);
r = cursor.get_error();
if (r < 0) {
return r;
int read_master_log_shards_next(const DoutPrefixProvider *dpp, const std::string& period, std::map<int, std::string> shard_markers, std::map<int, rgw_mdlog_shard_data> *result);
int read_sync_status(const DoutPrefixProvider *dpp, rgw_meta_sync_status *sync_status);
int init_sync_status(const DoutPrefixProvider *dpp);
- int run_sync(const DoutPrefixProvider *dpp, optional_yield y);
+ int run_sync(const DoutPrefixProvider *dpp, optional_yield y, rgw::sal::ConfigStore* cfgstore);
void wakeup(int shard_id);
return master_log.read_master_log_shards_next(dpp, period, shard_markers, result);
}
- int run(const DoutPrefixProvider *dpp, optional_yield y) { return master_log.run_sync(dpp, y); }
+ int run(const DoutPrefixProvider *dpp, optional_yield y, rgw::sal::ConfigStore* cfgstore) { return master_log.run_sync(dpp, y, cfgstore); }
// implements DoutPrefixProvider
{
ldpp_dout(dpp, 20) << __func__ << " realm " << realm.id
<< " period " << current_period.id << dendl;
+ auto zone_svc = static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone; // XXX
+
// gateway must be in the master zone to commit
- if (!driver->get_zone()->get_zonegroup().is_master_zonegroup()) {
+ if (info.master_zone != zone_svc->get_zone_params().id) {
error_stream << "Cannot commit period on zone "
- << driver->get_zone()->get_id() << ", it must be sent to "
+ << zone_svc->get_zone_params().id << ", it must be sent to "
"the period's master zone " << info.master_zone << '.' << std::endl;
return -EINVAL;
}
void dump(Formatter *f) const;
void decode_json(JSONObj *obj);
-
- static std::string get_oid(const std::string& realm_id);
- static rgw_pool get_pool(CephContext *cct);
};
WRITE_CLASS_ENCODER(RGWPeriodConfig)
std::string id;
std::string name;
- CephContext *cct{nullptr};
-
std::string current_period;
epoch_t epoch{0}; //< realm epoch, incremented for each new period
public:
RGWRealm() {}
RGWRealm(const std::string& _id, const std::string& _name = "") : id(_id), name(_name) {}
- RGWRealm(CephContext *_cct): cct(_cct) {}
- RGWRealm(const std::string& _name, CephContext *_cct, RGWSI_SysObj *_sysobj_svc): name(_name), cct(_cct){}
const std::string& get_name() const { return name; }
const std::string& get_id() const { return id; }
void set_id(const std::string& _id) { id = _id;}
void clear_id() { id.clear(); }
- virtual ~RGWRealm();
-
void encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
- encode(id, bl);
- encode(name, bl);
+ {
+ // these used to be wrapped by RGWSystemMetaObj::encode(),
+ // so the extra ENCODE_START/ENCODE_FINISH are preserved
+ ENCODE_START(1, 1, bl);
+ encode(id, bl);
+ encode(name, bl);
+ ENCODE_FINISH(bl);
+ }
encode(current_period, bl);
encode(epoch, bl);
ENCODE_FINISH(bl);
void decode(bufferlist::const_iterator& bl) {
DECODE_START(1, bl);
- decode(id, bl);
- decode(name, bl);
+ {
+ // these used to be wrapped by RGWSystemMetaObj::decode(),
+ // so the extra DECODE_START/DECODE_FINISH are preserved
+ DECODE_START(1, bl);
+ decode(id, bl);
+ decode(name, bl);
+ DECODE_FINISH(bl);
+ }
decode(current_period, bl);
decode(epoch, bl);
DECODE_FINISH(bl);
}
+ // TODO: use ConfigStore for watch/notify,
+ // After refactoring RGWRealmWatcher and RGWRealmReloader, get_pool and get_info_oid_prefix will be removed.
rgw_pool get_pool(CephContext *cct) const;
- const std::string get_default_oid(bool old_format = false) const;
- const std::string& get_names_oid_prefix() const;
const std::string& get_info_oid_prefix(bool old_format = false) const;
- std::string get_predefined_id(CephContext *cct) const;
- const std::string& get_predefined_name(CephContext *cct) const;
void dump(Formatter *f) const;
void decode_json(JSONObj *obj);
const std::string& get_current_period() const {
return current_period;
}
- int set_current_period(const DoutPrefixProvider *dpp, RGWPeriod& period, optional_yield y);
void clear_current_period_and_epoch() {
current_period.clear();
epoch = 0;
RGWPeriod *pperiod,
RGWZoneGroup *pzonegroup,
bool *pfound,
+ rgw::sal::ConfigStore* cfgstore,
optional_yield y) const;
};
WRITE_CLASS_ENCODER(RGWRealm)
std::string realm_id;
epoch_t realm_epoch{1}; //< realm epoch when period was made current
- CephContext *cct{nullptr};
- int use_latest_epoch(const DoutPrefixProvider *dpp, optional_yield y);
- int use_current_period();
-
- const std::string get_period_oid() const;
- const std::string get_period_oid_prefix() const;
-
// gather the metadata sync status for each shard; only for use on master zone
int update_sync_status(const DoutPrefixProvider *dpp,
rgw::sal::Driver* driver,
const RGWPeriodConfig& get_config() const { return period_config; }
const std::vector<std::string>& get_sync_status() const { return sync_status; }
rgw_pool get_pool(CephContext *cct) const;
- const std::string& get_latest_epoch_oid() const;
const std::string& get_info_oid_prefix() const;
void set_user_quota(RGWQuotaInfo& user_quota) {
RGWZoneGroup *pzonegroup,
optional_yield y) const;
- int get_latest_epoch(const DoutPrefixProvider *dpp, epoch_t& epoch, optional_yield y);
- // update latest_epoch if the given epoch is higher, else return -EEXIST
- int update_latest_epoch(const DoutPrefixProvider *dpp, epoch_t epoch, optional_yield y);
-
- void fork();
-
- // commit a staging period; only for use on master zone
- int commit(const DoutPrefixProvider *dpp,
- rgw::sal::Driver* driver,
- RGWRealm& realm, const RGWPeriod &current_period,
- std::ostream& error_stream, optional_yield y,
- bool force_if_stale = false);
-
void encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(id, bl);
if (raw_storage_op) {
site = rgw::SiteConfig::make_fake();
driver = DriverManager::get_raw_storage(dpp(), g_ceph_context,
- cfg, context_pool, *site);
+ cfg, context_pool, *site, cfgstore.get());
} else {
site = std::make_unique<rgw::SiteConfig>();
auto r = site->load(dpp(), null_yield, cfgstore.get(), localzonegroup_op);
false,
false, // No background tasks!
null_yield,
+ cfgstore.get(),
need_cache && g_conf()->rgw_cache_enabled,
need_gc);
}
if (opt_cmd == OPT::MDLOG_AUTOTRIM) {
// need a full history for purging old mdlog periods
- static_cast<rgw::sal::RadosStore*>(driver)->svc()->mdlog->init_oldest_log_period(null_yield, dpp());
+ static_cast<rgw::sal::RadosStore*>(driver)->svc()->mdlog->init_oldest_log_period(null_yield, dpp(), cfgstore.get());
RGWCoroutinesManager crs(driver->ctx(), driver->get_cr_registry());
RGWHTTPManager http(driver->ctx(), crs.get_completion_mgr());
return -ret;
}
- ret = sync.run(dpp(), null_yield);
+ ret = sync.run(dpp(), null_yield, cfgstore.get());
if (ret < 0) {
cerr << "ERROR: sync.run() returned ret=" << ret << std::endl;
return -ret;
return -ret;
}
- ret = sync.run(dpp());
+ ret = sync.run(dpp(), cfgstore.get());
if (ret < 0) {
cerr << "ERROR: sync.run() returned ret=" << ret << std::endl;
return -ret;
run_quota,
run_sync,
g_conf().get_val<bool>("rgw_dynamic_resharding"),
- true, true, null_yield, // run notification thread
+ true, true, null_yield, env.cfgstore, // run notification thread
g_conf()->rgw_cache_enabled);
if (!env.driver) {
return -EIO;
if (env.driver->get_name() == "rados") {
// add a watcher to respond to realm configuration changes
- pusher = std::make_unique<RGWPeriodPusher>(dpp, env.driver, null_yield);
+ pusher = std::make_unique<RGWPeriodPusher>(dpp, env.driver, env.cfgstore, null_yield);
fe_pauser = std::make_unique<RGWFrontendPauser>(fes, pusher.get());
rgw_pauser = std::make_unique<RGWPauser>();
rgw_pauser->add_pauser(fe_pauser.get());
exit(1);
}
- driver = DriverManager::get_storage(&dp, g_ceph_context, cfg, context_pool, site, false, false, false, false, false, false, true, null_yield);
+ driver = DriverManager::get_storage(&dp, g_ceph_context, cfg, context_pool, site, false, false, false, false, false, false, true, null_yield, cfgstore.get());
if (!driver) {
std::cerr << "couldn't init storage provider" << std::endl;
return EIO;
#define FIRST_EPOCH 1
-const string& RGWPeriod::get_latest_epoch_oid() const
-{
- if (cct->_conf->rgw_period_latest_epoch_info_oid.empty()) {
- return period_latest_epoch_info_oid;
- }
- return cct->_conf->rgw_period_latest_epoch_info_oid;
-}
-
const string& RGWPeriod::get_info_oid_prefix() const
{
return period_info_oid_prefix;
}
-const string RGWPeriod::get_period_oid_prefix() const
-{
- return get_info_oid_prefix() + id;
-}
-
-const string RGWPeriod::get_period_oid() const
-{
- std::ostringstream oss;
- oss << get_period_oid_prefix();
- // skip the epoch for the staging period
- if (id != get_staging_id(realm_id))
- oss << "." << epoch;
- return oss.str();
-}
-
bool RGWPeriod::find_zone(const DoutPrefixProvider *dpp,
const rgw_zone_id& zid,
RGWZoneGroup *pzonegroup,
JSONDecoder::decode_json("realm_epoch", realm_epoch, obj);
}
-int RGWPeriod::update_latest_epoch(const DoutPrefixProvider *dpp, epoch_t epoch, optional_yield y)
-{
- static constexpr int MAX_RETRIES = 20;
-
- for (int i = 0; i < MAX_RETRIES; i++) {
- RGWPeriodLatestEpochInfo info;
- RGWObjVersionTracker objv;
- bool exclusive = false;
-
- // read existing epoch
- auto config_store_type = g_conf().get_val<std::string>("rgw_config_store");
- auto cfgstore = DriverManager::create_config_store(dpp, config_store_type);
- int r = cfgstore->read_latest_epoch(dpp, y, id, info.epoch, &objv, *this);
- if (r == -ENOENT) {
- // use an exclusive create to set the epoch atomically
- exclusive = true;
- ldpp_dout(dpp, 20) << "creating initial latest_epoch=" << epoch
- << " for period=" << id << dendl;
- } else if (r < 0) {
- ldpp_dout(dpp, 0) << "ERROR: failed to read latest_epoch" << dendl;
- return r;
- } else if (epoch <= info.epoch) {
- r = -EEXIST; // fail with EEXIST if epoch is not newer
- ldpp_dout(dpp, 10) << "found existing latest_epoch " << info.epoch
- << " >= given epoch " << epoch << ", returning r=" << r << dendl;
- return r;
- } else {
- ldpp_dout(dpp, 20) << "updating latest_epoch from " << info.epoch
- << " -> " << epoch << " on period=" << id << dendl;
- }
-
- r = cfgstore->write_latest_epoch(dpp, y, exclusive, id, epoch, &objv, *this);
- if (r == -EEXIST) {
- continue; // exclusive create raced with another update, retry
- } else if (r == -ECANCELED) {
- continue; // write raced with a conflicting version, retry
- }
- if (r < 0) {
- ldpp_dout(dpp, 0) << "ERROR: failed to write latest_epoch" << dendl;
- return r;
- }
- return 0; // return success
- }
-
- return -ECANCELED; // fail after max retries
-}
-
-int RGWPeriod::use_latest_epoch(const DoutPrefixProvider *dpp, optional_yield y)
-{
- RGWPeriodLatestEpochInfo info;
- auto config_store_type = g_conf().get_val<std::string>("rgw_config_store");
- auto cfgstore = DriverManager::create_config_store(dpp, config_store_type);
- int ret = cfgstore->read_latest_epoch(dpp, y, id, info.epoch, nullptr, *this);
- if (ret < 0) {
- return ret;
- }
-
- epoch = info.epoch;
-
- return 0;
-}
-
~Impl();
Cursor get_current() const { return current_cursor; }
- Cursor attach(const DoutPrefixProvider *dpp, RGWPeriod&& period, optional_yield y);
+ Cursor attach(const DoutPrefixProvider *dpp, RGWPeriod&& period, optional_yield y, rgw::sal::ConfigStore* cfgstore);
Cursor insert(RGWPeriod&& period);
Cursor lookup(epoch_t realm_epoch);
histories.clear_and_dispose(std::default_delete<History>{});
}
-Cursor RGWPeriodHistory::Impl::attach(const DoutPrefixProvider *dpp, RGWPeriod&& period, optional_yield y)
+Cursor RGWPeriodHistory::Impl::attach(const DoutPrefixProvider *dpp, RGWPeriod&& period, optional_yield y,
+ rgw::sal::ConfigStore* cfgstore)
{
if (current_history == histories.end()) {
return Cursor{-EINVAL};
}
// pull the period outside of the lock
- int r = puller->pull(dpp, predecessor_id, period, y);
+ int r = puller->pull(dpp, predecessor_id, period, y, cfgstore);
if (r < 0) {
return Cursor{r};
}
{
return impl->get_current();
}
-Cursor RGWPeriodHistory::attach(const DoutPrefixProvider *dpp, RGWPeriod&& period, optional_yield y)
+Cursor RGWPeriodHistory::attach(const DoutPrefixProvider *dpp, RGWPeriod&& period, optional_yield y,
+ rgw::sal::ConfigStore* cfgstore)
{
- return impl->attach(dpp, std::move(period), y);
+ return impl->attach(dpp, std::move(period), y, cfgstore);
}
Cursor RGWPeriodHistory::insert(RGWPeriod&& period)
{
namespace bi = boost::intrusive;
class RGWPeriod;
+namespace rgw::sal { class ConfigStore; }
/**
* RGWPeriodHistory tracks the relative history of all inserted periods,
virtual ~Puller() = default;
virtual int pull(const DoutPrefixProvider *dpp, const std::string& period_id, RGWPeriod& period,
- optional_yield y) = 0;
+ optional_yield y, rgw::sal::ConfigStore* cfgstore) = 0;
};
RGWPeriodHistory(CephContext* cct, Puller* puller,
/// current_period and the given period, reading predecessor periods or
/// fetching them from the master as necessary. returns a cursor at the
/// given period that can be used to traverse the current_history
- Cursor attach(const DoutPrefixProvider *dpp, RGWPeriod&& period, optional_yield y);
+ Cursor attach(const DoutPrefixProvider *dpp, RGWPeriod&& period, optional_yield y, rgw::sal::ConfigStore* cfgstore);
/// insert the given period into an existing history, or create a new
/// unconnected history. similar to attach(), but it doesn't try to fetch
} // anonymous namespace
int RGWPeriodPuller::pull(const DoutPrefixProvider *dpp, const std::string& period_id, RGWPeriod& period,
- optional_yield y)
+ optional_yield y, rgw::sal::ConfigStore* cfgstore)
{
// try to read the period from rados
constexpr auto zero_epoch = 0;
period.set_id(period_id);
period.set_epoch(zero_epoch);
- auto config_store_type = g_conf().get_val<std::string>("rgw_config_store");
- auto cfgstore = DriverManager::create_config_store(dpp, config_store_type);
int r = cfgstore->read_period(dpp, y, period_id, zero_epoch, period);
if (r < 0) {
if (svc.zone->is_meta_master()) {
return r;
}
// update latest epoch
- r = period.update_latest_epoch(dpp, period.get_epoch(), y);
+ r = cfgstore->update_latest_epoch(dpp, y, period.get_id(), period.get_epoch());
if (r == -EEXIST) {
// already have this epoch (or a more recent one)
return 0;
}
// reflect period objects if this is the latest version
if (svc.zone->get_realm().get_current_period() == period_id) {
- r = rgw::reflect_period(dpp, y, cfgstore.get(), period);
+ r = rgw::reflect_period(dpp, y, cfgstore, period);
if (r < 0) {
return r;
}
public:
explicit RGWPeriodPuller(RGWSI_Zone *zone_svc, RGWSI_SysObj *sysobj_svc);
- int pull(const DoutPrefixProvider *dpp, const std::string& period_id, RGWPeriod& period, optional_yield y) override;
+ int pull(const DoutPrefixProvider *dpp, const std::string& period_id, RGWPeriod& period, optional_yield y, rgw::sal::ConfigStore* cfgstore) override;
};
RGWPeriodPusher::RGWPeriodPusher(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver,
- optional_yield y)
+ rgw::sal::ConfigStore* cfgstore, optional_yield y)
: cct(driver->ctx()), driver(driver)
{
rgw::sal::Zone* zone = driver->get_zone();
// always send out the current period on startup
RGWPeriod period;
- auto config_store_type = g_conf().get_val<std::string>("rgw_config_store");
- auto cfgstore = DriverManager::create_config_store(dpp, config_store_type);
- int r = cfgstore->read_period(dpp, y, zone->get_current_period_id(), std::nullopt, period);
+ auto r = cfgstore->read_period(dpp, y, zone->get_current_period_id(), std::nullopt, period);
if (r < 0) {
ldpp_dout(dpp, -1) << "failed to load period for realm " << realm_id << dendl;
return;
class RGWPeriodPusher final : public RGWRealmWatcher::Watcher,
public RGWRealmReloader::Pauser {
public:
- explicit RGWPeriodPusher(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, optional_yield y);
+ explicit RGWPeriodPusher(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, rgw::sal::ConfigStore* cfgstore,
+ optional_yield y);
~RGWPeriodPusher() override;
/// respond to realm notifications by pushing new periods to other zones
using namespace std;
using namespace rgw_zone_defaults;
-RGWRealm::~RGWRealm() {}
-
RGWRemoteMetaLog::~RGWRemoteMetaLog()
{
delete error_logger;
}
-string RGWRealm::get_predefined_id(CephContext *cct) const {
- return cct->_conf.get_val<string>("rgw_realm_id");
-}
-
-const string& RGWRealm::get_predefined_name(CephContext *cct) const {
- return cct->_conf->rgw_realm;
-}
-
rgw_pool RGWRealm::get_pool(CephContext *cct) const
{
if (cct->_conf->rgw_realm_root_pool.empty()) {
return rgw_pool(cct->_conf->rgw_realm_root_pool);
}
-const string RGWRealm::get_default_oid(bool old_format) const
-{
- if (cct->_conf->rgw_default_realm_info_oid.empty()) {
- return default_realm_info_oid;
- }
- return cct->_conf->rgw_default_realm_info_oid;
-}
-
-const string& RGWRealm::get_names_oid_prefix() const
-{
- return realm_names_oid_prefix;
-}
-
const string& RGWRealm::get_info_oid_prefix(bool old_format) const
{
return realm_info_oid_prefix;
}
-int RGWRealm::set_current_period(const DoutPrefixProvider *dpp, RGWPeriod& period, optional_yield y)
-{
- // update realm epoch to match the period's
- if (epoch > period.get_realm_epoch()) {
- ldpp_dout(dpp, 0) << "ERROR: set_current_period with old realm epoch "
- << period.get_realm_epoch() << ", current epoch=" << epoch << dendl;
- return -EINVAL;
- }
- if (epoch == period.get_realm_epoch() && current_period != period.get_id()) {
- ldpp_dout(dpp, 0) << "ERROR: set_current_period with same realm epoch "
- << period.get_realm_epoch() << ", but different period id "
- << period.get_id() << " != " << current_period << dendl;
- return -EINVAL;
- }
-
- epoch = period.get_realm_epoch();
- current_period = period.get_id();
-
- auto config_store_type = g_conf().get_val<std::string>("rgw_config_store");
- auto cfgstore = DriverManager::create_config_store(dpp, config_store_type);
- std::unique_ptr<rgw::sal::RealmWriter> writer;
- int ret = cfgstore->create_realm(dpp, y, false, *this, &writer);
- if (ret < 0) {
- ldpp_dout(dpp, 0) << "ERROR: realm create: " << cpp_strerror(-ret) << dendl;
- return ret;
- }
- ret = rgw::realm_set_current_period(dpp, y, cfgstore.get(), *writer, *this, period);
- if (ret < 0) {
- ldpp_dout(dpp, 0) << "ERROR: period update: " << cpp_strerror(-ret) << dendl;
- return ret;
- }
-
- ret = rgw::reflect_period(dpp, y, cfgstore.get(), period);
- if (ret < 0) {
- ldpp_dout(dpp, 0) << "ERROR: period.reflect(): " << cpp_strerror(-ret) << dendl;
- return ret;
- }
-
- return 0;
-}
-
string RGWRealm::get_control_oid() const
{
return get_info_oid_prefix() + id + ".control";
RGWPeriod *pperiod,
RGWZoneGroup *pzonegroup,
bool *pfound,
+ rgw::sal::ConfigStore* cfgstore,
optional_yield y) const
{
auto& found = *pfound;
epoch_t epoch = 0;
RGWPeriod period(period_id, epoch);
- auto config_store_type = g_conf().get_val<std::string>("rgw_config_store");
- auto cfgstore = DriverManager::create_config_store(dpp, config_store_type);
int r = cfgstore->read_period(dpp, y, period_id, epoch, period);
if (r < 0) {
ldpp_dout(dpp, 0) << "WARNING: period init failed: " << cpp_strerror(-r) << " ... skipping" << dendl;
cct->_conf->rgw_enable_quota_threads,
cct->_conf->rgw_run_sync_thread,
cct->_conf.get_val<bool>("rgw_dynamic_resharding"),
- true, true, null_yield, // run notification thread
+ true, true, null_yield, env.cfgstore, // run notification thread
cct->_conf->rgw_cache_enabled);
}
#include "rgw_rest_ratelimit.h"
#include "rgw_sal.h"
#include "rgw_sal_config.h"
+#include "rgw_process_env.h"
class RGWOp_Ratelimit_Info : public RGWRESTOp {
int check_caps(const RGWUserCaps& caps) override {
std::string realm_id = driver->get_zone()->get_realm_id();
RGWPeriodConfig period_config;
auto config_store_type = g_conf().get_val<std::string>("rgw_config_store");
- auto cfgstore = DriverManager::create_config_store(this, config_store_type);
+ auto cfgstore = s->penv.cfgstore;
op_ret = cfgstore->read_period_config(this, y, realm_id, period_config);
if (op_ret && op_ret != -ENOENT) {
ldpp_dout(this, 0) << "Error on period config read" << dendl;
}
auto config_store_type = g_conf().get_val<std::string>("rgw_config_store");
- auto cfgstore = DriverManager::create_config_store(s, config_store_type);
+ auto cfgstore = s->penv.cfgstore;
if (global) {
std::string realm_id = driver->get_zone()->get_realm_id();
RGWPeriodConfig period_config;
bool quota_threads,
bool run_sync_thread,
bool run_reshard_thread,
- bool run_notification_thread,
+ bool run_notification_thread,
bool use_cache,
bool use_gc,
bool background_tasks,
- optional_yield y)
+ optional_yield y, rgw::sal::ConfigStore* cfgstore)
{
rgw::sal::Driver* driver{nullptr};
.set_run_sync_thread(run_sync_thread)
.set_run_reshard_thread(run_reshard_thread)
.set_run_notification_thread(run_notification_thread)
- .init_begin(cct, dpp, background_tasks, site_config) < 0) {
+ .init_begin(cct, dpp, background_tasks, site_config, cfgstore) < 0) {
delete driver;
return nullptr;
}
.set_run_sync_thread(run_sync_thread)
.set_run_reshard_thread(run_reshard_thread)
.set_run_notification_thread(run_notification_thread)
- .init_begin(cct, dpp, background_tasks, site_config) < 0) {
+ .init_begin(cct, dpp, background_tasks, site_config, cfgstore) < 0) {
delete driver;
return nullptr;
}
rgw::sal::Driver* DriverManager::init_raw_storage_provider(const DoutPrefixProvider* dpp, CephContext* cct,
const Config& cfg, boost::asio::io_context& io_context,
- const rgw::SiteConfig& site_config)
+ const rgw::SiteConfig& site_config, rgw::sal::ConfigStore* cfgstore)
{
rgw::sal::Driver* driver = nullptr;
if (cfg.store_name.compare("rados") == 0) {
return nullptr;
}
- int ret = rados->init_svc(true, dpp, false, site_config);
+ int ret = rados->init_svc(true, dpp, false, site_config, cfgstore);
if (ret < 0) {
ldout(cct, 0) << "ERROR: failed to init services (ret=" << cpp_strerror(-ret) << ")" << dendl;
delete driver;
bool run_notification_thread,
bool background_tasks,
optional_yield y,
+ rgw::sal::ConfigStore* cfgstore,
bool use_cache = true,
bool use_gc = true) {
rgw::sal::Driver* driver = init_storage_provider(dpp, cct, cfg, io_context,
quota_threads,
run_sync_thread,
run_reshard_thread,
- run_notification_thread,
+ run_notification_thread,
use_cache, use_gc,
- background_tasks, y);
+ background_tasks, y, cfgstore);
return driver;
}
/** Get a stripped down driver by service name */
static rgw::sal::Driver* get_raw_storage(const DoutPrefixProvider* dpp,
CephContext* cct, const Config& cfg,
boost::asio::io_context& io_context,
- const rgw::SiteConfig& site_config) {
+ const rgw::SiteConfig& site_config,
+ rgw::sal::ConfigStore* cfgstore) {
rgw::sal::Driver* driver = init_raw_storage_provider(dpp, cct, cfg,
io_context,
- site_config);
+ site_config,
+ cfgstore);
return driver;
}
/** Initialize a new full Driver */
bool quota_threads,
bool run_sync_thread,
bool run_reshard_thread,
- bool run_notification_thread,
+ bool run_notification_thread,
bool use_metadata_cache,
bool use_gc, bool background_tasks,
- optional_yield y);
+ optional_yield y, rgw::sal::ConfigStore* cfgstore);
/** Initialize a new raw Driver */
static rgw::sal::Driver* init_raw_storage_provider(const DoutPrefixProvider* dpp,
CephContext* cct,
const Config& cfg,
boost::asio::io_context& io_context,
- const rgw::SiteConfig& site_config);
+ const rgw::SiteConfig& site_config,
+ rgw::sal::ConfigStore* cfgstore);
/** Close a Driver when it's no longer needed */
static void close_storage(rgw::sal::Driver* driver);
struct RGWRealm;
struct RGWZoneGroup;
struct RGWZoneParams;
-class RGWObjVersionTracker;
namespace rgw::sal {
optional_yield y, const std::string& marker,
std::span<std::string> entries,
ListResult<std::string>& result) = 0;
- virtual int read_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y, std::string_view period_id,
- uint32_t& epoch, RGWObjVersionTracker* objv, RGWPeriod& info) = 0;
- virtual int write_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, std::string_view period_id,
- uint32_t epoch, RGWObjVersionTracker* objv, const RGWPeriod& info) = 0;
+ virtual int update_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y,
+ std::string_view period_id, uint32_t epoch) = 0;
///@}
/// @group ZoneGroup
encode_json("anonymous_ratelimit", anon_ratelimit, f);
}
-std::string RGWPeriodConfig::get_oid(const std::string& realm_id)
-{
- if (realm_id.empty()) {
- return "period_config.default";
- }
- return "period_config." + realm_id;
-}
-
-rgw_pool RGWPeriodConfig::get_pool(CephContext *cct)
-{
- const auto& pool_name = cct->_conf->rgw_period_root_pool;
- if (pool_name.empty()) {
- return {RGW_DEFAULT_PERIOD_ROOT_POOL};
- }
- return {pool_name};
-}
-
int RGWSystemMetaObj::delete_obj(const DoutPrefixProvider *dpp, optional_yield y, bool old_format)
{
rgw_pool pool(get_pool(cct));
{
RGWSI_Zone *zone_svc{nullptr};
librados::Rados* rados{nullptr};
+ rgw::sal::ConfigStore* cfgstore{nullptr};
class ClsSubService : public RGWServiceInstance {
friend class RGWSI_Cls;
if (run_sync &&
svc.zone->need_to_sync()) {
// initialize the log period history
- svc.mdlog->init_oldest_log_period(y, dpp);
+ svc.mdlog->init_oldest_log_period(y, dpp, cfgstore);
}
return 0;
}
// traverse all the way back to the beginning of the period history, and
// return a cursor to the first period in a fully attached history
-Cursor RGWSI_MDLog::find_oldest_period(const DoutPrefixProvider *dpp, optional_yield y)
+Cursor RGWSI_MDLog::find_oldest_period(const DoutPrefixProvider *dpp, optional_yield y, rgw::sal::ConfigStore* cfgstore)
{
auto cursor = period_history->get_current();
}
// pull the predecessor and add it to our history
RGWPeriod period;
- int r = period_puller->pull(dpp, predecessor, period, y);
+ int r = period_puller->pull(dpp, predecessor, period, y, cfgstore);
if (r < 0) {
return cursor;
}
return cursor;
}
-Cursor RGWSI_MDLog::init_oldest_log_period(optional_yield y, const DoutPrefixProvider *dpp)
+Cursor RGWSI_MDLog::init_oldest_log_period(optional_yield y, const DoutPrefixProvider *dpp, rgw::sal::ConfigStore* cfgstore)
{
// read the mdlog history
RGWMetadataLogHistory state;
if (ret == -ENOENT) {
// initialize the mdlog history and write it
ldpp_dout(dpp, 10) << "initializing mdlog history" << dendl;
- auto cursor = find_oldest_period(dpp, y);
+ auto cursor = find_oldest_period(dpp, y, cfgstore);
if (!cursor) {
return cursor;
}
if (cursor) {
return cursor;
} else {
- cursor = find_oldest_period(dpp, y);
+ cursor = find_oldest_period(dpp, y, cfgstore);
state.oldest_realm_epoch = cursor.get_epoch();
state.oldest_period_id = cursor.get_period().get_id();
ldpp_dout(dpp, 10) << "rewriting mdlog history" << dendl;
// pull the oldest period by id
RGWPeriod period;
- ret = period_puller->pull(dpp, state.oldest_period_id, period, y);
+ ret = period_puller->pull(dpp, state.oldest_period_id, period, y, cfgstore);
if (ret < 0) {
ldpp_dout(dpp, 1) << "failed to read period id=" << state.oldest_period_id
<< " for mdlog history: " << cpp_strerror(ret) << dendl;
return Cursor{-EINVAL};
}
// attach the period to our history
- return period_history->attach(dpp, std::move(period), y);
+ return period_history->attach(dpp, std::move(period), y, cfgstore);
}
Cursor RGWSI_MDLog::read_oldest_log_period(optional_yield y, const DoutPrefixProvider *dpp) const
}
int RGWSI_MDLog::pull_period(const DoutPrefixProvider *dpp, const std::string& period_id, RGWPeriod& period,
- optional_yield y)
+ optional_yield y, rgw::sal::ConfigStore* cfgstore)
{
- return period_puller->pull(dpp, period_id, period, y);
+ return period_puller->pull(dpp, period_id, period, y, cfgstore);
}
std::unique_ptr<RGWPeriodPuller> period_puller;
// maintains a connected history of periods
std::unique_ptr<RGWPeriodHistory> period_history;
+ rgw::sal::ConfigStore* cfgstore{nullptr};
public:
RGWSI_MDLog(CephContext *cct, bool run_sync);
// traverse all the way back to the beginning of the period history, and
// return a cursor to the first period in a fully attached history
- RGWPeriodHistory::Cursor find_oldest_period(const DoutPrefixProvider *dpp, optional_yield y);
+ RGWPeriodHistory::Cursor find_oldest_period(const DoutPrefixProvider *dpp, optional_yield y, rgw::sal::ConfigStore* cfgstore);
/// initialize the oldest log period if it doesn't exist, and attach it to
/// our current history
- RGWPeriodHistory::Cursor init_oldest_log_period(optional_yield y, const DoutPrefixProvider *dpp);
+ RGWPeriodHistory::Cursor init_oldest_log_period(optional_yield y, const DoutPrefixProvider *dpp, rgw::sal::ConfigStore* cfgstore);
/// read the oldest log period, and return a cursor to it in our existing
/// period history
return period_history.get();
}
- int pull_period(const DoutPrefixProvider *dpp, const std::string& period_id, RGWPeriod& period, optional_yield y);
+ int pull_period(const DoutPrefixProvider *dpp, const std::string& period_id, RGWPeriod& period, optional_yield y, rgw::sal::ConfigStore* cfgstore);
/// find or create the metadata log for the given period
RGWMetadataLog* get_log(const std::string& period);
RGWSI_Zone *zone_svc{nullptr};
librados::Rados *rados{nullptr};
RGWSI_Finisher *finisher_svc{nullptr};
+ rgw::sal::ConfigStore *cfgstore{nullptr};
ceph::shared_mutex watchers_lock = ceph::make_shared_mutex("watchers_lock");
rgw_pool control_pool;
ObjectCache cache;
std::shared_ptr<RGWSI_SysObj_Cache_CB> cb;
+ rgw::sal::ConfigStore *cfgstore{nullptr};
void normalize_pool_and_obj(const rgw_pool& src_pool, const std::string& src_obj, rgw_pool& dst_pool, std::string& dst_obj);
protected:
RGWPeriod *pperiod,
RGWZoneGroup *pzonegroup,
bool *pfound,
+ rgw::sal::ConfigStore* cfgstore,
optional_yield y)
{
auto& found = *pfound;
for (auto& realm_name : realms) {
string realm_id;
RGWRealm realm(realm_id, realm_name);
- auto config_store_type = g_conf().get_val<std::string>("rgw_config_store");
- auto cfgstore = DriverManager::create_config_store(dpp, config_store_type);
- r = rgw::read_realm(dpp, y, cfgstore.get(), realm_id, realm_name, realm);
+ r = rgw::read_realm(dpp, y, cfgstore, realm_id, realm_name, realm);
if (r < 0) {
ldpp_dout(dpp, 0) << "WARNING: can't open realm " << realm_name << ": " << cpp_strerror(-r) << " ... skipping" << dendl;
continue;
}
r = realm.find_zone(dpp, zid, pperiod,
- pzonegroup, &found, y);
+ pzonegroup, &found, cfgstore, y);
if (r < 0) {
ldpp_dout(dpp, 20) << __func__ << "(): ERROR: realm.find_zone() returned r=" << r<< dendl;
return r;
assert(sysobj_svc->is_started()); /* if not then there's ordering issue */
- auto config_store_type = g_conf().get_val<std::string>("rgw_config_store");
- auto cfgstore = DriverManager::create_config_store(dpp, config_store_type);
- ret = rgw::read_realm(dpp, y, cfgstore.get(), realm->get_id(), realm->get_name(), *realm);
+ ret = rgw::read_realm(dpp, y, cfgstore, realm->get_id(), realm->get_name(), *realm);
if (ret < 0 && ret != -ENOENT) {
ldpp_dout(dpp, 0) << "failed reading realm info: ret "<< ret << " " << cpp_strerror(-ret) << dendl;
return ret;
current_period,
zonegroup,
&found_period_conf,
+ cfgstore,
y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: search_realm_conf() failed: ret="<< ret << dendl;
int RGWSI_Zone::list_realms(const DoutPrefixProvider *dpp, list<string>& realms)
{
- RGWRealm realm(cct);
+ RGWRealm realm;
RGWSI_SysObj::Pool syspool = sysobj_svc->get_pool(realm.get_pool(cct));
return syspool.list_prefixed_objs(dpp, realm_names_oid_prefix, &realms);
string period_id = current_period;
while(!period_id.empty()) {
RGWPeriod period(period_id);
- auto config_store_type = g_conf().get_val<std::string>("rgw_config_store");
- auto cfgstore = DriverManager::create_config_store(dpp, config_store_type);
ret = cfgstore->read_period(dpp, y, period_id, std::nullopt, period);
if (ret < 0) {
return ret;
std::map<rgw_zone_id, RGWZone> zone_by_id;
std::unique_ptr<rgw_sync_policy_info> sync_policy;
+ rgw::sal::ConfigStore *cfgstore{nullptr};
void init(RGWSI_SysObj *_sysobj_svc,
librados::Rados* rados_,
RGWPeriod *pperiod,
RGWZoneGroup *pzonegroup,
bool *pfound,
+ rgw::sal::ConfigStore* cfgstore,
optional_yield y);
public:
RGWSI_Zone(CephContext *cct);
false,
false,
false,
- true, true, null_yield,
+ true, true, null_yield, cfgstore.get(),
false));
if (!store) {
std::cerr << "couldn't init storage provider" << std::endl;