return 0;
}
-int SQLiteConfigStore::update_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y,
- std::string_view period_id, uint32_t epoch)
-{
- Prefix prefix{*dpp, "dbconfig:sqlite:read_latest_epoch "}; dpp = &prefix;
- // TODO: implement it later
- return 0;
-}
-
int SQLiteConfigStore::list_period_ids(const DoutPrefixProvider* dpp,
optional_yield y,
const std::string& marker,
optional_yield y, const std::string& marker,
std::span<std::string> entries,
sal::ListResult<std::string>& result) override;
- int update_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y,
- std::string_view period_id, uint32_t epoch) override;
int write_default_zonegroup_id(const DoutPrefixProvider* dpp,
optional_yield y, bool exclusive,
return 0;
}
-int ImmutableConfigStore::update_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y,
- std::string_view period_id, uint32_t epoch)
-{
- return -EROFS;
-}
-
// ZoneGroup
optional_yield y, const std::string& marker,
std::span<std::string> entries,
ListResult<std::string>& result) override;
- virtual int update_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y,
- std::string_view period_id, uint32_t epoch) override;
// ZoneGroup
virtual int write_default_zonegroup_id(const DoutPrefixProvider* dpp,
return impl->remove(dpp, y, pool, latest_oid, objv);
}
-int RadosConfigStore::update_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y,
- std::string_view period_id, uint32_t epoch)
+static int update_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y,
+ ConfigImpl* impl, std::string_view period_id,
+ uint32_t epoch)
{
static constexpr int MAX_RETRIES = 20;
bool exclusive = false;
// read existing epoch
- int r = read_latest_epoch(dpp, y, impl.get(), period_id, existing_epoch, &objv);
+ int r = read_latest_epoch(dpp, y, impl, period_id, existing_epoch, &objv);
if (r == -ENOENT) {
// use an exclusive create to set the epoch atomically
exclusive = true;
<< " -> " << epoch << " on period=" << period_id << dendl;
}
- r = write_latest_epoch(dpp, y, impl.get(), exclusive, period_id, epoch, &objv);
+ r = write_latest_epoch(dpp, y, impl, exclusive, period_id, epoch, &objv);
if (r == -EEXIST) {
continue; // exclusive create raced with another update, retry
} else if (r == -ECANCELED) {
return r;
}
- // non const RGWPeriod
- (void) this->update_latest_epoch(dpp, y, info.get_id(), info.get_epoch());
+ (void) update_latest_epoch(dpp, y, impl.get(), info.get_id(), info.get_epoch());
return 0;
}
// read the latest_epoch
uint32_t latest_epoch = 0;
RGWObjVersionTracker latest_objv;
- int r = read_latest_epoch(dpp, y, impl.get(), period_id, latest_epoch, &latest_objv);
+ int r = read_latest_epoch(dpp, y, impl.get(), period_id,
+ latest_epoch, &latest_objv);
if (r < 0 && r != -ENOENT) { // just delete epoch=0 on ENOENT
ldpp_dout(dpp, 0) << "failed to read latest epoch for period "
<< period_id << ": " << cpp_strerror(r) << dendl;
optional_yield y, const std::string& marker,
std::span<std::string> entries,
sal::ListResult<std::string>& result) override;
- virtual int update_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y,
- std::string_view period_id, uint32_t epoch) override;
// ZoneGroup
virtual int write_default_zonegroup_id(const DoutPrefixProvider* dpp,
data_sync_cr->wakeup(shard_id, entries);
}
-int RGWRemoteDataLog::run_sync(const DoutPrefixProvider *dpp, int num_shards, rgw::sal::ConfigStore* cfgstore)
+int RGWRemoteDataLog::run_sync(const DoutPrefixProvider *dpp, int num_shards)
{
// construct and start bid manager for data sync fairness
const auto& control_pool = sc.env->driver->svc()->zone->get_zone_params().control_pool;
int read_recovering_shards(const DoutPrefixProvider *dpp, const int num_shards, std::set<int>& recovering_shards);
int read_shard_status(const DoutPrefixProvider *dpp, int shard_id, std::set<std::string>& lagging_buckets,std::set<std::string>& recovering_buckets, rgw_data_sync_marker* sync_marker, const int max_entries);
int init_sync_status(const DoutPrefixProvider *dpp, int num_shards);
- int run_sync(const DoutPrefixProvider *dpp, int num_shards, rgw::sal::ConfigStore* cfgstore);
+ int run_sync(const DoutPrefixProvider *dpp, int num_shards);
void wakeup(int shard_id, bc::flat_set<rgw_data_notify_entry>& entries);
};
return source_log.read_source_log_shards_next(dpp, shard_markers, result);
}
- int run(const DoutPrefixProvider *dpp, rgw::sal::ConfigStore* cfgstore) { return source_log.run_sync(dpp, num_shards, cfgstore); }
+ int run(const DoutPrefixProvider *dpp) { return source_log.run_sync(dpp, num_shards); }
void wakeup(int shard_id, bc::flat_set<rgw_data_notify_entry>& entries) { return source_log.wakeup(shard_id, entries); }
// vim: ts=8 sw=2 smarttab ft=cpp
#include "rgw_sync.h"
-#include "rgw_sal.h"
-#include "rgw_sal_config.h"
#include "services/svc_zone.h"
return -ENOENT;
}
+// Read the most recent epoch recorded for this period (via read_latest_epoch)
+// and return it through latest_epoch. Returns 0 on success; on failure the
+// error from read_latest_epoch() is forwarded and latest_epoch is untouched.
+int RGWPeriod::get_latest_epoch(const DoutPrefixProvider *dpp, epoch_t& latest_epoch, optional_yield y)
+{
+  RGWPeriodLatestEpochInfo info;
+
+  int ret = read_latest_epoch(dpp, info, y);
+  if (ret < 0) {
+    return ret;
+  }
+
+  latest_epoch = info.epoch;
+
+  return 0;
+}
+
+// Delete all rados objects belonging to this period: one object per epoch
+// (1..current epoch), then the shared ".latest_epoch" object. Failures on the
+// per-epoch objects are logged as warnings only and do not abort the loop.
+// NOTE(review): the function's return value reflects only the removal of the
+// .latest_epoch object, not any per-epoch failure — confirm callers expect that.
+int RGWPeriod::delete_obj(const DoutPrefixProvider *dpp, optional_yield y)
+{
+  rgw_pool pool(get_pool(cct));
+
+  // delete the object for each period epoch
+  for (epoch_t e = 1; e <= epoch; e++) {
+    RGWPeriod p{get_id(), e};
+    rgw_raw_obj oid{pool, p.get_period_oid()};
+    auto sysobj = sysobj_svc->get_obj(oid);
+    int ret = sysobj.wop().remove(dpp, y);
+    if (ret < 0) {
+      ldpp_dout(dpp, 0) << "WARNING: failed to delete period object " << oid
+          << ": " << cpp_strerror(-ret) << dendl;
+    }
+  }
+
+  // delete the .latest_epoch object
+  rgw_raw_obj oid{pool, get_period_oid_prefix() + get_latest_epoch_oid()};
+  auto sysobj = sysobj_svc->get_obj(oid);
+  int ret = sysobj.wop().remove(dpp, y);
+  if (ret < 0) {
+    ldpp_dout(dpp, 0) << "WARNING: failed to delete period object " << oid
+        << ": " << cpp_strerror(-ret) << dendl;
+  }
+  return ret;
+}
+
+// Rebuild this period's map from the realm's current zonegroups.
+// Lists all zonegroups, skips those belonging to other realms (or that fail
+// to init), validates that each remaining zonegroup has an existing master
+// zone (-EINVAL otherwise), records the master zonegroup/zone, and merges
+// each zonegroup into period_map. Finally re-reads the period config;
+// -ENOENT there is tolerated (no config object yet).
+int RGWPeriod::update(const DoutPrefixProvider *dpp, optional_yield y)
+{
+  auto zone_svc = sysobj_svc->get_zone_svc();
+  ldpp_dout(dpp, 20) << __func__ << " realm " << realm_id << " period " << get_id() << dendl;
+  list<string> zonegroups;
+  int ret = zone_svc->list_zonegroups(dpp, zonegroups);
+  if (ret < 0) {
+    ldpp_dout(dpp, 0) << "ERROR: failed to list zonegroups: " << cpp_strerror(-ret) << dendl;
+    return ret;
+  }
+
+  // clear zone short ids of removed zones. period_map.update() will add the
+  // remaining zones back
+  period_map.short_zone_ids.clear();
+
+  for (auto& iter : zonegroups) {
+    RGWZoneGroup zg(string(), iter);
+    ret = zg.init(dpp, cct, sysobj_svc, y);
+    if (ret < 0) {
+      // best-effort: a zonegroup that fails to load is skipped, not fatal
+      ldpp_dout(dpp, 0) << "WARNING: zg.init() failed: " << cpp_strerror(-ret) << dendl;
+      continue;
+    }
+
+    if (zg.realm_id != realm_id) {
+      ldpp_dout(dpp, 20) << "skipping zonegroup " << zg.get_name() << " zone realm id " << zg.realm_id << ", not on our realm " << realm_id << dendl;
+      continue;
+    }
+
+    if (zg.master_zone.empty()) {
+      ldpp_dout(dpp, 0) << "ERROR: zonegroup " << zg.get_name() << " should have a master zone " << dendl;
+      return -EINVAL;
+    }
+
+    if (zg.zones.find(zg.master_zone) == zg.zones.end()) {
+      ldpp_dout(dpp, 0) << "ERROR: zonegroup " << zg.get_name()
+                   << " has a non existent master zone "<< dendl;
+      return -EINVAL;
+    }
+
+    if (zg.is_master_zonegroup()) {
+      master_zonegroup = zg.get_id();
+      master_zone = zg.master_zone;
+    }
+
+    // NOTE(review): this inner 'ret' shadows the outer one declared above;
+    // harmless here, but worth cleaning up.
+    int ret = period_map.update(zg, cct);
+    if (ret < 0) {
+      return ret;
+    }
+  }
+
+  ret = period_config.read(dpp, sysobj_svc, realm_id, y);
+  if (ret < 0 && ret != -ENOENT) {
+    ldpp_dout(dpp, 0) << "ERROR: failed to read period config: "
+        << cpp_strerror(ret) << dendl;
+    return ret;
+  }
+  return 0;
+}
+
+// Turn this period into a staging period for pending configuration changes:
+// the current id becomes the predecessor, the id switches to the realm's
+// staging id, the zone map is cleared, and the realm epoch is bumped.
+void RGWPeriod::fork()
+{
+  ldout(cct, 20) << __func__ << " realm " << realm_id << " period " << id << dendl;
+  predecessor_uuid = id;
+  id = get_staging_id(realm_id);
+  period_map.reset();
+  realm_epoch++;
+}
+
static int read_sync_status(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, rgw_meta_sync_status *sync_status)
{
rgw::sal::RadosStore* rados_store = static_cast<rgw::sal::RadosStore*>(driver);
return 0;
}
+// Commit a staged period against the current one. Must run on the period's
+// master zone. Validates the predecessor/realm-epoch linkage against
+// current_period (writing a human-readable explanation to error_stream and
+// returning -EINVAL on mismatch). If the master zone changed, a brand-new
+// period object is created and set as the realm's current period; otherwise
+// the change is committed as the next epoch of the existing period id
+// (store_info + update_latest_epoch + reflect).
+int RGWPeriod::commit(const DoutPrefixProvider *dpp,
+                      rgw::sal::Driver* driver,
+                      RGWRealm& realm, const RGWPeriod& current_period,
+                      std::ostream& error_stream, optional_yield y,
+                      bool force_if_stale)
+{
+  auto zone_svc = sysobj_svc->get_zone_svc();
+  ldpp_dout(dpp, 20) << __func__ << " realm " << realm.get_id() << " period " << current_period.get_id() << dendl;
+  // gateway must be in the master zone to commit
+  if (master_zone != zone_svc->get_zone_params().get_id()) {
+    error_stream << "Cannot commit period on zone "
+        << zone_svc->get_zone_params().get_id() << ", it must be sent to "
+        "the period's master zone " << master_zone << '.' << std::endl;
+    return -EINVAL;
+  }
+  // period predecessor must match current period
+  if (predecessor_uuid != current_period.get_id()) {
+    error_stream << "Period predecessor " << predecessor_uuid
+        << " does not match current period " << current_period.get_id()
+        << ". Use 'period pull' to get the latest period from the master, "
+        "reapply your changes, and try again." << std::endl;
+    return -EINVAL;
+  }
+  // realm epoch must be 1 greater than current period
+  if (realm_epoch != current_period.get_realm_epoch() + 1) {
+    error_stream << "Period's realm epoch " << realm_epoch
+        << " does not come directly after current realm epoch "
+        << current_period.get_realm_epoch() << ". Use 'realm pull' to get the "
+        "latest realm and period from the master zone, reapply your changes, "
+        "and try again." << std::endl;
+    return -EINVAL;
+  }
+  // did the master zone change?
+  if (master_zone != current_period.get_master_zone()) {
+    // store the current metadata sync status in the period
+    int r = update_sync_status(dpp, driver, current_period, error_stream, force_if_stale);
+    if (r < 0) {
+      ldpp_dout(dpp, 0) << "failed to update metadata sync status: "
+          << cpp_strerror(-r) << dendl;
+      return r;
+    }
+    // create an object with a new period id
+    r = create(dpp, y, true);
+    if (r < 0) {
+      ldpp_dout(dpp, 0) << "failed to create new period: " << cpp_strerror(-r) << dendl;
+      return r;
+    }
+    // set as current period
+    r = realm.set_current_period(dpp, *this, y);
+    if (r < 0) {
+      ldpp_dout(dpp, 0) << "failed to update realm's current period: "
+          << cpp_strerror(-r) << dendl;
+      return r;
+    }
+    ldpp_dout(dpp, 4) << "Promoted to master zone and committed new period "
+        << id << dendl;
+    return 0;
+  }
+  // period must be based on current epoch
+  if (epoch != current_period.get_epoch()) {
+    error_stream << "Period epoch " << epoch << " does not match "
+        "predecessor epoch " << current_period.get_epoch()
+        << ". Use 'period pull' to get the latest epoch from the master zone, "
+        "reapply your changes, and try again." << std::endl;
+    return -EINVAL;
+  }
+  // set period as next epoch
+  set_id(current_period.get_id());
+  set_epoch(current_period.get_epoch() + 1);
+  set_predecessor(current_period.get_predecessor());
+  realm_epoch = current_period.get_realm_epoch();
+  // write the period to rados
+  int r = store_info(dpp, false, y);
+  if (r < 0) {
+    ldpp_dout(dpp, 0) << "failed to store period: " << cpp_strerror(-r) << dendl;
+    return r;
+  }
+  // set as latest epoch
+  r = update_latest_epoch(dpp, epoch, y);
+  if (r < 0) {
+    ldpp_dout(dpp, 0) << "failed to set latest epoch: " << cpp_strerror(-r) << dendl;
+    return r;
+  }
+  // reflect the committed period into the local zone/zonegroup objects
+  r = reflect(dpp, y);
+  if (r < 0) {
+    ldpp_dout(dpp, 0) << "failed to update local objects: " << cpp_strerror(-r) << dendl;
+    return r;
+  }
+  ldpp_dout(dpp, 4) << "Committed new epoch " << epoch
+      << " for period " << id << dendl;
+  return 0;
+}
+
void RGWPeriod::generate_test_instances(list<RGWPeriod*> &o)
{
RGWPeriod *z = new RGWPeriod;
do {
auto start = ceph::real_clock::now();
-
int r = processor->process(this);
if (r < 0) {
ldpp_dout(this, 0) << "ERROR: processor->process() returned error r=" << r << dendl;
class RGWMetaSyncProcessorThread : public RGWSyncProcessorThread
{
RGWMetaSyncStatusManager sync;
- rgw::sal::ConfigStore *cfgstore{nullptr};
uint64_t interval_msec() override {
return 0; /* no interval associated, it'll run once until stopped */
sync.stop();
}
public:
- RGWMetaSyncProcessorThread(rgw::sal::RadosStore* _driver, RGWAsyncRadosProcessor *async_rados,
- rgw::sal::ConfigStore *_cfgstore)
- : RGWSyncProcessorThread(_driver->getRados(), "meta-sync"), sync(_driver, async_rados), cfgstore(_cfgstore) {}
+ RGWMetaSyncProcessorThread(rgw::sal::RadosStore* _driver, RGWAsyncRadosProcessor *async_rados)
+ : RGWSyncProcessorThread(_driver->getRados(), "meta-sync"), sync(_driver, async_rados) {}
void wakeup_sync_shards(set<int>& shard_ids) {
for (set<int>::iterator iter = shard_ids.begin(); iter != shard_ids.end(); ++iter) {
}
int process(const DoutPrefixProvider *dpp) override {
- sync.run(dpp, null_yield, cfgstore);
+ sync.run(dpp, null_yield);
return 0;
}
};
PerfCountersRef counters;
RGWDataSyncStatusManager sync;
bool initialized;
- rgw::sal::ConfigStore *cfgstore{nullptr};
uint64_t interval_msec() override {
if (initialized) {
}
public:
RGWDataSyncProcessorThread(rgw::sal::RadosStore* _driver, RGWAsyncRadosProcessor *async_rados,
- const RGWZone* source_zone, rgw::sal::ConfigStore *_cfgstore)
+ const RGWZone* source_zone)
: RGWSyncProcessorThread(_driver->getRados(), "data-sync"),
counters(sync_counters::build(store->ctx(), std::string("data-sync-from-") + source_zone->name)),
sync(_driver, async_rados, source_zone->id, counters.get()),
- initialized(false), cfgstore(_cfgstore) {}
+ initialized(false) {}
void wakeup_sync_shards(bc::flat_map<int, bc::flat_set<rgw_data_notify_entry> >& entries) {
for (bc::flat_map<int, bc::flat_set<rgw_data_notify_entry> >::iterator iter = entries.begin(); iter != entries.end(); ++iter) {
/* we'll be back! */
return 0;
}
- sync.run(dpp, cfgstore);
+ sync.run(dpp);
return 0;
}
};
* Initialize the RADOS instance and prepare to do other ops
* Returns 0 on success, -ERR# on failure.
*/
-int RGWRados::init_complete(const DoutPrefixProvider *dpp, optional_yield y, rgw::sal::ConfigStore* cfgstore)
+int RGWRados::init_complete(const DoutPrefixProvider *dpp, optional_yield y)
{
int ret;
}
auto async_processor = svc.async_processor;
std::lock_guard l{meta_sync_thread_lock};
- meta_sync_processor_thread = new RGWMetaSyncProcessorThread(this->driver, async_processor, cfgstore);
+ meta_sync_processor_thread = new RGWMetaSyncProcessorThread(this->driver, async_processor);
ret = meta_sync_processor_thread->init(dpp);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to initialize meta sync thread" << dendl;
std::lock_guard dl{data_sync_thread_lock};
for (auto source_zone : svc.zone->get_data_sync_source_zones()) {
ldpp_dout(dpp, 5) << "starting data sync thread for zone " << source_zone->name << dendl;
- auto *thread = new RGWDataSyncProcessorThread(this->driver, svc.async_processor, source_zone, cfgstore);
+ auto *thread = new RGWDataSyncProcessorThread(this->driver, svc.async_processor, source_zone);
ret = thread->init(dpp);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to initialize data sync thread" << dendl;
}
int RGWRados::init_svc(bool raw, const DoutPrefixProvider *dpp,
- bool background_tasks, // Ignored when `raw`
- const rgw::SiteConfig& site, rgw::sal::ConfigStore* cfgstore)
+ bool background_tasks, // Ignored when `raw`
+ const rgw::SiteConfig& site)
{
if (raw) {
- return svc.init_raw(cct, driver, use_cache, null_yield, dpp, site, cfgstore);
+ return svc.init_raw(cct, driver, use_cache, null_yield, dpp, site);
}
- return svc.init(cct, driver, use_cache, run_sync_thread, background_tasks, null_yield, dpp, site, cfgstore);
+ return svc.init(cct, driver, use_cache, run_sync_thread, background_tasks, null_yield, dpp, site);
}
/**
*/
int RGWRados::init_begin(CephContext* _cct, const DoutPrefixProvider *dpp,
bool background_tasks,
- const rgw::SiteConfig& site, rgw::sal::ConfigStore* cfgstore)
+ const rgw::SiteConfig& site)
{
set_context(_cct);
int ret = driver->init_neorados(dpp);
return ret;
}
- ret = init_svc(false, dpp, background_tasks, site, cfgstore);
+ ret = init_svc(false, dpp, background_tasks, site);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to init services (ret=" << cpp_strerror(-ret) << ")" << dendl;
return ret;
CephContext *ctx() { return cct; }
/** do all necessary setup of the storage device */
int init_begin(CephContext *_cct, const DoutPrefixProvider *dpp,
- bool background_tasks, const rgw::SiteConfig& site, rgw::sal::ConfigStore* cfgstore);
+ bool background_tasks, const rgw::SiteConfig& site);
/** Initialize the RADOS instance and prepare to do other ops */
- int init_svc(bool raw, const DoutPrefixProvider *dpp, bool background_tasks, const rgw::SiteConfig& site, rgw::sal::ConfigStore* cfgstore);
+ int init_svc(bool raw, const DoutPrefixProvider *dpp, bool background_tasks, const rgw::SiteConfig& site);
virtual int init_rados();
- int init_complete(const DoutPrefixProvider *dpp, optional_yield y, rgw::sal::ConfigStore* cfgstore);
+ int init_complete(const DoutPrefixProvider *dpp, optional_yield y);
void finalize();
int register_to_service_map(const DoutPrefixProvider *dpp, const std::string& daemon_type, const std::map<std::string, std::string>& meta);
#include "rgw_rest_config.h"
#include "rgw_zone.h"
#include "rgw_sal_rados.h"
-#include "rgw_sal_config.h"
-#include "rgw_process_env.h"
#include "services/svc_zone.h"
#include "services/svc_mdlog.h"
RESTArgs::get_string(s, "period_id", period_id, &period_id);
RESTArgs::get_uint32(s, "epoch", 0, &epoch);
- op_ret = s->penv.cfgstore->read_period(this, y, period_id, std::nullopt, period);
+ period.set_id(period_id);
+ period.set_epoch(epoch);
+
+ op_ret = period.init(this, driver->ctx(), static_cast<rgw::sal::RadosStore*>(driver)->svc()->sysobj, realm_id, y);
if (op_ret < 0)
ldpp_dout(this, 5) << "failed to read period" << dendl;
}
{
auto cct = driver->ctx();
+ // initialize the period without reading from rados
+ period.init(this, cct, static_cast<rgw::sal::RadosStore*>(driver)->svc()->sysobj, y, false);
+
// decode the period from input
const auto max_size = cct->_conf->rgw_max_put_param_size;
bool empty;
// load the realm and current period from rados; there may be a more recent
// period that we haven't restarted with yet. we also don't want to modify
// the objects in use by RGWRados
- std::string_view realm_id = period.get_realm();
- constexpr std::string_view realm_name; // empty, look up by id only
- RGWRealm realm;
- std::unique_ptr<rgw::sal::RealmWriter> realm_writer;
- op_ret = rgw::read_realm(this, y, s->penv.cfgstore, realm_id, realm_name, realm, &realm_writer);
+ RGWRealm realm(period.get_realm());
+ op_ret = realm.init(this, cct, static_cast<rgw::sal::RadosStore*>(driver)->svc()->sysobj, y);
if (op_ret < 0) {
ldpp_dout(this, -1) << "failed to read current realm: "
<< cpp_strerror(-op_ret) << dendl;
}
RGWPeriod current_period;
- op_ret = s->penv.cfgstore->read_period(this, y, realm.current_period, std::nullopt, current_period);
+ op_ret = current_period.init(this, cct, static_cast<rgw::sal::RadosStore*>(driver)->svc()->sysobj, realm.get_id(), y);
if (op_ret < 0) {
ldpp_dout(this, -1) << "failed to read current period: "
<< cpp_strerror(-op_ret) << dendl;
// if period id is empty, handle as 'period commit'
if (period.get_id().empty()) {
- op_ret = rgw::commit_period(this, y, s->penv.cfgstore, driver, realm, *realm_writer, current_period, period, error_stream, false);
+ op_ret = period.commit(this, driver, realm, current_period, error_stream, y);
if (op_ret == -EEXIST) {
op_ret = 0; // succeed on retries so the op is idempotent
return;
}
// write the period to rados
- op_ret = s->penv.cfgstore->create_period(this, y, false, period);
+ op_ret = period.store_info(this, false, y);
if (op_ret < 0) {
ldpp_dout(this, -1) << "failed to store period " << period.get_id() << dendl;
return;
}
// set as latest epoch
- op_ret = s->penv.cfgstore->update_latest_epoch(this, y, period.get_id(), period.get_epoch());
+ op_ret = period.update_latest_epoch(this, period.get_epoch(), y);
if (op_ret == -EEXIST) {
// already have this epoch (or a more recent one)
ldpp_dout(this, 4) << "already have epoch >= " << period.get_epoch()
return;
}
// attach a copy of the period into the period history
- auto cursor = period_history->attach(this, RGWPeriod{period}, y, s->penv.cfgstore);
+ auto cursor = period_history->attach(this, RGWPeriod{period}, y);
if (!cursor) {
// we're missing some history between the new period and current_period
op_ret = cursor.get_error();
return;
}
// set as current period
- op_ret = rgw::realm_set_current_period(this, y, s->penv.cfgstore, *realm_writer, realm, period);
+ op_ret = realm.set_current_period(this, period, y);
if (op_ret < 0) {
ldpp_dout(this, -1) << "failed to update realm's current period" << dendl;
return;
return;
}
// reflect the period into our local objects
- op_ret = rgw::reflect_period(this, y, s->penv.cfgstore, period);
+ op_ret = period.reflect(this, y);
if (op_ret < 0) {
ldpp_dout(this, -1) << "failed to update local objects: "
<< cpp_strerror(-op_ret) << dendl;
if (notify_realm) {
// trigger realm reload after sending the response, because reload may
// race to close this connection
- (void) s->penv.cfgstore->realm_notify_new_period(this, s->yield, period);
+ notify_realm->notify_new_period(this, period, s->yield);
}
}
// read realm
realm.reset(new RGWRealm(id, name));
- op_ret = rgw::read_realm(this, y, s->penv.cfgstore, realm->get_id(), realm->get_name(), *realm);
+ op_ret = realm->init(this, g_ceph_context, static_cast<rgw::sal::RadosStore*>(driver)->svc()->sysobj, y);
if (op_ret < 0)
ldpp_dout(this, -1) << "failed to read realm id=" << id
<< " name=" << name << dendl;
void RGWOp_Realm_List::execute(optional_yield y)
{
- s->penv.cfgstore->read_default_realm_id(this, y, default_id);
+ {
+ // read default realm
+ RGWRealm realm(driver->ctx(), static_cast<rgw::sal::RadosStore*>(driver)->svc()->sysobj);
+ [[maybe_unused]] int ret = realm.read_default_id(this, default_id, y);
+ }
op_ret = static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->list_realms(this, realms);
if (op_ret < 0)
ldpp_dout(this, -1) << "failed to list realms" << dendl;
bool run_sync,
bool background_tasks,
optional_yield y,
- const DoutPrefixProvider *dpp,
- rgw::sal::ConfigStore* cfgstore,
- const rgw::SiteConfig* site)
+ const DoutPrefixProvider *dpp)
{
finisher = std::make_unique<RGWSI_Finisher>(cct);
bucket_sobj = std::make_unique<RGWSI_Bucket_SObj>(cct);
cls = std::make_unique<RGWSI_Cls>(cct);
config_key_rados = std::make_unique<RGWSI_ConfigKey_RADOS>(cct);
datalog_rados = std::make_unique<RGWDataChangesLog>(cct);
- mdlog = std::make_unique<RGWSI_MDLog>(cct, run_sync, cfgstore);
+ mdlog = std::make_unique<RGWSI_MDLog>(cct, run_sync);
notify = std::make_unique<RGWSI_Notify>(cct);
- zone = std::make_unique<RGWSI_Zone>(cct, cfgstore, site);
+ zone = std::make_unique<RGWSI_Zone>(cct);
zone_utils = std::make_unique<RGWSI_ZoneUtils>(cct);
quota = std::make_unique<RGWSI_Quota>(cct);
sync_modules = std::make_unique<RGWSI_SyncModules>(cct);
has_shutdown = true;
}
-int RGWServices::do_init(CephContext *_cct, rgw::sal::RadosStore* driver, bool have_cache, bool raw, bool run_sync, bool background_tasks, optional_yield y, const DoutPrefixProvider *dpp, const rgw::SiteConfig& _site, rgw::sal::ConfigStore* cfgstore)
+int RGWServices::do_init(CephContext *_cct, rgw::sal::RadosStore* driver, bool have_cache, bool raw, bool run_sync, bool background_tasks, optional_yield y, const DoutPrefixProvider *dpp, const rgw::SiteConfig& _site)
{
cct = _cct;
site = &_site;
- int r = _svc.init(cct, driver, have_cache, raw, run_sync, background_tasks, y, dpp, cfgstore, site);
+ int r = _svc.init(cct, driver, have_cache, raw, run_sync, background_tasks, y, dpp);
if (r < 0) {
return r;
}
#include "rgw_common.h"
-namespace rgw {
- class SiteConfig;
- namespace sal {
- class RadosStore;
- }
+namespace rgw::sal {
+class RadosStore;
}
struct RGWServices_Def;
int init(CephContext *cct, rgw::sal::RadosStore* store, bool have_cache,
bool raw_storage, bool run_sync, bool background_tasks,
- optional_yield y, const DoutPrefixProvider *dpp, rgw::sal::ConfigStore* cfgstore, const rgw::SiteConfig* site);
+ optional_yield y, const DoutPrefixProvider *dpp);
void shutdown();
};
int do_init(CephContext *cct, rgw::sal::RadosStore* store, bool have_cache,
bool raw_storage, bool run_sync, bool background_tasks, optional_yield y,
- const DoutPrefixProvider *dpp, const rgw::SiteConfig& site, rgw::sal::ConfigStore* cfgstore);
+ const DoutPrefixProvider *dpp, const rgw::SiteConfig& site);
int init(CephContext *cct, rgw::sal::RadosStore* store, bool have_cache,
bool run_sync, bool background_tasks, optional_yield y, const DoutPrefixProvider *dpp,
- const rgw::SiteConfig& site, rgw::sal::ConfigStore* cfgstore) {
- return do_init(cct, store, have_cache, false, run_sync, background_tasks, y, dpp, site, cfgstore);
+ const rgw::SiteConfig& site) {
+ return do_init(cct, store, have_cache, false, run_sync, background_tasks, y, dpp, site);
}
int init_raw(CephContext *cct, rgw::sal::RadosStore* store,
bool have_cache, optional_yield y,
const DoutPrefixProvider *dpp,
- const rgw::SiteConfig& site, rgw::sal::ConfigStore* cfgstore) {
- return do_init(cct, store, have_cache, true, false, false, y, dpp, site, cfgstore);
+ const rgw::SiteConfig& site) {
+ return do_init(cct, store, have_cache, true, false, false, y, dpp, site);
}
void shutdown() {
_svc.shutdown();
static RGWPeriodHistory::Cursor get_period_at(const DoutPrefixProvider *dpp,
rgw::sal::RadosStore* store,
const rgw_meta_sync_info& info,
- optional_yield y, rgw::sal::ConfigStore* cfgstore)
+ optional_yield y)
{
if (info.period.empty()) {
// return an empty cursor with error=0
// read the period from rados or pull it from the master
RGWPeriod period;
- int r = store->svc()->mdlog->pull_period(dpp, info.period, period, y, cfgstore);
+ int r = store->svc()->mdlog->pull_period(dpp, info.period, period, y);
if (r < 0) {
ldpp_dout(dpp, -1) << "ERROR: failed to read period id "
<< info.period << ": " << cpp_strerror(r) << dendl;
return RGWPeriodHistory::Cursor{r};
}
// attach the period to our history
- cursor = store->svc()->mdlog->get_period_history()->attach(dpp, std::move(period), y, cfgstore);
+ cursor = store->svc()->mdlog->get_period_history()->attach(dpp, std::move(period), y);
if (!cursor) {
r = cursor.get_error();
ldpp_dout(dpp, -1) << "ERROR: failed to read period history back to "
return cursor;
}
-int RGWRemoteMetaLog::run_sync(const DoutPrefixProvider *dpp, optional_yield y, rgw::sal::ConfigStore* cfgstore)
+int RGWRemoteMetaLog::run_sync(const DoutPrefixProvider *dpp, optional_yield y)
{
if (store->svc()->zone->is_meta_master()) {
return 0;
case rgw_meta_sync_info::StateSync:
tn->log(20, "sync");
// find our position in the period history (if any)
- cursor = get_period_at(dpp, store, sync_status.sync_info, y, cfgstore);
+ cursor = get_period_at(dpp, store, sync_status.sync_info, y);
r = cursor.get_error();
if (r < 0) {
return r;
int read_master_log_shards_next(const DoutPrefixProvider *dpp, const std::string& period, std::map<int, std::string> shard_markers, std::map<int, rgw_mdlog_shard_data> *result);
int read_sync_status(const DoutPrefixProvider *dpp, rgw_meta_sync_status *sync_status);
int init_sync_status(const DoutPrefixProvider *dpp);
- int run_sync(const DoutPrefixProvider *dpp, optional_yield y, rgw::sal::ConfigStore* cfgstore);
+ int run_sync(const DoutPrefixProvider *dpp, optional_yield y);
void wakeup(int shard_id);
return master_log.read_master_log_shards_next(dpp, period, shard_markers, result);
}
- int run(const DoutPrefixProvider *dpp, optional_yield y, rgw::sal::ConfigStore* cfgstore) { return master_log.run_sync(dpp, y, cfgstore); }
+ int run(const DoutPrefixProvider *dpp, optional_yield y) { return master_log.run_sync(dpp, y); }
// implements DoutPrefixProvider
struct RGWAccessKey;
-namespace rgw {
/// Generate a random uuid for realm/period/zonegroup/zone ids
-std::string gen_random_uuid()
+static std::string gen_random_uuid()
{
uuid_d uuid;
uuid.generate_random();
return uuid.to_string();
}
-}
void RGWDefaultZoneGroupInfo::dump(Formatter *f) const {
encode_json("default_zonegroup", default_zonegroup, f);
}
}
+// Create the "default" zonegroup (and its default zone) with the standard
+// "default-placement" target. Both the zone-params create and the zonegroup
+// create tolerate -EEXIST by re-initializing from the object another racer
+// already wrote. With old_format, the zonegroup's name is set to its id.
+// NOTE(review): the result of post_process_params() is ignored — confirm
+// that is intentional.
+int RGWZoneGroup::create_default(const DoutPrefixProvider *dpp, optional_yield y, bool old_format)
+{
+  name = default_zonegroup_name;
+  api_name = default_zonegroup_name;
+  is_master = true;
+
+  RGWZoneGroupPlacementTarget placement_target;
+  placement_target.name = "default-placement";
+  placement_targets[placement_target.name] = placement_target;
+  default_placement.name = "default-placement";
+
+  RGWZoneParams zone_params(default_zone_name);
+
+  // init without reading from rados; we are about to create the object
+  int r = zone_params.init(dpp, cct, sysobj_svc, y, false);
+  if (r < 0) {
+    ldpp_dout(dpp, 0) << "create_default: error initializing zone params: " << cpp_strerror(-r) << dendl;
+    return r;
+  }
+
+  r = zone_params.create_default(dpp, y);
+  if (r < 0 && r != -EEXIST) {
+    ldpp_dout(dpp, 0) << "create_default: error in create_default zone params: " << cpp_strerror(-r) << dendl;
+    return r;
+  } else if (r == -EEXIST) {
+    ldpp_dout(dpp, 10) << "zone_params::create_default() returned -EEXIST, we raced with another default zone_params creation" << dendl;
+    zone_params.clear_id();
+    r = zone_params.init(dpp, cct, sysobj_svc, y);
+    if (r < 0) {
+      ldpp_dout(dpp, 0) << "create_default: error in init existing zone params: " << cpp_strerror(-r) << dendl;
+      return r;
+    }
+    ldpp_dout(dpp, 20) << "zone_params::create_default() " << zone_params.get_name() << " id " << zone_params.get_id()
+                       << dendl;
+  }
+
+  RGWZone& default_zone = zones[zone_params.get_id()];
+  default_zone.name = zone_params.get_name();
+  default_zone.id = zone_params.get_id();
+  master_zone = default_zone.id;
+
+  // initialize supported zone features
+  default_zone.supported_features.insert(rgw::zone_features::supported.begin(),
+                                         rgw::zone_features::supported.end());
+  // enable default zonegroup features
+  enabled_features.insert(rgw::zone_features::enabled.begin(),
+                          rgw::zone_features::enabled.end());
+
+  r = create(dpp, y);
+  if (r < 0 && r != -EEXIST) {
+    ldpp_dout(dpp, 0) << "error storing zone group info: " << cpp_strerror(-r) << dendl;
+    return r;
+  }
+
+  if (r == -EEXIST) {
+    ldpp_dout(dpp, 10) << "create_default() returned -EEXIST, we raced with another zonegroup creation" << dendl;
+    id.clear();
+    r = init(dpp, cct, sysobj_svc, y);
+    if (r < 0) {
+      return r;
+    }
+  }
+
+  if (old_format) {
+    name = id;
+  }
+
+  post_process_params(dpp, y);
+
+  return 0;
+}
+
int RGWZoneGroup::equals(const string& other_zonegroup) const
{
if (is_master && other_zonegroup.empty())
return (id == other_zonegroup);
}
+// Add (or update) a zone entry in this zonegroup and persist the change.
+//
+// The pointer/optional arguments act as "apply only if non-null" switches so
+// callers can modify a subset of zone properties. On success the zonegroup is
+// re-derived (log_data flags, placement targets, default master) via
+// post_process_params() and stored with update().
+//
+// Returns 0 on success; -EEXIST for a duplicate zone name on insert, -ENOENT
+// for an unknown sync module, -EINVAL when disabling a feature that is still
+// enabled at the zonegroup level, or the error from update().
+int RGWZoneGroup::add_zone(const DoutPrefixProvider *dpp,
+                           const RGWZoneParams& zone_params, bool *is_master, bool *read_only,
+                           const list<string>& endpoints, const string *ptier_type,
+                           bool *psync_from_all, list<string>& sync_from, list<string>& sync_from_rm,
+                           string *predirect_zone, std::optional<int> bucket_index_max_shards,
+                           RGWSyncModulesManager *sync_mgr,
+                           const rgw::zone_features::set& enable_features,
+                           const rgw::zone_features::set& disable_features,
+                           optional_yield y)
+{
+  auto& zone_id = zone_params.get_id();
+  auto& zone_name = zone_params.get_name();
+
+  // check for duplicate zone name on insert
+  if (!zones.count(zone_id)) {
+    for (const auto& zone : zones) {
+      if (zone.second.name == zone_name) {
+        ldpp_dout(dpp, 0) << "ERROR: found existing zone name " << zone_name
+          << " (" << zone.first << ") in zonegroup " << get_name() << dendl;
+        return -EEXIST;
+      }
+    }
+  }
+
+  if (is_master) {
+    if (*is_master) {
+      if (!master_zone.empty() && master_zone != zone_id) {
+        ldpp_dout(dpp, 0) << "NOTICE: overriding master zone: " << master_zone << dendl;
+      }
+      master_zone = zone_id;
+    } else if (master_zone == zone_id) {
+      master_zone.clear();
+    }
+  }
+
+  // operator[] inserts a fresh entry when the zone is not yet a member
+  RGWZone& zone = zones[zone_id];
+  zone.name = zone_name;
+  zone.id = zone_id;
+  if (!endpoints.empty()) {
+    zone.endpoints = endpoints;
+  }
+  if (read_only) {
+    zone.read_only = *read_only;
+  }
+  if (ptier_type) {
+    zone.tier_type = *ptier_type;
+    if (!sync_mgr->get_module(*ptier_type, nullptr)) {
+      // fix: message previously read "could not found sync module"
+      ldpp_dout(dpp, 0) << "ERROR: could not find sync module: " << *ptier_type
+                        << ", valid sync modules: "
+                        << sync_mgr->get_registered_module_names()
+                        << dendl;
+      return -ENOENT;
+    }
+  }
+
+  if (psync_from_all) {
+    zone.sync_from_all = *psync_from_all;
+  }
+
+  if (predirect_zone) {
+    zone.redirect_zone = *predirect_zone;
+  }
+
+  if (bucket_index_max_shards) {
+    zone.bucket_index_max_shards = *bucket_index_max_shards;
+  }
+
+  // addition runs before removal, so a name in both lists is removed
+  for (auto add : sync_from) {
+    zone.sync_from.insert(add);
+  }
+
+  for (auto rm : sync_from_rm) {
+    zone.sync_from.erase(rm);
+  }
+
+  zone.supported_features.insert(enable_features.begin(),
+                                 enable_features.end());
+
+  for (const auto& feature : disable_features) {
+    // a feature still enabled zonegroup-wide cannot be disabled per-zone
+    if (enabled_features.contains(feature)) {
+      lderr(cct) << "ERROR: Cannot disable zone feature \"" << feature
+          << "\" until it's been disabled in zonegroup " << name << dendl;
+      return -EINVAL;
+    }
+    auto i = zone.supported_features.find(feature);
+    if (i == zone.supported_features.end()) {
+      ldout(cct, 1) << "WARNING: zone feature \"" << feature
+          << "\" was not enabled in zone " << zone.name << dendl;
+      continue;
+    }
+    zone.supported_features.erase(i);
+  }
+
+  post_process_params(dpp, y);
+
+  return update(dpp, y);
+}
+
+
+// Refresh the cached name of a member zone from its RGWZoneParams and
+// persist the zonegroup.
+// NOTE(review): zones[...] default-inserts an empty RGWZone entry when the
+// zone id is not already a member -- assumes callers only rename zones that
+// belong to this zonegroup; confirm against callers.
+int RGWZoneGroup::rename_zone(const DoutPrefixProvider *dpp,
+                              const RGWZoneParams& zone_params,
+                              optional_yield y)
+{
+  RGWZone& zone = zones[zone_params.get_id()];
+  zone.name = zone_params.get_name();
+
+  return update(dpp, y);
+}
+
+// Re-derive per-zone and zonegroup-wide state after membership changes:
+// data-change logging, a fallback master zone, the union of placement
+// targets, and a default placement rule. Does not persist; callers are
+// expected to follow with update().
+void RGWZoneGroup::post_process_params(const DoutPrefixProvider *dpp, optional_yield y)
+{
+  // data-change logs are only needed when more than one zone can sync
+  bool log_data = zones.size() > 1;
+
+  // no master configured: fall back to an arbitrary (first) zone
+  if (master_zone.empty()) {
+    auto iter = zones.begin();
+    if (iter != zones.end()) {
+      master_zone = iter->first;
+    }
+  }
+
+  for (auto& item : zones) {
+    RGWZone& zone = item.second;
+    zone.log_data = log_data;
+
+    RGWZoneParams zone_params(zone.id, zone.name);
+    int ret = zone_params.init(dpp, cct, sysobj_svc, y);
+    if (ret < 0) {
+      // best-effort: an unreadable zone is skipped, not fatal
+      ldpp_dout(dpp, 0) << "WARNING: could not read zone params for zone id=" << zone.id << " name=" << zone.name << dendl;
+      continue;
+    }
+
+    // import any placement targets the zone defines that the zonegroup
+    // does not know about yet
+    for (auto& pitem : zone_params.placement_pools) {
+      const string& placement_name = pitem.first;
+      if (placement_targets.find(placement_name) == placement_targets.end()) {
+        RGWZoneGroupPlacementTarget placement_target;
+        placement_target.name = placement_name;
+        placement_targets[placement_name] = placement_target;
+      }
+    }
+  }
+
+  if (default_placement.empty() && !placement_targets.empty()) {
+    default_placement.init(placement_targets.begin()->first, RGW_STORAGE_CLASS_STANDARD);
+  }
+}
+
+// Remove a zone from this zonegroup by id, re-derive dependent state and
+// persist. Returns -ENOENT when the zone is not a member, else the result
+// of update().
+int RGWZoneGroup::remove_zone(const DoutPrefixProvider *dpp, const std::string& zone_id, optional_yield y)
+{
+  auto iter = zones.find(zone_id);
+  if (iter == zones.end()) {
+    ldpp_dout(dpp, 0) << "zone id " << zone_id << " is not a part of zonegroup "
+        << name << dendl;
+    return -ENOENT;
+  }
+
+  zones.erase(iter);
+
+  // may elect a new master / recompute log_data now that membership changed
+  post_process_params(dpp, y);
+
+  return update(dpp, y);
+}
+
void RGWDefaultSystemMetaObjInfo::dump(Formatter *f) const {
encode_json("default_id", default_id, f);
}
JSONDecoder::decode_json("default_id", default_id, obj);
}
+// Rename this meta object: verify the new name is unused, store the updated
+// info and the new name->id mapping, then delete the old name mapping.
+// Returns -EEXIST when an object already owns new_name.
+// NOTE(review): the three writes are not atomic -- a crash between them can
+// leave both old and new name objects, or updated info with the old name
+// mapping still present.
+int RGWSystemMetaObj::rename(const DoutPrefixProvider *dpp, const string& new_name, optional_yield y)
+{
+  string new_id;
+  // read_id() returning 0 means new_name already resolves to some object
+  int ret = read_id(dpp, new_name, new_id, y);
+  if (!ret) {
+    return -EEXIST;
+  }
+  if (ret < 0 && ret != -ENOENT) {
+    ldpp_dout(dpp, 0) << "Error read_id " << new_name << ": " << cpp_strerror(-ret) << dendl;
+    return ret;
+  }
+  string old_name = name;
+  name = new_name;
+  ret = update(dpp, y);
+  if (ret < 0) {
+    ldpp_dout(dpp, 0) << "Error storing new obj info " << new_name << ": " << cpp_strerror(-ret) << dendl;
+    return ret;
+  }
+  // exclusive create of the new name->id object
+  ret = store_name(dpp, true, y);
+  if (ret < 0) {
+    ldpp_dout(dpp, 0) << "Error storing new name " << new_name << ": " << cpp_strerror(-ret) << dendl;
+    return ret;
+  }
+  /* delete old name */
+  rgw_pool pool(get_pool(cct));
+  string oid = get_names_oid_prefix() + old_name;
+  rgw_raw_obj old_name_obj(pool, oid);
+  auto sysobj = sysobj_svc->get_obj(old_name_obj);
+  ret = sysobj.wop().remove(dpp, y);
+  if (ret < 0) {
+    ldpp_dout(dpp, 0) << "Error delete old obj name " << old_name << ": " << cpp_strerror(-ret) << dendl;
+    return ret;
+  }
+
+  return ret;
+}
+
+// Resolve this object's id from its name, then load its full info.
+// Requires 'name' to be set; fills 'id' as a side effect of read_id().
+int RGWSystemMetaObj::read(const DoutPrefixProvider *dpp, optional_yield y)
+{
+  int ret = read_id(dpp, name, id, y);
+  if (ret < 0) {
+    return ret;
+  }
+
+  return read_info(dpp, id, y);
+}
+
+// Create the default zone params object under the well-known default zone
+// name. With old_format the zone is keyed by name rather than a generated
+// id, so the name is rewritten to the id for compatibility.
+int RGWZoneParams::create_default(const DoutPrefixProvider *dpp, optional_yield y, bool old_format)
+{
+  name = default_zone_name;
+
+  int r = create(dpp, y);
+  if (r < 0) {
+    return r;
+  }
+
+  if (old_format) {
+    name = id;
+  }
+
+  return r;
+}
+
const string& RGWZoneParams::get_compression_type(const rgw_placement_rule& placement_rule) const
{
static const std::string NONE{"none"};
RGWPeriod& info, std::ostream& error_stream,
bool force_if_stale)
{
- ldpp_dout(dpp, 20) << __func__ << " realm " << realm.id
- << " period " << current_period.id << dendl;
auto zone_svc = static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone; // XXX
+ ldpp_dout(dpp, 20) << __func__ << " realm " << realm.id
+ << " period " << current_period.id << dendl;
// gateway must be in the master zone to commit
if (info.master_zone != zone_svc->get_zone_params().id) {
error_stream << "Cannot commit period on zone "
#include "rgw_sync_policy.h"
-struct RGWZoneParams {
+class RGWSyncModulesManager;
+
+class RGWSI_SysObj;
+class RGWSI_Zone;
+
+// Base class for named/id'd system metadata objects (realm, zonegroup,
+// zone params) stored via the sysobj service. Provides id<->name mapping,
+// default-object handling, and versioned encode/decode of {id, name}.
+// NOTE(review): the second 'public:' below is redundant -- the first section
+// (fields + low-level storage helpers) looks like it was meant to be
+// 'protected:'; confirm intended access control.
+class RGWSystemMetaObj {
+public:
+  std::string id;
+  std::string name;
+
+  CephContext *cct{nullptr};
+  RGWSI_SysObj *sysobj_svc{nullptr};
+  RGWSI_Zone *zone_svc{nullptr};
+
+  int store_name(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y);
+  int store_info(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y);
+  int read_info(const DoutPrefixProvider *dpp, const std::string& obj_id, optional_yield y, bool old_format = false);
+  int read_id(const DoutPrefixProvider *dpp, const std::string& obj_name, std::string& obj_id, optional_yield y);
+  int read_default(const DoutPrefixProvider *dpp,
+                   RGWDefaultSystemMetaObjInfo& default_info,
+                   const std::string& oid,
+                   optional_yield y);
+  /* read and use default id */
+  int use_default(const DoutPrefixProvider *dpp, optional_yield y, bool old_format = false);
+
+public:
+  RGWSystemMetaObj() {}
+  RGWSystemMetaObj(const std::string& _name): name(_name) {}
+  RGWSystemMetaObj(const std::string& _id, const std::string& _name) : id(_id), name(_name) {}
+  RGWSystemMetaObj(CephContext *_cct, RGWSI_SysObj *_sysobj_svc) {
+    reinit_instance(_cct, _sysobj_svc);
+  }
+  RGWSystemMetaObj(const std::string& _name, CephContext *_cct, RGWSI_SysObj *_sysobj_svc): name(_name) {
+    reinit_instance(_cct, _sysobj_svc);
+  }
+
+  const std::string& get_name() const { return name; }
+  const std::string& get_id() const { return id; }
+
+  void set_name(const std::string& _name) { name = _name;}
+  void set_id(const std::string& _id) { id = _id;}
+  void clear_id() { id.clear(); }
+
+  virtual ~RGWSystemMetaObj() {}
+
+  // encodes only {id, name}; derived classes wrap this inside their own
+  // ENCODE_START/FINISH for backward-compatible layouts
+  virtual void encode(bufferlist& bl) const {
+    ENCODE_START(1, 1, bl);
+    encode(id, bl);
+    encode(name, bl);
+    ENCODE_FINISH(bl);
+  }
+
+  virtual void decode(bufferlist::const_iterator& bl) {
+    DECODE_START(1, bl);
+    decode(id, bl);
+    decode(name, bl);
+    DECODE_FINISH(bl);
+  }
+
+  void reinit_instance(CephContext *_cct, RGWSI_SysObj *_sysobj_svc);
+  int init(const DoutPrefixProvider *dpp, CephContext *_cct, RGWSI_SysObj *_sysobj_svc,
+           optional_yield y,
+           bool setup_obj = true, bool old_format = false);
+  virtual int read_default_id(const DoutPrefixProvider *dpp, std::string& default_id, optional_yield y,
+                              bool old_format = false);
+  virtual int set_as_default(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = false);
+  int delete_default();
+  virtual int create(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = true);
+  int delete_obj(const DoutPrefixProvider *dpp, optional_yield y, bool old_format = false);
+  int rename(const DoutPrefixProvider *dpp, const std::string& new_name, optional_yield y);
+  int update(const DoutPrefixProvider *dpp, optional_yield y) { return store_info(dpp, false, y);}
+  int update_name(const DoutPrefixProvider *dpp, optional_yield y) { return store_name(dpp, false, y);}
+  int read(const DoutPrefixProvider *dpp, optional_yield y);
+  int write(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y);
+
+  // storage layout hooks each concrete type must provide
+  virtual rgw_pool get_pool(CephContext *cct) const = 0;
+  virtual const std::string get_default_oid(bool old_format = false) const = 0;
+  virtual const std::string& get_names_oid_prefix() const = 0;
+  virtual const std::string& get_info_oid_prefix(bool old_format = false) const = 0;
+  virtual std::string get_predefined_id(CephContext *cct) const = 0;
+  virtual const std::string& get_predefined_name(CephContext *cct) const = 0;
+
+  void dump(Formatter *f) const;
+  void decode_json(JSONObj *obj);
+};
+WRITE_CLASS_ENCODER(RGWSystemMetaObj)
+
+struct RGWZoneParams : RGWSystemMetaObj {
rgw_pool domain_root;
rgw_pool control_pool;
rgw_pool gc_pool;
JSONFormattable tier_config;
- RGWZoneParams() {}
- explicit RGWZoneParams(const std::string& _name) : name(_name){}
- RGWZoneParams(const rgw_zone_id& _id, const std::string& _name) : id(_id.id), name(_name) {}
- RGWZoneParams(const rgw_zone_id& _id, const std::string& _name, const std::string& _realm_id)
- : id(_id.id), name(_name), realm_id(_realm_id) {}
-
- const std::string& get_name() const { return name; }
- const std::string& get_id() const { return id; }
-
- void set_name(const std::string& _name) { name = _name;}
- void set_id(const std::string& _id) { id = _id;}
- void clear_id() { id.clear(); }
-
- rgw_pool get_pool(CephContext *cct) const;
+ RGWZoneParams() : RGWSystemMetaObj() {}
+ explicit RGWZoneParams(const std::string& name) : RGWSystemMetaObj(name){}
+ RGWZoneParams(const rgw_zone_id& id, const std::string& name) : RGWSystemMetaObj(id.id, name) {}
+ RGWZoneParams(const rgw_zone_id& id, const std::string& name, const std::string& _realm_id)
+ : RGWSystemMetaObj(id.id, name), realm_id(_realm_id) {}
+ virtual ~RGWZoneParams();
+
+ rgw_pool get_pool(CephContext *cct) const override;
+ const std::string get_default_oid(bool old_format = false) const override;
+ const std::string& get_names_oid_prefix() const override;
+ const std::string& get_info_oid_prefix(bool old_format = false) const override;
+ std::string get_predefined_id(CephContext *cct) const override;
+ const std::string& get_predefined_name(CephContext *cct) const override;
+
+ int init(const DoutPrefixProvider *dpp,
+ CephContext *_cct, RGWSI_SysObj *_sysobj_svc, optional_yield y,
+ bool setup_obj = true, bool old_format = false);
+ using RGWSystemMetaObj::init;
+ int read_default_id(const DoutPrefixProvider *dpp, std::string& default_id, optional_yield y, bool old_format = false) override;
+ int set_as_default(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = false) override;
+ int create_default(const DoutPrefixProvider *dpp, optional_yield y, bool old_format = false);
+ int create(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = true) override;
+ int fix_pool_names(const DoutPrefixProvider *dpp, optional_yield y);
const std::string& get_compression_type(const rgw_placement_rule& placement_rule) const;
- void encode(bufferlist& bl) const {
+ void encode(bufferlist& bl) const override {
ENCODE_START(15, 1, bl);
encode(domain_root, bl);
encode(control_pool, bl);
encode(user_email_pool, bl);
encode(user_swift_pool, bl);
encode(user_uid_pool, bl);
- {
- // these used to be wrapped by RGWSystemMetaObj::encode(),
- // so the extra ENCODE_START/ENCODE_FINISH are preserved
- ENCODE_START(1, 1, bl);
- encode(id, bl);
- encode(name, bl);
- ENCODE_FINISH(bl);
- }
+ RGWSystemMetaObj::encode(bl);
encode(system_key, bl);
encode(placement_pools, bl);
rgw_pool unused_metadata_heap;
ENCODE_FINISH(bl);
}
- void decode(bufferlist::const_iterator& bl) {
+ void decode(bufferlist::const_iterator& bl) override {
DECODE_START(15, bl);
decode(domain_root, bl);
decode(control_pool, bl);
decode(user_swift_pool, bl);
decode(user_uid_pool, bl);
if (struct_v >= 6) {
- {
- // these used to be wrapped by RGWSystemMetaObj::decode(),
- // so the extra DECODE_START/DECODE_FINISH are preserved
- DECODE_START(1, bl);
- decode(id, bl);
- decode(name, bl);
- DECODE_FINISH(bl);
- }
+ RGWSystemMetaObj::decode(bl);
} else if (struct_v >= 2) {
decode(name, bl);
id = name;
};
WRITE_CLASS_ENCODER(RGWZoneParams)
-struct RGWZoneGroup {
- std::string id;
- std::string name;
+struct RGWZoneGroup : public RGWSystemMetaObj {
std::string api_name;
std::list<std::string> endpoints;
bool is_master = false;
rgw_sync_policy_info sync_policy;
rgw::zone_features::set enabled_features;
- CephContext *cct{nullptr};
RGWZoneGroup(): is_master(false){}
- RGWZoneGroup(const std::string &_id, const std::string &_name):id(_id), name(_name) {}
- explicit RGWZoneGroup(const std::string &_name):name(_name) {}
- RGWZoneGroup(const std::string &_name, bool _is_master, const std::string& _realm_id,
- const std::list<std::string>& _endpoints)
- : name(_name), endpoints(_endpoints), is_master(_is_master), realm_id(_realm_id) {}
-
- const std::string& get_name() const { return name; }
- const std::string& get_id() const { return id; }
-
- void set_name(const std::string& _name) { name = _name;}
- void set_id(const std::string& _id) { id = _id;}
- void clear_id() { id.clear(); }
+ RGWZoneGroup(const std::string &id, const std::string &name):RGWSystemMetaObj(id, name) {}
+ explicit RGWZoneGroup(const std::string &_name):RGWSystemMetaObj(_name) {}
+ RGWZoneGroup(const std::string &_name, bool _is_master, CephContext *cct, RGWSI_SysObj* sysobj_svc,
+ const std::string& _realm_id, const std::list<std::string>& _endpoints)
+ : RGWSystemMetaObj(_name, cct , sysobj_svc), endpoints(_endpoints), is_master(_is_master),
+ realm_id(_realm_id) {}
+ virtual ~RGWZoneGroup();
bool is_master_zonegroup() const { return is_master;}
+ void update_master(const DoutPrefixProvider *dpp, bool _is_master, optional_yield y) {
+ is_master = _is_master;
+ post_process_params(dpp, y);
+ }
+ void post_process_params(const DoutPrefixProvider *dpp, optional_yield y);
- void encode(bufferlist& bl) const {
+ void encode(bufferlist& bl) const override {
ENCODE_START(6, 1, bl);
encode(name, bl);
encode(api_name, bl);
encode(default_placement, bl);
encode(hostnames, bl);
encode(hostnames_s3website, bl);
- {
- // these used to be wrapped by RGWSystemMetaObj::encode(),
- // so the extra ENCODE_START/ENCODE_FINISH are preserved
- ENCODE_START(1, 1, bl);
- encode(id, bl);
- encode(name, bl);
- ENCODE_FINISH(bl);
- }
+ RGWSystemMetaObj::encode(bl);
encode(realm_id, bl);
encode(sync_policy, bl);
encode(enabled_features, bl);
ENCODE_FINISH(bl);
}
- void decode(bufferlist::const_iterator& bl) {
+ void decode(bufferlist::const_iterator& bl) override {
DECODE_START(6, bl);
decode(name, bl);
decode(api_name, bl);
decode(hostnames_s3website, bl);
}
if (struct_v >= 4) {
- {
- // these used to be wrapped by RGWSystemMetaObj::decode(),
- // so the extra DECODE_START/DECODE_FINISH are preserved
- DECODE_START(1, bl);
- decode(id, bl);
- decode(name, bl);
- DECODE_FINISH(bl);
- }
+ RGWSystemMetaObj::decode(bl);
decode(realm_id, bl);
} else {
id = name;
DECODE_FINISH(bl);
}
+ int read_default_id(const DoutPrefixProvider *dpp, std::string& default_id, optional_yield y, bool old_format = false) override;
+ int set_as_default(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = false) override;
+ int create_default(const DoutPrefixProvider *dpp, optional_yield y, bool old_format = false);
int equals(const std::string& other_zonegroup) const;
- rgw_pool get_pool(CephContext *cct) const;
+ int add_zone(const DoutPrefixProvider *dpp,
+ const RGWZoneParams& zone_params, bool *is_master, bool *read_only,
+ const std::list<std::string>& endpoints, const std::string *ptier_type,
+ bool *psync_from_all, std::list<std::string>& sync_from,
+ std::list<std::string>& sync_from_rm, std::string *predirect_zone,
+ std::optional<int> bucket_index_max_shards, RGWSyncModulesManager *sync_mgr,
+ const rgw::zone_features::set& enable_features,
+ const rgw::zone_features::set& disable_features,
+ optional_yield y);
+ int remove_zone(const DoutPrefixProvider *dpp, const std::string& zone_id, optional_yield y);
+ int rename_zone(const DoutPrefixProvider *dpp, const RGWZoneParams& zone_params, optional_yield y);
+ rgw_pool get_pool(CephContext *cct) const override;
+ const std::string get_default_oid(bool old_region_format = false) const override;
+ const std::string& get_info_oid_prefix(bool old_region_format = false) const override;
+ const std::string& get_names_oid_prefix() const override;
+ std::string get_predefined_id(CephContext *cct) const override;
+ const std::string& get_predefined_name(CephContext *cct) const override;
void dump(Formatter *f) const;
void decode_json(JSONObj *obj);
void dump(Formatter *f) const;
void decode_json(JSONObj *obj);
+
+ // the period config must be stored in a local object outside of the period,
+ // so that it can be used in a default configuration where no realm/period
+ // exists
+ int read(const DoutPrefixProvider *dpp, RGWSI_SysObj *sysobj_svc, const std::string& realm_id, optional_yield y);
+ int write(const DoutPrefixProvider *dpp, RGWSI_SysObj *sysobj_svc, const std::string& realm_id, optional_yield y);
+
+ static std::string get_oid(const std::string& realm_id);
+ static rgw_pool get_pool(CephContext *cct);
};
WRITE_CLASS_ENCODER(RGWPeriodConfig)
class RGWRealm;
class RGWPeriod;
-class RGWRealm
+class RGWRealm : public RGWSystemMetaObj
{
public:
- std::string id;
- std::string name;
-
std::string current_period;
epoch_t epoch{0}; //< realm epoch, incremented for each new period
+ int create_control(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y);
+ int delete_control(const DoutPrefixProvider *dpp, optional_yield y);
public:
RGWRealm() {}
- RGWRealm(const std::string& _id, const std::string& _name = "") : id(_id), name(_name) {}
+ RGWRealm(const std::string& _id, const std::string& _name = "") : RGWSystemMetaObj(_id, _name) {}
+ RGWRealm(CephContext *_cct, RGWSI_SysObj *_sysobj_svc): RGWSystemMetaObj(_cct, _sysobj_svc) {}
+ RGWRealm(const std::string& _name, CephContext *_cct, RGWSI_SysObj *_sysobj_svc): RGWSystemMetaObj(_name, _cct, _sysobj_svc){}
+ virtual ~RGWRealm() override;
- const std::string& get_name() const { return name; }
- const std::string& get_id() const { return id; }
-
- void set_name(const std::string& _name) { name = _name;}
- void set_id(const std::string& _id) { id = _id;}
- void clear_id() { id.clear(); }
-
- void encode(bufferlist& bl) const {
+ void encode(bufferlist& bl) const override {
ENCODE_START(1, 1, bl);
- {
- // these used to be wrapped by RGWSystemMetaObj::encode(),
- // so the extra ENCODE_START/ENCODE_FINISH are preserved
- ENCODE_START(1, 1, bl);
- encode(id, bl);
- encode(name, bl);
- ENCODE_FINISH(bl);
- }
+ RGWSystemMetaObj::encode(bl);
encode(current_period, bl);
encode(epoch, bl);
ENCODE_FINISH(bl);
}
- void decode(bufferlist::const_iterator& bl) {
+ void decode(bufferlist::const_iterator& bl) override {
DECODE_START(1, bl);
- {
- // these used to be wrapped by RGWSystemMetaObj::decode(),
- // so the extra DECODE_START/DECODE_FINISH are preserved
- DECODE_START(1, bl);
- decode(id, bl);
- decode(name, bl);
- DECODE_FINISH(bl);
- }
+ RGWSystemMetaObj::decode(bl);
decode(current_period, bl);
decode(epoch, bl);
DECODE_FINISH(bl);
}
- // TODO: use ConfigStore for watch/notify,
- // After refactoring RGWRealmWatcher and RGWRealmReloader, get_pool and get_info_oid_prefix will be removed.
- rgw_pool get_pool(CephContext *cct) const;
- const std::string& get_info_oid_prefix(bool old_format = false) const;
+ int create(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = true) override;
+ int delete_obj(const DoutPrefixProvider *dpp, optional_yield y);
+ rgw_pool get_pool(CephContext *cct) const override;
+ const std::string get_default_oid(bool old_format = false) const override;
+ const std::string& get_names_oid_prefix() const override;
+ const std::string& get_info_oid_prefix(bool old_format = false) const override;
+ std::string get_predefined_id(CephContext *cct) const override;
+ const std::string& get_predefined_name(CephContext *cct) const override;
+
+ using RGWSystemMetaObj::read_id; // expose as public for radosgw-admin
void dump(Formatter *f) const;
void decode_json(JSONObj *obj);
const std::string& get_current_period() const {
return current_period;
}
+ int set_current_period(const DoutPrefixProvider *dpp, RGWPeriod& period, optional_yield y);
void clear_current_period_and_epoch() {
current_period.clear();
epoch = 0;
epoch_t get_epoch() const { return epoch; }
std::string get_control_oid() const;
+ /// send a notify on the realm control object
+ int notify_zone(const DoutPrefixProvider *dpp, bufferlist& bl, optional_yield y);
+ /// notify the zone of a new period
+ int notify_new_period(const DoutPrefixProvider *dpp, const RGWPeriod& period, optional_yield y);
int find_zone(const DoutPrefixProvider *dpp,
const rgw_zone_id& zid,
RGWPeriod *pperiod,
RGWZoneGroup *pzonegroup,
bool *pfound,
- rgw::sal::ConfigStore* cfgstore,
optional_yield y) const;
};
WRITE_CLASS_ENCODER(RGWRealm)
std::string realm_id;
epoch_t realm_epoch{1}; //< realm epoch when period was made current
+ CephContext *cct{nullptr};
+ RGWSI_SysObj *sysobj_svc{nullptr};
+
+ int read_info(const DoutPrefixProvider *dpp, optional_yield y);
+ int read_latest_epoch(const DoutPrefixProvider *dpp,
+ RGWPeriodLatestEpochInfo& epoch_info,
+ optional_yield y,
+ RGWObjVersionTracker *objv = nullptr);
+ int use_latest_epoch(const DoutPrefixProvider *dpp, optional_yield y);
+ int use_current_period();
+
+ const std::string get_period_oid() const;
+ const std::string get_period_oid_prefix() const;
+
// gather the metadata sync status for each shard; only for use on master zone
int update_sync_status(const DoutPrefixProvider *dpp,
rgw::sal::Driver* driver,
const RGWPeriodConfig& get_config() const { return period_config; }
const std::vector<std::string>& get_sync_status() const { return sync_status; }
rgw_pool get_pool(CephContext *cct) const;
+ const std::string& get_latest_epoch_oid() const;
const std::string& get_info_oid_prefix() const;
void set_user_quota(RGWQuotaInfo& user_quota) {
realm_id = _realm_id;
}
+ int reflect(const DoutPrefixProvider *dpp, optional_yield y);
+
int get_zonegroup(RGWZoneGroup& zonegroup,
const std::string& zonegroup_id) const;
RGWZoneGroup *pzonegroup,
optional_yield y) const;
+ int get_latest_epoch(const DoutPrefixProvider *dpp, epoch_t& epoch, optional_yield y);
+ int set_latest_epoch(const DoutPrefixProvider *dpp, optional_yield y,
+ epoch_t epoch, bool exclusive = false,
+ RGWObjVersionTracker *objv = nullptr);
+ // update latest_epoch if the given epoch is higher, else return -EEXIST
+ int update_latest_epoch(const DoutPrefixProvider *dpp, epoch_t epoch, optional_yield y);
+
+ int init(const DoutPrefixProvider *dpp, CephContext *_cct, RGWSI_SysObj *_sysobj_svc, const std::string &period_realm_id, optional_yield y,
+ bool setup_obj = true);
+ int init(const DoutPrefixProvider *dpp, CephContext *_cct, RGWSI_SysObj *_sysobj_svc, optional_yield y, bool setup_obj = true);
+
+ int create(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = true);
+ int delete_obj(const DoutPrefixProvider *dpp, optional_yield y);
+ int store_info(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y);
+
+ void fork();
+ int update(const DoutPrefixProvider *dpp, optional_yield y);
+
+ // commit a staging period; only for use on master zone
+ int commit(const DoutPrefixProvider *dpp,
+ rgw::sal::Driver* driver,
+ RGWRealm& realm, const RGWPeriod ¤t_period,
+ std::ostream& error_stream, optional_yield y,
+ bool force_if_stale = false);
+
void encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(id, bl);
/// Test whether all zonegroups in the realm support the given zone feature.
bool all_zonegroups_support(const SiteConfig& site, std::string_view feature);
-std::string gen_random_uuid();
-
} // namespace rgw
if (raw_storage_op) {
site = rgw::SiteConfig::make_fake();
driver = DriverManager::get_raw_storage(dpp(), g_ceph_context,
- cfg, context_pool, *site, cfgstore.get());
+ cfg, context_pool, *site);
} else {
site = std::make_unique<rgw::SiteConfig>();
auto r = site->load(dpp(), null_yield, cfgstore.get(), localzonegroup_op);
false,
false, // No background tasks!
null_yield,
- cfgstore.get(),
need_cache && g_conf()->rgw_cache_enabled,
need_gc);
}
if (opt_cmd == OPT::MDLOG_AUTOTRIM) {
// need a full history for purging old mdlog periods
- static_cast<rgw::sal::RadosStore*>(driver)->svc()->mdlog->init_oldest_log_period(null_yield, dpp(), cfgstore.get());
+ static_cast<rgw::sal::RadosStore*>(driver)->svc()->mdlog->init_oldest_log_period(null_yield, dpp());
RGWCoroutinesManager crs(driver->ctx(), driver->get_cr_registry());
RGWHTTPManager http(driver->ctx(), crs.get_completion_mgr());
return -ret;
}
- ret = sync.run(dpp(), null_yield, cfgstore.get());
+ ret = sync.run(dpp(), null_yield);
if (ret < 0) {
cerr << "ERROR: sync.run() returned ret=" << ret << std::endl;
return -ret;
return -ret;
}
- ret = sync.run(dpp(), cfgstore.get());
+ ret = sync.run(dpp());
if (ret < 0) {
cerr << "ERROR: sync.run() returned ret=" << ret << std::endl;
return -ret;
run_quota,
run_sync,
g_conf().get_val<bool>("rgw_dynamic_resharding"),
- true, true, null_yield, env.cfgstore, // run notification thread
+ true, true, null_yield, // run notification thread
g_conf()->rgw_cache_enabled);
if (!env.driver) {
return -EIO;
if (env.driver->get_name() == "rados") {
// add a watcher to respond to realm configuration changes
- pusher = std::make_unique<RGWPeriodPusher>(dpp, env.driver, env.cfgstore, null_yield);
+ pusher = std::make_unique<RGWPeriodPusher>(dpp, env.driver, null_yield);
fe_pauser = std::make_unique<RGWFrontendPauser>(fes, pusher.get());
rgw_pauser = std::make_unique<RGWPauser>();
rgw_pauser->add_pauser(fe_pauser.get());
exit(1);
}
- driver = DriverManager::get_storage(&dp, g_ceph_context, cfg, context_pool, site, false, false, false, false, false, false, true, null_yield, cfgstore.get());
+ driver = DriverManager::get_storage(&dp, g_ceph_context, cfg, context_pool, site, false, false, false, false, false, false, true, null_yield);
if (!driver) {
std::cerr << "couldn't init storage provider" << std::endl;
return EIO;
// vim: ts=8 sw=2 smarttab ft=cpp
#include "rgw_sync.h"
-#include "rgw_sal.h"
-#include "rgw_sal_config.h"
using namespace std;
using namespace rgw_zone_defaults;
#define FIRST_EPOCH 1
+// Initialize a period from storage. When no period id is set, resolve it
+// from the realm's current period; when no epoch is set, resolve the latest
+// epoch. Finally load the period object itself.
+int RGWPeriod::init(const DoutPrefixProvider *dpp,
+                    CephContext *_cct, RGWSI_SysObj *_sysobj_svc,
+                    optional_yield y, bool setup_obj)
+{
+  cct = _cct;
+  sysobj_svc = _sysobj_svc;
+
+  // caller only wanted the service handles wired up
+  if (!setup_obj)
+    return 0;
+
+  if (id.empty()) {
+    // derive the period id from the realm's current period
+    RGWRealm realm(realm_id);
+    int ret = realm.init(dpp, cct, sysobj_svc, y);
+    if (ret < 0) {
+      ldpp_dout(dpp, 4) << "RGWPeriod::init failed to init realm id " << realm_id << " : " <<
+        cpp_strerror(-ret) << dendl;
+      return ret;
+    }
+    id = realm.get_current_period();
+    realm_id = realm.get_id();
+  }
+
+  if (!epoch) {
+    int ret = use_latest_epoch(dpp, y);
+    if (ret < 0) {
+      ldpp_dout(dpp, 0) << "failed to use_latest_epoch period id " << id << " realm id " << realm_id
+          << " : " << cpp_strerror(-ret) << dendl;
+      return ret;
+    }
+  }
+
+  return read_info(dpp, y);
+}
+
+// Convenience overload: pin the period to a specific realm id before the
+// normal init. The delegated init() re-assigns cct/sysobj_svc, which is
+// harmless.
+int RGWPeriod::init(const DoutPrefixProvider *dpp, CephContext *_cct, RGWSI_SysObj *_sysobj_svc,
+                    const string& period_realm_id, optional_yield y, bool setup_obj)
+{
+  cct = _cct;
+  sysobj_svc = _sysobj_svc;
+
+  realm_id = period_realm_id;
+
+  if (!setup_obj)
+    return 0;
+
+  return init(dpp, _cct, _sysobj_svc, y, setup_obj);
+}
+
+// Suffix for the per-period "latest epoch" object; a conf override takes
+// precedence over the compiled-in default.
+const string& RGWPeriod::get_latest_epoch_oid() const
+{
+  if (cct->_conf->rgw_period_latest_epoch_info_oid.empty()) {
+    return period_latest_epoch_info_oid;
+  }
+  return cct->_conf->rgw_period_latest_epoch_info_oid;
+}
+
const string& RGWPeriod::get_info_oid_prefix() const
{
return period_info_oid_prefix;
}
+// Object-name prefix shared by all epochs of this period: <info prefix><id>.
+const string RGWPeriod::get_period_oid_prefix() const
+{
+  return get_info_oid_prefix() + id;
+}
+
+// Full object name for this period instance. The staging period has a
+// single mutable object, so its oid carries no epoch suffix.
+const string RGWPeriod::get_period_oid() const
+{
+  std::ostringstream oss;
+  oss << get_period_oid_prefix();
+  // skip the epoch for the staging period
+  if (id != get_staging_id(realm_id))
+    oss << "." << epoch;
+  return oss.str();
+}
+
bool RGWPeriod::find_zone(const DoutPrefixProvider *dpp,
const rgw_zone_id& zid,
RGWZoneGroup *pzonegroup,
return rgw_pool(cct->_conf->rgw_period_root_pool);
}
+// Write the period's latest_epoch object. 'exclusive' requests an atomic
+// create (used for the initial write); 'objv' carries the version read by
+// read_latest_epoch() so a conflicting concurrent writer surfaces as
+// -ECANCELED, which update_latest_epoch() uses to retry.
+int RGWPeriod::set_latest_epoch(const DoutPrefixProvider *dpp,
+                                optional_yield y,
+                                epoch_t epoch, bool exclusive,
+                                RGWObjVersionTracker *objv)
+{
+  string oid = get_period_oid_prefix() + get_latest_epoch_oid();
+
+  rgw_pool pool(get_pool(cct));
+  bufferlist bl;
+
+  RGWPeriodLatestEpochInfo info;
+  info.epoch = epoch;
+
+  using ceph::encode;
+  encode(info, bl);
+
+  auto sysobj = sysobj_svc->get_obj(rgw_raw_obj(pool, oid));
+  // fix: 'objv' was accepted but never applied to the write op, so the
+  // read-modify-write race detection in update_latest_epoch() could not work
+  return sysobj.wop()
+               .set_exclusive(exclusive)
+               .set_objv_tracker(objv)
+               .write(dpp, bl, y);
+}
+
+// Load and decode this period's object from the period pool into *this.
+// Returns the read error, or -EIO on a decode failure.
+int RGWPeriod::read_info(const DoutPrefixProvider *dpp, optional_yield y)
+{
+  rgw_pool pool(get_pool(cct));
+
+  bufferlist bl;
+
+  auto sysobj = sysobj_svc->get_obj(rgw_raw_obj{pool, get_period_oid()});
+  int ret = sysobj.rop().read(dpp, &bl, y);
+  if (ret < 0) {
+    ldpp_dout(dpp, 0) << "failed reading obj info from " << pool << ":" << get_period_oid() << ": " << cpp_strerror(-ret) << dendl;
+    return ret;
+  }
+
+  try {
+    using ceph::decode;
+    auto iter = bl.cbegin();
+    decode(*this, iter);
+  } catch (buffer::error& err) {
+    // corrupt or incompatible on-disk encoding
+    ldpp_dout(dpp, 0) << "ERROR: failed to decode obj from " << pool << ":" << get_period_oid() << dendl;
+    return -EIO;
+  }
+
+  return 0;
+}
+
+// Encode and write this period object; 'exclusive' makes it a create-only
+// write that fails with -EEXIST if the object already exists.
+int RGWPeriod::store_info(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y)
+{
+  rgw_pool pool(get_pool(cct));
+
+  string oid = get_period_oid();
+  bufferlist bl;
+  using ceph::encode;
+  encode(*this, bl);
+
+  auto sysobj = sysobj_svc->get_obj(rgw_raw_obj(pool, oid));
+  return sysobj.wop()
+               .set_exclusive(exclusive)
+               .write(dpp, bl, y);
+}
+
+// Create a brand-new period: assign a random uuid id, start at FIRST_EPOCH,
+// store the period object, then publish its latest-epoch marker.
+int RGWPeriod::create(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive)
+{
+  int ret;
+
+  /* create unique id */
+  uuid_d new_uuid;
+  char uuid_str[37]; // 36-char uuid + NUL
+  new_uuid.generate_random();
+  new_uuid.print(uuid_str);
+  id = uuid_str;
+
+  epoch = FIRST_EPOCH;
+
+  period_map.id = id;
+
+  ret = store_info(dpp, exclusive, y);
+  if (ret < 0) {
+    ldpp_dout(dpp, 0) << "ERROR: storing info for " << id << ": " << cpp_strerror(-ret) << dendl;
+    return ret;
+  }
+
+  // failure here is logged and propagated, but the period object itself
+  // has already been stored
+  ret = set_latest_epoch(dpp, y, epoch);
+  if (ret < 0) {
+    ldpp_dout(dpp, 0) << "ERROR: setting latest epoch " << id << ": " << cpp_strerror(-ret) << dendl;
+  }
+
+  return ret;
+}
+
+// Push the period's contents back out to the realm's local objects: write
+// each zonegroup (promoting the master zonegroup to default if none is set)
+// and store the period config alongside.
+int RGWPeriod::reflect(const DoutPrefixProvider *dpp, optional_yield y)
+{
+  for (auto& iter : period_map.zonegroups) {
+    RGWZoneGroup& zg = iter.second;
+    // zonegroups decoded from the period carry no service handles
+    zg.reinit_instance(cct, sysobj_svc);
+    int r = zg.write(dpp, false, y);
+    if (r < 0) {
+      ldpp_dout(dpp, 0) << "ERROR: failed to store zonegroup info for zonegroup=" << iter.first << ": " << cpp_strerror(-r) << dendl;
+      return r;
+    }
+    if (zg.is_master_zonegroup()) {
+      // set master as default if no default exists (exclusive create, so
+      // -EEXIST is silently tolerated)
+      r = zg.set_as_default(dpp, y, true);
+      if (r == 0) {
+        ldpp_dout(dpp, 1) << "Set the period's master zonegroup " << zg.get_id()
+                          << " as the default" << dendl;
+      }
+    }
+  }
+
+  int r = period_config.write(dpp, sysobj_svc, realm_id, y);
+  if (r < 0) {
+    ldpp_dout(dpp, 0) << "ERROR: failed to store period config: "
+                      << cpp_strerror(-r) << dendl;
+    return r;
+  }
+  return 0;
+}
+
void RGWPeriod::dump(Formatter *f) const
{
encode_json("id", id, f);
JSONDecoder::decode_json("realm_epoch", realm_epoch, obj);
}
+int RGWPeriod::update_latest_epoch(const DoutPrefixProvider *dpp, epoch_t epoch, optional_yield y)
+{
+ static constexpr int MAX_RETRIES = 20;
+
+ for (int i = 0; i < MAX_RETRIES; i++) {
+ RGWPeriodLatestEpochInfo info;
+ RGWObjVersionTracker objv;
+ bool exclusive = false;
+
+ // read existing epoch
+ int r = read_latest_epoch(dpp, info, y, &objv);
+ if (r == -ENOENT) {
+ // use an exclusive create to set the epoch atomically
+ exclusive = true;
+ ldpp_dout(dpp, 20) << "creating initial latest_epoch=" << epoch
+ << " for period=" << id << dendl;
+ } else if (r < 0) {
+ ldpp_dout(dpp, 0) << "ERROR: failed to read latest_epoch" << dendl;
+ return r;
+ } else if (epoch <= info.epoch) {
+ r = -EEXIST; // fail with EEXIST if epoch is not newer
+ ldpp_dout(dpp, 10) << "found existing latest_epoch " << info.epoch
+ << " >= given epoch " << epoch << ", returning r=" << r << dendl;
+ return r;
+ } else {
+ ldpp_dout(dpp, 20) << "updating latest_epoch from " << info.epoch
+ << " -> " << epoch << " on period=" << id << dendl;
+ }
+
+ r = set_latest_epoch(dpp, y, epoch, exclusive, &objv);
+ if (r == -EEXIST) {
+ continue; // exclusive create raced with another update, retry
+ } else if (r == -ECANCELED) {
+ continue; // write raced with a conflicting version, retry
+ }
+ if (r < 0) {
+ ldpp_dout(dpp, 0) << "ERROR: failed to write latest_epoch" << dendl;
+ return r;
+ }
+ return 0; // return success
+ }
+
+ return -ECANCELED; // fail after max retries
+}
+
+int RGWPeriod::read_latest_epoch(const DoutPrefixProvider *dpp,
+ RGWPeriodLatestEpochInfo& info,
+ optional_yield y,
+ RGWObjVersionTracker *objv)
+{
+ string oid = get_period_oid_prefix() + get_latest_epoch_oid();
+
+ rgw_pool pool(get_pool(cct));
+ bufferlist bl;
+ auto sysobj = sysobj_svc->get_obj(rgw_raw_obj{pool, oid});
+ int ret = sysobj.rop().read(dpp, &bl, y);
+ if (ret < 0) {
+ ldpp_dout(dpp, 1) << "error read_lastest_epoch " << pool << ":" << oid << dendl;
+ return ret;
+ }
+ try {
+ auto iter = bl.cbegin();
+ using ceph::decode;
+ decode(info, iter);
+ } catch (buffer::error& err) {
+ ldpp_dout(dpp, 0) << "error decoding data from " << pool << ":" << oid << dendl;
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int RGWPeriod::use_latest_epoch(const DoutPrefixProvider *dpp, optional_yield y)
+{
+ RGWPeriodLatestEpochInfo info;
+ int ret = read_latest_epoch(dpp, info, y);
+ if (ret < 0) {
+ return ret;
+ }
+
+ epoch = info.epoch;
+
+ return 0;
+}
+
~Impl();
Cursor get_current() const { return current_cursor; }
- Cursor attach(const DoutPrefixProvider *dpp, RGWPeriod&& period, optional_yield y, rgw::sal::ConfigStore* cfgstore);
+ Cursor attach(const DoutPrefixProvider *dpp, RGWPeriod&& period, optional_yield y);
Cursor insert(RGWPeriod&& period);
Cursor lookup(epoch_t realm_epoch);
histories.clear_and_dispose(std::default_delete<History>{});
}
-Cursor RGWPeriodHistory::Impl::attach(const DoutPrefixProvider *dpp, RGWPeriod&& period, optional_yield y,
- rgw::sal::ConfigStore* cfgstore)
+Cursor RGWPeriodHistory::Impl::attach(const DoutPrefixProvider *dpp, RGWPeriod&& period, optional_yield y)
{
if (current_history == histories.end()) {
return Cursor{-EINVAL};
}
// pull the period outside of the lock
- int r = puller->pull(dpp, predecessor_id, period, y, cfgstore);
+ int r = puller->pull(dpp, predecessor_id, period, y);
if (r < 0) {
return Cursor{r};
}
{
return impl->get_current();
}
-Cursor RGWPeriodHistory::attach(const DoutPrefixProvider *dpp, RGWPeriod&& period, optional_yield y,
- rgw::sal::ConfigStore* cfgstore)
+Cursor RGWPeriodHistory::attach(const DoutPrefixProvider *dpp, RGWPeriod&& period, optional_yield y)
{
- return impl->attach(dpp, std::move(period), y, cfgstore);
+ return impl->attach(dpp, std::move(period), y);
}
Cursor RGWPeriodHistory::insert(RGWPeriod&& period)
{
namespace bi = boost::intrusive;
class RGWPeriod;
-namespace rgw::sal { class ConfigStore; }
/**
* RGWPeriodHistory tracks the relative history of all inserted periods,
virtual ~Puller() = default;
virtual int pull(const DoutPrefixProvider *dpp, const std::string& period_id, RGWPeriod& period,
- optional_yield y, rgw::sal::ConfigStore* cfgstore) = 0;
+ optional_yield y) = 0;
};
RGWPeriodHistory(CephContext* cct, Puller* puller,
/// current_period and the given period, reading predecessor periods or
/// fetching them from the master as necessary. returns a cursor at the
/// given period that can be used to traverse the current_history
- Cursor attach(const DoutPrefixProvider *dpp, RGWPeriod&& period, optional_yield y, rgw::sal::ConfigStore* cfgstore);
+ Cursor attach(const DoutPrefixProvider *dpp, RGWPeriod&& period, optional_yield y);
/// insert the given period into an existing history, or create a new
/// unconnected history. similar to attach(), but it doesn't try to fetch
#include "rgw_http_errors.h"
#include "common/ceph_json.h"
#include "common/errno.h"
-#include "rgw_sal_config.h"
#include "services/svc_zone.h"
-#define FIRST_EPOCH 1
#define dout_subsys ceph_subsys_rgw
#undef dout_prefix
} // anonymous namespace
int RGWPeriodPuller::pull(const DoutPrefixProvider *dpp, const std::string& period_id, RGWPeriod& period,
- optional_yield y, rgw::sal::ConfigStore* cfgstore)
+ optional_yield y)
{
// try to read the period from rados
period.set_id(period_id);
- int r = cfgstore->read_period(dpp, y, period_id, std::nullopt, period);
+ period.set_epoch(0);
+ int r = period.init(dpp, cct, svc.sysobj, y);
if (r < 0) {
if (svc.zone->is_meta_master()) {
// can't pull if we're the master
return r;
}
// write the period to rados
- period.period_map.id = period.id = rgw::gen_random_uuid();
- period.epoch = FIRST_EPOCH;
- constexpr bool exclusive = true;
- r = cfgstore->create_period(dpp, y, exclusive, period);
+ r = period.store_info(dpp, true, y);
if (r == -EEXIST) {
r = 0;
} else if (r < 0) {
return r;
}
// update latest epoch
- r = cfgstore->update_latest_epoch(dpp, y, period.get_id(), period.get_epoch());
+ r = period.update_latest_epoch(dpp, period.get_epoch(), y);
if (r == -EEXIST) {
// already have this epoch (or a more recent one)
return 0;
}
// reflect period objects if this is the latest version
if (svc.zone->get_realm().get_current_period() == period_id) {
- r = rgw::reflect_period(dpp, y, cfgstore, period);
+ r = period.reflect(dpp, y);
if (r < 0) {
return r;
}
public:
explicit RGWPeriodPuller(RGWSI_Zone *zone_svc, RGWSI_SysObj *sysobj_svc);
- int pull(const DoutPrefixProvider *dpp, const std::string& period_id, RGWPeriod& period, optional_yield y, rgw::sal::ConfigStore* cfgstore) override;
+ int pull(const DoutPrefixProvider *dpp, const std::string& period_id, RGWPeriod& period, optional_yield y) override;
};
#include "rgw_cr_rest.h"
#include "rgw_zone.h"
#include "rgw_sal.h"
-#include "rgw_sal_config.h"
#include "rgw_sal_rados.h"
#include "services/svc_zone.h"
RGWPeriodPusher::RGWPeriodPusher(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver,
- rgw::sal::ConfigStore* cfgstore, optional_yield y)
+ optional_yield y)
: cct(driver->ctx()), driver(driver)
{
rgw::sal::Zone* zone = driver->get_zone();
// always send out the current period on startup
RGWPeriod period;
- auto r = cfgstore->read_period(dpp, y, zone->get_current_period_id(), std::nullopt, period);
+ // XXX dang
+ int r = period.init(dpp, cct, static_cast<rgw::sal::RadosStore* >(driver)->svc()->sysobj, realm_id, y);
if (r < 0) {
ldpp_dout(dpp, -1) << "failed to load period for realm " << realm_id << dendl;
return;
class RGWPeriodPusher final : public RGWRealmWatcher::Watcher,
public RGWRealmReloader::Pauser {
public:
- explicit RGWPeriodPusher(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, rgw::sal::ConfigStore* cfgsore,
- optional_yield y);
+ explicit RGWPeriodPusher(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, optional_yield y);
~RGWPeriodPusher() override;
/// respond to realm notifications by pushing new periods to other zones
using namespace std;
using namespace rgw_zone_defaults;
+RGWRealm::~RGWRealm() {}
+
RGWRemoteMetaLog::~RGWRemoteMetaLog()
{
delete error_logger;
}
+string RGWRealm::get_predefined_id(CephContext *cct) const {
+ return cct->_conf.get_val<string>("rgw_realm_id");
+}
+
+const string& RGWRealm::get_predefined_name(CephContext *cct) const {
+ return cct->_conf->rgw_realm;
+}
+
+int RGWRealm::create(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive)
+{
+ int ret = RGWSystemMetaObj::create(dpp, y, exclusive);
+ if (ret < 0) {
+ ldpp_dout(dpp, 0) << "ERROR creating new realm object " << name << ": " << cpp_strerror(-ret) << dendl;
+ return ret;
+ }
+ // create the control object for watch/notify
+ ret = create_control(dpp, exclusive, y);
+ if (ret < 0) {
+ ldpp_dout(dpp, 0) << "ERROR creating control for new realm " << name << ": " << cpp_strerror(-ret) << dendl;
+ return ret;
+ }
+ RGWPeriod period;
+ if (current_period.empty()) {
+ /* create new period for the realm */
+ ret = period.init(dpp, cct, sysobj_svc, id, y, false);
+ if (ret < 0 ) {
+ return ret;
+ }
+ ret = period.create(dpp, y, true);
+ if (ret < 0) {
+ ldpp_dout(dpp, 0) << "ERROR: creating new period for realm " << name << ": " << cpp_strerror(-ret) << dendl;
+ return ret;
+ }
+ } else {
+ period = RGWPeriod(current_period, 0);
+ int ret = period.init(dpp, cct, sysobj_svc, id, y);
+ if (ret < 0) {
+ ldpp_dout(dpp, 0) << "ERROR: failed to init period " << current_period << dendl;
+ return ret;
+ }
+ }
+ ret = set_current_period(dpp, period, y);
+ if (ret < 0) {
+ ldpp_dout(dpp, 0) << "ERROR: failed set current period " << current_period << dendl;
+ return ret;
+ }
+ // try to set as default. may race with another create, so pass exclusive=true
+ // so we don't override an existing default
+ ret = set_as_default(dpp, y, true);
+ if (ret < 0 && ret != -EEXIST) {
+ ldpp_dout(dpp, 0) << "WARNING: failed to set realm as default realm, ret=" << ret << dendl;
+ }
+
+ return 0;
+}
+
+int RGWRealm::delete_obj(const DoutPrefixProvider *dpp, optional_yield y)
+{
+ int ret = RGWSystemMetaObj::delete_obj(dpp, y);
+ if (ret < 0) {
+ return ret;
+ }
+ return delete_control(dpp, y);
+}
+
+int RGWRealm::create_control(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y)
+{
+ auto pool = rgw_pool{get_pool(cct)};
+ auto oid = get_control_oid();
+ bufferlist bl;
+ auto sysobj = sysobj_svc->get_obj(rgw_raw_obj{pool, oid});
+ return sysobj.wop()
+ .set_exclusive(exclusive)
+ .write(dpp, bl, y);
+}
+
+int RGWRealm::delete_control(const DoutPrefixProvider *dpp, optional_yield y)
+{
+ auto pool = rgw_pool{get_pool(cct)};
+ auto obj = rgw_raw_obj{pool, get_control_oid()};
+ auto sysobj = sysobj_svc->get_obj(obj);
+ return sysobj.wop().remove(dpp, y);
+}
+
rgw_pool RGWRealm::get_pool(CephContext *cct) const
{
if (cct->_conf->rgw_realm_root_pool.empty()) {
return rgw_pool(cct->_conf->rgw_realm_root_pool);
}
+const string RGWRealm::get_default_oid(bool old_format) const
+{
+ if (cct->_conf->rgw_default_realm_info_oid.empty()) {
+ return default_realm_info_oid;
+ }
+ return cct->_conf->rgw_default_realm_info_oid;
+}
+
+const string& RGWRealm::get_names_oid_prefix() const
+{
+ return realm_names_oid_prefix;
+}
+
const string& RGWRealm::get_info_oid_prefix(bool old_format) const
{
return realm_info_oid_prefix;
}
+int RGWRealm::set_current_period(const DoutPrefixProvider *dpp, RGWPeriod& period, optional_yield y)
+{
+ // update realm epoch to match the period's
+ if (epoch > period.get_realm_epoch()) {
+ ldpp_dout(dpp, 0) << "ERROR: set_current_period with old realm epoch "
+ << period.get_realm_epoch() << ", current epoch=" << epoch << dendl;
+ return -EINVAL;
+ }
+ if (epoch == period.get_realm_epoch() && current_period != period.get_id()) {
+ ldpp_dout(dpp, 0) << "ERROR: set_current_period with same realm epoch "
+ << period.get_realm_epoch() << ", but different period id "
+ << period.get_id() << " != " << current_period << dendl;
+ return -EINVAL;
+ }
+
+ epoch = period.get_realm_epoch();
+ current_period = period.get_id();
+
+ int ret = update(dpp, y);
+ if (ret < 0) {
+ ldpp_dout(dpp, 0) << "ERROR: period update: " << cpp_strerror(-ret) << dendl;
+ return ret;
+ }
+
+ ret = period.reflect(dpp, y);
+ if (ret < 0) {
+ ldpp_dout(dpp, 0) << "ERROR: period.reflect(): " << cpp_strerror(-ret) << dendl;
+ return ret;
+ }
+
+ return 0;
+}
+
string RGWRealm::get_control_oid() const
{
return get_info_oid_prefix() + id + ".control";
}
+int RGWRealm::notify_zone(const DoutPrefixProvider *dpp, bufferlist& bl, optional_yield y)
+{
+ rgw_pool pool{get_pool(cct)};
+ auto sysobj = sysobj_svc->get_obj(rgw_raw_obj{pool, get_control_oid()});
+ int ret = sysobj.wn().notify(dpp, bl, 0, nullptr, y);
+ if (ret < 0) {
+ return ret;
+ }
+ return 0;
+}
+
+int RGWRealm::notify_new_period(const DoutPrefixProvider *dpp, const RGWPeriod& period, optional_yield y)
+{
+ bufferlist bl;
+ using ceph::encode;
+ // push the period to dependent zonegroups/zones
+ encode(RGWRealmNotify::ZonesNeedPeriod, bl);
+ encode(period, bl);
+ // reload the gateway with the new period
+ encode(RGWRealmNotify::Reload, bl);
+
+ return notify_zone(dpp, bl, y);
+}
+
int RGWRealm::find_zone(const DoutPrefixProvider *dpp,
const rgw_zone_id& zid,
RGWPeriod *pperiod,
RGWZoneGroup *pzonegroup,
bool *pfound,
- rgw::sal::ConfigStore* cfgstore,
optional_yield y) const
{
auto& found = *pfound;
found = false;
- RGWPeriod period;
- int r = cfgstore->read_period(dpp, y, current_period, std::nullopt, period);
+ string period_id;
+ epoch_t epoch = 0;
+
+ RGWPeriod period(period_id, epoch);
+ int r = period.init(dpp, cct, sysobj_svc, get_id(), y);
if (r < 0) {
ldpp_dout(dpp, 0) << "WARNING: period init failed: " << cpp_strerror(-r) << " ... skipping" << dendl;
return r;
void RGWRealm::dump(Formatter *f) const
{
- encode_json("id", id , f);
- encode_json("name", name , f);
+ RGWSystemMetaObj::dump(f);
encode_json("current_period", current_period, f);
encode_json("epoch", epoch, f);
}
void RGWRealm::decode_json(JSONObj *obj)
{
- JSONDecoder::decode_json("id", id, obj);
- JSONDecoder::decode_json("name", name, obj);
+ RGWSystemMetaObj::decode_json(obj);
JSONDecoder::decode_json("current_period", current_period, obj);
JSONDecoder::decode_json("epoch", epoch, obj);
}
cct->_conf->rgw_enable_quota_threads,
cct->_conf->rgw_run_sync_thread,
cct->_conf.get_val<bool>("rgw_dynamic_resharding"),
- true, true, null_yield, env.cfgstore, // run notification thread
+ true, true, null_yield, // run notification thread
cct->_conf->rgw_cache_enabled);
}
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include "rgw_rest_ratelimit.h"
-#include "rgw_sal.h"
-#include "rgw_sal_config.h"
-#include "rgw_process_env.h"
-
class RGWOp_Ratelimit_Info : public RGWRESTOp {
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("ratelimit", RGW_CAP_READ);
if (global) {
std::string realm_id = driver->get_zone()->get_realm_id();
RGWPeriodConfig period_config;
- auto cfgstore = s->penv.cfgstore;
- op_ret = cfgstore->read_period_config(this, y, realm_id, period_config);
+ op_ret = period_config.read(this, static_cast<rgw::sal::RadosStore*>(driver)->svc()->sysobj, realm_id, y);
if (op_ret && op_ret != -ENOENT) {
ldpp_dout(this, 0) << "Error on period config read" << dendl;
return;
op_ret = bucket->merge_and_store_attrs(this, attr, y);
return;
}
-
- auto cfgstore = s->penv.cfgstore;
if (global) {
std::string realm_id = driver->get_zone()->get_realm_id();
RGWPeriodConfig period_config;
- op_ret = cfgstore->read_period_config(s, y, realm_id, period_config);
+ op_ret = period_config.read(s, static_cast<rgw::sal::RadosStore*>(driver)->svc()->sysobj, realm_id, y);
if (op_ret && op_ret != -ENOENT) {
ldpp_dout(this, 0) << "Error on period config read" << dendl;
return;
have_max_read_bytes, max_read_bytes, have_max_write_bytes, max_write_bytes,
have_enabled, enabled, ratelimit_configured, ratelimit_info);
period_config.bucket_ratelimit = ratelimit_info;
- op_ret = cfgstore->write_period_config(s, y, false, realm_id, period_config);
+ op_ret = period_config.write(s, static_cast<rgw::sal::RadosStore*>(driver)->svc()->sysobj, realm_id, y);
return;
}
if (ratelimit_scope == "anon") {
have_max_read_bytes, max_read_bytes, have_max_write_bytes, max_write_bytes,
have_enabled, enabled, ratelimit_configured, ratelimit_info);
period_config.anon_ratelimit = ratelimit_info;
- op_ret = cfgstore->write_period_config(s, y, false, realm_id, period_config);
+ op_ret = period_config.write(s, static_cast<rgw::sal::RadosStore*>(driver)->svc()->sysobj, realm_id, y);
return;
}
if (ratelimit_scope == "user") {
have_max_read_bytes, max_read_bytes, have_max_write_bytes, max_write_bytes,
have_enabled, enabled, ratelimit_configured, ratelimit_info);
period_config.user_ratelimit = ratelimit_info;
- op_ret = cfgstore->write_period_config(s, y, false, realm_id, period_config);
+ op_ret = period_config.write(s, static_cast<rgw::sal::RadosStore*>(driver)->svc()->sysobj, realm_id, y);
return;
}
}
bool quota_threads,
bool run_sync_thread,
bool run_reshard_thread,
- bool run_notification_thread,
+ bool run_notification_thread,
bool use_cache,
bool use_gc,
bool background_tasks,
- optional_yield y, rgw::sal::ConfigStore* cfgstore)
+ optional_yield y)
{
rgw::sal::Driver* driver{nullptr};
.set_run_sync_thread(run_sync_thread)
.set_run_reshard_thread(run_reshard_thread)
.set_run_notification_thread(run_notification_thread)
- .init_begin(cct, dpp, background_tasks, site_config, cfgstore) < 0) {
+ .init_begin(cct, dpp, background_tasks, site_config) < 0) {
delete driver;
return nullptr;
}
delete driver;
return nullptr;
}
- if (rados->init_complete(dpp, y, cfgstore) < 0) {
+ if (rados->init_complete(dpp, y) < 0) {
delete driver;
return nullptr;
}
.set_run_sync_thread(run_sync_thread)
.set_run_reshard_thread(run_reshard_thread)
.set_run_notification_thread(run_notification_thread)
- .init_begin(cct, dpp, background_tasks, site_config, cfgstore) < 0) {
+ .init_begin(cct, dpp, background_tasks, site_config) < 0) {
delete driver;
return nullptr;
}
delete driver;
return nullptr;
}
- if (rados->init_complete(dpp, y, cfgstore) < 0) {
+ if (rados->init_complete(dpp, y) < 0) {
delete driver;
return nullptr;
}
rgw::sal::Driver* DriverManager::init_raw_storage_provider(const DoutPrefixProvider* dpp, CephContext* cct,
const Config& cfg, boost::asio::io_context& io_context,
- const rgw::SiteConfig& site_config, rgw::sal::ConfigStore* cfgstore)
+ const rgw::SiteConfig& site_config)
{
rgw::sal::Driver* driver = nullptr;
if (cfg.store_name.compare("rados") == 0) {
return nullptr;
}
- int ret = rados->init_svc(true, dpp, false, site_config, cfgstore);
+ int ret = rados->init_svc(true, dpp, false, site_config);
if (ret < 0) {
ldout(cct, 0) << "ERROR: failed to init services (ret=" << cpp_strerror(-ret) << ")" << dendl;
delete driver;
bool run_notification_thread,
bool background_tasks,
optional_yield y,
- rgw::sal::ConfigStore* cfgstore,
bool use_cache = true,
bool use_gc = true) {
rgw::sal::Driver* driver = init_storage_provider(dpp, cct, cfg, io_context,
quota_threads,
run_sync_thread,
run_reshard_thread,
- run_notification_thread,
+ run_notification_thread,
use_cache, use_gc,
- background_tasks, y, cfgstore);
+ background_tasks, y);
return driver;
}
/** Get a stripped down driver by service name */
static rgw::sal::Driver* get_raw_storage(const DoutPrefixProvider* dpp,
CephContext* cct, const Config& cfg,
boost::asio::io_context& io_context,
- const rgw::SiteConfig& site_config,
- rgw::sal::ConfigStore* cfgstore) {
+ const rgw::SiteConfig& site_config) {
rgw::sal::Driver* driver = init_raw_storage_provider(dpp, cct, cfg,
io_context,
- site_config,
- cfgstore);
+ site_config);
return driver;
}
/** Initialize a new full Driver */
bool quota_threads,
bool run_sync_thread,
bool run_reshard_thread,
- bool run_notification_thread,
+ bool run_notification_thread,
bool use_metadata_cache,
bool use_gc, bool background_tasks,
- optional_yield y, rgw::sal::ConfigStore* cfgstore);
+ optional_yield y);
/** Initialize a new raw Driver */
static rgw::sal::Driver* init_raw_storage_provider(const DoutPrefixProvider* dpp,
CephContext* cct,
const Config& cfg,
boost::asio::io_context& io_context,
- const rgw::SiteConfig& site_config,
- rgw::sal::ConfigStore* cfgstore);
+ const rgw::SiteConfig& site_config);
/** Close a Driver when it's no longer needed */
static void close_storage(rgw::sal::Driver* driver);
optional_yield y, const std::string& marker,
std::span<std::string> entries,
ListResult<std::string>& result) = 0;
- virtual int update_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y,
- std::string_view period_id, uint32_t epoch) = 0;
///@}
/// @group ZoneGroup
#include "common/errno.h"
#include "rgw_zone.h"
-#include "rgw_sal.h"
#include "rgw_sal_config.h"
#include "rgw_sync.h"
JSONDecoder::decode_json("supported_features", supported_features, obj);
}
+int RGWSystemMetaObj::init(const DoutPrefixProvider *dpp, CephContext *_cct, RGWSI_SysObj *_sysobj_svc,
+ optional_yield y,
+ bool setup_obj, bool old_format)
+{
+ reinit_instance(_cct, _sysobj_svc);
+
+ if (!setup_obj)
+ return 0;
+
+ if (old_format && id.empty()) {
+ id = name;
+ }
+
+ if (id.empty()) {
+ id = get_predefined_id(cct);
+ }
+
+ if (id.empty()) {
+ int r;
+ if (name.empty()) {
+ name = get_predefined_name(cct);
+ }
+ if (name.empty()) {
+ r = use_default(dpp, y, old_format);
+ if (r < 0) {
+ return r;
+ }
+ } else if (!old_format) {
+ r = read_id(dpp, name, id, y);
+ if (r < 0) {
+ if (r != -ENOENT) {
+ ldpp_dout(dpp, 0) << "error in read_id for object name: " << name << " : " << cpp_strerror(-r) << dendl;
+ }
+ return r;
+ }
+ }
+ }
+
+ return read_info(dpp, id, y, old_format);
+}
+
+RGWZoneGroup::~RGWZoneGroup() {}
+
+const string RGWZoneGroup::get_default_oid(bool old_region_format) const
+{
+ if (old_region_format) {
+ if (cct->_conf->rgw_default_region_info_oid.empty()) {
+ return default_region_info_oid;
+ }
+ return cct->_conf->rgw_default_region_info_oid;
+ }
+
+ string default_oid = cct->_conf->rgw_default_zonegroup_info_oid;
+
+ if (cct->_conf->rgw_default_zonegroup_info_oid.empty()) {
+ default_oid = default_zone_group_info_oid;
+ }
+
+ default_oid += "." + realm_id;
+
+ return default_oid;
+}
+
+const string& RGWZoneGroup::get_info_oid_prefix(bool old_region_format) const
+{
+ if (old_region_format) {
+ return region_info_oid_prefix;
+ }
+ return zone_group_info_oid_prefix;
+}
+
+const string& RGWZoneGroup::get_names_oid_prefix() const
+{
+ return zonegroup_names_oid_prefix;
+}
+
+string RGWZoneGroup::get_predefined_id(CephContext *cct) const {
+ return cct->_conf.get_val<string>("rgw_zonegroup_id");
+}
+
+const string& RGWZoneGroup::get_predefined_name(CephContext *cct) const {
+ return cct->_conf->rgw_zonegroup;
+}
+
rgw_pool RGWZoneGroup::get_pool(CephContext *cct_) const
{
if (cct_->_conf->rgw_zonegroup_root_pool.empty()) {
return rgw_pool(cct_->_conf->rgw_zonegroup_root_pool);
}
+int RGWZoneGroup::read_default_id(const DoutPrefixProvider *dpp, string& default_id, optional_yield y,
+ bool old_format)
+{
+ if (realm_id.empty()) {
+ /* try using default realm */
+ RGWRealm realm;
+ int ret = realm.init(dpp, cct, sysobj_svc, y);
+ // no default realm exist
+ if (ret < 0) {
+ return read_id(dpp, default_zonegroup_name, default_id, y);
+ }
+ realm_id = realm.get_id();
+ }
+
+ return RGWSystemMetaObj::read_default_id(dpp, default_id, y, old_format);
+}
+
+int RGWSystemMetaObj::use_default(const DoutPrefixProvider *dpp, optional_yield y, bool old_format)
+{
+ return read_default_id(dpp, id, y, old_format);
+}
+
+void RGWSystemMetaObj::reinit_instance(CephContext *_cct, RGWSI_SysObj *_sysobj_svc)
+{
+ cct = _cct;
+ sysobj_svc = _sysobj_svc;
+ zone_svc = _sysobj_svc->get_zone_svc();
+}
+
+int RGWSystemMetaObj::read_info(const DoutPrefixProvider *dpp, const string& obj_id, optional_yield y,
+ bool old_format)
+{
+ rgw_pool pool(get_pool(cct));
+
+ bufferlist bl;
+
+ string oid = get_info_oid_prefix(old_format) + obj_id;
+
+ auto sysobj = sysobj_svc->get_obj(rgw_raw_obj{pool, oid});
+ int ret = sysobj.rop().read(dpp, &bl, y);
+ if (ret < 0) {
+ ldpp_dout(dpp, 0) << "failed reading obj info from " << pool << ":" << oid << ": " << cpp_strerror(-ret) << dendl;
+ return ret;
+ }
+ using ceph::decode;
+
+ try {
+ auto iter = bl.cbegin();
+ decode(*this, iter);
+ } catch (buffer::error& err) {
+ ldpp_dout(dpp, 0) << "ERROR: failed to decode obj from " << pool << ":" << oid << dendl;
+ return -EIO;
+ }
+
+ return 0;
+}
+
void RGWZoneGroup::decode_json(JSONObj *obj)
{
- JSONDecoder::decode_json("id", id, obj);
- JSONDecoder::decode_json("name", name, obj);
+ RGWSystemMetaObj::decode_json(obj);
if (id.empty()) {
derr << "old format " << dendl;
JSONDecoder::decode_json("name", name, obj);
JSONDecoder::decode_json("enabled_features", enabled_features, obj);
}
+RGWZoneParams::~RGWZoneParams() {}
+
void RGWZoneParams::decode_json(JSONObj *obj)
{
- JSONDecoder::decode_json("id", id, obj);
- JSONDecoder::decode_json("name", name, obj);
+ RGWSystemMetaObj::decode_json(obj);
JSONDecoder::decode_json("domain_root", domain_root, obj);
JSONDecoder::decode_json("control_pool", control_pool, obj);
JSONDecoder::decode_json("gc_pool", gc_pool, obj);
void RGWZoneParams::dump(Formatter *f) const
{
- encode_json("id", id, f);
- encode_json("name", name, f);
+ RGWSystemMetaObj::dump(f);
encode_json("domain_root", domain_root, f);
encode_json("control_pool", control_pool, f);
encode_json("gc_pool", gc_pool, f);
encode_json("realm_id", realm_id, f);
}
+int RGWZoneParams::init(const DoutPrefixProvider *dpp,
+ CephContext *cct, RGWSI_SysObj *sysobj_svc,
+ optional_yield y, bool setup_obj, bool old_format)
+{
+ if (name.empty()) {
+ name = cct->_conf->rgw_zone;
+ }
+
+ return RGWSystemMetaObj::init(dpp, cct, sysobj_svc, y, setup_obj, old_format);
+}
+
rgw_pool RGWZoneParams::get_pool(CephContext *cct) const
{
if (cct->_conf->rgw_zone_root_pool.empty()) {
return rgw_pool(cct->_conf->rgw_zone_root_pool);
}
+const string RGWZoneParams::get_default_oid(bool old_format) const
+{
+ if (old_format) {
+ return cct->_conf->rgw_default_zone_info_oid;
+ }
+
+ return cct->_conf->rgw_default_zone_info_oid + "." + realm_id;
+}
+
+const string& RGWZoneParams::get_names_oid_prefix() const
+{
+ return zone_names_oid_prefix;
+}
+
+const string& RGWZoneParams::get_info_oid_prefix(bool old_format) const
+{
+ return zone_info_oid_prefix;
+}
+
+string RGWZoneParams::get_predefined_id(CephContext *cct) const {
+ return cct->_conf.get_val<string>("rgw_zone_id");
+}
+
+const string& RGWZoneParams::get_predefined_name(CephContext *cct) const {
+ return cct->_conf->rgw_zone;
+}
+
+int RGWZoneParams::read_default_id(const DoutPrefixProvider *dpp, string& default_id, optional_yield y,
+ bool old_format)
+{
+ if (realm_id.empty()) {
+ /* try using default realm */
+ RGWRealm realm;
+ int ret = realm.init(dpp, cct, sysobj_svc, y);
+ //no default realm exist
+ if (ret < 0) {
+ return read_id(dpp, default_zone_name, default_id, y);
+ }
+ realm_id = realm.get_id();
+ }
+
+ return RGWSystemMetaObj::read_default_id(dpp, default_id, y, old_format);
+}
+
+
+int RGWZoneParams::set_as_default(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive)
+{
+ if (realm_id.empty()) {
+ /* try using default realm */
+ RGWRealm realm;
+ int ret = realm.init(dpp, cct, sysobj_svc, y);
+ if (ret < 0) {
+ ldpp_dout(dpp, 10) << "could not read realm id: " << cpp_strerror(-ret) << dendl;
+ return -EINVAL;
+ }
+ realm_id = realm.get_id();
+ }
+
+ return RGWSystemMetaObj::set_as_default(dpp, y, exclusive);
+}
+
+int RGWZoneParams::create(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive)
+{
+ RGWZonePlacementInfo default_placement;
+ default_placement.index_pool = name + "." + default_bucket_index_pool_suffix;
+ rgw_pool pool = name + "." + default_storage_pool_suffix;
+ default_placement.storage_classes.set_storage_class(RGW_STORAGE_CLASS_STANDARD, &pool, nullptr);
+ default_placement.data_extra_pool = name + "." + default_storage_extra_pool_suffix;
+ placement_pools["default-placement"] = default_placement;
+
+ int r = fix_pool_names(dpp, y);
+ if (r < 0) {
+ ldpp_dout(dpp, 0) << "ERROR: fix_pool_names returned r=" << r << dendl;
+ return r;
+ }
+
+ r = RGWSystemMetaObj::create(dpp, y, exclusive);
+ if (r < 0) {
+ return r;
+ }
+
+ // try to set as default. may race with another create, so pass exclusive=true
+ // so we don't override an existing default
+ r = set_as_default(dpp, y, true);
+ if (r < 0 && r != -EEXIST) {
+ ldpp_dout(dpp, 10) << "WARNING: failed to set zone as default, r=" << r << dendl;
+ }
+
+ return 0;
+}
+
rgw_pool fix_zone_pool_dup(const set<rgw_pool>& pools,
const string& default_prefix,
const string& default_suffix,
}
+static int get_zones_pool_set(const DoutPrefixProvider *dpp,
+ CephContext* cct,
+ RGWSI_SysObj* sysobj_svc,
+ const list<string>& zone_names,
+ const string& my_zone_id,
+ set<rgw_pool>& pool_names,
+ optional_yield y)
+{
+ for (const auto& name : zone_names) {
+ RGWZoneParams zone(name);
+ int r = zone.init(dpp, cct, sysobj_svc, y);
+ if (r < 0) {
+ ldpp_dout(dpp, 0) << "Error: failed to load zone " << name
+ << " with " << cpp_strerror(-r) << dendl;
+ return r;
+ }
+ if (zone.get_id() != my_zone_id) {
+ add_zone_pools(zone, pool_names);
+ }
+ }
+ return 0;
+}
+
+int RGWZoneParams::fix_pool_names(const DoutPrefixProvider *dpp, optional_yield y)
+{
+
+ list<string> zones;
+ int r = zone_svc->list_zones(dpp, zones);
+ if (r < 0) {
+ ldpp_dout(dpp, 10) << "WARNING: driver->list_zones() returned r=" << r << dendl;
+ }
+
+ set<rgw_pool> pools;
+ r = get_zones_pool_set(dpp, cct, sysobj_svc, zones, id, pools, y);
+ if (r < 0) {
+ ldpp_dout(dpp, 0) << "Error: get_zones_pool_names" << r << dendl;
+ return r;
+ }
+
+ domain_root = fix_zone_pool_dup(pools, name, ".rgw.meta:root", domain_root);
+ control_pool = fix_zone_pool_dup(pools, name, ".rgw.control", control_pool);
+ gc_pool = fix_zone_pool_dup(pools, name ,".rgw.log:gc", gc_pool);
+ lc_pool = fix_zone_pool_dup(pools, name ,".rgw.log:lc", lc_pool);
+ log_pool = fix_zone_pool_dup(pools, name, ".rgw.log", log_pool);
+ intent_log_pool = fix_zone_pool_dup(pools, name, ".rgw.log:intent", intent_log_pool);
+ usage_log_pool = fix_zone_pool_dup(pools, name, ".rgw.log:usage", usage_log_pool);
+ user_keys_pool = fix_zone_pool_dup(pools, name, ".rgw.meta:users.keys", user_keys_pool);
+ user_email_pool = fix_zone_pool_dup(pools, name, ".rgw.meta:users.email", user_email_pool);
+ user_swift_pool = fix_zone_pool_dup(pools, name, ".rgw.meta:users.swift", user_swift_pool);
+ user_uid_pool = fix_zone_pool_dup(pools, name, ".rgw.meta:users.uid", user_uid_pool);
+ roles_pool = fix_zone_pool_dup(pools, name, ".rgw.meta:roles", roles_pool);
+ reshard_pool = fix_zone_pool_dup(pools, name, ".rgw.log:reshard", reshard_pool);
+ otp_pool = fix_zone_pool_dup(pools, name, ".rgw.otp", otp_pool);
+ oidc_pool = fix_zone_pool_dup(pools, name, ".rgw.meta:oidc", oidc_pool);
+ notif_pool = fix_zone_pool_dup(pools, name ,".rgw.log:notif", notif_pool);
+ topics_pool = fix_zone_pool_dup(pools, name, ".rgw.meta:topics", topics_pool);
+ account_pool = fix_zone_pool_dup(pools, name, ".rgw.meta:accounts", account_pool);
+ group_pool = fix_zone_pool_dup(pools, name, ".rgw.meta:groups", group_pool);
+
+ for(auto& iter : placement_pools) {
+ iter.second.index_pool = fix_zone_pool_dup(pools, name, "." + default_bucket_index_pool_suffix,
+ iter.second.index_pool);
+ for (auto& pi : iter.second.storage_classes.get_all()) {
+ if (pi.second.data_pool) {
+ rgw_pool& pool = pi.second.data_pool.get();
+ pool = fix_zone_pool_dup(pools, name, "." + default_storage_pool_suffix,
+ pool);
+ }
+ }
+ iter.second.data_extra_pool= fix_zone_pool_dup(pools, name, "." + default_storage_extra_pool_suffix,
+ iter.second.data_extra_pool);
+ }
+
+ return 0;
+}
+
+int RGWPeriodConfig::read(const DoutPrefixProvider *dpp, RGWSI_SysObj *sysobj_svc, const std::string& realm_id,
+ optional_yield y)
+{
+ const auto& pool = get_pool(sysobj_svc->ctx());
+ const auto& oid = get_oid(realm_id);
+ bufferlist bl;
+
+ auto sysobj = sysobj_svc->get_obj(rgw_raw_obj{pool, oid});
+ int ret = sysobj.rop().read(dpp, &bl, y);
+ if (ret < 0) {
+ return ret;
+ }
+ using ceph::decode;
+ try {
+ auto iter = bl.cbegin();
+ decode(*this, iter);
+ } catch (buffer::error& err) {
+ return -EIO;
+ }
+ return 0;
+}
+
+int RGWPeriodConfig::write(const DoutPrefixProvider *dpp,
+ RGWSI_SysObj *sysobj_svc,
+ const std::string& realm_id, optional_yield y)
+{
+ const auto& pool = get_pool(sysobj_svc->ctx());
+ const auto& oid = get_oid(realm_id);
+ bufferlist bl;
+ using ceph::encode;
+ encode(*this, bl);
+ auto sysobj = sysobj_svc->get_obj(rgw_raw_obj{pool, oid});
+ return sysobj.wop()
+ .set_exclusive(false)
+ .write(dpp, bl, y);
+}
+
void RGWPeriodConfig::decode_json(JSONObj *obj)
{
JSONDecoder::decode_json("bucket_quota", quota.bucket_quota, obj);
encode_json("anonymous_ratelimit", anon_ratelimit, f);
}
+std::string RGWPeriodConfig::get_oid(const std::string& realm_id)
+{
+ if (realm_id.empty()) {
+ return "period_config.default";
+ }
+ return "period_config." + realm_id;
+}
+
+rgw_pool RGWPeriodConfig::get_pool(CephContext *cct)
+{
+ const auto& pool_name = cct->_conf->rgw_period_root_pool;
+ if (pool_name.empty()) {
+ return {RGW_DEFAULT_PERIOD_ROOT_POOL};
+ }
+ return {pool_name};
+}
+
+int RGWSystemMetaObj::delete_obj(const DoutPrefixProvider *dpp, optional_yield y, bool old_format)
+{
+ rgw_pool pool(get_pool(cct));
+
+ /* check to see if obj is the default */
+ RGWDefaultSystemMetaObjInfo default_info;
+ int ret = read_default(dpp, default_info, get_default_oid(old_format), y);
+ if (ret < 0 && ret != -ENOENT)
+ return ret;
+ if (default_info.default_id == id || (old_format && default_info.default_id == name)) {
+ string oid = get_default_oid(old_format);
+ rgw_raw_obj default_named_obj(pool, oid);
+ auto sysobj = sysobj_svc->get_obj(default_named_obj);
+ ret = sysobj.wop().remove(dpp, y);
+ if (ret < 0) {
+ ldpp_dout(dpp, 0) << "Error delete default obj name " << name << ": " << cpp_strerror(-ret) << dendl;
+ return ret;
+ }
+ }
+ if (!old_format) {
+ string oid = get_names_oid_prefix() + name;
+ rgw_raw_obj object_name(pool, oid);
+ auto sysobj = sysobj_svc->get_obj(object_name);
+ ret = sysobj.wop().remove(dpp, y);
+ if (ret < 0) {
+ ldpp_dout(dpp, 0) << "Error delete obj name " << name << ": " << cpp_strerror(-ret) << dendl;
+ return ret;
+ }
+ }
+
+ string oid = get_info_oid_prefix(old_format);
+ if (old_format) {
+ oid += name;
+ } else {
+ oid += id;
+ }
+
+ rgw_raw_obj object_id(pool, oid);
+ auto sysobj = sysobj_svc->get_obj(object_id);
+ ret = sysobj.wop().remove(dpp, y);
+ if (ret < 0) {
+ ldpp_dout(dpp, 0) << "Error delete object id " << id << ": " << cpp_strerror(-ret) << dendl;
+ }
+
+ return ret;
+}
+
void RGWZoneGroup::dump(Formatter *f) const
{
- encode_json("id", id , f);
- encode_json("name", name , f);
+ RGWSystemMetaObj::dump(f);
encode_json("api_name", api_name, f);
encode_json("is_master", is_master, f);
encode_json("endpoints", endpoints, f);
}
}
+void RGWSystemMetaObj::dump(Formatter *f) const
+{
+ encode_json("id", id, f);
+ encode_json("name", name, f);
+}
+
+void RGWSystemMetaObj::decode_json(JSONObj *obj)
+{
+ JSONDecoder::decode_json("id", id, obj);
+ JSONDecoder::decode_json("name", name, obj);
+}
+
+int RGWSystemMetaObj::read_default(const DoutPrefixProvider *dpp,
+ RGWDefaultSystemMetaObjInfo& default_info,
+ const string& oid, optional_yield y)
+{
+ using ceph::decode;
+ auto pool = get_pool(cct);
+ bufferlist bl;
+
+ auto sysobj = sysobj_svc->get_obj(rgw_raw_obj(pool, oid));
+ int ret = sysobj.rop().read(dpp, &bl, y);
+ if (ret < 0)
+ return ret;
+
+ try {
+ auto iter = bl.cbegin();
+ decode(default_info, iter);
+ } catch (buffer::error& err) {
+ ldpp_dout(dpp, 0) << "error decoding data from " << pool << ":" << oid << dendl;
+ return -EIO;
+ }
+
+ return 0;
+}
+
void RGWZoneGroupPlacementTarget::dump(Formatter *f) const
{
encode_json("name", name, f);
ENCODE_FINISH(bl);
}
+int RGWSystemMetaObj::create(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive)
+{
+ int ret;
+
+ /* check to see the name is not used */
+ ret = read_id(dpp, name, id, y);
+ if (exclusive && ret == 0) {
+ ldpp_dout(dpp, 10) << "ERROR: name " << name << " already in use for obj id " << id << dendl;
+ return -EEXIST;
+ } else if (ret < 0 && ret != -ENOENT) {
+ ldpp_dout(dpp, 0) << "failed reading obj id " << id << ": " << cpp_strerror(-ret) << dendl;
+ return ret;
+ }
+
+ if (id.empty()) {
+ /* create unique id */
+ uuid_d new_uuid;
+ char uuid_str[37];
+ new_uuid.generate_random();
+ new_uuid.print(uuid_str);
+ id = uuid_str;
+ }
+
+ ret = store_info(dpp, exclusive, y);
+ if (ret < 0) {
+ ldpp_dout(dpp, 0) << "ERROR: storing info for " << id << ": " << cpp_strerror(-ret) << dendl;
+ return ret;
+ }
+
+ return store_name(dpp, exclusive, y);
+}
+
+int RGWSystemMetaObj::read_default_id(const DoutPrefixProvider *dpp, string& default_id, optional_yield y,
+ bool old_format)
+{
+ RGWDefaultSystemMetaObjInfo default_info;
+
+ int ret = read_default(dpp, default_info, get_default_oid(old_format), y);
+ if (ret < 0) {
+ return ret;
+ }
+
+ default_id = default_info.default_id;
+
+ return 0;
+}
+
+int RGWSystemMetaObj::set_as_default(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive)
+{
+ using ceph::encode;
+ string oid = get_default_oid();
+
+ rgw_pool pool(get_pool(cct));
+ bufferlist bl;
+
+ RGWDefaultSystemMetaObjInfo default_info;
+ default_info.default_id = id;
+
+ encode(default_info, bl);
+
+ auto sysobj = sysobj_svc->get_obj(rgw_raw_obj(pool, oid));
+ int ret = sysobj.wop()
+ .set_exclusive(exclusive)
+ .write(dpp, bl, y);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int RGWSystemMetaObj::store_info(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y)
+{
+ rgw_pool pool(get_pool(cct));
+
+ string oid = get_info_oid_prefix() + id;
+
+ bufferlist bl;
+ using ceph::encode;
+ encode(*this, bl);
+ auto sysobj = sysobj_svc->get_obj(rgw_raw_obj{pool, oid});
+ return sysobj.wop()
+ .set_exclusive(exclusive)
+ .write(dpp, bl, y);
+}
+
+int RGWSystemMetaObj::read_id(const DoutPrefixProvider *dpp, const string& obj_name, string& object_id,
+ optional_yield y)
+{
+ using ceph::decode;
+ rgw_pool pool(get_pool(cct));
+ bufferlist bl;
+
+ string oid = get_names_oid_prefix() + obj_name;
+
+ auto sysobj = sysobj_svc->get_obj(rgw_raw_obj(pool, oid));
+ int ret = sysobj.rop().read(dpp, &bl, y);
+ if (ret < 0) {
+ return ret;
+ }
+
+ RGWNameToId nameToId;
+ try {
+ auto iter = bl.cbegin();
+ decode(nameToId, iter);
+ } catch (buffer::error& err) {
+ ldpp_dout(dpp, 0) << "ERROR: failed to decode obj from " << pool << ":" << oid << dendl;
+ return -EIO;
+ }
+ object_id = nameToId.obj_id;
+ return 0;
+}
+
+int RGWSystemMetaObj::store_name(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y)
+{
+ rgw_pool pool(get_pool(cct));
+ string oid = get_names_oid_prefix() + name;
+
+ RGWNameToId nameToId;
+ nameToId.obj_id = id;
+
+ bufferlist bl;
+ using ceph::encode;
+ encode(nameToId, bl);
+ auto sysobj = sysobj_svc->get_obj(rgw_raw_obj(pool, oid));
+ return sysobj.wop()
+ .set_exclusive(exclusive)
+ .write(dpp, bl, y);
+}
+
bool RGWPeriodMap::find_zone_by_id(const rgw_zone_id& zone_id,
RGWZoneGroup *zonegroup,
RGWZone *zone) const
return false;
}
+int RGWZoneGroup::set_as_default(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive)
+{
+ if (realm_id.empty()) {
+ /* try using default realm */
+ RGWRealm realm;
+ int ret = realm.init(dpp, cct, sysobj_svc, y);
+ if (ret < 0) {
+ ldpp_dout(dpp, 10) << "could not read realm id: " << cpp_strerror(-ret) << dendl;
+ return -EINVAL;
+ }
+ realm_id = realm.get_id();
+ }
+
+ return RGWSystemMetaObj::set_as_default(dpp, y, exclusive);
+}
+
+int RGWSystemMetaObj::write(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y)
+{
+ int ret = store_info(dpp, exclusive, y);
+ if (ret < 0) {
+ ldpp_dout(dpp, 20) << __func__ << "(): store_info() returned ret=" << ret << dendl;
+ return ret;
+ }
+ ret = store_name(dpp, exclusive, y);
+ if (ret < 0) {
+ ldpp_dout(dpp, 20) << __func__ << "(): store_name() returned ret=" << ret << dendl;
+ return ret;
+ }
+ return 0;
+}
+
namespace rgw {
int init_zone_pool_names(const DoutPrefixProvider *dpp, optional_yield y,
using Svc = RGWSI_MDLog::Svc;
using Cursor = RGWPeriodHistory::Cursor;
-RGWSI_MDLog::RGWSI_MDLog(CephContext *cct, bool _run_sync, rgw::sal::ConfigStore* _cfgstore) : RGWServiceInstance(cct),
- run_sync(_run_sync), cfgstore(_cfgstore) {
+RGWSI_MDLog::RGWSI_MDLog(CephContext *cct, bool _run_sync) : RGWServiceInstance(cct), run_sync(_run_sync) {
}
RGWSI_MDLog::~RGWSI_MDLog() {
if (run_sync &&
svc.zone->need_to_sync()) {
// initialize the log period history
- svc.mdlog->init_oldest_log_period(y, dpp, cfgstore);
+ svc.mdlog->init_oldest_log_period(y, dpp);
}
return 0;
}
// traverse all the way back to the beginning of the period history, and
// return a cursor to the first period in a fully attached history
-Cursor RGWSI_MDLog::find_oldest_period(const DoutPrefixProvider *dpp, optional_yield y, rgw::sal::ConfigStore* cfgstore)
+Cursor RGWSI_MDLog::find_oldest_period(const DoutPrefixProvider *dpp, optional_yield y)
{
auto cursor = period_history->get_current();
}
// pull the predecessor and add it to our history
RGWPeriod period;
- int r = period_puller->pull(dpp, predecessor, period, y, cfgstore);
+ int r = period_puller->pull(dpp, predecessor, period, y);
if (r < 0) {
return cursor;
}
return cursor;
}
-Cursor RGWSI_MDLog::init_oldest_log_period(optional_yield y, const DoutPrefixProvider *dpp, rgw::sal::ConfigStore* cfgstore)
+Cursor RGWSI_MDLog::init_oldest_log_period(optional_yield y, const DoutPrefixProvider *dpp)
{
// read the mdlog history
RGWMetadataLogHistory state;
if (ret == -ENOENT) {
// initialize the mdlog history and write it
ldpp_dout(dpp, 10) << "initializing mdlog history" << dendl;
- auto cursor = find_oldest_period(dpp, y, cfgstore);
+ auto cursor = find_oldest_period(dpp, y);
if (!cursor) {
return cursor;
}
if (cursor) {
return cursor;
} else {
- cursor = find_oldest_period(dpp, y, cfgstore);
+ cursor = find_oldest_period(dpp, y);
state.oldest_realm_epoch = cursor.get_epoch();
state.oldest_period_id = cursor.get_period().get_id();
ldpp_dout(dpp, 10) << "rewriting mdlog history" << dendl;
// pull the oldest period by id
RGWPeriod period;
- ret = period_puller->pull(dpp, state.oldest_period_id, period, y, cfgstore);
+ ret = period_puller->pull(dpp, state.oldest_period_id, period, y);
if (ret < 0) {
ldpp_dout(dpp, 1) << "failed to read period id=" << state.oldest_period_id
<< " for mdlog history: " << cpp_strerror(ret) << dendl;
return Cursor{-EINVAL};
}
// attach the period to our history
- return period_history->attach(dpp, std::move(period), y, cfgstore);
+ return period_history->attach(dpp, std::move(period), y);
}
Cursor RGWSI_MDLog::read_oldest_log_period(optional_yield y, const DoutPrefixProvider *dpp) const
}
int RGWSI_MDLog::pull_period(const DoutPrefixProvider *dpp, const std::string& period_id, RGWPeriod& period,
- optional_yield y, rgw::sal::ConfigStore* cfgstore)
+ optional_yield y)
{
- return period_puller->pull(dpp, period_id, period, y, cfgstore);
+ return period_puller->pull(dpp, period_id, period, y);
}
std::unique_ptr<RGWPeriodPuller> period_puller;
// maintains a connected history of periods
std::unique_ptr<RGWPeriodHistory> period_history;
- rgw::sal::ConfigStore* cfgstore{nullptr};
public:
- RGWSI_MDLog(CephContext *cct, bool run_sync, rgw::sal::ConfigStore* _cfgstore);
+ RGWSI_MDLog(CephContext *cct, bool run_sync);
virtual ~RGWSI_MDLog();
librados::Rados* rados{nullptr};
// traverse all the way back to the beginning of the period history, and
// return a cursor to the first period in a fully attached history
- RGWPeriodHistory::Cursor find_oldest_period(const DoutPrefixProvider *dpp, optional_yield y, rgw::sal::ConfigStore* cfgstore);
+ RGWPeriodHistory::Cursor find_oldest_period(const DoutPrefixProvider *dpp, optional_yield y);
/// initialize the oldest log period if it doesn't exist, and attach it to
/// our current history
- RGWPeriodHistory::Cursor init_oldest_log_period(optional_yield y, const DoutPrefixProvider *dpp, rgw::sal::ConfigStore* cfgstore);
+ RGWPeriodHistory::Cursor init_oldest_log_period(optional_yield y, const DoutPrefixProvider *dpp);
/// read the oldest log period, and return a cursor to it in our existing
/// period history
return period_history.get();
}
- int pull_period(const DoutPrefixProvider *dpp, const std::string& period_id, RGWPeriod& period, optional_yield y, rgw::sal::ConfigStore* cfgstore);
+ int pull_period(const DoutPrefixProvider *dpp, const std::string& period_id, RGWPeriod& period, optional_yield y);
/// find or create the metadata log for the given period
RGWMetadataLog* get_log(const std::string& period);
#include "rgw_zone.h"
#include "rgw_rest_conn.h"
#include "rgw_bucket_sync.h"
-#include "rgw_sal.h"
-#include "rgw_sal_config.h"
#include "common/errno.h"
#include "include/random.h"
using namespace std;
using namespace rgw_zone_defaults;
-RGWSI_Zone::RGWSI_Zone(CephContext *cct, rgw::sal::ConfigStore* _cfgstore, const rgw::SiteConfig* _site)
- : RGWServiceInstance(cct), cfgstore(_cfgstore), site(_site)
+RGWSI_Zone::RGWSI_Zone(CephContext *cct) : RGWServiceInstance(cct)
{
}
RGWPeriod *pperiod,
RGWZoneGroup *pzonegroup,
bool *pfound,
- rgw::sal::ConfigStore* cfgstore,
optional_yield y)
{
auto& found = *pfound;
for (auto& realm_name : realms) {
string realm_id;
RGWRealm realm(realm_id, realm_name);
- r = rgw::read_realm(dpp, y, cfgstore, realm_id, realm_name, realm);
+ r = realm.init(dpp, cct, sysobj_svc, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "WARNING: can't open realm " << realm_name << ": " << cpp_strerror(-r) << " ... skipping" << dendl;
continue;
}
r = realm.find_zone(dpp, zid, pperiod,
- pzonegroup, &found, cfgstore, y);
+ pzonegroup, &found, y);
if (r < 0) {
ldpp_dout(dpp, 20) << __func__ << "(): ERROR: realm.find_zone() returned r=" << r<< dendl;
return r;
assert(sysobj_svc->is_started()); /* if not then there's ordering issue */
- if (site->get_realm().has_value()) {
- *realm = site->get_realm().value();
+ ret = realm->init(dpp, cct, sysobj_svc, y);
+ if (ret < 0 && ret != -ENOENT) {
+ ldpp_dout(dpp, 0) << "failed reading realm info: ret "<< ret << " " << cpp_strerror(-ret) << dendl;
+ return ret;
}
ldpp_dout(dpp, 20) << "realm " << realm->get_name() << " " << realm->get_id() << dendl;
- if (site->get_period().has_value()) {
- *current_period = site->get_period().value();
+ ret = current_period->init(dpp, cct, sysobj_svc, realm->get_id(), y);
+ if (ret < 0 && ret != -ENOENT) {
+ ldpp_dout(dpp, 0) << "failed reading current period info: " << " " << cpp_strerror(-ret) << dendl;
+ return ret;
}
- current_period->set_realm_id(realm->get_id());
- *zone_params = site->get_zone_params();
- bool found_zone = true;
+ ret = zone_params->init(dpp, cct, sysobj_svc, y);
+ bool found_zone = (ret == 0);
+ if (ret < 0 && ret != -ENOENT) {
+ lderr(cct) << "failed reading zone info: ret "<< ret << " " << cpp_strerror(-ret) << dendl;
+ return ret;
+ }
cur_zone_id = rgw_zone_id(zone_params->get_id());
current_period,
zonegroup,
&found_period_conf,
- cfgstore,
y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: search_realm_conf() failed: ret="<< ret << dendl;
if (!zg_initialized) {
/* couldn't find a proper period config, use local zonegroup */
- std::string_view zonegroup_id = zonegroup->get_id();
- std::string_view zonegroup_name = zonegroup->get_name();
- ret = rgw::read_zonegroup(dpp, y, cfgstore, zonegroup_id, zonegroup_name, *zonegroup);
+ ret = zonegroup->init(dpp, cct, sysobj_svc, y);
zg_initialized = (ret == 0);
if (ret < 0 && ret != -ENOENT) {
ldpp_dout(dpp, 0) << "failed reading zonegroup info: " << cpp_strerror(-ret) << dendl;
}
// read period_config into current_period
auto& period_config = current_period->get_config();
- ret = cfgstore->read_period_config(dpp, y, zonegroup->realm_id, period_config);
+ ret = period_config.read(dpp, sysobj_svc, zonegroup->realm_id, y);
if (ret < 0 && ret != -ENOENT) {
ldout(cct, 0) << "ERROR: failed to read period config: "
<< cpp_strerror(ret) << dendl;
int RGWSI_Zone::list_realms(const DoutPrefixProvider *dpp, list<string>& realms)
{
- RGWRealm realm;
+ RGWRealm realm(cct, sysobj_svc);
RGWSI_SysObj::Pool syspool = sysobj_svc->get_pool(realm.get_pool(cct));
return syspool.list_prefixed_objs(dpp, realm_names_oid_prefix, &realms);
string period_id = current_period;
while(!period_id.empty()) {
RGWPeriod period(period_id);
- ret = cfgstore->read_period(dpp, y, period_id, std::nullopt, period);
+ ret = period.init(dpp, cct, sysobj_svc, y);
if (ret < 0) {
return ret;
}
if (iter != current_period->get_map().zonegroups.end()) {
ldpp_dout(dpp, 20) << "using current period zonegroup " << zonegroup->get_name() << dendl;
*zonegroup = iter->second;
+ int ret = zonegroup->init(dpp, cct, sysobj_svc, y, false);
+ if (ret < 0) {
+ ldpp_dout(dpp, 0) << "failed init zonegroup: " << " " << cpp_strerror(-ret) << dendl;
+ return ret;
+ }
}
for (iter = current_period->get_map().zonegroups.begin();
iter != current_period->get_map().zonegroups.end(); ++iter){
master->second.name << " id:" << master->second.id << " as master" << dendl;
if (zonegroup->get_id() == zg.get_id()) {
zonegroup->master_zone = master->second.id;
- int ret = cfgstore->create_zonegroup(dpp, y, false, *zonegroup, nullptr);
+ int ret = zonegroup->update(dpp, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "error updating zonegroup : " << cpp_strerror(-ret) << dendl;
return ret;
}
} else {
RGWZoneGroup fixed_zg(zg.get_id(),zg.get_name());
- std::string_view zonegroup_id = zonegroup->get_id();
- std::string_view zonegroup_name = zonegroup->get_name();
- int ret = rgw::read_zonegroup(dpp, y, cfgstore, zonegroup_id, zonegroup_name, *zonegroup);
+ int ret = fixed_zg.init(dpp, cct, sysobj_svc, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "error initializing zonegroup : " << cpp_strerror(-ret) << dendl;
return ret;
}
fixed_zg.master_zone = master->second.id;
- ret = cfgstore->create_zonegroup(dpp, y, false, fixed_zg, nullptr);
+ ret = fixed_zg.update(dpp, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "error initializing zonegroup : " << cpp_strerror(-ret) << dendl;
return ret;
int RGWSI_Zone::create_default_zg(const DoutPrefixProvider *dpp, optional_yield y)
{
ldout(cct, 10) << "Creating default zonegroup " << dendl;
- int ret = cfgstore->create_zonegroup(dpp, y, true, *zonegroup, nullptr);
+ int ret = zonegroup->create_default(dpp, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "failure in zonegroup create_default: ret "<< ret << " " << cpp_strerror(-ret)
<< dendl;
return ret;
}
- std::string_view zonegroup_id = zonegroup->get_id();
- std::string_view zonegroup_name = zonegroup->get_name();
- ret = rgw::read_zonegroup(dpp, y, cfgstore, zonegroup_id, zonegroup_name, *zonegroup);
+ ret = zonegroup->init(dpp, cct, sysobj_svc, y);
if (ret < 0) {
ldout(cct, 0) << "failure in zonegroup create_default: ret "<< ret << " " << cpp_strerror(-ret)
<< dendl;
int RGWSI_Zone::init_default_zone(const DoutPrefixProvider *dpp, optional_yield y)
{
ldpp_dout(dpp, 10) << " Using default name "<< default_zone_name << dendl;
- int ret = cfgstore->read_zone_by_name(dpp, y, default_zone_name, *zone_params, nullptr);
+ zone_params->set_name(default_zone_name);
+ int ret = zone_params->init(dpp, cct, sysobj_svc, y);
if (ret < 0 && ret != -ENOENT) {
ldpp_dout(dpp, 0) << "failed reading zone params info: " << " " << cpp_strerror(-ret) << dendl;
return ret;
ldpp_dout(dpp, 0) << "zonegroup " << zonegroup->get_name() << " missing master_zone, setting zone " <<
master->second.name << " id:" << master->second.id << " as master" << dendl;
zonegroup->master_zone = master->second.id;
- int ret = cfgstore->create_zonegroup(dpp, y, false, *zonegroup, nullptr);
+ int ret = zonegroup->update(dpp, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "error initializing zonegroup : " << cpp_strerror(-ret) << dendl;
return ret;
std::map<rgw_zone_id, RGWZone> zone_by_id;
std::unique_ptr<rgw_sync_policy_info> sync_policy;
- rgw::sal::ConfigStore *cfgstore{nullptr};
- const rgw::SiteConfig* site{nullptr};
void init(RGWSI_SysObj *_sysobj_svc,
librados::Rados* rados_,
RGWPeriod *pperiod,
RGWZoneGroup *pzonegroup,
bool *pfound,
- rgw::sal::ConfigStore* cfgstore,
optional_yield y);
public:
- RGWSI_Zone(CephContext *cct, rgw::sal::ConfigStore* cfgstore, const rgw::SiteConfig* _site);
+ RGWSI_Zone(CephContext *cct);
~RGWSI_Zone();
const RGWZoneParams& get_zone_params() const;
false,
false,
false,
- true, true, null_yield, cfgstore.get(),
+ true, true, null_yield,
false));
if (!store) {
std::cerr << "couldn't init storage provider" << std::endl;
#include "rgw_aio_throttle.h"
#include "rgw_sal.h"
#include "rgw_sal_store.h"
-#include "rgw_sal_config.h"
#include "driver/dbstore/common/dbstore.h"
#include "rgw_sal_d4n.h"
#include "rgw_sal_filter.h"
DriverManager::Config cfg = DriverManager::get_config(true, g_ceph_context);
cfg.store_name = "dbstore";
cfg.filter_name = "d4n";
- auto config_store_type = g_conf().get_val<std::string>("rgw_config_store");
- auto cfgstore = DriverManager::create_config_store(env->dpp, config_store_type);
auto filterDriver = DriverManager::get_raw_storage(env->dpp, g_ceph_context,
- cfg, io, site_config, cfgstore.get());
+ cfg, io, site_config);
rgw::sal::Driver* next = filterDriver;
driver = newD4NFilter(next, io);
// mock puller that throws an exception if it's called
struct ErrorPuller : public RGWPeriodHistory::Puller {
- int pull(const DoutPrefixProvider *dpp, const std::string& id, RGWPeriod& period, optional_yield, rgw::sal::ConfigStore* cfgstore) override {
+ int pull(const DoutPrefixProvider *dpp, const std::string& id, RGWPeriod& period, optional_yield) override {
throw std::runtime_error("unexpected call to pull");
}
};
public:
explicit RecordingPuller(int error) : error(error) {}
Ids ids;
- int pull(const DoutPrefixProvider *dpp, const std::string& id, RGWPeriod& period, optional_yield, rgw::sal::ConfigStore* cfgstore) override {
+ int pull(const DoutPrefixProvider *dpp, const std::string& id, RGWPeriod& period, optional_yield) override {
ids.push_back(id);
return error;
}
// mock puller that returns a fake period by parsing the period id
struct NumericPuller : public RGWPeriodHistory::Puller {
- int pull(const DoutPrefixProvider *dpp, const std::string& id, RGWPeriod& period, optional_yield, rgw::sal::ConfigStore* cfgstore) override {
+ int pull(const DoutPrefixProvider *dpp, const std::string& id, RGWPeriod& period, optional_yield) override {
// relies on numeric period ids to divine the realm_epoch
auto realm_epoch = boost::lexical_cast<epoch_t>(id);
auto predecessor = boost::lexical_cast<std::string>(realm_epoch-1);
// create a disjoint history at 1 and verify that periods are requested
// backwards from current_period
- auto c1 = history.attach(&dp, make_period("1", 1, ""), null_yield, nullptr);
+ auto c1 = history.attach(&dp, make_period("1", 1, ""), null_yield);
ASSERT_FALSE(c1);
ASSERT_EQ(-EFAULT, c1.get_error());
ASSERT_EQ(Ids{"4"}, puller.ids);
auto c4 = history.insert(make_period("4", 4, "3"));
ASSERT_TRUE(c4);
- c1 = history.attach(&dp, make_period("1", 1, ""), null_yield, nullptr);
+ c1 = history.attach(&dp, make_period("1", 1, ""), null_yield);
ASSERT_FALSE(c1);
ASSERT_EQ(-EFAULT, c1.get_error());
ASSERT_EQ(Ids({"4", "3"}), puller.ids);
auto c3 = history.insert(make_period("3", 3, "2"));
ASSERT_TRUE(c3);
- c1 = history.attach(&dp, make_period("1", 1, ""), null_yield, nullptr);
+ c1 = history.attach(&dp, make_period("1", 1, ""), null_yield);
ASSERT_FALSE(c1);
ASSERT_EQ(-EFAULT, c1.get_error());
ASSERT_EQ(Ids({"4", "3", "2"}), puller.ids);
auto c2 = history.insert(make_period("2", 2, "1"));
ASSERT_TRUE(c2);
- c1 = history.attach(&dp, make_period("1", 1, ""), null_yield, nullptr);
+ c1 = history.attach(&dp, make_period("1", 1, ""), null_yield);
ASSERT_TRUE(c1);
ASSERT_EQ(Ids({"4", "3", "2"}), puller.ids);
}
// create a disjoint history at 9 and verify that periods are requested
// backwards down to current_period
- auto c9 = history.attach(&dp, make_period("9", 9, "8"), null_yield, nullptr);
+ auto c9 = history.attach(&dp, make_period("9", 9, "8"), null_yield);
ASSERT_FALSE(c9);
ASSERT_EQ(-EFAULT, c9.get_error());
ASSERT_EQ(Ids{"8"}, puller.ids);
- auto c8 = history.attach(&dp, make_period("8", 8, "7"), null_yield, nullptr);
+ auto c8 = history.attach(&dp, make_period("8", 8, "7"), null_yield);
ASSERT_FALSE(c8);
ASSERT_EQ(-EFAULT, c8.get_error());
ASSERT_EQ(Ids({"8", "7"}), puller.ids);
- auto c7 = history.attach(&dp, make_period("7", 7, "6"), null_yield, nullptr);
+ auto c7 = history.attach(&dp, make_period("7", 7, "6"), null_yield);
ASSERT_FALSE(c7);
ASSERT_EQ(-EFAULT, c7.get_error());
ASSERT_EQ(Ids({"8", "7", "6"}), puller.ids);
- auto c6 = history.attach(&dp, make_period("6", 6, "5"), null_yield, nullptr);
+ auto c6 = history.attach(&dp, make_period("6", 6, "5"), null_yield);
ASSERT_TRUE(c6);
ASSERT_EQ(Ids({"8", "7", "6"}), puller.ids);
}
RGWPeriodHistory history(g_ceph_context, &puller, current_period);
const DoutPrefix dp(g_ceph_context, 1, "test rgw period history: ");
- auto c1 = history.attach(&dp, make_period("1", 1, ""), null_yield, nullptr);
+ auto c1 = history.attach(&dp, make_period("1", 1, ""), null_yield);
ASSERT_TRUE(c1);
// verify that we pulled and merged all periods from 1-5
RGWPeriodHistory history(g_ceph_context, &puller, current_period);
const DoutPrefix dp(g_ceph_context, 1, "test rgw period history: ");
- auto c9 = history.attach(&dp, make_period("9", 9, "8"), null_yield, nullptr);
+ auto c9 = history.attach(&dp, make_period("9", 9, "8"), null_yield);
ASSERT_TRUE(c9);
// verify that we pulled and merged all periods from 5-9