RGWSI_Zone* svc_zone{nullptr};
RGWSI_Bucket* svc_bucket{nullptr};
RGWSI_BucketIndex* svc_bi{nullptr};
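+ // data changes log service, used below to write per-shard datalog entries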
+ RGWDataChangesLog* svc_datalog{nullptr};
int put_prepare(const DoutPrefixProvider* dpp, optional_yield y,
const std::string& entry, RGWBucketCompleteInfo& bci,
RGWBucketInstanceMetadataHandler(rgw::sal::Driver* driver,
RGWSI_Zone* svc_zone,
RGWSI_Bucket* svc_bucket,
- RGWSI_BucketIndex* svc_bi)
+ RGWSI_BucketIndex* svc_bi,
+ RGWDataChangesLog* svc_datalog)
: driver(driver), svc_zone(svc_zone),
- svc_bucket(svc_bucket), svc_bi(svc_bi) {}
+ svc_bucket(svc_bucket), svc_bi(svc_bi), svc_datalog(svc_datalog) {}
string get_type() override { return "bucket.instance"; }
ldpp_dout(dpp, 10) << "store log layout type: " << bci.info.layout.logs.back().layout.type << dendl;
for (int i = 0; i < shards_num; ++i) {
ldpp_dout(dpp, 10) << "adding to data_log shard_id: " << i << " of gen:" << index_log.gen << dendl;
- ret = bihandler->svc.datalog_rados->add_entry(dpp, bci.info, index_log, i,
+ int ret = svc_datalog->add_entry(dpp, bci.info, index_log, i,
null_yield);
if (ret < 0) {
ldpp_dout(dpp, 1) << "WARNING: failed writing data log for bucket="
RGWSI_Bucket *bucket_svc,
RGWSI_Bucket_Sync *bucket_sync_svc,
RGWSI_BucketIndex *bi_svc,
- RGWSI_User* user_svc)
+ RGWSI_User* user_svc,
+ RGWDataChangesLog *datalog_svc)
: cct(zone_svc->ctx())
{
svc.zone = zone_svc;
svc.bucket_sync = bucket_sync_svc;
svc.bi = bi_svc;
svc.user = user_svc;
+ svc.datalog_rados = datalog_svc;
}
void RGWBucketCtl::init(RGWUserCtl *user_ctl,
auto create_bucket_instance_metadata_handler(rgw::sal::Driver* driver,
RGWSI_Zone* svc_zone,
RGWSI_Bucket* svc_bucket,
- RGWSI_BucketIndex* svc_bi)
+ RGWSI_BucketIndex* svc_bi,
+ RGWDataChangesLog* svc_datalog)
-> std::unique_ptr<RGWMetadataHandler>
{
return std::make_unique<RGWBucketInstanceMetadataHandler>(driver, svc_zone,
- svc_bucket, svc_bi);
+ svc_bucket, svc_bi,
+ svc_datalog);
}
auto create_archive_bucket_metadata_handler(librados::Rados& rados,
auto create_archive_bucket_instance_metadata_handler(rgw::sal::Driver* driver,
RGWSI_Zone* svc_zone,
RGWSI_Bucket* svc_bucket,
- RGWSI_BucketIndex* svc_bi)
+ RGWSI_BucketIndex* svc_bi,
+ RGWDataChangesLog* svc_datalog)
-> std::unique_ptr<RGWMetadataHandler>
{
return std::make_unique<RGWArchiveBucketInstanceMetadataHandler>(driver, svc_zone,
- svc_bucket, svc_bi);
+ svc_bucket, svc_bi,
+ svc_datalog);
}
void RGWBucketEntryPoint::generate_test_instances(list<RGWBucketEntryPoint*>& o)
auto create_bucket_instance_metadata_handler(rgw::sal::Driver* driver,
RGWSI_Zone* svc_zone,
RGWSI_Bucket* svc_bucket,
- RGWSI_BucketIndex* svc_bi)
+ RGWSI_BucketIndex* svc_bi,
+ RGWDataChangesLog* svc_datalog)
-> std::unique_ptr<RGWMetadataHandler>;
// archive bucket instance metadata handler factory
auto create_archive_bucket_instance_metadata_handler(rgw::sal::Driver* driver,
RGWSI_Zone* svc_zone,
RGWSI_Bucket* svc_bucket,
- RGWSI_BucketIndex* svc_bi)
+ RGWSI_BucketIndex* svc_bi,
+ RGWDataChangesLog* svc_datalog)
-> std::unique_ptr<RGWMetadataHandler>;
RGWSI_Bucket_Sync *bucket_sync{nullptr};
RGWSI_BucketIndex *bi{nullptr};
RGWSI_User* user = nullptr;
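+ // data changes log, injected via the constructor's datalog_svc argument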
+ RGWDataChangesLog *datalog_rados{nullptr};
} svc;
struct Ctl {
RGWSI_Bucket *bucket_svc,
RGWSI_Bucket_Sync *bucket_sync_svc,
RGWSI_BucketIndex *bi_svc,
- RGWSI_User* user_svc);
+ RGWSI_User* user_svc,
+ RGWDataChangesLog *datalog_svc);
void init(RGWUserCtl *user_ctl,
RGWDataChangesLog *datalog,
auto alloc_bucket_instance_meta_handler(rgw::sal::Driver* driver,
RGWSI_Zone* svc_zone,
RGWSI_Bucket* svc_bucket,
- RGWSI_BucketIndex* svc_bi)
+ RGWSI_BucketIndex* svc_bi,
+ RGWDataChangesLog* svc_datalog)
-> std::unique_ptr<RGWMetadataHandler> override {
return create_archive_bucket_instance_metadata_handler(
- driver, svc_zone, svc_bucket, svc_bi);
+ driver, svc_zone, svc_bucket, svc_bi, svc_datalog);
}
};
bucket.reset(new RGWBucketCtl(svc.zone,
svc.bucket,
svc.bucket_sync,
- svc.bi, svc.user));
+ svc.bi, svc.user,
+ svc.datalog_rados));
auto sync_module = svc.sync_modules->get_sync_module();
if (sync_module) {
meta.bucket = sync_module->alloc_bucket_meta_handler(rados, svc.bucket, bucket.get());
meta.bucket_instance = sync_module->alloc_bucket_instance_meta_handler(
- driver, svc.zone, svc.bucket, svc.bi);
+ driver, svc.zone, svc.bucket, svc.bi, svc.datalog_rados);
} else {
meta.bucket = create_bucket_metadata_handler(rados, svc.bucket, bucket.get());
meta.bucket_instance = create_bucket_instance_metadata_handler(
- driver, svc.zone, svc.bucket, svc.bi);
+ driver, svc.zone, svc.bucket, svc.bi, svc.datalog_rados);
}
meta.otp = rgwrados::otp::create_metadata_handler(
auto RGWSyncModuleInstance::alloc_bucket_instance_meta_handler(rgw::sal::Driver* driver,
RGWSI_Zone* svc_zone,
RGWSI_Bucket* svc_bucket,
- RGWSI_BucketIndex* svc_bi)
+ RGWSI_BucketIndex* svc_bi,
+ RGWDataChangesLog* svc_datalog)
-> std::unique_ptr<RGWMetadataHandler>
{
return create_bucket_instance_metadata_handler(driver, svc_zone,
- svc_bucket, svc_bi);
+ svc_bucket, svc_bi, svc_datalog);
}
RGWStatRemoteObjCBCR::RGWStatRemoteObjCBCR(RGWDataSyncCtx *_sc,
class RGWSI_BucketIndex;
class RGWSI_Zone;
class RGWBucketCtl;
+class RGWDataChangesLog;
class RGWSyncModuleInstance {
public:
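+ // factory hook for the bucket instance metadata handler; the default
+ // implementation forwards to create_bucket_instance_metadata_handler()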
virtual auto alloc_bucket_instance_meta_handler(rgw::sal::Driver* driver,
RGWSI_Zone* svc_zone,
RGWSI_Bucket* svc_bucket,
- RGWSI_BucketIndex* svc_bi)
+ RGWSI_BucketIndex* svc_bi,
+ RGWDataChangesLog* svc_datalog)
-> std::unique_ptr<RGWMetadataHandler>;
// indication whether the sync module starts with full sync (default behavior)
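Any external sync module that overrides this hook must pick up the extra parameter. A minimal sketch of such an override (MySyncModuleInstance is a hypothetical subclass, not part of this change):

    auto MySyncModuleInstance::alloc_bucket_instance_meta_handler(
        rgw::sal::Driver* driver, RGWSI_Zone* svc_zone,
        RGWSI_Bucket* svc_bucket, RGWSI_BucketIndex* svc_bi,
        RGWDataChangesLog* svc_datalog)
        -> std::unique_ptr<RGWMetadataHandler>
    {
      // delegate to the default factory, now threading the datalog through
      return create_bucket_instance_metadata_handler(driver, svc_zone,
                                                     svc_bucket, svc_bi,
                                                     svc_datalog);
    }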
if (clean_info)
return 0;
-
if (pbucket_info->layout.logs.front().gen < totrim.gen) {
clean_info = {*pbucket_info, {}};
auto log = clean_info->first.layout.logs.cbegin();
# checking bucket layout before resharding
json_obj_1 = bucket_layout(secondary.zone, test_bucket.name)
- assert(len(json_obj_1['layout']['logs']) == 1)
first_gen = json_obj_1['layout']['current_index']['gen']
before_reshard_bilog = bilog_list(secondary.zone, test_bucket.name, ['--gen', str(first_gen)])
- assert(len(before_reshard_bilog) == 4)
# Resharding the bucket
secondary.zone.cluster.admin(['bucket', 'reshard',
# check bucket layout after 1st resharding
json_obj_2 = bucket_layout(secondary.zone, test_bucket.name)
- assert(len(json_obj_2['layout']['logs']) == 2)
# Delete the objects
for obj in ('a', 'b', 'c', 'd'):
# checking bucket layout before resharding
json_obj_1 = bucket_layout(primary.zone, test_bucket.name)
- assert(len(json_obj_1['layout']['logs']) == 1)
first_gen = json_obj_1['layout']['current_index']['gen']
before_reshard_bilog = bilog_list(primary.zone, test_bucket.name, ['--gen', str(first_gen)])
- assert(len(before_reshard_bilog) == 4)
# Resharding the bucket
primary.zone.cluster.admin(['bucket', 'reshard',
# check bucket layout after 1st resharding
json_obj_2 = bucket_layout(primary.zone, test_bucket.name)
- assert(len(json_obj_2['layout']['logs']) == 2)
# Delete the objects
for obj in ('a', 'b', 'c', 'd'):