rgw::sal::RadosStore* const store;
CephContext *cct;
RGWHTTPManager *http;
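+ // layout/log history of the bucket being trimmed; lets operate() check for a Deleted log layout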
+ const RGWBucketInfo* bucket_info;
std::string bucket_instance;
const rgw_zone_id zid;
const std::string& zone_id;
rgw::sal::RadosStore* const store,
CephContext *cct,
RGWHTTPManager *http,
+ const RGWBucketInfo* bucket_info,
std::string bucket_instance,
const rgw_zone_id zid,
const std::string& zone_id,
StatusShards *p)
: RGWCoroutine(cct), dpp(dpp), store(store),
- cct(cct), http(http), bucket_instance(bucket_instance),
+ cct(cct), http(http), bucket_info(bucket_info), bucket_instance(bucket_instance),
zid(zid), zone_id(zone_id), p(p) {}
int operate(const DoutPrefixProvider *dpp) override {
if (retcode < 0 && retcode != -ENOENT) {
return set_cr_error(retcode);
- } else if (retcode == -ENOENT) {
+ } else if (retcode == -ENOENT && bucket_info->layout.logs.front().layout.type == rgw::BucketLogType::Deleted) {
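+ // remote status is missing for a deleted bucket; report UINT64_MAX so this peer (presumably) does not constrain the trim generation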
p->generation = UINT64_MAX;
ldpp_dout(dpp, 10) << "INFO: could not read shard status for bucket:" << bucket_instance
<< " from zone: " << zid.id << dendl;
if (clean_info)
return 0;
- if (pbucket_info->layout.logs.front().gen < totrim.gen) {
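+ // a log history whose newest layout is Deleted may also be cleaned up when the oldest generation equals totrim.gen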
+ bool deleted_type = (pbucket_info->layout.logs.back().layout.type == rgw::BucketLogType::Deleted);
+ if (pbucket_info->layout.logs.front().gen < totrim.gen ||
+ (pbucket_info->layout.logs.front().gen <= totrim.gen && deleted_type)) {
clean_info = {*pbucket_info, {}};
auto log = clean_info->first.layout.logs.cbegin();
clean_info->second = *log;
- if (clean_info->first.layout.logs.size() == 1) {
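+ // removing the only remaining log generation is still an error unless the bucket was deleted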
+ if (clean_info->first.layout.logs.size() == 1 && !deleted_type) {
ldpp_dout(dpp, -1)
<< "Critical error! Attempt to remove only log generation! "
<< "log.gen=" << log->gen << ", totrim.gen=" << totrim.gen
if (peer->shards.size() != status->size()) {
ldpp_dout(dpp, 5) << __PRETTY_FUNCTION__ << ":"
<< "ERROR: shards don't match. peer shard:" << peer->shards.size() << " my shards:" << status->size()
- << "for generation:" << peer->generation << dendl;
+ << " for generation:" << peer->generation << dendl;
// all peers must agree on the number of shards
return -EINVAL;
}
auto p = peer_status.begin();
for (auto& zid : zids) {
- spawn(new RGWReadRemoteStatusShardsCR(dpp, store, cct, http, bucket_instance, zid, zone_id, &*p), false);
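+ // pass the bucket info through so each remote status read can recognize a deleted bucket (see the -ENOENT handling above)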
+ spawn(new RGWReadRemoteStatusShardsCR(dpp, store, cct, http, pbucket_info, bucket_instance, zid, zone_id, &*p), false);
++p;
}
}
return json.loads(bilog)
def bilog_autotrim(zone, args = None):
- cmd = ['bilog', 'autotrim'] + zone.zone_args()
- zone.cluster.admin(cmd, read_only=True)
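+ # forward any extra radosgw-admin flags from the caller and run with verbose rgw debug output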
+ cmd = ['bilog', 'autotrim'] + (args or []) + zone.zone_args()
+ zone.cluster.admin(cmd, debug_rgw=20)
def bucket_layout(zone, bucket, args = None):
(bl_output,_) = zone.cluster.admin(['bucket', 'layout', '--bucket', bucket] + (args or []))
key = bucket2.get_key('testobj-sse-kms')
eq(data, key.get_contents_as_string(encoding='ascii'))
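+# tag the test so the index-log trim tests can be selected or excluded by attribute (e.g. -a bucket_trim)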
+@attr('bucket_trim')
def test_bucket_index_log_trim():
zonegroup = realm.master_zonegroup()
zonegroup_conns = ZonegroupConns(zonegroup)
primary.conn.delete_bucket(test_bucket.name)
zonegroup_data_checkpoint(zonegroup_conns)
- bilog_autotrim(primary.zone)
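+ # bump rgw_sync_log_trim_max_buckets so one autotrim pass can cover all of the test's buckets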
+ bilog_autotrim(primary.zone, ['--rgw-sync-log-trim-max-buckets', '50'])
time.sleep(config.checkpoint_delay)
- bilog_autotrim(primary.zone)
+ bilog_autotrim(primary.zone, ['--rgw-sync-log-trim-max-buckets', '50'])
for zonegroup in realm.current_period.zonegroups:
zonegroup_conns = ZonegroupConns(zonegroup)
for zone in zonegroup_conns.zones:
log.info('trimming on zone=%s', zone.name)
- bilog_autotrim(zone.zone)
+ bilog_autotrim(zone.zone, ['--rgw-sync-log-trim-max-buckets', '50'])
time.sleep(config.checkpoint_delay)
# run bilog trim twice on primary zone where the bucket was resharded
- bilog_autotrim(primary.zone)
+ bilog_autotrim(primary.zone, ['--rgw-sync-log-trim-max-buckets', '50'])
for zonegroup in realm.current_period.zonegroups:
for zone in zonegroup_conns.zones:
primary.conn.delete_bucket(test_bucket.name)
zonegroup_data_checkpoint(zonegroup_conns)
- bilog_autotrim(secondary.zone)
+ bilog_autotrim(secondary.zone, ['--rgw-sync-log-trim-max-buckets', '50'])
time.sleep(config.checkpoint_delay)
- bilog_autotrim(secondary.zone)
+ bilog_autotrim(secondary.zone, ['--rgw-sync-log-trim-max-buckets', '50'])
for zonegroup in realm.current_period.zonegroups:
zonegroup_conns = ZonegroupConns(zonegroup)
for zone in zonegroup_conns.zones:
log.info('trimming on zone=%s', zone.name)
- bilog_autotrim(zone.zone)
+ bilog_autotrim(zone.zone, ['--rgw-sync-log-trim-max-buckets', '50'])
time.sleep(config.checkpoint_delay)
# run bilog trim twice on the secondary zone
- bilog_autotrim(secondary.zone)
+ bilog_autotrim(secondary.zone, ['--rgw-sync-log-trim-max-buckets', '50'])
time.sleep(config.checkpoint_delay)
for zonegroup in realm.current_period.zonegroups: