int RGWRados::store_delete_bucket_info_flag(RGWBucketInfo& bucket_info, std::map<std::string, bufferlist>& attrs, optional_yield y, const DoutPrefixProvider *dpp) {
const rgw_bucket& bucket = bucket_info.bucket;
static constexpr auto max_retries = 10;
+ rgw::bucket_log_layout_generation index_log;
+ int shards_num;
int retries = 0;
int r = 0;
do {
bucket_info.flags |= BUCKET_DELETED;
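+ // remember the current index log generation (its shards get datalog entries
+ // below), then append a new log generation of type Deleted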
+ index_log = bucket_info.layout.logs.back();
+ shards_num = rgw::num_shards(index_log.layout.in_index);
+ bucket_info.layout.logs.push_back({index_log.gen+1, {rgw::BucketLogType::Deleted}});
    r = ctl.bucket->store_bucket_instance_info(bucket, bucket_info, y, dpp, RGWBucketCtl::BucketInstance::PutParams()
                                                 .set_exclusive(false)
                                                 .set_mtime(real_time())
                                                 .set_attrs(&attrs));
    if (r == -ECANCELED) {
      // raced with another write to the bucket instance; refresh and retry
      int r2 = try_refresh_bucket_info(bucket_info, nullptr, dpp, y, &attrs);
      if (r2 < 0) {
        r = r2;
        break;
      }
    }
+
} while (r == -ECANCELED && ++retries < max_retries);
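+ // the deleted flag is stored; write a datalog entry for every shard of the
+ // final index log generation so peer zones pick up the deletion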
+ if (r == 0) {
+ for (int i = 0; i < shards_num; ++i) {
+ ldpp_dout(dpp, 10) << "adding to data_log shard_id: " << i << " of gen:" << index_log.gen << dendl;
+ int ret = svc.datalog_rados->add_entry(dpp, bucket_info, index_log, i,
+ null_yield);
+ if (ret < 0) {
+ ldpp_dout(dpp, 1) << "WARNING: failed writing data log for bucket="
+ << bucket_info.bucket << ", shard_id=" << i << " of generation="
+ << index_log.gen << dendl;
+ } // datalog error is not fatal
+ }
+ }
+
return r;
}
}
};
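+// bucket-index sync status reported by one peer zone; a generation of
+// UINT64_MAX marks a peer that no longer has the bucket (see -ENOENT below)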
+struct StatusShards {
+ uint64_t generation = 0;
+ std::vector<rgw_bucket_shard_sync_info> shards;
+};
+
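+/// read the bucket-index sync status (StatusShards) of a bucket from a single
+/// peer zone via its /admin/log endpoint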
+class RGWReadRemoteStatusShardsCR : public RGWCoroutine {
+ const DoutPrefixProvider *dpp;
+ rgw::sal::RadosStore* const store;
+ CephContext *cct;
+ RGWHTTPManager *http;
+ std::string bucket_instance;
+ const rgw_zone_id zid; //< peer zone to query
+ const std::string& zone_id; //< my zone id, sent as the source-zone param
+ StatusShards *p;
+
+public:
+ RGWReadRemoteStatusShardsCR(const DoutPrefixProvider *dpp,
+ rgw::sal::RadosStore* const store,
+ CephContext *cct,
+ RGWHTTPManager *http,
+ std::string bucket_instance,
+ const rgw_zone_id zid,
+ const std::string& zone_id,
+ StatusShards *p)
+ : RGWCoroutine(cct), dpp(dpp), store(store),
+ cct(cct), http(http), bucket_instance(bucket_instance),
+ zid(zid), zone_id(zone_id), p(p) {}
+
+ int operate(const DoutPrefixProvider *dpp) override {
+ reenter(this) {
+ yield {
+ auto& zone_conn_map = store->svc()->zone->get_zone_conn_map();
+ auto ziter = zone_conn_map.find(zid);
+ if (ziter == zone_conn_map.end()) {
+ ldpp_dout(dpp, 0) << "WARNING: no connection to zone " << zid << ", can't trim bucket: " << bucket_instance << dendl;
+ return set_cr_error(-ECANCELED);
+ }
+
+ // query data sync status from each sync peer
+ rgw_http_param_pair params[] = {
+ { "type", "bucket-index" },
+ { "status", nullptr },
+ { "options", "merge" },
+ { "bucket", bucket_instance.c_str() }, /* equal to source-bucket when `options==merge` and source-bucket
+ param is not provided */
+ { "source-zone", zone_id.c_str() },
+ { "version", "2" },
+ { nullptr, nullptr }
+ };
+
+ call(new RGWReadRESTResourceCR<StatusShards>(cct, ziter->second, http, "/admin/log/", params, p));
+ }
+
+ if (retcode < 0 && retcode != -ENOENT) {
+ return set_cr_error(retcode);
+ } else if (retcode == -ENOENT) {
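+ // the peer no longer knows this bucket; report UINT64_MAX so that
+ // BucketTrimInstanceCR can purge all log generations once every peer agrees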
+ p->generation = UINT64_MAX;
+ ldpp_dout(dpp, 10) << "INFO: could not read shard status for bucket:" << bucket_instance
+ << " from zone: " << zid.id << dendl;
+ }
+
+ return set_cr_done();
+ }
+ return 0;
+ }
+};
+
/// trim the bilog of all of the given bucket instance's shards
class BucketTrimInstanceCR : public RGWCoroutine {
const RGWBucketInfo *pbucket_info; //< pointer to bucket instance info to locate bucket indices
int child_ret = 0;
const DoutPrefixProvider *dpp;
-public:
- struct StatusShards {
- uint64_t generation = 0;
- std::vector<rgw_bucket_shard_sync_info> shards;
- };
-private:
std::vector<StatusShards> peer_status; //< sync status for each peer
std::vector<std::string> min_markers; //< min marker per shard
min_generation = m->generation;
}
+ if (min_generation == UINT64_MAX) {
+ // if all peers have deleted this bucket, purge the rest of our log generations
+ totrim.gen = UINT64_MAX;
+ return 0;
+ }
+
+ ldpp_dout(dpp, 10) << "min_generation is " << min_generation << dendl;
auto& logs = pbucket_info->layout.logs;
auto log = std::find_if(logs.begin(), logs.end(),
rgw::matches_gen(min_generation));
int take_min_status(
CephContext *cct,
const uint64_t min_generation,
- std::vector<BucketTrimInstanceCR::StatusShards>::const_iterator first,
- std::vector<BucketTrimInstanceCR::StatusShards>::const_iterator last,
+ std::vector<StatusShards>::const_iterator first,
+ std::vector<StatusShards>::const_iterator last,
std::vector<std::string> *status, const DoutPrefixProvider *dpp) {
for (auto peer = first; peer != last; ++peer) {
// Peers on later generations don't get a say in the matter
}
template<>
-inline int parse_decode_json<BucketTrimInstanceCR::StatusShards>(
- BucketTrimInstanceCR::StatusShards& s, bufferlist& bl)
+inline int parse_decode_json<StatusShards>(
+ StatusShards& s, bufferlist& bl)
{
JSONParser p;
if (!p.parse(bl.c_str(), bl.length())) {
peer_status.resize(zids.size());
- auto& zone_conn_map = store->svc()->zone->get_zone_conn_map();
-
auto p = peer_status.begin();
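+ // query the bucket-index sync status from each peer zone concurrently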
for (auto& zid : zids) {
- // query data sync status from each sync peer
- rgw_http_param_pair params[] = {
- { "type", "bucket-index" },
- { "status", nullptr },
- { "options", "merge" },
- { "bucket", bucket_instance.c_str() }, /* equal to source-bucket when `options==merge` and source-bucket
- param is not provided */
- { "source-zone", zone_id.c_str() },
- { "version", "2" },
- { nullptr, nullptr }
- };
-
- auto ziter = zone_conn_map.find(zid);
- if (ziter == zone_conn_map.end()) {
- ldpp_dout(dpp, 0) << "WARNING: no connection to zone " << zid << ", can't trim bucket: " << bucket << dendl;
- return set_cr_error(-ECANCELED);
- }
-
- using StatusCR = RGWReadRESTResourceCR<StatusShards>;
- spawn(new StatusCR(cct, ziter->second, http, "/admin/log/", params, &*p),
- false);
+ spawn(new RGWReadRemoteStatusShardsCR(dpp, store, cct, http, bucket_instance,
+                                       zid, zone_id, &*p), false);
++p;
}
}
min_markers.assign(std::max(1u, rgw::num_shards(totrim.layout.in_index)),
RGWSyncLogTrimCR::max_marker);
-
retcode = take_min_status(cct, totrim.gen, peer_status.cbegin(),
peer_status.cend(), &min_markers, dpp);
if (retcode < 0) {