for (const auto &binfo: lst) {
std::unique_ptr<rgw::sal::Bucket> bucket;
driver->get_bucket(nullptr, binfo, &bucket);
- int ret = bucket->purge_instance(dpp);
+ int ret = bucket->purge_instance(dpp, null_yield);
if (ret == 0){
auto md_key = "bucket.instance:" + binfo.bucket.get_key();
ret = driver->meta_remove(dpp, md_key, null_yield);
int RGWRadosBILogTrimCR::send_request(const DoutPrefixProvider *dpp)
{
- int r = bs.init(dpp, bucket_info, generation, shard_id);
+ int r = bs.init(dpp, bucket_info, generation, shard_id, null_yield);
if (r < 0) {
ldpp_dout(dpp, -1) << "ERROR: bucket shard init failed ret=" << r << dendl;
return r;
NULL, /* void (*progress_cb)(off_t, void *), */
NULL, /* void *progress_data*); */
dpp,
- filter.get(),
+ filter.get(), null_yield,
stat_follow_olh,
stat_dest_obj,
source_trace_entry,
pheaders,
nullptr,
nullptr, /* string *ptag, */
- petag); /* string *petag, */
+ petag, null_yield); /* string *petag, */
if (r < 0) {
ldpp_dout(dpp, 0) << "store->stat_remote_obj() returned r=" << r << dendl;
RGWRados::BucketShard bs(store);
RGWBucketInfo bucket_info;
- int r = bs.init(c->obj.bucket, c->obj, &bucket_info, &dpp);
+ int r = bs.init(c->obj.bucket, c->obj, &bucket_info, &dpp, null_yield);
if (r < 0) {
ldpp_dout(&dpp, 0) << "ERROR: " << __func__ << "(): failed to initialize BucketShard, obj=" << c->obj << " r=" << r << dendl;
/* not much to do */
ldout_bitx(bitx, &dpp, 10) <<
"EXITING " << __func__ << ": ret=" << dendl_bitx;
return ret;
- });
+ }, null_yield);
if (r < 0) {
ldpp_dout(&dpp, 0) << "ERROR: " << __func__ << "(): bucket index completion failed, obj=" << c->obj << " r=" << r << dendl;
/* ignoring error, can't do anything about it */
return 0;
}
-int RGWRados::Bucket::update_bucket_id(const string& new_bucket_id, const DoutPrefixProvider *dpp)
+int RGWRados::Bucket::update_bucket_id(const string& new_bucket_id, const DoutPrefixProvider *dpp, optional_yield y)
{
rgw_bucket bucket = bucket_info.bucket;
bucket.update_bucket_id(new_bucket_id);
bucket_info.objv_tracker.clear();
- int ret = store->get_bucket_instance_info(bucket, bucket_info, nullptr, nullptr, null_yield, dpp);
+ int ret = store->get_bucket_instance_info(bucket, bucket_info, nullptr, nullptr, y, dpp);
if (ret < 0) {
return ret;
}
if (ret == -EEXIST) {
/* we need to reread the info and return it, caller will have a use for it */
RGWBucketInfo orig_info;
- r = get_bucket_info(&svc, bucket.tenant, bucket.name, orig_info, NULL, null_yield, NULL);
+ r = get_bucket_info(&svc, bucket.tenant, bucket.name, orig_info, NULL, y, NULL);
if (r < 0) {
if (r == -ENOENT) {
continue;
if (r < 0) {
ldpp_dout(dpp, 0) << "WARNING: could not remove bucket index (r=" << r << ")" << dendl;
}
- r = ctl.bucket->remove_bucket_instance_info(info.bucket, info, null_yield, dpp);
+ r = ctl.bucket->remove_bucket_instance_info(info.bucket, info, y, dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "WARNING: " << __func__ << "(): failed to remove bucket instance info: bucket instance=" << info.bucket.get_key() << ": r=" << r << dendl;
/* continue anyway */
int RGWRados::BucketShard::init(const rgw_bucket& _bucket,
const rgw_obj& obj,
RGWBucketInfo* bucket_info_out,
- const DoutPrefixProvider *dpp)
+ const DoutPrefixProvider *dpp, optional_yield y)
{
bucket = _bucket;
RGWBucketInfo* bucket_info_p =
bucket_info_out ? bucket_info_out : &bucket_info;
- int ret = store->get_bucket_instance_info(bucket, *bucket_info_p, NULL, NULL, null_yield, dpp);
+ int ret = store->get_bucket_instance_info(bucket, *bucket_info_p, NULL, NULL, y, dpp);
if (ret < 0) {
return ret;
}
}
int RGWRados::BucketShard::init(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info,
- const rgw_obj& obj)
+ const rgw_obj& obj, optional_yield y)
{
bucket = bucket_info.bucket;
int RGWRados::BucketShard::init(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
const rgw::bucket_index_layout_generation& index,
- int sid)
+ int sid, optional_yield y)
{
bucket = bucket_info.bucket;
shard_id = sid;
RGWBucketInfo& bucket_info,
const std::string& obj_prefix,
const std::string& obj_delim,
- std::function<int(const rgw_bucket_dir_entry&)> handler)
+ std::function<int(const rgw_bucket_dir_entry&)> handler, optional_yield y)
{
RGWRados::Bucket target(this, bucket_info);
RGWRados::Bucket::List list_op(&target);
std::vector<rgw_bucket_dir_entry> entries(MAX_LIST_OBJS);
int ret = list_op.list_objects(dpp, MAX_LIST_OBJS, &entries, nullptr,
- &is_truncated, null_yield);
+ &is_truncated, y);
if (ret < 0) {
return ret;
} else if (!entries.empty()) {
RGWBucketInfo dest_bucket_info;
- r = get_bucket_info(&svc, bucket_info.bucket.tenant, bucket_info.swift_ver_location, dest_bucket_info, NULL, null_yield, NULL);
+ r = get_bucket_info(&svc, bucket_info.bucket.tenant, bucket_info.swift_ver_location, dest_bucket_info, NULL, y, NULL);
if (r < 0) {
ldpp_dout(dpp, 10) << "failed to read dest bucket info: r=" << r << dendl;
if (r == -ENOENT) {
NULL, /* void (*progress_cb)(off_t, void *) */
NULL, /* void *progress_data */
dpp,
- null_yield);
+ y);
if (r == -ECANCELED || r == -ENOENT) {
/* Has already been overwritten, meaning another rgw process already
* copied it out */
RGWBucketInfo& bucket_info,
rgw_obj& obj,
bool& restored,
- const DoutPrefixProvider *dpp)
+ const DoutPrefixProvider *dpp, optional_yield y)
{
if (! swift_versioning_enabled(bucket_info)) {
return 0;
int ret = get_bucket_info(&svc, bucket_info.bucket.tenant,
bucket_info.swift_ver_location,
- archive_binfo, nullptr, null_yield, nullptr);
+ archive_binfo, nullptr, y, nullptr);
if (ret < 0) {
return ret;
}
nullptr, /* void (*progress_cb)(off_t, void *) */
nullptr, /* void *progress_data */
dpp,
- null_yield);
+ y);
if (ret == -ECANCELED || ret == -ENOENT) {
/* Has already been overwritten, meaning another rgw process already
* copied it out */
/* Need to remove the archived copy. */
ret = delete_obj(dpp, obj_ctx, archive_binfo, archive_obj,
- archive_binfo.versioning_status());
+ archive_binfo.versioning_status(), y);
return ret;
};
% obj_name);
return on_last_entry_in_listing(dpp, archive_binfo, prefix, std::string(),
- handler);
+ handler, y);
}
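The hunks above and below all apply the same mechanical transformation: a function that used to hard-code null_yield gains an optional_yield parameter and forwards it, so the choice between blocking and suspending on a coroutine is pushed out to the outermost caller. A minimal sketch of the pattern, with hypothetical names (read_thing, lower_layer_read) that are not part of this patch:

// illustrative sketch only -- read_thing / lower_layer_read are invented names
#include "common/dout.h"                  // DoutPrefixProvider
#include "common/async/yield_context.h"   // optional_yield, null_yield

// before: the yield context stops here, so the lower layer always blocks the thread
int read_thing(const DoutPrefixProvider* dpp)
{
  return lower_layer_read(dpp, null_yield);
}

// after: the caller's optional_yield is threaded through unchanged; if it carries a
// boost::asio yield_context the lower layer can suspend instead of blocking, and a
// caller that truly has no coroutine simply passes null_yield itself
int read_thing(const DoutPrefixProvider* dpp, optional_yield y)
{
  return lower_layer_read(dpp, y);
}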
int RGWRados::Object::Write::_do_write_meta(const DoutPrefixProvider *dpp,
map<string, string> *pheaders,
string *version_id,
string *ptag,
- string *petag)
+ string *petag, optional_yield y)
{
/* source is in a different zonegroup, copy from there */
}
ret = conn->complete_request(in_stream_req, nullptr, &set_mtime, psize,
- nullptr, pheaders, null_yield);
+ nullptr, pheaders, y);
if (ret < 0) {
return ret;
}
void (*progress_cb)(off_t, void *),
void *progress_data,
const DoutPrefixProvider *dpp,
- RGWFetchObjFilter *filter,
+ RGWFetchObjFilter *filter, optional_yield y,
bool stat_follow_olh,
const rgw_obj& stat_dest_obj,
const rgw_zone_set_entry& source_trace_entry,
using namespace rgw::putobj;
AtomicObjectProcessor processor(&aio, this, dest_bucket_info, nullptr,
user_id, obj_ctx, dest_obj, olh_epoch,
- tag, dpp, null_yield);
+ tag, dpp, y);
RGWRESTConn *conn;
auto& zone_conn_map = svc.zone->get_zone_conn_map();
auto& zonegroup_conn_map = svc.zone->get_zonegroup_conn_map();
}
}
- ret = processor.prepare(null_yield);
+ ret = processor.prepare(y);
if (ret < 0) {
return ret;
}
if (copy_if_newer) {
/* need to get mtime for destination */
- ret = get_obj_state(dpp, &obj_ctx, dest_bucket_info, stat_dest_obj, &dest_state, &manifest, stat_follow_olh, null_yield);
+ ret = get_obj_state(dpp, &obj_ctx, dest_bucket_info, stat_dest_obj, &dest_state, &manifest, stat_follow_olh, y);
if (ret < 0)
goto set_err_state;
}
ret = conn->complete_request(in_stream_req, &etag, &set_mtime,
- &expected_size, nullptr, nullptr, null_yield);
+ &expected_size, nullptr, nullptr, y);
if (ret < 0) {
goto set_err_state;
}
auto& obj_attrs = cb.get_attrs();
RGWUserInfo owner_info;
- if (ctl.user->get_info_by_uid(dpp, *override_owner, &owner_info, null_yield) < 0) {
+ if (ctl.user->get_info_by_uid(dpp, *override_owner, &owner_info, y) < 0) {
ldpp_dout(dpp, 10) << "owner info does not exist" << dendl;
return -EINVAL;
}
bool canceled = false;
ret = processor.complete(cb.get_data_len(), etag, mtime, set_mtime,
attrs, delete_at, nullptr, nullptr, nullptr,
- zones_trace, &canceled, null_yield);
+ zones_trace, &canceled, y);
if (ret < 0) {
goto set_err_state;
}
if (copy_if_newer && canceled) {
ldpp_dout(dpp, 20) << "raced with another write of obj: " << dest_obj << dendl;
obj_ctx.invalidate(dest_obj); /* object was overwritten */
- ret = get_obj_state(dpp, &obj_ctx, dest_bucket_info, stat_dest_obj, &dest_state, &manifest, stat_follow_olh, null_yield);
+ ret = get_obj_state(dpp, &obj_ctx, dest_bucket_info, stat_dest_obj, &dest_state, &manifest, stat_follow_olh, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ << ": get_err_state() returned ret=" << ret << dendl;
goto set_err_state;
if (olh_epoch && *olh_epoch > 0) {
constexpr bool log_data_change = true;
ret = set_olh(dpp, obj_ctx, dest_bucket_info, dest_obj, false, nullptr,
- *olh_epoch, real_time(), false, null_yield, zones_trace, log_data_change);
+ *olh_epoch, real_time(), false, y, zones_trace, log_data_change);
} else {
// we already have the latest copy
ret = 0;
RGWRados::Object::Read& read_op,
const rgw_user& user_id,
const rgw_obj& dest_obj,
- real_time *mtime)
+ real_time *mtime, optional_yield y)
{
string etag;
return ret;
}
- ret = read_op.iterate(dpp, 0, astate->size - 1, out_stream_req->get_out_cb(), null_yield);
+ ret = read_op.iterate(dpp, 0, astate->size - 1, out_stream_req->get_out_cb(), y);
if (ret < 0) {
delete out_stream_req;
return ret;
}
- ret = rest_master_conn->complete_request(out_stream_req, etag, mtime, null_yield);
+ ret = rest_master_conn->complete_request(out_stream_req, etag, mtime, y);
if (ret < 0)
return ret;
unmod_ptr, high_precision_time,
if_match, if_nomatch, attrs_mod, copy_if_newer, attrs, category,
olh_epoch, delete_at, ptag, petag, progress_cb, progress_data, dpp,
- nullptr /* filter */, stat_follow_olh, stat_dest_obj, source_trace_entry);
+ nullptr /* filter */, y, stat_follow_olh, stat_dest_obj, source_trace_entry);
}
map<string, bufferlist> src_attrs;
if (remote_dest) {
/* dest is in a different zonegroup, copy it there */
- return copy_obj_to_remote_dest(dpp, astate, attrs, read_op, user_id, dest_obj, mtime);
+ return copy_obj_to_remote_dest(dpp, astate, attrs, read_op, user_id, dest_obj, mtime, y);
}
uint64_t max_chunk_size;
}
if (remove_ep) {
- r = ctl.bucket->remove_bucket_entrypoint_info(bucket_info.bucket, null_yield, dpp,
+ r = ctl.bucket->remove_bucket_entrypoint_info(bucket_info.bucket, y, dpp,
RGWBucketCtl::Bucket::RemoveParams()
.set_objv_tracker(&objv_tracker));
if (r < 0)
/* if the bucket is not synced we can remove the meta file */
if (!svc.zone->is_syncing_bucket_meta(bucket)) {
RGWObjVersionTracker objv_tracker;
- r = ctl.bucket->remove_bucket_instance_info(bucket, bucket_info, null_yield, dpp);
+ r = ctl.bucket->remove_bucket_instance_info(bucket, bucket_info, y, dpp);
if (r < 0) {
return r;
}
return 0;
}
-int RGWRados::set_bucket_owner(rgw_bucket& bucket, ACLOwner& owner, const DoutPrefixProvider *dpp)
+int RGWRados::set_bucket_owner(rgw_bucket& bucket, ACLOwner& owner, const DoutPrefixProvider *dpp, optional_yield y)
{
RGWBucketInfo info;
map<string, bufferlist> attrs;
int r;
if (bucket.bucket_id.empty()) {
- r = get_bucket_info(&svc, bucket.tenant, bucket.name, info, NULL, null_yield, dpp, &attrs);
+ r = get_bucket_info(&svc, bucket.tenant, bucket.name, info, NULL, y, dpp, &attrs);
} else {
- r = get_bucket_instance_info(bucket, info, nullptr, &attrs, null_yield, dpp);
+ r = get_bucket_instance_info(bucket, info, nullptr, &attrs, y, dpp);
}
if (r < 0) {
ldpp_dout(dpp, 0) << "NOTICE: get_bucket_info on bucket=" << bucket.name << " returned err=" << r << dendl;
info.owner = owner.get_id();
- r = put_bucket_instance_info(info, false, real_time(), &attrs, dpp, null_yield);
+ r = put_bucket_instance_info(info, false, real_time(), &attrs, dpp, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "NOTICE: put_bucket_info on bucket=" << bucket.name << " returned err=" << r << dendl;
return r;
}
-int RGWRados::set_buckets_enabled(vector<rgw_bucket>& buckets, bool enabled, const DoutPrefixProvider *dpp)
+int RGWRados::set_buckets_enabled(vector<rgw_bucket>& buckets, bool enabled, const DoutPrefixProvider *dpp, optional_yield y)
{
int ret = 0;
RGWBucketInfo info;
map<string, bufferlist> attrs;
- int r = get_bucket_info(&svc, bucket.tenant, bucket.name, info, NULL, null_yield, dpp, &attrs);
+ int r = get_bucket_info(&svc, bucket.tenant, bucket.name, info, NULL, y, dpp, &attrs);
if (r < 0) {
ldpp_dout(dpp, 0) << "NOTICE: get_bucket_info on bucket=" << bucket.name << " returned err=" << r << ", skipping bucket" << dendl;
ret = r;
info.flags |= BUCKET_SUSPENDED;
}
- r = put_bucket_instance_info(info, false, real_time(), &attrs, dpp, null_yield);
+ r = put_bucket_instance_info(info, false, real_time(), &attrs, dpp, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "NOTICE: put_bucket_info on bucket=" << bucket.name << " returned err=" << r << ", skipping bucket" << dendl;
ret = r;
return ret;
}
-int RGWRados::bucket_suspended(const DoutPrefixProvider *dpp, rgw_bucket& bucket, bool *suspended)
+int RGWRados::bucket_suspended(const DoutPrefixProvider *dpp, rgw_bucket& bucket, bool *suspended, optional_yield y)
{
RGWBucketInfo bucket_info;
- int ret = get_bucket_info(&svc, bucket.tenant, bucket.name, bucket_info, NULL, null_yield, dpp);
+ int ret = get_bucket_info(&svc, bucket.tenant, bucket.name, bucket_info, NULL, y, dpp);
if (ret < 0) {
return ret;
}
} else {
rgw_bucket_dir_entry dirent;
- int r = store->bi_get_instance(dpp, target->get_bucket_info(), obj, &dirent);
+ int r = store->bi_get_instance(dpp, target->get_bucket_info(), obj, &dirent, y);
if (r < 0) {
return r;
}
}
BucketShard *bs = nullptr;
- int r = target->get_bucket_shard(&bs, dpp);
+ int r = target->get_bucket_shard(&bs, dpp, y);
if (r < 0) {
ldpp_dout(dpp, 5) << "failed to get BucketShard object: r=" << r << dendl;
return r;
RGWObjectCtx& obj_ctx,
const RGWBucketInfo& bucket_info,
const rgw_obj& obj,
- int versioning_status, // versioning flags defined in enum RGWBucketFlags
+ int versioning_status, optional_yield y, // versioning flags defined in enum RGWBucketFlags
uint16_t bilog_flags,
const real_time& expiration_time,
rgw_zone_set *zones_trace)
del_op.params.expiration_time = expiration_time;
del_op.params.zones_trace = zones_trace;
- return del_op.delete_obj(null_yield, dpp);
+ return del_op.delete_obj(y, dpp);
}
int RGWRados::delete_raw_obj(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, optional_yield y)
get_obj_bucket_and_oid_loc(obj, oid, key);
RGWBucketInfo bucket_info;
- int ret = get_bucket_instance_info(obj.bucket, bucket_info, NULL, NULL, null_yield, dpp);
+ int ret = get_bucket_instance_info(obj.bucket, bucket_info, NULL, NULL, y, dpp);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "() get_bucket_instance_info(bucket=" << obj.bucket << ") returned ret=" << ret << dendl;
return ret;
* bl: the contents of the attr
* Returns: 0 on success, -ERR# otherwise.
*/
-int RGWRados::set_attr(const DoutPrefixProvider *dpp, RGWObjectCtx* rctx, RGWBucketInfo& bucket_info, const rgw_obj& obj, const char *name, bufferlist& bl)
+int RGWRados::set_attr(const DoutPrefixProvider *dpp, RGWObjectCtx* rctx, RGWBucketInfo& bucket_info, const rgw_obj& obj, const char *name, bufferlist& bl, optional_yield y)
{
map<string, bufferlist> attrs;
attrs[name] = bl;
- return set_attrs(dpp, rctx, bucket_info, obj, attrs, NULL, null_yield);
+ return set_attrs(dpp, rctx, bucket_info, obj, attrs, NULL, y);
}
int RGWRados::set_attrs(const DoutPrefixProvider *dpp, RGWObjectCtx* rctx, RGWBucketInfo& bucket_info, const rgw_obj& src_obj,
return 0;
}
-int RGWRados::Bucket::UpdateIndex::guard_reshard(const DoutPrefixProvider *dpp, const rgw_obj& obj_instance, BucketShard **pbs, std::function<int(BucketShard *)> call)
+int RGWRados::Bucket::UpdateIndex::guard_reshard(const DoutPrefixProvider *dpp, const rgw_obj& obj_instance, BucketShard **pbs, std::function<int(BucketShard *)> call, optional_yield y)
{
RGWRados *store = target->get_store();
BucketShard *bs = nullptr;
#define NUM_RESHARD_RETRIES 10
for (int i = 0; i < NUM_RESHARD_RETRIES; ++i) {
- int ret = get_bucket_shard(&bs, dpp);
+ int ret = get_bucket_shard(&bs, dpp, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to get BucketShard object. obj=" <<
obj_instance.key << ". ret=" << ret << dendl;
"NOTICE: resharding operation on bucket index detected, blocking. obj=" <<
obj_instance.key << dendl;
- r = store->block_while_resharding(bs, obj_instance, target->bucket_info, null_yield, dpp);
+ r = store->block_while_resharding(bs, obj_instance, target->bucket_info, y, dpp);
if (r == -ERR_BUSY_RESHARDING) {
ldpp_dout(dpp, 10) << __func__ <<
" NOTICE: block_while_resharding() still busy. obj=" <<
int r = guard_reshard(dpp, obj, nullptr, [&](BucketShard *bs) -> int {
return store->cls_obj_prepare_op(dpp, *bs, op, optag, obj, bilog_flags, y, zones_trace);
- });
+ }, y);
if (r < 0) {
return r;
RGWRados *store = target->get_store();
BucketShard *bs = nullptr;
- int ret = get_bucket_shard(&bs, dpp);
+ int ret = get_bucket_shard(&bs, dpp, y);
if (ret < 0) {
ldpp_dout(dpp, 5) << "failed to get BucketShard object: ret=" << ret << dendl;
return ret;
RGWRados *store = target->get_store();
BucketShard *bs = nullptr;
- int ret = get_bucket_shard(&bs, dpp);
+ int ret = get_bucket_shard(&bs, dpp, y);
if (ret < 0) {
ldpp_dout(dpp, 5) << "failed to get BucketShard object: ret=" << ret << dendl;
return ret;
int ret = guard_reshard(dpp, obj, &bs, [&](BucketShard *bs) -> int {
return store->cls_obj_complete_cancel(*bs, optag, obj, remove_objs, bilog_flags, zones_trace);
- });
+ }, y);
/*
* need to update data log anyhow, so that whoever follows needs to update its internal markers
BucketShard *bs,
const rgw_obj& obj_instance,
RGWBucketInfo& bucket_info,
- std::function<int(BucketShard *)> call)
+ std::function<int(BucketShard *)> call, optional_yield y)
{
rgw_obj obj;
const rgw_obj *pobj = &obj_instance;
int r;
for (int i = 0; i < NUM_RESHARD_RETRIES; ++i) {
- r = bs->init(pobj->bucket, *pobj, nullptr /* no RGWBucketInfo */, dpp);
+ r = bs->init(pobj->bucket, *pobj, nullptr /* no RGWBucketInfo */, dpp, y);
if (r < 0) {
ldpp_dout(dpp, 5) << "bs.init() returned ret=" << r << dendl;
return r;
"NOTICE: resharding operation on bucket index detected, blocking. obj=" <<
obj_instance.key << dendl;
- r = block_while_resharding(bs, obj_instance, bucket_info, null_yield, dpp);
+ r = block_while_resharding(bs, obj_instance, bucket_info, y, dpp);
if (r == -ERR_BUSY_RESHARDING) {
ldpp_dout(dpp, 10) << __func__ <<
" NOTICE: block_while_resharding() still busy. obj=" <<
return ret;
}
- ret = bs->init(dpp, bucket_info, obj_instance);
+ ret = bs->init(dpp, bucket_info, obj_instance, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << __func__ <<
" ERROR: failed to refresh bucket shard generation after reshard at " <<
unmod_since, high_precision_time,
svc.zone->need_to_log_data(), zones_trace);
return rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, y);
- });
+ }, y);
if (r < 0) {
ldpp_dout(dpp, 20) << "rgw_rados_operate() after cls_rgw_bucket_link_olh() returned r=" << r << dendl;
return r;
cls_rgw_bucket_unlink_instance(op, key, op_tag,
olh_tag, olh_epoch, svc.zone->need_to_log_data(), zones_trace);
return rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, y);
- });
+ }, y);
if (r < 0) {
ldpp_dout(dpp, 20) << "rgw_rados_operate() after cls_rgw_bucket_link_instance() returned r=" << r << dendl;
return r;
BucketShard bs(this);
int ret =
- bs.init(obj_instance.bucket, obj_instance, nullptr /* no RGWBucketInfo */, dpp);
+ bs.init(obj_instance.bucket, obj_instance, nullptr /* no RGWBucketInfo */, dpp, y);
if (ret < 0) {
ldpp_dout(dpp, 5) << "bs.init() returned ret=" << ret << dendl;
return ret;
{
// fetch the current olh entry from the bucket index
rgw_bucket_olh_entry olh;
- int r = bi_get_olh(dpp, bucket_info, obj, &olh);
+ int r = bi_get_olh(dpp, bucket_info, obj, &olh, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "repair_olh failed to read olh entry for " << obj << dendl;
return r;
int RGWRados::bucket_index_trim_olh_log(const DoutPrefixProvider *dpp,
RGWBucketInfo& bucket_info,
RGWObjState& state,
- const rgw_obj& obj_instance, uint64_t ver)
+ const rgw_obj& obj_instance, uint64_t ver, optional_yield y)
{
rgw_rados_ref ref;
int r = get_obj_head_ref(dpp, bucket_info, obj_instance, &ref);
BucketShard bs(this);
int ret =
- bs.init(obj_instance.bucket, obj_instance, nullptr /* no RGWBucketInfo */, dpp);
+ bs.init(obj_instance.bucket, obj_instance, nullptr /* no RGWBucketInfo */, dpp, y);
if (ret < 0) {
ldpp_dout(dpp, 5) << "bs.init() returned ret=" << ret << dendl;
return ret;
op.assert_exists(); // bucket index shard must exist
cls_rgw_guard_bucket_resharding(op, -ERR_BUSY_RESHARDING);
cls_rgw_trim_olh_log(op, key, ver, olh_tag);
- return pbs->bucket_obj.operate(dpp, &op, null_yield);
- });
+ return pbs->bucket_obj.operate(dpp, &op, y);
+ }, y);
if (ret < 0) {
ldpp_dout(dpp, 20) << "cls_rgw_trim_olh_log() returned r=" << ret << dendl;
return ret;
cls_rgw_guard_bucket_resharding(op, -ERR_BUSY_RESHARDING);
cls_rgw_clear_olh(op, key, olh_tag);
return rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, y);
- });
+ }, y);
if (ret < 0) {
ldpp_dout(dpp, 5) << "rgw_rados_operate() after cls_rgw_clear_olh() returned ret=" << ret << dendl;
return ret;
liter != remove_instances.end(); ++liter) {
cls_rgw_obj_key& key = *liter;
rgw_obj obj_instance(bucket, key);
- int ret = delete_obj(dpp, obj_ctx, bucket_info, obj_instance, 0, RGW_BILOG_FLAG_VERSIONED_OP, ceph::real_time(), zones_trace);
+ int ret = delete_obj(dpp, obj_ctx, bucket_info, obj_instance, 0, y, RGW_BILOG_FLAG_VERSIONED_OP, ceph::real_time(), zones_trace);
if (ret < 0 && ret != -ENOENT) {
ldpp_dout(dpp, 0) << "ERROR: delete_obj() returned " << ret << " obj_instance=" << obj_instance << dendl;
return ret;
return r;
}
- r = bucket_index_trim_olh_log(dpp, bucket_info, state, obj, last_ver);
+ r = bucket_index_trim_olh_log(dpp, bucket_info, state, obj, last_ver, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: could not trim olh log, r=" << r << dendl;
return r;
int RGWRados::try_refresh_bucket_info(RGWBucketInfo& info,
ceph::real_time *pmtime,
- const DoutPrefixProvider *dpp,
+ const DoutPrefixProvider *dpp, optional_yield y,
map<string, bufferlist> *pattrs)
{
rgw_bucket bucket = info.bucket;
auto rv = info.objv_tracker.read_version;
- return ctl.bucket->read_bucket_info(bucket, &info, null_yield, dpp,
+ return ctl.bucket->read_bucket_info(bucket, &info, y, dpp,
RGWBucketCtl::BucketInstance::GetParams()
.set_mtime(pmtime)
.set_attrs(pattrs)
return 0;
}
-int RGWRados::update_containers_stats(map<string, RGWBucketEnt>& m, const DoutPrefixProvider *dpp)
+int RGWRados::update_containers_stats(map<string, RGWBucketEnt>& m, const DoutPrefixProvider *dpp, optional_yield y)
{
map<string, RGWBucketEnt>::iterator iter;
for (iter = m.begin(); iter != m.end(); ++iter) {
vector<rgw_bucket_dir_header> headers;
RGWBucketInfo bucket_info;
- int ret = get_bucket_instance_info(bucket, bucket_info, NULL, NULL, null_yield, dpp);
+ int ret = get_bucket_instance_info(bucket, bucket_info, NULL, NULL, y, dpp);
if (ret < 0) {
return ret;
}
}
int RGWRados::bi_get_instance(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj,
- rgw_bucket_dir_entry *dirent)
+ rgw_bucket_dir_entry *dirent, optional_yield y)
{
rgw_cls_bi_entry bi_entry;
- int r = bi_get(dpp, bucket_info, obj, BIIndexType::Instance, &bi_entry);
+ int r = bi_get(dpp, bucket_info, obj, BIIndexType::Instance, &bi_entry, y);
if (r < 0 && r != -ENOENT) {
ldpp_dout(dpp, 0) << "ERROR: bi_get() returned r=" << r << dendl;
}
}
int RGWRados::bi_get_olh(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj,
- rgw_bucket_olh_entry *olh)
+ rgw_bucket_olh_entry *olh, optional_yield y)
{
rgw_cls_bi_entry bi_entry;
- int r = bi_get(dpp, bucket_info, obj, BIIndexType::OLH, &bi_entry);
+ int r = bi_get(dpp, bucket_info, obj, BIIndexType::OLH, &bi_entry, y);
if (r < 0 && r != -ENOENT) {
ldpp_dout(dpp, 0) << "ERROR: bi_get() returned r=" << r << dendl;
}
}
int RGWRados::bi_get(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj,
- BIIndexType index_type, rgw_cls_bi_entry *entry)
+ BIIndexType index_type, rgw_cls_bi_entry *entry, optional_yield y)
{
BucketShard bs(this);
- int ret = bs.init(dpp, bucket_info, obj);
+ int ret = bs.init(dpp, bucket_info, obj, y);
if (ret < 0) {
ldpp_dout(dpp, 5) << "bs.init() returned ret=" << ret << dendl;
return ret;
return cls_rgw_bi_get(ref.pool.ioctx(), ref.obj.oid, index_type, key, entry);
}
-void RGWRados::bi_put(ObjectWriteOperation& op, BucketShard& bs, rgw_cls_bi_entry& entry)
+void RGWRados::bi_put(ObjectWriteOperation& op, BucketShard& bs, rgw_cls_bi_entry& entry, optional_yield y)
{
auto& ref = bs.bucket_obj.get_ref();
cls_rgw_bi_put(op, ref.obj.oid, entry);
}
-int RGWRados::bi_put(BucketShard& bs, rgw_cls_bi_entry& entry)
+int RGWRados::bi_put(BucketShard& bs, rgw_cls_bi_entry& entry, optional_yield y)
{
auto& ref = bs.bucket_obj.get_ref();
int ret = cls_rgw_bi_put(ref.pool.ioctx(), ref.obj.oid, entry);
return 0;
}
-int RGWRados::bi_put(const DoutPrefixProvider *dpp, rgw_bucket& bucket, rgw_obj& obj, rgw_cls_bi_entry& entry)
+int RGWRados::bi_put(const DoutPrefixProvider *dpp, rgw_bucket& bucket, rgw_obj& obj, rgw_cls_bi_entry& entry, optional_yield y)
{
// make sure incomplete multipart uploads are hashed correctly
if (obj.key.ns == RGW_OBJ_NS_MULTIPART) {
}
BucketShard bs(this);
- int ret = bs.init(bucket, obj, nullptr /* no RGWBucketInfo */, dpp);
+ int ret = bs.init(bucket, obj, nullptr /* no RGWBucketInfo */, dpp, y);
if (ret < 0) {
ldpp_dout(dpp, 5) << "bs.init() returned ret=" << ret << dendl;
return ret;
}
- return bi_put(bs, entry);
+ return bi_put(bs, entry, y);
}
int RGWRados::bi_list(const DoutPrefixProvider *dpp, rgw_bucket& bucket,
const string& obj_name_filter, const string& marker, uint32_t max,
- list<rgw_cls_bi_entry> *entries, bool *is_truncated)
+ list<rgw_cls_bi_entry> *entries, bool *is_truncated, optional_yield y)
{
rgw_obj obj(bucket, obj_name_filter);
BucketShard bs(this);
- int ret = bs.init(bucket, obj, nullptr /* no RGWBucketInfo */, dpp);
+ int ret = bs.init(bucket, obj, nullptr /* no RGWBucketInfo */, dpp, y);
if (ret < 0) {
ldpp_dout(dpp, 5) << "bs.init() returned ret=" << ret << dendl;
return ret;
}
int RGWRados::bi_list(BucketShard& bs, const string& obj_name_filter, const string& marker, uint32_t max,
- list<rgw_cls_bi_entry> *entries, bool *is_truncated)
+ list<rgw_cls_bi_entry> *entries, bool *is_truncated, optional_yield y)
{
auto& ref = bs.bucket_obj.get_ref();
int ret = cls_rgw_bi_list(ref.pool.ioctx(), ref.obj.oid, obj_name_filter, marker, max, entries, is_truncated);
int RGWRados::bi_list(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info, int shard_id, const string& obj_name_filter, const string& marker, uint32_t max,
- list<rgw_cls_bi_entry> *entries, bool *is_truncated)
+ list<rgw_cls_bi_entry> *entries, bool *is_truncated, optional_yield y)
{
BucketShard bs(this);
int ret = bs.init(dpp, bucket_info,
bucket_info.layout.current_index,
- shard_id);
+ shard_id, y);
if (ret < 0) {
ldpp_dout(dpp, 5) << "bs.init() returned ret=" << ret << dendl;
return ret;
}
- return bi_list(bs, obj_name_filter, marker, max, entries, is_truncated);
+ return bi_list(bs, obj_name_filter, marker, max, entries, is_truncated, y);
}
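At the two ends of the chain, the bi_list() overloads above are now used like this. A hedged sketch of the call sites (list_from_request and list_from_admin_tool are invented names): request handlers driven by the frontend's coroutines hand their yield context down, while offline paths such as the radosgw-admin hunks later in this patch keep passing null_yield because there is no coroutine to suspend.

// sketch only; assumes the post-patch overload
// bi_list(BucketShard&, filter, marker, max, entries, is_truncated, optional_yield)
int list_from_request(RGWRados* rados, RGWRados::BucketShard& bs, optional_yield y)
{
  std::list<rgw_cls_bi_entry> entries;
  bool truncated = false;
  // y came from the request's yield context, so the index listing may suspend
  return rados->bi_list(bs, "", "", 128, &entries, &truncated, y);
}

int list_from_admin_tool(RGWRados* rados, RGWRados::BucketShard& bs)
{
  std::list<rgw_cls_bi_entry> entries;
  bool truncated = false;
  // no coroutine here, so the call stays synchronous, exactly as before the patch
  return rados->bi_list(bs, "", "", 128, &entries, &truncated, null_yield);
}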
int RGWRados::bi_remove(const DoutPrefixProvider *dpp, BucketShard& bs)
explicit BucketShard(RGWRados *_store) : store(_store), shard_id(-1) {}
int init(const rgw_bucket& _bucket, const rgw_obj& obj,
- RGWBucketInfo* out, const DoutPrefixProvider *dpp);
- int init(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj);
+ RGWBucketInfo* out, const DoutPrefixProvider *dpp, optional_yield y);
+ int init(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, optional_yield y);
int init(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
- const rgw::bucket_index_layout_generation& index, int sid);
+ const rgw::bucket_index_layout_generation& index, int sid, optional_yield y);
friend std::ostream& operator<<(std::ostream& out, const BucketShard& bs) {
out << "BucketShard:{ bucket=" << bs.bucket <<
//rgw::sal::Object* get_target() { return obj; }
int get_manifest(const DoutPrefixProvider *dpp, RGWObjManifest **pmanifest, optional_yield y);
- int get_bucket_shard(BucketShard **pbs, const DoutPrefixProvider *dpp) {
+ int get_bucket_shard(BucketShard **pbs, const DoutPrefixProvider *dpp, optional_yield y) {
if (!bs_initialized) {
int r =
- bs.init(bucket_info.bucket, obj, nullptr /* no RGWBucketInfo */, dpp);
+ bs.init(bucket_info.bucket, obj, nullptr /* no RGWBucketInfo */, dpp, y);
if (r < 0) {
return r;
}
rgw_bucket& get_bucket() { return bucket; }
RGWBucketInfo& get_bucket_info() { return bucket_info; }
- int update_bucket_id(const std::string& new_bucket_id, const DoutPrefixProvider *dpp);
+ int update_bucket_id(const std::string& new_bucket_id, const DoutPrefixProvider *dpp, optional_yield y);
int get_shard_id() { return shard_id; }
void set_shard_id(int id) {
bool prepared{false};
rgw_zone_set *zones_trace{nullptr};
- int init_bs(const DoutPrefixProvider *dpp) {
+ int init_bs(const DoutPrefixProvider *dpp, optional_yield y) {
int r =
- bs.init(target->get_bucket(), obj, &target->bucket_info, dpp);
+ bs.init(target->get_bucket(), obj, &target->bucket_info, dpp, y);
if (r < 0) {
return r;
}
bs_initialized = false;
}
- int guard_reshard(const DoutPrefixProvider *dpp, const rgw_obj& obj_instance, BucketShard **pbs, std::function<int(BucketShard *)> call);
+ int guard_reshard(const DoutPrefixProvider *dpp, const rgw_obj& obj_instance, BucketShard **pbs, std::function<int(BucketShard *)> call, optional_yield y);
public:
UpdateIndex(RGWRados::Bucket *_target, const rgw_obj& _obj) : target(_target), obj(_obj),
blind = (target->get_bucket_info().layout.current_index.layout.type == rgw::BucketIndexType::Indexless);
}
- int get_bucket_shard(BucketShard **pbs, const DoutPrefixProvider *dpp) {
+ int get_bucket_shard(BucketShard **pbs, const DoutPrefixProvider *dpp, optional_yield y) {
if (!bs_initialized) {
- int r = init_bs(dpp);
+ int r = init_bs(dpp, y);
if (r < 0) {
return r;
}
RGWBucketInfo& bucket_info,
const std::string& obj_prefix,
const std::string& obj_delim,
- std::function<int(const rgw_bucket_dir_entry&)> handler);
+ std::function<int(const rgw_bucket_dir_entry&)> handler, optional_yield y);
bool swift_versioning_enabled(const RGWBucketInfo& bucket_info) const;
RGWBucketInfo& bucket_info, /* in */
rgw_obj& obj, /* in/out */
bool& restored, /* out */
- const DoutPrefixProvider *dpp); /* in */
+ const DoutPrefixProvider *dpp, optional_yield y); /* in */
int copy_obj_to_remote_dest(const DoutPrefixProvider *dpp,
RGWObjState *astate,
std::map<std::string, bufferlist>& src_attrs,
RGWRados::Object::Read& read_op,
const rgw_user& user_id,
const rgw_obj& dest_obj,
- ceph::real_time *mtime);
+ ceph::real_time *mtime, optional_yield y);
enum AttrsMod {
ATTRSMOD_NONE = 0,
std::map<std::string, std::string> *pheaders,
std::string *version_id,
std::string *ptag,
- std::string *petag);
+ std::string *petag, optional_yield y);
int fetch_remote_obj(RGWObjectCtx& obj_ctx,
const rgw_user& user_id,
void (*progress_cb)(off_t, void *),
void *progress_data,
const DoutPrefixProvider *dpp,
- RGWFetchObjFilter *filter,
+ RGWFetchObjFilter *filter, optional_yield y,
bool stat_follow_olh,
const rgw_obj& stat_dest_obj,
const rgw_zone_set_entry& source_trace_entry,
RGWMetaSyncStatusManager* get_meta_sync_manager();
RGWDataSyncStatusManager* get_data_sync_manager(const rgw_zone_id& source_zone);
- int set_bucket_owner(rgw_bucket& bucket, ACLOwner& owner, const DoutPrefixProvider *dpp);
- int set_buckets_enabled(std::vector<rgw_bucket>& buckets, bool enabled, const DoutPrefixProvider *dpp);
- int bucket_suspended(const DoutPrefixProvider *dpp, rgw_bucket& bucket, bool *suspended);
+ int set_bucket_owner(rgw_bucket& bucket, ACLOwner& owner, const DoutPrefixProvider *dpp, optional_yield y);
+ int set_buckets_enabled(std::vector<rgw_bucket>& buckets, bool enabled, const DoutPrefixProvider *dpp, optional_yield y);
+ int bucket_suspended(const DoutPrefixProvider *dpp, rgw_bucket& bucket, bool *suspended, optional_yield y);
/** Delete an object.*/
int delete_obj(const DoutPrefixProvider *dpp,
RGWObjectCtx& obj_ctx,
const RGWBucketInfo& bucket_info,
const rgw_obj& obj,
- int versioning_status, // versioning flags defined in enum RGWBucketFlags
+ int versioning_status, optional_yield y, // versioning flags defined in enum RGWBucketFlags
uint16_t bilog_flags = 0,
const ceph::real_time& expiration_time = ceph::real_time(),
rgw_zone_set *zones_trace = nullptr);
* bl: the contents of the attr
* Returns: 0 on success, -ERR# otherwise.
*/
- int set_attr(const DoutPrefixProvider *dpp, RGWObjectCtx* ctx, RGWBucketInfo& bucket_info, const rgw_obj& obj, const char *name, bufferlist& bl);
+ int set_attr(const DoutPrefixProvider *dpp, RGWObjectCtx* ctx, RGWBucketInfo& bucket_info, const rgw_obj& obj, const char *name, bufferlist& bl, optional_yield y);
int set_attrs(const DoutPrefixProvider *dpp, RGWObjectCtx* ctx, RGWBucketInfo& bucket_info, const rgw_obj& obj,
std::map<std::string, bufferlist>& attrs,
BucketShard *bs,
const rgw_obj& obj_instance,
RGWBucketInfo& bucket_info,
- std::function<int(BucketShard *)> call);
+ std::function<int(BucketShard *)> call, optional_yield y);
int block_while_resharding(RGWRados::BucketShard *bs,
const rgw_obj& obj_instance,
RGWBucketInfo& bucket_info,
RGWBucketInfo& bucket_info, RGWObjState& state,
const rgw_obj& obj_instance, uint64_t ver_marker,
std::map<uint64_t, std::vector<rgw_bucket_olh_log_entry> > *log, bool *is_truncated, optional_yield y);
- int bucket_index_trim_olh_log(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, RGWObjState& obj_state, const rgw_obj& obj_instance, uint64_t ver);
+ int bucket_index_trim_olh_log(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, RGWObjState& obj_state, const rgw_obj& obj_instance, uint64_t ver, optional_yield y);
int bucket_index_clear_olh(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& obj_instance, optional_yield y);
int apply_olh_log(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx, RGWObjState& obj_state, RGWBucketInfo& bucket_info, const rgw_obj& obj,
bufferlist& obj_tag, std::map<uint64_t, std::vector<rgw_bucket_olh_log_entry> >& log,
void gen_rand_obj_instance_name(rgw_obj_key *target_key);
void gen_rand_obj_instance_name(rgw_obj *target);
- int update_containers_stats(std::map<std::string, RGWBucketEnt>& m, const DoutPrefixProvider *dpp);
+ int update_containers_stats(std::map<std::string, RGWBucketEnt>& m, const DoutPrefixProvider *dpp, optional_yield y);
int append_async(const DoutPrefixProvider *dpp, rgw_raw_obj& obj, size_t size, bufferlist& bl);
public:
//
int try_refresh_bucket_info(RGWBucketInfo& info,
ceph::real_time *pmtime,
- const DoutPrefixProvider *dpp,
+ const DoutPrefixProvider *dpp, optional_yield y,
std::map<std::string, bufferlist> *pattrs = nullptr);
int put_linked_bucket_info(RGWBucketInfo& info, bool exclusive, ceph::real_time mtime, obj_version *pep_objv,
const RGWBucketInfo& bucket_info,
const rgw::bucket_index_layout_generation& idx_layout,
int shard_id, RGWGetDirHeader_CB *ctx, int *num_aio);
- int bi_get_instance(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, rgw_bucket_dir_entry *dirent);
- int bi_get_olh(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, rgw_bucket_olh_entry *olh);
- int bi_get(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, BIIndexType index_type, rgw_cls_bi_entry *entry);
- void bi_put(librados::ObjectWriteOperation& op, BucketShard& bs, rgw_cls_bi_entry& entry);
- int bi_put(BucketShard& bs, rgw_cls_bi_entry& entry);
- int bi_put(const DoutPrefixProvider *dpp, rgw_bucket& bucket, rgw_obj& obj, rgw_cls_bi_entry& entry);
+ int bi_get_instance(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, rgw_bucket_dir_entry *dirent, optional_yield y);
+ int bi_get_olh(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, rgw_bucket_olh_entry *olh, optional_yield y);
+ int bi_get(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, BIIndexType index_type, rgw_cls_bi_entry *entry, optional_yield y);
+ void bi_put(librados::ObjectWriteOperation& op, BucketShard& bs, rgw_cls_bi_entry& entry, optional_yield y);
+ int bi_put(BucketShard& bs, rgw_cls_bi_entry& entry, optional_yield y);
+ int bi_put(const DoutPrefixProvider *dpp, rgw_bucket& bucket, rgw_obj& obj, rgw_cls_bi_entry& entry, optional_yield y);
int bi_list(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
int shard_id,
const std::string& marker,
uint32_t max,
std::list<rgw_cls_bi_entry> *entries,
- bool *is_truncated);
- int bi_list(BucketShard& bs, const std::string& filter_obj, const std::string& marker, uint32_t max, std::list<rgw_cls_bi_entry> *entries, bool *is_truncated);
+ bool *is_truncated, optional_yield y);
+ int bi_list(BucketShard& bs, const std::string& filter_obj, const std::string& marker, uint32_t max, std::list<rgw_cls_bi_entry> *entries, bool *is_truncated, optional_yield y);
int bi_list(const DoutPrefixProvider *dpp, rgw_bucket& bucket, const std::string& obj_name, const std::string& marker, uint32_t max,
- std::list<rgw_cls_bi_entry> *entries, bool *is_truncated);
+ std::list<rgw_cls_bi_entry> *entries, bool *is_truncated, optional_yield y);
int bi_remove(const DoutPrefixProvider *dpp, BucketShard& bs);
int cls_obj_usage_log_add(const DoutPrefixProvider *dpp, const std::string& oid, rgw_usage_log_info& info, optional_yield y);
store(_store), bucket_info(_bucket_info), shard_id(shard_id),
bs(store->getRados()), aio_completions(_completions)
{
- bs.init(dpp, bucket_info, index, shard_id);
+ bs.init(dpp, bucket_info, index, shard_id, null_yield);
max_aio_completions =
store->ctx()->_conf.get_val<uint64_t>("rgw_reshard_max_aio");
librados::ObjectWriteOperation op;
for (auto& entry : entries) {
- store->getRados()->bi_put(op, bs, entry);
+ store->getRados()->bi_put(op, bs, entry, null_yield);
}
cls_rgw_bucket_update_stats(op, false, stats);
bool verbose,
ostream *out,
Formatter *formatter,
- const DoutPrefixProvider *dpp)
+ const DoutPrefixProvider *dpp, optional_yield y)
{
if (out) {
(*out) << "tenant: " << bucket_info.bucket.tenant << std::endl;
const std::string null_object_filter; // empty string since we're not filtering by object
while (is_truncated) {
entries.clear();
- int ret = store->getRados()->bi_list(dpp, bucket_info, i, null_object_filter, marker, max_entries, &entries, &is_truncated);
+ int ret = store->getRados()->bi_list(dpp, bucket_info, i, null_object_filter, marker, max_entries, &entries, &is_truncated, y);
if (ret == -ENOENT) {
ldpp_dout(dpp, 1) << "WARNING: " << __func__ << " failed to find shard "
<< i << ", skipping" << dendl;
ret == 0) { // no fault injected, do the reshard
ret = do_reshard(bucket_info.layout.current_index,
*bucket_info.layout.target_index,
- max_op_entries, verbose, out, formatter, dpp);
+ max_op_entries, verbose, out, formatter, dpp, y);
}
if (ret < 0) {
bool verbose,
std::ostream *os,
Formatter *formatter,
- const DoutPrefixProvider *dpp);
+ const DoutPrefixProvider *dpp, optional_yield y);
public:
// pass nullptr for the final parameter if no outer reshard lock to
return store->ctl()->bucket->sync_user_stats(dpp, owner->get_id(), info, y, &ent);
}
-int RadosBucket::update_container_stats(const DoutPrefixProvider* dpp)
+int RadosBucket::update_container_stats(const DoutPrefixProvider* dpp, optional_yield y)
{
int ret;
map<std::string, RGWBucketEnt> m;
m[info.bucket.name] = ent;
- ret = store->getRados()->update_containers_stats(m, dpp);
+ ret = store->getRados()->update_containers_stats(m, dpp, y);
if (!ret)
return -EEXIST;
if (ret < 0)
new_attrs, &get_info().objv_tracker, y, dpp);
}
-int RadosBucket::try_refresh_info(const DoutPrefixProvider* dpp, ceph::real_time* pmtime)
+int RadosBucket::try_refresh_info(const DoutPrefixProvider* dpp, ceph::real_time* pmtime, optional_yield y)
{
- return store->getRados()->try_refresh_bucket_info(info, pmtime, dpp, &attrs);
+ return store->getRados()->try_refresh_bucket_info(info, pmtime, dpp, y, &attrs);
}
int RadosBucket::read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch,
return store->getRados()->cls_obj_set_bucket_tag_timeout(dpp, info, timeout);
}
-int RadosBucket::purge_instance(const DoutPrefixProvider* dpp)
+int RadosBucket::purge_instance(const DoutPrefixProvider* dpp, optional_yield y)
{
int max_shards = (info.layout.current_index.layout.normal.num_shards > 0 ? info.layout.current_index.layout.normal.num_shards : 1);
for (int i = 0; i < max_shards; i++) {
RGWRados::BucketShard bs(store->getRados());
int shard_id = (info.layout.current_index.layout.normal.num_shards > 0 ? i : -1);
- int ret = bs.init(dpp, info, info.layout.current_index, shard_id);
+ int ret = bs.init(dpp, info, info.layout.current_index, shard_id, y);
if (ret < 0) {
cerr << "ERROR: bs.init(bucket=" << info.bucket << ", shard=" << shard_id
<< "): " << cpp_strerror(-ret) << std::endl;
anon_ratelimit = svc()->zone->get_current_period().get_config().anon_ratelimit;
}
-int RadosStore::set_buckets_enabled(const DoutPrefixProvider* dpp, vector<rgw_bucket>& buckets, bool enabled)
+int RadosStore::set_buckets_enabled(const DoutPrefixProvider* dpp, vector<rgw_bucket>& buckets, bool enabled, optional_yield y)
{
- return rados->set_buckets_enabled(buckets, enabled, dpp);
+ return rados->set_buckets_enabled(buckets, enabled, dpp, y);
}
int RadosStore::get_sync_policy_handler(const DoutPrefixProvider* dpp,
}
int RadosObject::swift_versioning_restore(bool& restored,
- const DoutPrefixProvider* dpp)
+ const DoutPrefixProvider* dpp, optional_yield y)
{
rgw_obj obj = get_obj();
return store->getRados()->swift_versioning_restore(*rados_ctx,
bucket->get_info(),
obj,
restored,
- dpp);
+ dpp, y);
}
int RadosObject::swift_versioning_copy(const DoutPrefixProvider* dpp, optional_yield y)
const std::map<std::string, std::string>& meta) override;
virtual void get_quota(RGWQuota& quota) override;
virtual void get_ratelimit(RGWRateLimitInfo& bucket_ratelimit, RGWRateLimitInfo& user_ratelimit, RGWRateLimitInfo& anon_ratelimit) override;
- virtual int set_buckets_enabled(const DoutPrefixProvider* dpp, std::vector<rgw_bucket>& buckets, bool enabled) override;
+ virtual int set_buckets_enabled(const DoutPrefixProvider* dpp, std::vector<rgw_bucket>& buckets, bool enabled, optional_yield y) override;
virtual int get_sync_policy_handler(const DoutPrefixProvider* dpp,
std::optional<rgw_zone_id> zone,
std::optional<rgw_bucket> bucket,
/* Swift versioning */
virtual int swift_versioning_restore(bool& restored,
- const DoutPrefixProvider* dpp) override;
+ const DoutPrefixProvider* dpp, optional_yield y) override;
virtual int swift_versioning_copy(const DoutPrefixProvider* dpp,
optional_yield y) override;
const bucket_index_layout_generation& idx_layout,
int shard_id, RGWGetBucketStats_CB* ctx) override;
virtual int sync_user_stats(const DoutPrefixProvider *dpp, optional_yield y) override;
- virtual int update_container_stats(const DoutPrefixProvider* dpp) override;
+ virtual int update_container_stats(const DoutPrefixProvider* dpp, optional_yield y) override;
virtual int check_bucket_shards(const DoutPrefixProvider* dpp, optional_yield y) override;
virtual int chown(const DoutPrefixProvider* dpp, User& new_user, optional_yield y) override;
virtual int put_info(const DoutPrefixProvider* dpp, bool exclusive, ceph::real_time mtime) override;
virtual int check_empty(const DoutPrefixProvider* dpp, optional_yield y) override;
virtual int check_quota(const DoutPrefixProvider *dpp, RGWQuota& quota, uint64_t obj_size, optional_yield y, bool check_size_only = false) override;
virtual int merge_and_store_attrs(const DoutPrefixProvider* dpp, Attrs& attrs, optional_yield y) override;
- virtual int try_refresh_info(const DoutPrefixProvider* dpp, ceph::real_time* pmtime) override;
+ virtual int try_refresh_info(const DoutPrefixProvider* dpp, ceph::real_time* pmtime, optional_yield y) override;
virtual int read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries,
bool* is_truncated, RGWUsageIter& usage_iter,
std::map<rgw_user_bucket, rgw_usage_log_entry>& usage) override;
virtual int check_index(const DoutPrefixProvider *dpp, std::map<RGWObjCategory, RGWStorageStats>& existing_stats, std::map<RGWObjCategory, RGWStorageStats>& calculated_stats) override;
virtual int rebuild_index(const DoutPrefixProvider *dpp) override;
virtual int set_tag_timeout(const DoutPrefixProvider *dpp, uint64_t timeout) override;
- virtual int purge_instance(const DoutPrefixProvider* dpp) override;
+ virtual int purge_instance(const DoutPrefixProvider* dpp, optional_yield y) override;
virtual std::unique_ptr<Bucket> clone() override {
return std::make_unique<RadosBucket>(*this);
}
bool spawn_next() override {
if (shard < num_shards) {
RGWRados::BucketShard bs(store->getRados());
- bs.init(dpp, bucket_info, index, shard);
+ bs.init(dpp, bucket_info, index, shard, null_yield);
spawn(new RGWRadosRemoveOidCR(store, std::move(bs.bucket_obj), nullptr),
false);
++shard;
marker = iter->first;
}
- ret = driver->set_buckets_enabled(dpp, bucket_names, !suspended);
+ ret = driver->set_buckets_enabled(dpp, bucket_names, !suspended, y);
if (ret < 0) {
set_err_msg(err_msg, "failed to modify bucket");
return ret;
}
rgw_cls_bi_entry entry;
- ret = static_cast<rgw::sal::RadosStore*>(driver)->getRados()->bi_get(dpp(), bucket->get_info(), obj, bi_index_type, &entry);
+ ret = static_cast<rgw::sal::RadosStore*>(driver)->getRados()->bi_get(dpp(), bucket->get_info(), obj, bi_index_type, &entry, null_yield);
if (ret < 0) {
cerr << "ERROR: bi_get(): " << cpp_strerror(-ret) << std::endl;
return -ret;
rgw_obj obj(bucket->get_key(), key);
- ret = static_cast<rgw::sal::RadosStore*>(driver)->getRados()->bi_put(dpp(), bucket->get_key(), obj, entry);
+ ret = static_cast<rgw::sal::RadosStore*>(driver)->getRados()->bi_put(dpp(), bucket->get_key(), obj, entry, null_yield);
if (ret < 0) {
cerr << "ERROR: bi_put(): " << cpp_strerror(-ret) << std::endl;
return -ret;
ldpp_dout(dpp(), 20) << "INFO: " << __func__ << ": starting shard=" << i << dendl;
RGWRados::BucketShard bs(static_cast<rgw::sal::RadosStore*>(driver)->getRados());
- int ret = bs.init(dpp(), bucket->get_info(), index, i);
+ int ret = bs.init(dpp(), bucket->get_info(), index, i, null_yield);
marker.clear();
if (ret < 0) {
do {
entries.clear();
// if object is specified, we use that as a filter to only retrieve some entries
- ret = static_cast<rgw::sal::RadosStore*>(driver)->getRados()->bi_list(bs, object, marker, max_entries, &entries, &is_truncated);
+ ret = static_cast<rgw::sal::RadosStore*>(driver)->getRados()->bi_list(bs, object, marker, max_entries, &entries, &is_truncated, null_yield);
if (ret < 0) {
cerr << "ERROR: bi_list(): " << cpp_strerror(-ret) << std::endl;
return -ret;
const int max_shards = rgw::num_shards(index);
for (int i = 0; i < max_shards; i++) {
RGWRados::BucketShard bs(static_cast<rgw::sal::RadosStore*>(driver)->getRados());
- int ret = bs.init(dpp(), bucket->get_info(), index, i);
+ int ret = bs.init(dpp(), bucket->get_info(), index, i, null_yield);
if (ret < 0) {
cerr << "ERROR: bs.init(bucket=" << bucket << ", shard=" << i << "): " << cpp_strerror(-ret) << std::endl;
return -ret;
if (res != -ECANCELED) {
break;
}
- res = s->bucket->try_refresh_info(s, nullptr);
+ res = s->bucket->try_refresh_info(s, nullptr, null_yield);
if (res != 0) {
break;
}
// general, they should just return op_ret.
namespace {
template<typename F>
-int retry_raced_bucket_write(const DoutPrefixProvider *dpp, rgw::sal::Bucket* b, const F& f) {
+int retry_raced_bucket_write(const DoutPrefixProvider *dpp, rgw::sal::Bucket* b, const F& f, optional_yield y) {
auto r = f();
for (auto i = 0u; i < 15u && r == -ECANCELED; ++i) {
- r = b->try_refresh_info(dpp, nullptr);
+ r = b->try_refresh_info(dpp, nullptr, y);
if (r >= 0) {
r = f();
}
rgw::sal::Attrs attrs = s->bucket->get_attrs();
attrs[RGW_ATTR_TAGS] = tags_bl;
return s->bucket->merge_and_store_attrs(this, attrs, y);
- });
+ }, y);
}
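retry_raced_bucket_write() above now carries the request's optional_yield so the try_refresh_info() inside its retry loop can suspend as well; each caller wraps its metadata update in a lambda and appends y, which is what the trailing "}, y);" edits throughout rgw_op.cc are doing. A sketch of the resulting caller shape, using an invented op class and attribute name:

// hypothetical op; RGWExampleSetAttrOp and "user.rgw.example" are not in this patch
void RGWExampleSetAttrOp::execute(optional_yield y)
{
  op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this, y] {
    rgw::sal::Attrs attrs = s->bucket->get_attrs();
    attrs["user.rgw.example"] = bufferlist{};
    // the same yield context serves both the attribute write and, on -ECANCELED,
    // the bucket-info refresh that retry_raced_bucket_write() does before retrying
    return s->bucket->merge_and_store_attrs(this, attrs, y);
  }, y);
}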
<< " returned err= " << op_ret << dendl;
}
return op_ret;
- });
+ }, y);
}
int RGWGetBucketReplication::verify_permission(optional_yield y)
}
return 0;
- });
+ }, y);
}
void RGWDeleteBucketReplication::pre_exec()
}
return 0;
- });
+ }, y);
}
int RGWOp::do_aws4_auth_completion()
}
s->bucket->set_attrs(rgw::sal::Attrs(s->bucket_attrs));
return s->bucket->put_info(this, false, real_time());
- });
+ }, y);
if (!modified) {
return;
s->bucket->get_info().website_conf = website_conf;
op_ret = s->bucket->put_info(this, false, real_time());
return op_ret;
- });
+ }, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket->get_name()
s->bucket->get_info().website_conf = RGWBucketWebsiteConf();
op_ret = s->bucket->put_info(this, false, real_time());
return op_ret;
- });
+ }, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket
<< " returned err=" << op_ret << dendl;
if (op_ret) {
return;
}
- op_ret = bucket->update_container_stats(s);
+ op_ret = bucket->update_container_stats(s, y);
}
int RGWListBucket::verify_permission(optional_yield y)
}
if (need_container_stats()) {
- op_ret = s->bucket->update_container_stats(s);
+ op_ret = s->bucket->update_container_stats(s, y);
}
rgw::sal::Bucket::ListParams params;
* the same call. */
op_ret = s->bucket->merge_and_store_attrs(this, attrs, s->yield);
return op_ret;
- });
+ }, y);
}
int RGWPutMetadataObject::verify_permission(optional_yield y)
s->object->set_atomic();
bool ver_restored = false;
- op_ret = s->object->swift_versioning_restore(ver_restored, this);
+ op_ret = s->object->swift_versioning_restore(ver_restored, this, y);
if (op_ret < 0) {
return;
}
rgw::sal::Attrs attrs(s->bucket_attrs);
attrs[RGW_ATTR_CORS] = cors_bl;
return s->bucket->merge_and_store_attrs(this, attrs, s->yield);
- });
+ }, y);
}
int RGWDeleteCORS::verify_permission(optional_yield y)
<< " returned err=" << op_ret << dendl;
}
return op_ret;
- });
+ }, y);
}
void RGWOptionsCORS::get_response_params(string& hdrs, string& exp_hdrs, unsigned *max_age) {
attrs[RGW_ATTR_IAM_POLICY].append(p.text);
op_ret = s->bucket->merge_and_store_attrs(this, attrs, s->yield);
return op_ret;
- });
+ }, y);
} catch (rgw::IAM::PolicyParseException& e) {
ldpp_dout(this, 5) << "failed to parse policy: " << e.what() << dendl;
op_ret = -EINVAL;
attrs.erase(RGW_ATTR_IAM_POLICY);
op_ret = s->bucket->merge_and_store_attrs(this, attrs, s->yield);
return op_ret;
- });
+ }, y);
}
void RGWPutBucketObjectLock::pre_exec()
s->bucket->get_info().obj_lock = obj_lock;
op_ret = s->bucket->put_info(this, false, real_time());
return op_ret;
- });
+ }, y);
return;
}
rgw::sal::Attrs attrs(s->bucket_attrs);
attrs[RGW_ATTR_PUBLIC_ACCESS] = bl;
return s->bucket->merge_and_store_attrs(this, attrs, s->yield);
- });
+ }, y);
}
attrs.erase(RGW_ATTR_PUBLIC_ACCESS);
op_ret = s->bucket->merge_and_store_attrs(this, attrs, s->yield);
return op_ret;
- });
+ }, y);
}
int RGWPutBucketEncryption::get_params(optional_yield y)
rgw::sal::Attrs attrs = s->bucket->get_attrs();
attrs[RGW_ATTR_BUCKET_ENCRYPTION_POLICY] = conf_bl;
return s->bucket->merge_and_store_attrs(this, attrs, y);
- });
+ }, y);
}
int RGWGetBucketEncryption::verify_permission(optional_yield y)
attrs.erase(RGW_ATTR_BUCKET_ENCRYPTION_KEY_ID);
op_ret = s->bucket->merge_and_store_attrs(this, attrs, y);
return op_ret;
- });
+ }, y);
}
void rgw_slo_entry::decode_json(JSONObj *obj)
/** Get global rate limit configuration*/
virtual void get_ratelimit(RGWRateLimitInfo& bucket_ratelimit, RGWRateLimitInfo& user_ratelimit, RGWRateLimitInfo& anon_ratelimit) = 0;
/** Enable or disable a set of buckets, e.g. if a User is suspended */
- virtual int set_buckets_enabled(const DoutPrefixProvider* dpp, std::vector<rgw_bucket>& buckets, bool enabled) = 0;
+ virtual int set_buckets_enabled(const DoutPrefixProvider* dpp, std::vector<rgw_bucket>& buckets, bool enabled, optional_yield y) = 0;
/** Get a new request ID */
virtual uint64_t get_new_req_id() = 0;
/** Get a handler for bucket sync policy. */
/** Sync this bucket's stats to the owning user's stats in the backing store */
virtual int sync_user_stats(const DoutPrefixProvider *dpp, optional_yield y) = 0;
/** Refresh the metadata stats (size, count, and so on) from the backing store */
- virtual int update_container_stats(const DoutPrefixProvider* dpp) = 0;
+ virtual int update_container_stats(const DoutPrefixProvider* dpp, optional_yield y) = 0;
/** Check if this bucket needs resharding, and schedule it if it does */
virtual int check_bucket_shards(const DoutPrefixProvider* dpp, optional_yield y) = 0;
/** Change the owner of this bucket in the backing store. Current owner must be set. Does not
virtual int merge_and_store_attrs(const DoutPrefixProvider* dpp, Attrs& new_attrs, optional_yield y) = 0;
/** Try to refresh the cached bucket info from the backing store. Used in
* read-modify-update loop. */
- virtual int try_refresh_info(const DoutPrefixProvider* dpp, ceph::real_time* pmtime) = 0;
+ virtual int try_refresh_info(const DoutPrefixProvider* dpp, ceph::real_time* pmtime, optional_yield y) = 0;
/** Read usage information about this bucket from the backing store */
virtual int read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries,
bool* is_truncated, RGWUsageIter& usage_iter,
/** Set a timeout on the check_index() call. May be removed from API */
virtual int set_tag_timeout(const DoutPrefixProvider *dpp, uint64_t timeout) = 0;
/** Remove this specific bucket instance from the backing store. May be removed from API */
- virtual int purge_instance(const DoutPrefixProvider* dpp) = 0;
+ virtual int purge_instance(const DoutPrefixProvider* dpp, optional_yield y) = 0;
/** Set the cached object count of this bucket */
virtual void set_count(uint64_t _count) = 0;
/** Set the cached size of this bucket */
/** Restore the previous swift version of this object */
virtual int swift_versioning_restore(bool& restored, /* out */
- const DoutPrefixProvider* dpp) = 0;
+ const DoutPrefixProvider* dpp, optional_yield y) = 0;
/** Copy the current version of a swift object to the configured destination bucket*/
virtual int swift_versioning_copy(const DoutPrefixProvider* dpp,
optional_yield y) = 0;
return 0;
}
- int DBBucket::update_container_stats(const DoutPrefixProvider *dpp)
+ int DBBucket::update_container_stats(const DoutPrefixProvider *dpp, optional_yield y)
{
return 0;
}
return ret;
}
- int DBBucket::try_refresh_info(const DoutPrefixProvider *dpp, ceph::real_time *pmtime)
+ int DBBucket::try_refresh_info(const DoutPrefixProvider *dpp, ceph::real_time *pmtime, optional_yield y)
{
int ret = 0;
return 0;
}
- int DBBucket::purge_instance(const DoutPrefixProvider *dpp)
+ int DBBucket::purge_instance(const DoutPrefixProvider *dpp, optional_yield y)
{
/* XXX: CHECK: for dbstore only single instance supported.
* Remove all the objects for that instance? Anything extra needed?
}
int DBObject::swift_versioning_restore(bool& restored,
- const DoutPrefixProvider* dpp)
+ const DoutPrefixProvider* dpp, optional_yield y)
{
return 0;
}
return;
}
- int DBStore::set_buckets_enabled(const DoutPrefixProvider *dpp, vector<rgw_bucket>& buckets, bool enabled)
+ int DBStore::set_buckets_enabled(const DoutPrefixProvider *dpp, vector<rgw_bucket>& buckets, bool enabled, optional_yield y)
{
int ret = 0;
bool *syncstopped = nullptr) override;
virtual int read_stats_async(const DoutPrefixProvider *dpp, const bucket_index_layout_generation& idx_layout, int shard_id, RGWGetBucketStats_CB* ctx) override;
virtual int sync_user_stats(const DoutPrefixProvider *dpp, optional_yield y) override;
- virtual int update_container_stats(const DoutPrefixProvider *dpp) override;
+ virtual int update_container_stats(const DoutPrefixProvider *dpp, optional_yield y) override;
virtual int check_bucket_shards(const DoutPrefixProvider *dpp, optional_yield y) override;
virtual int chown(const DoutPrefixProvider *dpp, User& new_user, optional_yield y) override;
virtual int put_info(const DoutPrefixProvider *dpp, bool exclusive, ceph::real_time mtime) override;
virtual int check_empty(const DoutPrefixProvider *dpp, optional_yield y) override;
virtual int check_quota(const DoutPrefixProvider *dpp, RGWQuota& quota, uint64_t obj_size, optional_yield y, bool check_size_only = false) override;
virtual int merge_and_store_attrs(const DoutPrefixProvider *dpp, Attrs& attrs, optional_yield y) override;
- virtual int try_refresh_info(const DoutPrefixProvider *dpp, ceph::real_time *pmtime) override;
+ virtual int try_refresh_info(const DoutPrefixProvider *dpp, ceph::real_time *pmtime, optional_yield y) override;
virtual int read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries,
bool *is_truncated, RGWUsageIter& usage_iter,
std::map<rgw_user_bucket, rgw_usage_log_entry>& usage) override;
virtual int check_index(const DoutPrefixProvider *dpp, std::map<RGWObjCategory, RGWStorageStats>& existing_stats, std::map<RGWObjCategory, RGWStorageStats>& calculated_stats) override;
virtual int rebuild_index(const DoutPrefixProvider *dpp) override;
virtual int set_tag_timeout(const DoutPrefixProvider *dpp, uint64_t timeout) override;
- virtual int purge_instance(const DoutPrefixProvider *dpp) override;
+ virtual int purge_instance(const DoutPrefixProvider *dpp, optional_yield y) override;
virtual std::unique_ptr<Bucket> clone() override {
return std::make_unique<DBBucket>(*this);
}
/* Swift versioning */
virtual int swift_versioning_restore(bool& restored,
- const DoutPrefixProvider* dpp) override;
+ const DoutPrefixProvider* dpp, optional_yield y) override;
virtual int swift_versioning_copy(const DoutPrefixProvider* dpp,
optional_yield y) override;
const std::map<std::string, std::string>& meta) override;
virtual void get_ratelimit(RGWRateLimitInfo& bucket_ratelimit, RGWRateLimitInfo& user_ratelimit, RGWRateLimitInfo& anon_ratelimit) override;
virtual void get_quota(RGWQuota& quota) override;
- virtual int set_buckets_enabled(const DoutPrefixProvider *dpp, std::vector<rgw_bucket>& buckets, bool enabled) override;
+ virtual int set_buckets_enabled(const DoutPrefixProvider *dpp, std::vector<rgw_bucket>& buckets, bool enabled, optional_yield y) override;
virtual int get_sync_policy_handler(const DoutPrefixProvider *dpp,
std::optional<rgw_zone_id> zone,
std::optional<rgw_bucket> bucket,
}
int FilterDriver::set_buckets_enabled(const DoutPrefixProvider* dpp,
- std::vector<rgw_bucket>& buckets, bool enabled)
+ std::vector<rgw_bucket>& buckets, bool enabled, optional_yield y)
{
- return next->set_buckets_enabled(dpp, buckets, enabled);
+ return next->set_buckets_enabled(dpp, buckets, enabled, y);
}
uint64_t FilterDriver::get_new_req_id()
return next->sync_user_stats(dpp, y);
}
-int FilterBucket::update_container_stats(const DoutPrefixProvider* dpp)
+int FilterBucket::update_container_stats(const DoutPrefixProvider* dpp, optional_yield y)
{
- return next->update_container_stats(dpp);
+ return next->update_container_stats(dpp, y);
}
int FilterBucket::check_bucket_shards(const DoutPrefixProvider* dpp, optional_yield y)
}
int FilterBucket::try_refresh_info(const DoutPrefixProvider* dpp,
- ceph::real_time* pmtime)
+ ceph::real_time* pmtime, optional_yield y)
{
- return next->try_refresh_info(dpp, pmtime);
+ return next->try_refresh_info(dpp, pmtime, y);
}
int FilterBucket::read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch,
return next->set_tag_timeout(dpp, timeout);
}
-int FilterBucket::purge_instance(const DoutPrefixProvider* dpp)
+int FilterBucket::purge_instance(const DoutPrefixProvider* dpp, optional_yield y)
{
- return next->purge_instance(dpp);
+ return next->purge_instance(dpp, y);
}
std::unique_ptr<MultipartUpload> FilterBucket::get_multipart_upload(
};
int FilterObject::swift_versioning_restore(bool& restored,
- const DoutPrefixProvider* dpp)
+ const DoutPrefixProvider* dpp, optional_yield y)
{
- return next->swift_versioning_restore(restored, dpp);
+ return next->swift_versioning_restore(restored, dpp, y);
}
int FilterObject::swift_versioning_copy(const DoutPrefixProvider* dpp,
RGWRateLimitInfo& anon_ratelimit) override;
virtual int set_buckets_enabled(const DoutPrefixProvider* dpp,
std::vector<rgw_bucket>& buckets,
- bool enabled) override;
+ bool enabled, optional_yield y) override;
virtual uint64_t get_new_req_id() override;
virtual int get_sync_policy_handler(const DoutPrefixProvider* dpp,
std::optional<rgw_zone_id> zone,
const bucket_index_layout_generation& idx_layout,
int shard_id, RGWGetBucketStats_CB* ctx) override;
virtual int sync_user_stats(const DoutPrefixProvider *dpp, optional_yield y) override;
- virtual int update_container_stats(const DoutPrefixProvider* dpp) override;
+ virtual int update_container_stats(const DoutPrefixProvider* dpp, optional_yield y) override;
virtual int check_bucket_shards(const DoutPrefixProvider* dpp, optional_yield y) override;
virtual int chown(const DoutPrefixProvider* dpp, User& new_user,
optional_yield y) override;
virtual int merge_and_store_attrs(const DoutPrefixProvider* dpp,
Attrs& new_attrs, optional_yield y) override;
virtual int try_refresh_info(const DoutPrefixProvider* dpp,
- ceph::real_time* pmtime) override;
+ ceph::real_time* pmtime, optional_yield y) override;
virtual int read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch,
uint64_t end_epoch, uint32_t max_entries,
bool* is_truncated, RGWUsageIter& usage_iter,
calculated_stats) override;
virtual int rebuild_index(const DoutPrefixProvider *dpp) override;
virtual int set_tag_timeout(const DoutPrefixProvider *dpp, uint64_t timeout) override;
- virtual int purge_instance(const DoutPrefixProvider* dpp) override;
+ virtual int purge_instance(const DoutPrefixProvider* dpp, optional_yield y) override;
virtual void set_count(uint64_t _count) override { return next->set_count(_count); }
virtual void set_size(uint64_t _size) override { return next->set_size(_size); }
virtual bool empty() const override { return next->empty(); }
virtual void clear_instance() override { return next->clear_instance(); }
virtual int swift_versioning_restore(bool& restored, /* out */
- const DoutPrefixProvider* dpp) override;
+ const DoutPrefixProvider* dpp, optional_yield y) override;
virtual int swift_versioning_copy(const DoutPrefixProvider* dpp,
optional_yield y) override;