auto& bucket = i->second;
std::unique_ptr<rgw::sal::Bucket> actual_bucket;
- int r = driver->get_bucket(dpp, &user, user.get_tenant(), bucket->get_name(), &actual_bucket, null_yield);
+ int r = driver->get_bucket(dpp, &user, user.get_tenant(), bucket->get_name(), &actual_bucket, y);
if (r < 0) {
ldout(driver->ctx(), 0) << "could not get bucket info for bucket=" << bucket << dendl;
continue;
cout << "bucket info mismatch: expected " << actual_bucket << " got " << bucket << std::endl;
if (fix) {
cout << "fixing" << std::endl;
- r = actual_bucket->chown(dpp, user, null_yield);
+ r = actual_bucket->chown(dpp, user, y);
if (r < 0) {
cerr << "failed to fix bucket: " << cpp_strerror(-r) << std::endl;
}
* as we may now not reach the end of
* the loop body */
- ret = bucket->load_bucket(dpp, null_yield);
+ ret = bucket->load_bucket(dpp, y);
if (ret < 0)
continue;
int ret = 0;
const std::string& bucket_name = op_state.get_bucket_name();
if (!bucket_name.empty()) {
- ret = bucket.init(driver, op_state, null_yield, dpp);
+ ret = bucket.init(driver, op_state, y, dpp);
if (-ENOENT == ret)
return -ERR_NO_SUCH_BUCKET;
else if (ret < 0)
if (!pent) {
pent = &ent;
}
- int r = svc.bi->read_stats(dpp, bucket_info, pent, null_yield);
+ int r = svc.bi->read_stats(dpp, bucket_info, pent, y);
if (r < 0) {
ldpp_dout(dpp, 20) << __func__ << "(): failed to read bucket stats (r=" << r << ")" << dendl;
return r;
int RGWAsyncPutBucketInstanceInfo::_send_request(const DoutPrefixProvider *dpp)
{
auto r = store->getRados()->put_bucket_instance_info(bucket_info, exclusive,
- mtime, attrs, dpp);
+ mtime, attrs, dpp, null_yield);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to put bucket instance info for "
<< bucket_info.bucket << dendl;
(sync_pipe.dest_bucket_info.flags & BUCKET_VERSIONS_SUSPENDED)) {
ldout(sc->cct, 0) << "SYNC_ARCHIVE: sync_object: enabling object versioning for archive bucket" << dendl;
sync_pipe.dest_bucket_info.flags = (sync_pipe.dest_bucket_info.flags & ~BUCKET_VERSIONS_SUSPENDED) | BUCKET_VERSIONED;
- int op_ret = sync_env->driver->getRados()->put_bucket_instance_info(sync_pipe.dest_bucket_info, false, real_time(), NULL, sync_env->dpp);
+ int op_ret = sync_env->driver->getRados()->put_bucket_instance_info(sync_pipe.dest_bucket_info, false, real_time(), NULL, sync_env->dpp, null_yield);
if (op_ret < 0) {
ldpp_dout(sync_env->dpp, 0) << "SYNC_ARCHIVE: sync_object: error versioning archive bucket" << dendl;
return NULL;
return r;
}
- ret = put_linked_bucket_info(info, exclusive, ceph::real_time(), pep_objv, &attrs, true, dpp);
+ ret = put_linked_bucket_info(info, exclusive, ceph::real_time(), pep_objv, &attrs, true, dpp, y);
if (ret == -ECANCELED) {
ret = -EEXIST;
}
info.owner = owner.get_id();
- r = put_bucket_instance_info(info, false, real_time(), &attrs, dpp);
+ r = put_bucket_instance_info(info, false, real_time(), &attrs, dpp, null_yield);
if (r < 0) {
ldpp_dout(dpp, 0) << "NOTICE: put_bucket_info on bucket=" << bucket.name << " returned err=" << r << dendl;
return r;
info.flags |= BUCKET_SUSPENDED;
}
- r = put_bucket_instance_info(info, false, real_time(), &attrs, dpp);
+ r = put_bucket_instance_info(info, false, real_time(), &attrs, dpp, null_yield);
if (r < 0) {
ldpp_dout(dpp, 0) << "NOTICE: put_bucket_info on bucket=" << bucket.name << " returned err=" << r << ", skipping bucket" << dendl;
ret = r;
int RGWRados::put_bucket_instance_info(RGWBucketInfo& info, bool exclusive,
real_time mtime, map<string, bufferlist> *pattrs,
- const DoutPrefixProvider *dpp)
+ const DoutPrefixProvider *dpp, optional_yield y)
{
- return ctl.bucket->store_bucket_instance_info(info.bucket, info, null_yield, dpp,
+ return ctl.bucket->store_bucket_instance_info(info.bucket, info, y, dpp,
RGWBucketCtl::BucketInstance::PutParams()
.set_exclusive(exclusive)
.set_mtime(mtime)
int RGWRados::put_linked_bucket_info(RGWBucketInfo& info, bool exclusive, real_time mtime, obj_version *pep_objv,
map<string, bufferlist> *pattrs, bool create_entry_point,
- const DoutPrefixProvider *dpp)
+ const DoutPrefixProvider *dpp, optional_yield y)
{
bool create_head = !info.has_instance_obj || create_entry_point;
- int ret = put_bucket_instance_info(info, exclusive, mtime, pattrs, dpp);
+ int ret = put_bucket_instance_info(info, exclusive, mtime, pattrs, dpp, y);
if (ret < 0) {
return ret;
}
*pep_objv = ot.write_version;
}
}
- ret = ctl.bucket->store_bucket_entrypoint_info(info.bucket, entry_point, null_yield, dpp, RGWBucketCtl::Bucket::PutParams()
+ ret = ctl.bucket->store_bucket_entrypoint_info(info.bucket, entry_point, y, dpp, RGWBucketCtl::Bucket::PutParams()
.set_exclusive(exclusive)
.set_objv_tracker(&ot)
.set_mtime(mtime));
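For orientation, a minimal caller-side sketch of the signature introduced above; the wrapper name update_bucket_instance() and its variables are hypothetical, not part of this change. Callers running under a beast/coroutine request forward their optional_yield so the librados call can suspend the coroutine, while paths with no yield context (async requests, admin tools, the resharding code below) pass null_yield and block:

// Sketch only, assuming the updated signature shown above:
//   int RGWRados::put_bucket_instance_info(RGWBucketInfo&, bool exclusive,
//       ceph::real_time mtime, std::map<std::string, bufferlist>* pattrs,
//       const DoutPrefixProvider* dpp, optional_yield y);
// (would live in an rgw .cc file that already has rgw_rados.h available)
static int update_bucket_instance(const DoutPrefixProvider* dpp, RGWRados* rados,
                                  RGWBucketInfo& info,
                                  std::map<std::string, bufferlist>& attrs,
                                  optional_yield y)
{
  // forward the caller's yield context down to the RADOS write
  return rados->put_bucket_instance_info(info, false, ceph::real_time(),
                                         &attrs, dpp, y);
}
// A background or admin path with no coroutine context would instead call:
//   update_bucket_instance(dpp, rados, info, attrs, null_yield);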
std::map<RGWObjCategory, RGWStorageStats>& stats, std::string *max_marker, bool* syncstopped = NULL);
int get_bucket_stats_async(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, const rgw::bucket_index_layout_generation& idx_layout, int shard_id, RGWGetBucketStats_CB *cb);
- int put_bucket_instance_info(RGWBucketInfo& info, bool exclusive, ceph::real_time mtime, std::map<std::string, bufferlist> *pattrs, const DoutPrefixProvider *dpp);
+ int put_bucket_instance_info(RGWBucketInfo& info, bool exclusive, ceph::real_time mtime, std::map<std::string, bufferlist> *pattrs, const DoutPrefixProvider *dpp, optional_yield y);
/* xxx dang obj_ctx -> svc */
int get_bucket_instance_info(const std::string& meta_key, RGWBucketInfo& info, ceph::real_time *pmtime, std::map<std::string, bufferlist> *pattrs, optional_yield y, const DoutPrefixProvider *dpp);
int get_bucket_instance_info(const rgw_bucket& bucket, RGWBucketInfo& info, ceph::real_time *pmtime, std::map<std::string, bufferlist> *pattrs, optional_yield y, const DoutPrefixProvider *dpp);
int put_linked_bucket_info(RGWBucketInfo& info, bool exclusive, ceph::real_time mtime, obj_version *pep_objv,
std::map<std::string, bufferlist> *pattrs, bool create_entry_point,
- const DoutPrefixProvider *dpp);
+ const DoutPrefixProvider *dpp, optional_yield y);
int cls_obj_prepare_op(const DoutPrefixProvider *dpp, BucketShard& bs, RGWModifyOp op, std::string& tag, rgw_obj& obj, uint16_t bilog_flags, optional_yield y, rgw_zone_set *zones_trace = nullptr);
int cls_obj_complete_op(BucketShard& bs, const rgw_obj& obj, RGWModifyOp op, std::string& tag, int64_t pool, uint64_t epoch,
if (ret = fault.check("set_target_layout");
ret == 0) { // no fault injected, write the bucket instance metadata
ret = store->getRados()->put_bucket_instance_info(bucket_info, false,
- real_time(), &bucket_attrs, dpp);
+ real_time(), &bucket_attrs, dpp, null_yield);
} else if (ret == -ECANCELED) {
fault.clear(); // clear the fault so a retry can succeed
}
ret == 0) { // no fault injected, revert the bucket instance metadata
ret = store->getRados()->put_bucket_instance_info(bucket_info, false,
real_time(),
- &bucket_attrs, dpp);
+ &bucket_attrs, dpp, null_yield);
} else if (ret == -ECANCELED) {
fault.clear(); // clear the fault so a retry can succeed
}
int ret = fault.check("commit_target_layout");
if (ret == 0) { // no fault injected, write the bucket instance metadata
ret = store->getRados()->put_bucket_instance_info(
- bucket_info, false, real_time(), &bucket_attrs, dpp);
+ bucket_info, false, real_time(), &bucket_attrs, dpp, null_yield);
} else if (ret == -ECANCELED) {
fault.clear(); // clear the fault so a retry can succeed
}
string bucket_ver, master_ver;
- ret = load_bucket(dpp, null_yield);
+ ret = load_bucket(dpp, y);
if (ret < 0)
return ret;
results.is_truncated = true;
while (results.is_truncated) {
- ret = list(dpp, params, listing_max_entries, results, null_yield);
+ ret = list(dpp, params, listing_max_entries, results, y);
if (ret < 0)
return ret;
}
} // for all shadow objs
- ret = head_obj->delete_obj_aio(dpp, astate, handles.get(), keep_index_consistent, null_yield);
+ ret = head_obj->delete_obj_aio(dpp, astate, handles.get(), keep_index_consistent, y);
if (ret < 0) {
ldpp_dout(dpp, -1) << "ERROR: delete obj aio failed with " << ret << dendl;
return ret;
int RadosBucket::put_info(const DoutPrefixProvider* dpp, bool exclusive, ceph::real_time _mtime)
{
mtime = _mtime;
- return store->getRados()->put_bucket_instance_info(info, exclusive, mtime, &attrs, dpp);
+ return store->getRados()->put_bucket_instance_info(info, exclusive, mtime, &attrs, dpp, null_yield);
}
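As shown just above, RadosBucket::put_info() still issues a blocking call via null_yield; a SAL-layer caller only supplies dpp, the exclusive flag, and an mtime. A minimal, hypothetical caller (persist_bucket_info and its variables are illustrative only):

// Sketch only: persist updated bucket metadata through the SAL interface.
// bucket is an rgw::sal::Bucket* obtained earlier (e.g. via driver->get_bucket()).
static int persist_bucket_info(const DoutPrefixProvider* dpp, rgw::sal::Bucket* bucket)
{
  int r = bucket->put_info(dpp, false /* not exclusive */, ceph::real_clock::now());
  if (r < 0) {
    ldpp_dout(dpp, 0) << "failed to store bucket info for "
                      << bucket->get_name() << ": r=" << r << dendl;
  }
  return r;
}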
/* Make sure to call get_bucket_info() if you need it first */
std::unique_ptr<rgw::sal::Object::ReadOp> read_op(get_read_op());
read_op->params.lastmod = &read_mtime;
- ret = read_op->prepare(null_yield, dpp);
+ ret = read_op->prepare(y, dpp);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: Updating tier object(" << o.key << ") failed ret=" << ret << dendl;
return ret;
target_placement.inherit_from(tier_ctx.bucket_info.placement_rule);
target_placement.storage_class = tier->get_storage_class();
- ret = write_cloud_tier(dpp, null_yield, tier_ctx.o.versioned_epoch,
+ ret = write_cloud_tier(dpp, y, tier_ctx.o.versioned_epoch,
tier, tier_ctx.is_multipart_upload,
target_placement, tier_ctx.obj);
std::string oid = info.tenant + get_names_oid_prefix() + role_name;
bufferlist bl;
- int ret = rgw_get_system_obj(sysobj, store->svc()->zone->get_zone_params().roles_pool, oid, bl, nullptr, nullptr, null_yield, dpp);
+ int ret = rgw_get_system_obj(sysobj, store->svc()->zone->get_zone_params().roles_pool, oid, bl, nullptr, nullptr, y, dpp);
if (ret < 0) {
return ret;
}
std::string oid = info.tenant + get_names_oid_prefix() + info.name;
bufferlist bl;
- int ret = rgw_get_system_obj(sysobj, store->svc()->zone->get_zone_params().roles_pool, oid, bl, nullptr, nullptr, null_yield, dpp);
+ int ret = rgw_get_system_obj(sysobj, store->svc()->zone->get_zone_params().roles_pool, oid, bl, nullptr, nullptr, y, dpp);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed reading role name from Role pool: " << info.name <<
": " << cpp_strerror(-ret) << dendl;
RGWUserInfo info;
real_time mtime;
- int ret = read_user_info(ctx, user, &info, nullptr, &mtime, nullptr, nullptr, null_yield, dpp);
+ int ret = read_user_info(ctx, user, &info, nullptr, &mtime, nullptr, nullptr, y, dpp);
if (ret < 0)
{
return ret;