const char *if_nomatch,
const std::string *user_data,
rgw_zone_set *zones_trace,
- bool *pcanceled, optional_yield y)
+ bool *pcanceled, optional_yield y,
+ bool log_op)
{
int r = writer.drain();
if (r < 0) {
read_cloudtier_info_from_attrs(attrs, obj_op.meta.category, manifest);
- r = obj_op.write_meta(dpp, actual_size, accounted_size, attrs, y);
+ r = obj_op.write_meta(dpp, actual_size, accounted_size, attrs, y, log_op);
if (r < 0) {
if (r == -ETIMEDOUT) {
// The head object write may eventually succeed, clear the set of objects for deletion. if it
const char *if_nomatch,
const std::string *user_data,
rgw_zone_set *zones_trace,
- bool *pcanceled, optional_yield y)
+ bool *pcanceled, optional_yield y,
+ bool log_op)
{
int r = writer.drain();
if (r < 0) {
obj_op.meta.zones_trace = zones_trace;
obj_op.meta.modify_tail = true;
- r = obj_op.write_meta(dpp, actual_size, accounted_size, attrs, y);
+ r = obj_op.write_meta(dpp, actual_size, accounted_size, attrs, y, log_op);
if (r < 0)
return r;
ceph::real_time set_mtime, rgw::sal::Attrs& attrs,
ceph::real_time delete_at, const char *if_match, const char *if_nomatch,
const string *user_data, rgw_zone_set *zones_trace, bool *pcanceled,
- optional_yield y)
+ optional_yield y, bool log_op)
{
int r = writer.drain();
if (r < 0)
}
r = obj_op.write_meta(dpp, actual_size + cur_size,
accounted_size + *cur_accounted_size,
- attrs, y);
+ attrs, y, log_op);
if (r < 0) {
return r;
}
const char *if_match, const char *if_nomatch,
const std::string *user_data,
rgw_zone_set *zones_trace, bool *canceled,
- optional_yield y) override;
+ optional_yield y,
+ bool log_op = true) override;
};
const char *if_match, const char *if_nomatch,
const std::string *user_data,
rgw_zone_set *zones_trace, bool *canceled,
- optional_yield y) override;
+ optional_yield y,
+ bool log_op = true) override;
};
std::map<std::string, bufferlist>& attrs, ceph::real_time delete_at,
const char *if_match, const char *if_nomatch, const std::string *user_data,
rgw_zone_set *zones_trace, bool *canceled,
- optional_yield y) override;
+ optional_yield y,
+ bool log_op = true) override;
};
} // namespace putobj
continue;
}
- // This null_yield can stay, for now, since we're in our own thread
- add_datalog_entry(&dpp, store->svc.datalog_rados, bucket_info,
- bs.shard_id, null_yield);
+ if (c->log_op) {
+ // This null_yield can stay, for now, since we're in our own thread
+ add_datalog_entry(&dpp, store->svc.datalog_rados, bucket_info,
+ bs.shard_id, null_yield);
+ }
}
}
}
uint64_t size, uint64_t accounted_size,
map<string, bufferlist>& attrs,
bool assume_noent, bool modify_tail,
- void *_index_op, optional_yield y)
+ void *_index_op, optional_yield y,
+ bool log_op)
{
RGWRados::Bucket::UpdateIndex *index_op = static_cast<RGWRados::Bucket::UpdateIndex *>(_index_op);
RGWRados *store = target->get_store();
if (!index_op->is_prepared()) {
tracepoint(rgw_rados, prepare_enter, req_id.c_str());
- r = index_op->prepare(dpp, CLS_RGW_OP_ADD, &state->write_tag, y);
+ r = index_op->prepare(dpp, CLS_RGW_OP_ADD, &state->write_tag, y, log_op);
tracepoint(rgw_rados, prepare_exit, req_id.c_str());
if (r < 0)
return r;
meta.set_mtime, etag, content_type,
storage_class, &acl_bl,
meta.category, meta.remove_objs, y,
- meta.user_data, meta.appendable);
+ meta.user_data, meta.appendable, log_op);
tracepoint(rgw_rados, complete_exit, req_id.c_str());
if (r < 0)
goto done_cancel;
state = NULL;
if (versioned_op && meta.olh_epoch) {
- r = store->set_olh(dpp, target->get_ctx(), target->get_bucket_info(), obj, false, NULL, *meta.olh_epoch, real_time(), false, y, meta.zones_trace);
+ r = store->set_olh(dpp, target->get_ctx(), target->get_bucket_info(), obj, false, NULL, *meta.olh_epoch, real_time(), false, y, meta.zones_trace, log_op);
if (r < 0) {
return r;
}
return 0;
done_cancel:
- int ret = index_op->cancel(dpp, meta.remove_objs, y);
+ int ret = index_op->cancel(dpp, meta.remove_objs, y, log_op);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: index_op.cancel() returned ret=" << ret << dendl;
}
}
int RGWRados::Object::Write::write_meta(const DoutPrefixProvider *dpp, uint64_t size, uint64_t accounted_size,
- map<string, bufferlist>& attrs, optional_yield y)
+ map<string, bufferlist>& attrs, optional_yield y, bool log_op)
{
RGWBucketInfo& bucket_info = target->get_bucket_info();
bool assume_noent = (meta.if_match == NULL && meta.if_nomatch == NULL);
int r;
if (assume_noent) {
- r = _do_write_meta(dpp, size, accounted_size, attrs, assume_noent, meta.modify_tail, (void *)&index_op, y);
+ r = _do_write_meta(dpp, size, accounted_size, attrs, assume_noent, meta.modify_tail, (void *)&index_op, y, log_op);
if (r == -EEXIST) {
assume_noent = false;
}
}
if (!assume_noent) {
- r = _do_write_meta(dpp, size, accounted_size, attrs, assume_noent, meta.modify_tail, (void *)&index_op, y);
+ r = _do_write_meta(dpp, size, accounted_size, attrs, assume_noent, meta.modify_tail, (void *)&index_op, y, log_op);
}
return r;
}
real_time delete_at,
string *petag,
const DoutPrefixProvider *dpp,
- optional_yield y)
+ optional_yield y,
+ bool log_op)
{
string tag;
append_rand_alpha(cct, tag, tag, 32);
}
return processor.complete(accounted_size, etag, mtime, set_mtime, attrs, delete_at,
- nullptr, nullptr, nullptr, nullptr, nullptr, y);
+ nullptr, nullptr, nullptr, nullptr, nullptr, y, log_op);
}
int RGWRados::transition_obj(RGWObjectCtx& obj_ctx,
const real_time& mtime,
uint64_t olh_epoch,
const DoutPrefixProvider *dpp,
- optional_yield y)
+ optional_yield y,
+ bool log_op)
{
rgw::sal::Attrs attrs;
real_time read_mtime;
real_time(),
nullptr /* petag */,
dpp,
- y);
+ y,
+ log_op);
if (ret < 0) {
return ret;
}
* obj: name of the object to delete
* Returns: 0 on success, -ERR# otherwise.
*/
-int RGWRados::Object::Delete::delete_obj(optional_yield y, const DoutPrefixProvider *dpp)
+int RGWRados::Object::Delete::delete_obj(optional_yield y, const DoutPrefixProvider *dpp, bool log_op)
{
RGWRados *store = target->get_store();
const rgw_obj& src_obj = target->get_obj();
bool explicit_marker_version = (!params.marker_version_id.empty());
if (params.versioning_status & BUCKET_VERSIONED || explicit_marker_version) {
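+    // only record this delete in the bilog/datalog when the caller requested
+    // logging and this zone actually replicates data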
+ bool add_log = log_op && store->svc.zone->need_to_log_data();
+
if (instance.empty() || explicit_marker_version) {
rgw_obj marker = obj;
marker.key.instance.clear();
meta.mtime = params.mtime;
}
- int r = store->set_olh(dpp, target->get_ctx(), target->get_bucket_info(), marker, true, &meta, params.olh_epoch, params.unmod_since, params.high_precision_time, y, params.zones_trace);
+ int r = store->set_olh(dpp, target->get_ctx(), target->get_bucket_info(), marker, true,
+ &meta, params.olh_epoch, params.unmod_since, params.high_precision_time,
+ y, params.zones_trace, add_log);
if (r < 0) {
return r;
}
return r;
}
result.delete_marker = dirent.is_delete_marker();
- r = store->unlink_obj_instance(dpp, target->get_ctx(), target->get_bucket_info(), obj, params.olh_epoch, y, params.zones_trace);
+ r = store->unlink_obj_instance(dpp, target->get_ctx(), target->get_bucket_info(), obj, params.olh_epoch,
+                                     y, params.zones_trace, add_log);
if (r < 0) {
return r;
}
return r;
}
- add_datalog_entry(dpp, store->svc.datalog_rados,
- target->get_bucket_info(), bs->shard_id, y);
+ if (add_log) {
+ add_datalog_entry(dpp, store->svc.datalog_rados,
+ target->get_bucket_info(), bs->shard_id, y);
+ }
return 0;
}
index_op.set_zones_trace(params.zones_trace);
index_op.set_bilog_flags(params.bilog_flags);
- r = index_op.prepare(dpp, CLS_RGW_OP_DEL, &state->write_tag, y);
+ r = index_op.prepare(dpp, CLS_RGW_OP_DEL, &state->write_tag, y, log_op);
if (r < 0)
return r;
tombstone_entry entry{*state};
obj_tombstone_cache->add(obj, entry);
}
- r = index_op.complete_del(dpp, poolid, ioctx.get_last_version(), state->mtime, params.remove_objs, y);
+ r = index_op.complete_del(dpp, poolid, ioctx.get_last_version(), state->mtime, params.remove_objs, y, log_op);
int ret = target->complete_atomic_modification(dpp);
if (ret < 0) {
}
/* other than that, no need to propagate error */
} else {
- int ret = index_op.cancel(dpp, params.remove_objs, y);
+ int ret = index_op.cancel(dpp, params.remove_objs, y, log_op);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: index_op.cancel() returned ret=" << ret << dendl;
}
int versioning_status, // versioning flags defined in enum RGWBucketFlags
uint16_t bilog_flags,
const real_time& expiration_time,
- rgw_zone_set *zones_trace)
+ rgw_zone_set *zones_trace,
+ bool log_op)
{
RGWRados::Object del_target(this, bucket_info, obj_ctx, obj);
RGWRados::Object::Delete del_op(&del_target);
del_op.params.expiration_time = expiration_time;
del_op.params.zones_trace = zones_trace;
- return del_op.delete_obj(null_yield, dpp);
+ return del_op.delete_obj(null_yield, dpp, log_op);
}
int RGWRados::delete_raw_obj(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj)
return 0;
}
-int RGWRados::Bucket::UpdateIndex::prepare(const DoutPrefixProvider *dpp, RGWModifyOp op, const string *write_tag, optional_yield y)
+int RGWRados::Bucket::UpdateIndex::prepare(const DoutPrefixProvider *dpp, RGWModifyOp op, const string *write_tag,
+ optional_yield y, bool log_op)
{
if (blind) {
return 0;
}
}
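+  // gate the bilog flag on both the caller's request and whether this zone
+  // logs data at all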
+ bool add_log = log_op && store->svc.zone->need_to_log_data();
+
int r = guard_reshard(dpp, obj, nullptr, [&](BucketShard *bs) -> int {
- return store->cls_obj_prepare_op(dpp, *bs, op, optag, obj, bilog_flags, y, zones_trace);
+ return store->cls_obj_prepare_op(dpp, *bs, op, optag, obj, bilog_flags, y, zones_trace, add_log);
});
if (r < 0) {
list<rgw_obj_index_key> *remove_objs,
optional_yield y,
const string *user_data,
- bool appendable)
+ bool appendable,
+ bool log_op)
{
if (blind) {
return 0;
ent.meta.content_type = content_type;
ent.meta.appendable = appendable;
-  ret = store->cls_obj_complete_add(*bs, obj, optag, poolid, epoch, ent, category, remove_objs, bilog_flags, zones_trace);
-  add_datalog_entry(dpp, store->svc.datalog_rados,
-                    target->bucket_info, bs->shard_id, y);
+  bool add_log = log_op && store->svc.zone->need_to_log_data();
+  ret = store->cls_obj_complete_add(*bs, obj, optag, poolid, epoch, ent, category, remove_objs, bilog_flags, zones_trace, add_log);
+  if (add_log) {
+    add_datalog_entry(dpp, store->svc.datalog_rados,
+                      target->bucket_info, bs->shard_id, y);
+  }
return ret;
}
int64_t poolid, uint64_t epoch,
real_time& removed_mtime,
list<rgw_obj_index_key> *remove_objs,
- optional_yield y)
+ optional_yield y,
+ bool log_op)
{
if (blind) {
return 0;
return ret;
}
-  ret = store->cls_obj_complete_del(*bs, optag, poolid, epoch, obj, removed_mtime, remove_objs, bilog_flags, zones_trace);
-  add_datalog_entry(dpp, store->svc.datalog_rados,
-                    target->bucket_info, bs->shard_id, y);
+  bool add_log = log_op && store->svc.zone->need_to_log_data();
+  ret = store->cls_obj_complete_del(*bs, optag, poolid, epoch, obj, removed_mtime, remove_objs, bilog_flags, zones_trace, add_log);
+
+  if (add_log) {
+    add_datalog_entry(dpp, store->svc.datalog_rados,
+                      target->bucket_info, bs->shard_id, y);
+  }
return ret;
}
int RGWRados::Bucket::UpdateIndex::cancel(const DoutPrefixProvider *dpp,
list<rgw_obj_index_key> *remove_objs,
- optional_yield y)
+ optional_yield y,
+ bool log_op)
{
if (blind) {
return 0;
RGWRados *store = target->get_store();
BucketShard *bs;
+ bool add_log = log_op && store->svc.zone->need_to_log_data();
+
int ret = guard_reshard(dpp, obj, &bs, [&](BucketShard *bs) -> int {
- return store->cls_obj_complete_cancel(*bs, optag, obj, remove_objs, bilog_flags, zones_trace);
+ return store->cls_obj_complete_cancel(*bs, optag, obj, remove_objs, bilog_flags, zones_trace, add_log);
});
- /*
- * need to update data log anyhow, so that whoever follows needs to update its internal markers
- * for following the specific bucket shard log. Otherwise they end up staying behind, and users
- * have no way to tell that they're all caught up
- */
- add_datalog_entry(dpp, store->svc.datalog_rados,
- target->bucket_info, bs->shard_id, y);
+ if (add_log) {
+ /*
+ * need to update data log anyhow, so that whoever follows needs to update its internal markers
+ * for following the specific bucket shard log. Otherwise they end up staying behind, and users
+ * have no way to tell that they're all caught up
+ */
+ add_datalog_entry(dpp, store->svc.datalog_rados,
+ target->bucket_info, bs->shard_id, y);
+ }
return ret;
}
delete_marker, op_tag, meta, olh_epoch,
unmod_since, high_precision_time,
-                              svc.zone->need_to_log_data(), zones_trace);
+ log_data_change, zones_trace);
return rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield);
});
if (r < 0) {
RGWBucketInfo& bucket_info,
const rgw_obj& obj_instance,
const string& op_tag, const string& olh_tag,
- uint64_t olh_epoch, rgw_zone_set *_zones_trace)
+ uint64_t olh_epoch, rgw_zone_set *_zones_trace, bool log_op)
{
rgw_rados_ref ref;
int r = get_obj_head_ref(dpp, bucket_info, obj_instance, &ref);
op.assert_exists(); // bucket index shard must exist
cls_rgw_guard_bucket_resharding(op, -ERR_BUSY_RESHARDING);
cls_rgw_bucket_unlink_instance(op, key, op_tag,
- olh_tag, olh_epoch, svc.zone->need_to_log_data(), zones_trace);
+ olh_tag, olh_epoch, log_op, zones_trace);
return rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, null_yield);
});
if (r < 0) {
bufferlist& olh_tag,
std::map<uint64_t, std::vector<rgw_bucket_olh_log_entry> >& log,
uint64_t *plast_ver,
- rgw_zone_set* zones_trace)
+ rgw_zone_set* zones_trace,
+ bool log_op)
{
if (log.empty()) {
return 0;
liter != remove_instances.end(); ++liter) {
cls_rgw_obj_key& key = *liter;
rgw_obj obj_instance(bucket, key);
- int ret = delete_obj(dpp, obj_ctx, bucket_info, obj_instance, 0, RGW_BILOG_FLAG_VERSIONED_OP, ceph::real_time(), zones_trace);
+ int ret = delete_obj(dpp, obj_ctx, bucket_info, obj_instance, 0, RGW_BILOG_FLAG_VERSIONED_OP, ceph::real_time(), zones_trace, log_op);
if (ret < 0 && ret != -ENOENT) {
ldpp_dout(dpp, 0) << "ERROR: delete_obj() returned " << ret << " obj_instance=" << obj_instance << dendl;
return ret;
/*
* read olh log and apply it
*/
-int RGWRados::update_olh(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx, RGWObjState *state, RGWBucketInfo& bucket_info, const rgw_obj& obj, rgw_zone_set *zones_trace)
+int RGWRados::update_olh(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx, RGWObjState *state, RGWBucketInfo& bucket_info, const rgw_obj& obj, rgw_zone_set *zones_trace, bool log_op)
{
map<uint64_t, vector<rgw_bucket_olh_log_entry> > log;
bool is_truncated;
if (ret < 0) {
return ret;
}
- ret = apply_olh_log(dpp, obj_ctx, *state, bucket_info, obj, state->olh_tag, log, &ver_marker, zones_trace);
+ ret = apply_olh_log(dpp, obj_ctx, *state, bucket_info, obj, state->olh_tag, log, &ver_marker, zones_trace, log_op);
if (ret < 0) {
return ret;
}
// it's possible that the pending xattr from this op prevented the olh
// object from being cleaned by another thread that was deleting the last
// existing version. We invoke a best-effort update_olh here to handle this case.
- int r = update_olh(dpp, obj_ctx, state, bucket_info, olh_obj);
+ int r = update_olh(dpp, obj_ctx, state, bucket_info, olh_obj, zones_trace, log_data_change);
if (r < 0 && r != -ECANCELED) {
ldpp_dout(dpp, 20) << "update_olh() target_obj=" << olh_obj << " returned " << r << dendl;
}
return -EIO;
}
- ret = update_olh(dpp, obj_ctx, state, bucket_info, olh_obj);
+ ret = update_olh(dpp, obj_ctx, state, bucket_info, olh_obj, zones_trace, log_data_change);
if (ret == -ECANCELED) { /* already did what we needed, no need to retry, raced with another user */
ret = 0;
}
}
int RGWRados::unlink_obj_instance(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx, RGWBucketInfo& bucket_info, const rgw_obj& target_obj,
- uint64_t olh_epoch, optional_yield y, rgw_zone_set *zones_trace)
+ uint64_t olh_epoch, optional_yield y, rgw_zone_set *zones_trace, bool log_op)
{
string op_tag;
string olh_tag(state->olh_tag.c_str(), state->olh_tag.length());
- ret = bucket_index_unlink_instance(dpp, bucket_info, target_obj, op_tag, olh_tag, olh_epoch, zones_trace);
+ ret = bucket_index_unlink_instance(dpp, bucket_info, target_obj, op_tag, olh_tag, olh_epoch, zones_trace, log_op);
if (ret < 0) {
olh_cancel_modification(dpp, bucket_info, *state, olh_obj, op_tag, y);
ldpp_dout(dpp, 20) << "bucket_index_unlink_instance() target_obj=" << target_obj << " returned " << ret << dendl;
// it's possible that the pending xattr from this op prevented the olh
// object from being cleaned by another thread that was deleting the last
// existing version. We invoke a best-effort update_olh here to handle this case.
- int r = update_olh(dpp, obj_ctx, state, bucket_info, olh_obj, zones_trace);
+ int r = update_olh(dpp, obj_ctx, state, bucket_info, olh_obj, zones_trace, log_op);
if (r < 0 && r != -ECANCELED) {
ldpp_dout(dpp, 20) << "update_olh() target_obj=" << olh_obj << " returned " << r << dendl;
}
return -EIO;
}
- ret = update_olh(dpp, obj_ctx, state, bucket_info, olh_obj, zones_trace);
+ ret = update_olh(dpp, obj_ctx, state, bucket_info, olh_obj, zones_trace, log_op);
if (ret == -ECANCELED) { /* already did what we needed, no need to retry, raced with another user */
return 0;
}
}
int RGWRados::cls_obj_prepare_op(const DoutPrefixProvider *dpp, BucketShard& bs, RGWModifyOp op, string& tag,
- rgw_obj& obj, uint16_t bilog_flags, optional_yield y, rgw_zone_set *_zones_trace)
+ rgw_obj& obj, uint16_t bilog_flags, optional_yield y, rgw_zone_set *_zones_trace,
+ bool log_op)
{
const bool bitx = cct->_conf->rgw_bucket_index_transaction_instrumentation;
ldout_bitx(bitx, dpp, 10) << "ENTERING " << __func__ << ": bucket-shard=" << bs << " obj=" << obj << " tag=" << tag << " op=" << op << dendl_bitx;
cls_rgw_obj_key key(obj.key.get_index_key_name(), obj.key.instance);
cls_rgw_guard_bucket_resharding(o, -ERR_BUSY_RESHARDING);
- cls_rgw_bucket_prepare_op(o, op, tag, key, obj.key.get_loc(), svc.zone->need_to_log_data(), bilog_flags, zones_trace);
+ cls_rgw_bucket_prepare_op(o, op, tag, key, obj.key.get_loc(), log_op, bilog_flags, zones_trace);
int ret = bs.bucket_obj.operate(dpp, &o, y);
ldout_bitx(bitx, dpp, 10) << "EXITING " << __func__ << ": ret=" << ret << dendl_bitx;
return ret;
int RGWRados::cls_obj_complete_op(BucketShard& bs, const rgw_obj& obj, RGWModifyOp op, string& tag,
int64_t pool, uint64_t epoch,
rgw_bucket_dir_entry& ent, RGWObjCategory category,
- list<rgw_obj_index_key> *remove_objs, uint16_t bilog_flags, rgw_zone_set *_zones_trace)
+ list<rgw_obj_index_key> *remove_objs, uint16_t bilog_flags,
+ rgw_zone_set *_zones_trace, bool log_op)
{
const bool bitx = cct->_conf->rgw_bucket_index_transaction_instrumentation;
ldout_bitx_c(bitx, cct, 10) << "ENTERING " << __func__ << ": bucket-shard=" << bs <<
" obj=" << obj << " tag=" << tag << " op=" << op <<
- ", remove_objs=" << (remove_objs ? *remove_objs : std::list<rgw_obj_index_key>()) << dendl_bitx;
+ ", remove_objs=" << (remove_objs ? *remove_objs : std::list<rgw_obj_index_key>()) <<
+ ", log_op=" << log_op << dendl_bitx;
ldout_bitx_c(bitx, cct, 25) << "BACKTRACE: " << __func__ << ": " << ClibBackTrace(0) << dendl_bitx;
ObjectWriteOperation o;
cls_rgw_obj_key key(ent.key.name, ent.key.instance);
cls_rgw_guard_bucket_resharding(o, -ERR_BUSY_RESHARDING);
cls_rgw_bucket_complete_op(o, op, tag, ver, key, dir_meta, remove_objs,
- svc.zone->need_to_log_data(), bilog_flags, &zones_trace);
+ log_op, bilog_flags, &zones_trace);
complete_op_data *arg;
index_completion_manager->create_completion(obj, op, tag, ver, key, dir_meta, remove_objs,
- svc.zone->need_to_log_data(), bilog_flags, &zones_trace, &arg);
+ log_op, bilog_flags, &zones_trace, &arg);
librados::AioCompletion *completion = arg->rados_completion;
int ret = bs.bucket_obj.aio_operate(arg->rados_completion, &o);
completion->release(); /* can't reference arg here, as it might have already been released */
int RGWRados::cls_obj_complete_add(BucketShard& bs, const rgw_obj& obj, string& tag,
int64_t pool, uint64_t epoch,
rgw_bucket_dir_entry& ent, RGWObjCategory category,
- list<rgw_obj_index_key> *remove_objs, uint16_t bilog_flags, rgw_zone_set *zones_trace)
+ list<rgw_obj_index_key> *remove_objs, uint16_t bilog_flags,
+ rgw_zone_set *zones_trace, bool log_op)
{
- return cls_obj_complete_op(bs, obj, CLS_RGW_OP_ADD, tag, pool, epoch, ent, category, remove_objs, bilog_flags, zones_trace);
+ return cls_obj_complete_op(bs, obj, CLS_RGW_OP_ADD, tag, pool, epoch,
+ ent, category, remove_objs, bilog_flags,
+ zones_trace, log_op);
}
int RGWRados::cls_obj_complete_del(BucketShard& bs, string& tag,
real_time& removed_mtime,
list<rgw_obj_index_key> *remove_objs,
uint16_t bilog_flags,
- rgw_zone_set *zones_trace)
+ rgw_zone_set *zones_trace,
+ bool log_op)
{
rgw_bucket_dir_entry ent;
ent.meta.mtime = removed_mtime;
obj.key.get_index_key(&ent.key);
return cls_obj_complete_op(bs, obj, CLS_RGW_OP_DEL, tag, pool, epoch,
ent, RGWObjCategory::None, remove_objs,
- bilog_flags, zones_trace);
+ bilog_flags, zones_trace, log_op);
}
int RGWRados::cls_obj_complete_cancel(BucketShard& bs, string& tag, rgw_obj& obj,
list<rgw_obj_index_key> *remove_objs,
- uint16_t bilog_flags, rgw_zone_set *zones_trace)
+ uint16_t bilog_flags, rgw_zone_set *zones_trace, bool log_op)
{
rgw_bucket_dir_entry ent;
obj.key.get_index_key(&ent.key);
return cls_obj_complete_op(bs, obj, CLS_RGW_OP_CANCEL, tag,
-1 /* pool id */, 0, ent,
RGWObjCategory::None, remove_objs, bilog_flags,
- zones_trace);
+ zones_trace, log_op);
}
int RGWRados::cls_obj_set_bucket_tag_timeout(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, uint64_t timeout)
uint64_t size, uint64_t accounted_size,
std::map<std::string, bufferlist>& attrs,
bool modify_tail, bool assume_noent,
- void *index_op, optional_yield y);
+ void *index_op, optional_yield y,
+ bool log_op = true);
int write_meta(const DoutPrefixProvider *dpp, uint64_t size, uint64_t accounted_size,
- std::map<std::string, bufferlist>& attrs, optional_yield y);
+ std::map<std::string, bufferlist>& attrs, optional_yield y, bool log_op = true);
int write_data(const char *data, uint64_t ofs, uint64_t len, bool exclusive);
const req_state* get_req_state() {
return nullptr; /* XXX dang Only used by LTTng, and it handles null anyway */
explicit Delete(RGWRados::Object *_target) : target(_target) {}
- int delete_obj(optional_yield y, const DoutPrefixProvider *dpp);
+ int delete_obj(optional_yield y, const DoutPrefixProvider *dpp, bool log_op = true);
};
struct Stat {
zones_trace = _zones_trace;
}
- int prepare(const DoutPrefixProvider *dpp, RGWModifyOp, const std::string *write_tag, optional_yield y);
+ int prepare(const DoutPrefixProvider *dpp, RGWModifyOp, const std::string *write_tag, optional_yield y, bool log_op = true);
int complete(const DoutPrefixProvider *dpp, int64_t poolid, uint64_t epoch, uint64_t size,
uint64_t accounted_size, ceph::real_time& ut,
const std::string& etag, const std::string& content_type,
std::list<rgw_obj_index_key> *remove_objs,
optional_yield y,
const std::string *user_data = nullptr,
- bool appendable = false);
+ bool appendable = false,
+ bool log_op = true);
int complete_del(const DoutPrefixProvider *dpp,
int64_t poolid, uint64_t epoch,
ceph::real_time& removed_mtime, /* mtime of removed object */
std::list<rgw_obj_index_key> *remove_objs,
- optional_yield y);
+ optional_yield y,
+ bool log_op = true);
int cancel(const DoutPrefixProvider *dpp,
std::list<rgw_obj_index_key> *remove_objs,
- optional_yield y);
+ optional_yield y,
+ bool log_op = true);
const std::string *get_optag() { return &optag; }
ceph::real_time delete_at,
std::string *petag,
const DoutPrefixProvider *dpp,
- optional_yield y);
+ optional_yield y,
+ bool log_op = true);
int transition_obj(RGWObjectCtx& obj_ctx,
RGWBucketInfo& bucket_info,
const real_time& mtime,
uint64_t olh_epoch,
const DoutPrefixProvider *dpp,
- optional_yield y);
+ optional_yield y,
+ bool log_op = true);
int check_bucket_empty(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, optional_yield y);
int versioning_status, // versioning flags defined in enum RGWBucketFlags
uint16_t bilog_flags = 0,
const ceph::real_time& expiration_time = ceph::real_time(),
- rgw_zone_set *zones_trace = nullptr);
+ rgw_zone_set *zones_trace = nullptr,
+ bool log_op = true);
int delete_raw_obj(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj);
RGWBucketInfo& bucket_info,
const rgw_obj& obj_instance,
const std::string& op_tag, const std::string& olh_tag,
- uint64_t olh_epoch, rgw_zone_set *zones_trace = nullptr);
+ uint64_t olh_epoch, rgw_zone_set *zones_trace = nullptr,
+ bool log_op = true);
int bucket_index_read_olh_log(const DoutPrefixProvider *dpp,
RGWBucketInfo& bucket_info, RGWObjState& state,
const rgw_obj& obj_instance, uint64_t ver_marker,
int bucket_index_clear_olh(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, const std::string& olh_tag, const rgw_obj& obj_instance);
int apply_olh_log(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx, RGWObjState& obj_state, RGWBucketInfo& bucket_info, const rgw_obj& obj,
bufferlist& obj_tag, std::map<uint64_t, std::vector<rgw_bucket_olh_log_entry> >& log,
- uint64_t *plast_ver, rgw_zone_set *zones_trace = nullptr);
- int update_olh(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx, RGWObjState *state, RGWBucketInfo& bucket_info, const rgw_obj& obj, rgw_zone_set *zones_trace = nullptr);
+ uint64_t *plast_ver, rgw_zone_set *zones_trace = nullptr, bool log_op = true);
+ int update_olh(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx, RGWObjState *state, RGWBucketInfo& bucket_info, const rgw_obj& obj, rgw_zone_set *zones_trace = nullptr, bool log_op = true);
int clear_olh(const DoutPrefixProvider *dpp,
RGWObjectCtx& obj_ctx,
const rgw_obj& obj,
int repair_olh(const DoutPrefixProvider *dpp, RGWObjState* state, const RGWBucketInfo& bucket_info,
const rgw_obj& obj);
int unlink_obj_instance(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx, RGWBucketInfo& bucket_info, const rgw_obj& target_obj,
- uint64_t olh_epoch, optional_yield y, rgw_zone_set *zones_trace = nullptr);
+ uint64_t olh_epoch, optional_yield y, rgw_zone_set *zones_trace = nullptr, bool log_op = true);
void check_pending_olh_entries(const DoutPrefixProvider *dpp, std::map<std::string, bufferlist>& pending_entries, std::map<std::string, bufferlist> *rm_pending_entries);
int remove_olh_pending_entries(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& olh_obj, std::map<std::string, bufferlist>& pending_attrs);
std::map<std::string, bufferlist> *pattrs, bool create_entry_point,
const DoutPrefixProvider *dpp, optional_yield y);
- int cls_obj_prepare_op(const DoutPrefixProvider *dpp, BucketShard& bs, RGWModifyOp op, std::string& tag, rgw_obj& obj, uint16_t bilog_flags, optional_yield y, rgw_zone_set *zones_trace = nullptr);
+ int cls_obj_prepare_op(const DoutPrefixProvider *dpp, BucketShard& bs, RGWModifyOp op, std::string& tag, rgw_obj& obj,
+ uint16_t bilog_flags, optional_yield y, rgw_zone_set *zones_trace = nullptr, bool log_op = true);
int cls_obj_complete_op(BucketShard& bs, const rgw_obj& obj, RGWModifyOp op, std::string& tag, int64_t pool, uint64_t epoch,
- rgw_bucket_dir_entry& ent, RGWObjCategory category, std::list<rgw_obj_index_key> *remove_objs, uint16_t bilog_flags, rgw_zone_set *zones_trace = nullptr);
+ rgw_bucket_dir_entry& ent, RGWObjCategory category, std::list<rgw_obj_index_key> *remove_objs,
+ uint16_t bilog_flags, rgw_zone_set *zones_trace = nullptr, bool log_op = true);
int cls_obj_complete_add(BucketShard& bs, const rgw_obj& obj, std::string& tag, int64_t pool, uint64_t epoch, rgw_bucket_dir_entry& ent,
- RGWObjCategory category, std::list<rgw_obj_index_key> *remove_objs, uint16_t bilog_flags, rgw_zone_set *zones_trace = nullptr);
+ RGWObjCategory category, std::list<rgw_obj_index_key> *remove_objs, uint16_t bilog_flags,
+ rgw_zone_set *zones_trace = nullptr, bool log_op = true);
int cls_obj_complete_del(BucketShard& bs, std::string& tag, int64_t pool, uint64_t epoch, rgw_obj& obj,
- ceph::real_time& removed_mtime, std::list<rgw_obj_index_key> *remove_objs, uint16_t bilog_flags, rgw_zone_set *zones_trace = nullptr);
+ ceph::real_time& removed_mtime, std::list<rgw_obj_index_key> *remove_objs,
+ uint16_t bilog_flags, rgw_zone_set *zones_trace = nullptr, bool log_op = true);
int cls_obj_complete_cancel(BucketShard& bs, std::string& tag, rgw_obj& obj,
std::list<rgw_obj_index_key> *remove_objs,
- uint16_t bilog_flags, rgw_zone_set *zones_trace = nullptr);
+ uint16_t bilog_flags, rgw_zone_set *zones_trace = nullptr, bool log_op = true);
int cls_obj_set_bucket_tag_timeout(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, uint64_t timeout);
using ent_map_t =
const real_time& mtime,
uint64_t olh_epoch,
const DoutPrefixProvider* dpp,
- optional_yield y)
+ optional_yield y,
+ bool log_op)
{
- return store->getRados()->transition_obj(*rados_ctx, bucket->get_info(), get_obj(), placement_rule, mtime, olh_epoch, dpp, y);
+ return store->getRados()->transition_obj(*rados_ctx, bucket->get_info(), get_obj(), placement_rule, mtime, olh_epoch, dpp, y, log_op);
}
int RadosObject::transition_to_cloud(Bucket* bucket,
parent_op(&op_target)
{ }
-int RadosObject::RadosDeleteOp::delete_obj(const DoutPrefixProvider* dpp, optional_yield y)
+int RadosObject::RadosDeleteOp::delete_obj(const DoutPrefixProvider* dpp, optional_yield y, bool log_op)
{
parent_op.params.bucket_owner = params.bucket_owner.get_id();
parent_op.params.versioning_status = params.versioning_status;
parent_op.params.abortmp = params.abortmp;
parent_op.params.parts_accounted_size = params.parts_accounted_size;
- int ret = parent_op.delete_obj(y, dpp);
+ int ret = parent_op.delete_obj(y, dpp, log_op);
if (ret < 0)
return ret;
}
-int RadosMultipartUpload::abort(const DoutPrefixProvider *dpp, CephContext *cct)
+int RadosMultipartUpload::abort(const DoutPrefixProvider *dpp, CephContext *cct, bool log_op)
{
std::unique_ptr<rgw::sal::Object> meta_obj = get_meta_obj();
meta_obj->set_in_extra_data(true);
std::unique_ptr<rgw::sal::Object> obj = bucket->get_object(
rgw_obj_key(obj_part->oid, std::string(), RGW_OBJ_NS_MULTIPART));
obj->set_hash_source(mp_obj.get_key());
- ret = obj->delete_object(dpp, null_yield);
+ ret = obj->delete_object(dpp, null_yield, log_op);
if (ret < 0 && ret != -ENOENT)
return ret;
} else {
del_op->params.parts_accounted_size = parts_accounted_size;
// and also remove the metadata obj
- ret = del_op->delete_obj(dpp, null_yield);
+ ret = del_op->delete_obj(dpp, null_yield, log_op);
if (ret < 0) {
ldpp_dout(dpp, 20) << __func__ << ": del_op.delete_obj returned " <<
ret << dendl;
const char *if_match, const char *if_nomatch,
const std::string *user_data,
rgw_zone_set *zones_trace, bool *canceled,
- optional_yield y)
+ optional_yield y,
+ bool log_op)
{
return processor.complete(accounted_size, etag, mtime, set_mtime, attrs, delete_at,
- if_match, if_nomatch, user_data, zones_trace, canceled, y);
+ if_match, if_nomatch, user_data, zones_trace, canceled, y, log_op);
}
int RadosAppendWriter::prepare(optional_yield y)
const char *if_match, const char *if_nomatch,
const std::string *user_data,
rgw_zone_set *zones_trace, bool *canceled,
- optional_yield y)
+ optional_yield y,
+ bool log_op)
{
return processor.complete(accounted_size, etag, mtime, set_mtime, attrs, delete_at,
- if_match, if_nomatch, user_data, zones_trace, canceled, y);
+ if_match, if_nomatch, user_data, zones_trace, canceled, y, log_op);
}
int RadosMultipartWriter::prepare(optional_yield y)
const char *if_match, const char *if_nomatch,
const std::string *user_data,
rgw_zone_set *zones_trace, bool *canceled,
- optional_yield y)
+ optional_yield y,
+ bool log_op)
{
return processor.complete(accounted_size, etag, mtime, set_mtime, attrs, delete_at,
- if_match, if_nomatch, user_data, zones_trace, canceled, y);
+ if_match, if_nomatch, user_data, zones_trace, canceled, y, log_op);
}
const std::string& RadosZoneGroup::get_endpoint() const
if (local_zone)
return store->svc()->zone->get_zone().tier_type;
- return rgw_zone.id;
+ return rgw_zone.tier_type;
}
RGWBucketSyncPolicyHandlerRef RadosZone::get_sync_policy_handler()
public:
RadosDeleteOp(RadosObject* _source);
- virtual int delete_obj(const DoutPrefixProvider* dpp, optional_yield y) override;
+ virtual int delete_obj(const DoutPrefixProvider* dpp, optional_yield y, bool log_op = true) override;
};
RadosObject(RadosStore *_st, const rgw_obj_key& _k)
const real_time& mtime,
uint64_t olh_epoch,
const DoutPrefixProvider* dpp,
- optional_yield y) override;
+ optional_yield y,
+ bool log_op = true) override;
virtual int transition_to_cloud(Bucket* bucket,
rgw::sal::PlacementTier* tier,
rgw_bucket_dir_entry& o,
int num_parts, int marker,
int* next_marker, bool* truncated,
bool assume_unsorted = false) override;
- virtual int abort(const DoutPrefixProvider* dpp, CephContext* cct) override;
+ virtual int abort(const DoutPrefixProvider* dpp, CephContext* cct, bool log_op = true) override;
virtual int complete(const DoutPrefixProvider* dpp,
optional_yield y, CephContext* cct,
std::map<int, std::string>& part_etags,
const char *if_match, const char *if_nomatch,
const std::string *user_data,
rgw_zone_set *zones_trace, bool *canceled,
- optional_yield y) override;
+ optional_yield y,
+ bool log_op = true) override;
};
class RadosAppendWriter : public StoreWriter {
const char *if_match, const char *if_nomatch,
const std::string *user_data,
rgw_zone_set *zones_trace, bool *canceled,
- optional_yield y) override;
+ optional_yield y,
+ bool log_op = true) override;
};
class RadosMultipartWriter : public StoreWriter {
const char *if_match, const char *if_nomatch,
const std::string *user_data,
rgw_zone_set *zones_trace, bool *canceled,
- optional_yield y) override;
+ optional_yield y,
+ bool log_op = true) override;
};
class RadosLuaManager : public StoreLuaManager {
static std::string lc_id = "rgw lifecycle";
static std::string lc_req_id = "0";
+/* do all zones in the zone group process LC? */
+static bool zonegroup_lc_check(const DoutPrefixProvider *dpp, rgw::sal::Zone* zone)
+{
+ auto& zonegroup = zone->get_zonegroup();
+ std::list<std::string> ids;
+ int ret = zonegroup.list_zones(ids);
+ if (ret < 0) {
+ return false;
+ }
+
+ return std::all_of(ids.begin(), ids.end(), [&](const auto& id) {
+ std::unique_ptr<rgw::sal::Zone> zone;
+ ret = zonegroup.get_zone_by_id(id, &zone);
+ if (ret < 0) {
+ return false;
+ }
+ const auto& tier_type = zone->get_tier_type();
+ ldpp_dout(dpp, 20) << "checking zone tier_type=" << tier_type << dendl;
+ return (tier_type == "rgw" || tier_type == "archive" || tier_type == "");
+ });
+}
+
static int remove_expired_obj(
const DoutPrefixProvider *dpp, lc_op_ctx& oc, bool remove_indeed,
rgw::notify::EventType event_type)
<< dendl;
return ret;
}
- ret = del_op->delete_obj(dpp, null_yield);
+
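+  // when every zone in the zonegroup processes lifecycle itself, an actual
+  // removal does not need to be propagated through the datalog; delete-marker
+  // creation (remove_indeed == false) is still logged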
+ bool log_op = !remove_indeed || !zonegroup_lc_check(dpp, oc.driver->get_zone());
+ ret = del_op->delete_obj(dpp, null_yield, log_op);
if (ret < 0) {
ldpp_dout(dpp, 1) <<
"ERROR: publishing notification failed, with error: " << ret << dendl;
if (obj_has_expired(this, cct, obj.meta.mtime, rule.mp_expiration)) {
rgw_obj_key key(obj.key);
std::unique_ptr<rgw::sal::MultipartUpload> mpu = target->get_multipart_upload(key.name);
- int ret = mpu->abort(this, cct);
+
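+      // skip logging the abort when all zones run lifecycle and will expire
+      // this multipart upload on their own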
+ bool log_op = !zonegroup_lc_check(wk->get_lc(), driver->get_zone());
+ int ret = mpu->abort(this, cct, log_op);
if (ret == 0) {
if (perfcounter) {
perfcounter->inc(l_rgw_lc_abort_mpu, 1);
return -EINVAL;
}
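+  // skip bilog/datalog entries for this transition when every zone in the
+  // zonegroup runs lifecycle and will transition the object locally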
+ bool log_op = !zonegroup_lc_check(oc.dpp, oc.driver->get_zone());
int r = oc.obj->transition(oc.bucket, target_placement, o.meta.mtime,
- o.versioned_epoch, oc.dpp, null_yield);
+ o.versioned_epoch, oc.dpp, null_yield, log_op);
if (r < 0) {
ldpp_dout(oc.dpp, 0) << "ERROR: failed to transition obj "
<< oc.bucket << ":" << o.key
const char *if_match, const char *if_nomatch,
const std::string *user_data,
rgw_zone_set *zones_trace, bool *canceled,
- optional_yield y) = 0;
+ optional_yield y,
+ bool log_op = true) = 0;
};
/** Base class for AIO completions */
virtual ~DeleteOp() = default;
/** Delete the object */
- virtual int delete_obj(const DoutPrefixProvider* dpp, optional_yield y) = 0;
+ virtual int delete_obj(const DoutPrefixProvider* dpp, optional_yield y, bool log_op = true) = 0;
};
Object() {}
const real_time& mtime,
uint64_t olh_epoch,
const DoutPrefixProvider* dpp,
- optional_yield y) = 0;
+ optional_yield y,
+ bool log_op = true) = 0;
/** Move an object to the cloud */
virtual int transition_to_cloud(Bucket* bucket,
rgw::sal::PlacementTier* tier,
int* next_marker, bool* truncated,
bool assume_unsorted = false) = 0;
/** Abort this upload */
- virtual int abort(const DoutPrefixProvider* dpp, CephContext* cct) = 0;
+ virtual int abort(const DoutPrefixProvider* dpp, CephContext* cct, bool log_op = true) = 0;
/** Complete this upload, making it available as a normal object */
virtual int complete(const DoutPrefixProvider* dpp,
optional_yield y, CephContext* cct,
const char *if_match, const char *if_nomatch,
const std::string *user_data,
rgw_zone_set *zones_trace, bool *canceled,
- optional_yield y) = 0;
+ optional_yield y,
+ bool log_op = true) = 0;
};
int DaosObject::transition(Bucket* bucket,
const rgw_placement_rule& placement_rule,
const real_time& mtime, uint64_t olh_epoch,
- const DoutPrefixProvider* dpp, optional_yield y) {
+ const DoutPrefixProvider* dpp, optional_yield y,
+ bool log_op) {
return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
// 3. Handle empty directories
// 4. Fail when file doesn't exist
int DaosObject::DaosDeleteOp::delete_obj(const DoutPrefixProvider* dpp,
- optional_yield y) {
+ optional_yield y, bool log_op) {
ldpp_dout(dpp, 20) << "DaosDeleteOp::delete_obj "
<< source->get_key().get_oid() << " from "
<< source->get_bucket()->get_name() << dendl;
ceph::real_time set_mtime, std::map<std::string, bufferlist>& attrs,
ceph::real_time delete_at, const char* if_match, const char* if_nomatch,
const std::string* user_data, rgw_zone_set* zones_trace, bool* canceled,
- optional_yield y) {
+ optional_yield y, bool log_op) {
ldpp_dout(dpp, 20) << "DEBUG: complete" << dendl;
bufferlist bl;
rgw_bucket_dir_entry ent;
}
int DaosMultipartUpload::abort(const DoutPrefixProvider* dpp,
- CephContext* cct) {
+                               CephContext* cct, bool log_op) {
// Remove upload from bucket multipart index
ldpp_dout(dpp, 20) << "DEBUG: abort" << dendl;
return ds3_upload_remove(bucket->get_name().c_str(), get_upload_id().c_str(),
ceph::real_time set_mtime, std::map<std::string, bufferlist>& attrs,
ceph::real_time delete_at, const char* if_match, const char* if_nomatch,
const std::string* user_data, rgw_zone_set* zones_trace, bool* canceled,
- optional_yield y) {
+ optional_yield y, bool log_op) {
ldpp_dout(dpp, 20) << "DaosMultipartWriter::complete(): enter part="
<< part_num_str << dendl;
DaosDeleteOp(DaosObject* _source);
virtual int delete_obj(const DoutPrefixProvider* dpp,
- optional_yield y) override;
+ optional_yield y, bool log_op = true) override;
};
ds3_obj_t* ds3o = nullptr;
const rgw_placement_rule& placement_rule,
const real_time& mtime, uint64_t olh_epoch,
const DoutPrefixProvider* dpp,
- optional_yield y) override;
+ optional_yield y,
+ bool log_op = true) override;
virtual int transition_to_cloud(Bucket* bucket, rgw::sal::PlacementTier* tier,
rgw_bucket_dir_entry& o,
std::set<std::string>& cloud_targets,
ceph::real_time delete_at, const char* if_match,
const char* if_nomatch, const std::string* user_data,
rgw_zone_set* zones_trace, bool* canceled,
- optional_yield y) override;
+ optional_yield y,
+ bool log_op = true) override;
};
class DaosMultipartWriter : public StoreWriter {
ceph::real_time delete_at, const char* if_match,
const char* if_nomatch, const std::string* user_data,
rgw_zone_set* zones_trace, bool* canceled,
- optional_yield y) override;
+ optional_yield y,
+ bool log_op = true) override;
const std::string& get_bucket_name();
};
int num_parts, int marker, int* next_marker,
bool* truncated,
bool assume_unsorted = false) override;
- virtual int abort(const DoutPrefixProvider* dpp, CephContext* cct) override;
+  virtual int abort(const DoutPrefixProvider* dpp, CephContext* cct, bool log_op = true) override;
virtual int complete(const DoutPrefixProvider* dpp, optional_yield y,
CephContext* cct, std::map<int, std::string>& part_etags,
std::list<rgw_obj_index_key>& remove_objs,
const real_time& mtime,
uint64_t olh_epoch,
const DoutPrefixProvider* dpp,
- optional_yield y)
+ optional_yield y,
+ bool log_op)
{
DB::Object op_target(store->getDB(),
get_bucket()->get_info(), get_obj());
parent_op(&op_target)
{ }
- int DBObject::DBDeleteOp::delete_obj(const DoutPrefixProvider* dpp, optional_yield y)
+ int DBObject::DBDeleteOp::delete_obj(const DoutPrefixProvider* dpp, optional_yield y, bool log_op)
{
parent_op.params.bucket_owner = params.bucket_owner.get_id();
parent_op.params.versioning_status = params.versioning_status;
return 0;
}
- int DBMultipartUpload::abort(const DoutPrefixProvider *dpp, CephContext *cct)
+ int DBMultipartUpload::abort(const DoutPrefixProvider *dpp, CephContext *cct, bool log_op)
{
std::unique_ptr<rgw::sal::Object> meta_obj = get_meta_obj();
meta_obj->set_in_extra_data(true);
// Since the data objects are associated with meta obj till
// MultipartUpload::Complete() is done, removing the metadata obj
// should remove all the uploads so far.
- ret = del_op->delete_obj(dpp, null_yield);
+ ret = del_op->delete_obj(dpp, null_yield, log_op);
if (ret < 0) {
ldpp_dout(dpp, 20) << __func__ << ": del_op.delete_obj returned " <<
ret << dendl;
const char *if_match, const char *if_nomatch,
const std::string *user_data,
rgw_zone_set *zones_trace, bool *canceled,
- optional_yield y)
+ optional_yield y,
+ bool log_op)
{
int ret = 0;
/* XXX: same as AtomicWriter..consolidate code */
const char *if_match, const char *if_nomatch,
const std::string *user_data,
rgw_zone_set *zones_trace, bool *canceled,
- optional_yield y)
+ optional_yield y,
+ bool log_op)
{
parent_op.meta.mtime = mtime;
parent_op.meta.delete_at = delete_at;
int num_parts, int marker,
int* next_marker, bool* truncated,
bool assume_unsorted = false) override;
- virtual int abort(const DoutPrefixProvider* dpp, CephContext* cct) override;
+ virtual int abort(const DoutPrefixProvider* dpp, CephContext* cct, bool log_op = true) override;
virtual int complete(const DoutPrefixProvider* dpp,
optional_yield y, CephContext* cct,
std::map<int, std::string>& part_etags,
public:
DBDeleteOp(DBObject* _source);
- virtual int delete_obj(const DoutPrefixProvider* dpp, optional_yield y) override;
+ virtual int delete_obj(const DoutPrefixProvider* dpp, optional_yield y, bool log_op = true) override;
};
DBObject() = default;
const real_time& mtime,
uint64_t olh_epoch,
const DoutPrefixProvider* dpp,
- optional_yield y) override;
+ optional_yield y,
+ bool log_op = true) override;
virtual bool placement_rules_match(rgw_placement_rule& r1, rgw_placement_rule& r2) override;
virtual int dump_obj_layout(const DoutPrefixProvider *dpp, optional_yield y, Formatter* f) override;
const char *if_match, const char *if_nomatch,
const std::string *user_data,
rgw_zone_set *zones_trace, bool *canceled,
- optional_yield y) override;
+ optional_yield y,
+ bool log_op = true) override;
};
class DBMultipartWriter : public StoreWriter {
const char *if_match, const char *if_nomatch,
const std::string *user_data,
rgw_zone_set *zones_trace, bool *canceled,
- optional_yield y) override;
+ optional_yield y,
+ bool log_op = true) override;
};
class DBStore : public StoreDriver {
const real_time& mtime,
uint64_t olh_epoch,
const DoutPrefixProvider* dpp,
- optional_yield y)
+ optional_yield y,
+ bool log_op)
{
return next->transition(nextBucket(bucket), placement_rule, mtime, olh_epoch,
- dpp, y);
+ dpp, y, log_op);
}
int FilterObject::transition_to_cloud(Bucket* bucket,
}
int FilterObject::FilterDeleteOp::delete_obj(const DoutPrefixProvider* dpp,
- optional_yield y)
+ optional_yield y, bool log_op)
{
/* Copy params into next */
next->params = params;
- int ret = next->delete_obj(dpp, y);
+ int ret = next->delete_obj(dpp, y, log_op);
/* Copy result back */
result = next->result;
return ret;
return 0;
}
-int FilterMultipartUpload::abort(const DoutPrefixProvider *dpp, CephContext *cct)
+int FilterMultipartUpload::abort(const DoutPrefixProvider *dpp, CephContext *cct, bool log_op)
{
-  return next->abort(dpp, cct);
+  return next->abort(dpp, cct, log_op);
}
const char *if_match, const char *if_nomatch,
const std::string *user_data,
rgw_zone_set *zones_trace, bool *canceled,
- optional_yield y)
+ optional_yield y,
+ bool log_op)
{
return next->complete(accounted_size, etag, mtime, set_mtime, attrs,
delete_at, if_match, if_nomatch, user_data, zones_trace,
- canceled, y);
+ canceled, y, log_op);
}
int FilterLuaManager::get_script(const DoutPrefixProvider* dpp, optional_yield y,
FilterDeleteOp(std::unique_ptr<DeleteOp> _next) : next(std::move(_next)) {}
virtual ~FilterDeleteOp() = default;
- virtual int delete_obj(const DoutPrefixProvider* dpp, optional_yield y) override;
+ virtual int delete_obj(const DoutPrefixProvider* dpp, optional_yield y, bool log_op = true) override;
};
FilterObject(std::unique_ptr<Object> _next) : next(std::move(_next)) {}
const real_time& mtime,
uint64_t olh_epoch,
const DoutPrefixProvider* dpp,
- optional_yield y) override;
+ optional_yield y,
+ bool log_op = true) override;
virtual int transition_to_cloud(Bucket* bucket,
rgw::sal::PlacementTier* tier,
rgw_bucket_dir_entry& o,
int num_parts, int marker,
int* next_marker, bool* truncated,
bool assume_unsorted = false) override;
- virtual int abort(const DoutPrefixProvider* dpp, CephContext* cct) override;
+ virtual int abort(const DoutPrefixProvider* dpp, CephContext* cct, bool log_op = true) override;
virtual int complete(const DoutPrefixProvider* dpp,
optional_yield y, CephContext* cct,
std::map<int, std::string>& part_etags,
const char *if_match, const char *if_nomatch,
const std::string *user_data,
rgw_zone_set *zones_trace, bool *canceled,
- optional_yield y) override;
+ optional_yield y,
+ bool log_op = true) override;
};
class FilterLuaManager : public LuaManager {
const real_time& mtime,
uint64_t olh_epoch,
const DoutPrefixProvider* dpp,
- optional_yield y)
+ optional_yield y,
+ bool log_op)
{
return 0;
}
// Delete::delete_obj() in rgw_rados.cc shows how rados backend process the
// params.
// 2. Delete an object when its versioning is turned on.
-int MotrObject::MotrDeleteOp::delete_obj(const DoutPrefixProvider* dpp, optional_yield y)
+int MotrObject::MotrDeleteOp::delete_obj(const DoutPrefixProvider* dpp, optional_yield y, bool log_op)
{
ldpp_dout(dpp, 20) << "delete " << source->get_key().get_oid() << " from " << source->get_bucket()->get_name() << dendl;
const char *if_match, const char *if_nomatch,
const std::string *user_data,
rgw_zone_set *zones_trace, bool *canceled,
- optional_yield y)
+ optional_yield y,
+ bool log_op)
{
int rc = 0;
return store->delete_motr_idx_by_name(obj_part_iname);
}
-int MotrMultipartUpload::abort(const DoutPrefixProvider *dpp, CephContext *cct)
+int MotrMultipartUpload::abort(const DoutPrefixProvider *dpp, CephContext *cct, bool log_op)
{
int rc;
// Check if multipart upload exists
const char *if_match, const char *if_nomatch,
const std::string *user_data,
rgw_zone_set *zones_trace, bool *canceled,
- optional_yield y)
+ optional_yield y,
+ bool log_op)
{
// Should the dir entry(object metadata) be updated? For example
// mtime.
public:
MotrDeleteOp(MotrObject* _source);
- virtual int delete_obj(const DoutPrefixProvider* dpp, optional_yield y) override;
+ virtual int delete_obj(const DoutPrefixProvider* dpp, optional_yield y, bool log_op = true) override;
};
MotrObject() = default;
const real_time& mtime,
uint64_t olh_epoch,
const DoutPrefixProvider* dpp,
- optional_yield y) override;
+ optional_yield y,
+ bool log_op = true) override;
virtual bool placement_rules_match(rgw_placement_rule& r1, rgw_placement_rule& r2) override;
virtual int dump_obj_layout(const DoutPrefixProvider *dpp, optional_yield y, Formatter* f) override;
const char *if_match, const char *if_nomatch,
const std::string *user_data,
rgw_zone_set *zones_trace, bool *canceled,
- optional_yield y) override;
+ optional_yield y,
+ bool log_op = true) override;
unsigned populate_bvec(unsigned len, bufferlist::iterator &bi);
void cleanup();
const char *if_match, const char *if_nomatch,
const std::string *user_data,
rgw_zone_set *zones_trace, bool *canceled,
- optional_yield y) override;
+ optional_yield y,
+ bool log_op = true) override;
};
// The implementation of multipart upload in POC roughly follows the
int num_parts, int marker,
int* next_marker, bool* truncated,
bool assume_unsorted = false) override;
- virtual int abort(const DoutPrefixProvider* dpp, CephContext* cct) override;
+ virtual int abort(const DoutPrefixProvider* dpp, CephContext* cct, bool log_op = true) override;
virtual int complete(const DoutPrefixProvider* dpp,
optional_yield y, CephContext* cct,
std::map<int, std::string>& part_etags,