RGWFetchObjFilter *filter,
bool stat_follow_olh,
const rgw_obj& stat_dest_obj,
- const rgw_zone_set_entry& source_trace_entry,
+ std::optional<rgw_zone_set_entry> source_trace_entry,
rgw_zone_set *zones_trace,
std::optional<uint64_t>* bytes_transferred)
{
//erase the append attr
cb.get_attrs().erase(RGW_ATTR_APPEND_PART_NUM);
-  { // add x-amz-replication-status=REPLICA
-    auto& bl = cb.get_attrs()[RGW_ATTR_OBJ_REPLICATION_STATUS];
-    bl.clear(); // overwrite source's status
-    bl.append("REPLICA");
-  }
-  { // update replication trace
-    std::vector<rgw_zone_set_entry> trace;
-    if (auto i = cb.get_attrs().find(RGW_ATTR_OBJ_REPLICATION_TRACE);
-        i != cb.get_attrs().end()) {
-      try {
-        decode(trace, i->second);
-      } catch (const buffer::error&) {}
+  if (source_trace_entry) { // replication attrs only if we are replicating
+    { // add x-amz-replication-status=REPLICA
+      auto& bl = cb.get_attrs()[RGW_ATTR_OBJ_REPLICATION_STATUS];
+      bl.clear(); // overwrite source's status
+      bl.append("REPLICA");
    }
-    // add the source entry to the end
-    trace.push_back(source_trace_entry);
-    bufferlist bl;
-    encode(trace, bl);
-    cb.get_attrs()[RGW_ATTR_OBJ_REPLICATION_TRACE] = std::move(bl);
-  }
-  {
-    // add x-amz-replicated-at
-    bufferlist bl;
-    ceph::real_time timestamp = real_clock::now();
-    encode(timestamp, bl);
-    cb.get_attrs()[RGW_ATTR_OBJ_REPLICATION_TIMESTAMP] = std::move(bl);
+    { // update replication trace
+      std::vector<rgw_zone_set_entry> trace;
+      if (auto i = cb.get_attrs().find(RGW_ATTR_OBJ_REPLICATION_TRACE);
+          i != cb.get_attrs().end()) {
+        try {
+          decode(trace, i->second);
+        } catch (const buffer::error&) {}
+      }
+      // add the source entry to the end
+      trace.push_back(*source_trace_entry);
+      bufferlist bl;
+      encode(trace, bl);
+      cb.get_attrs()[RGW_ATTR_OBJ_REPLICATION_TRACE] = std::move(bl);
+    }
+    {
+      // add x-amz-replicated-at
+      bufferlist bl;
+      ceph::real_time timestamp = real_clock::now();
+      encode(timestamp, bl);
+      cb.get_attrs()[RGW_ATTR_OBJ_REPLICATION_TIMESTAMP] = std::move(bl);
+    }
+  } else {
+    // remove replication attrs
+    cb.get_attrs().erase(RGW_ATTR_OBJ_REPLICATION_STATUS);
+    cb.get_attrs().erase(RGW_ATTR_OBJ_REPLICATION_TRACE);
+    cb.get_attrs().erase(RGW_ATTR_OBJ_REPLICATION_TIMESTAMP);
  }
  if (source_zone.empty()) {
    ldpp_dout(dpp, 5) << "Copy object " << src_obj.bucket << ":" << src_obj.get_oid() << " => " << dest_obj.bucket << ":" << dest_obj.get_oid() << dendl;
  if (remote_src || !source_zone.empty()) {
-    rgw_zone_set_entry source_trace_entry{source_zone.id, std::nullopt};
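+    // not replication: pass std::nullopt below instead of a source trace
+    // entry, so fetch_remote_obj() removes any replication attrs copied
+    // over from the source object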
    // null_yield resolves a crash when calling progress_cb(), because the beast
    // frontend tried to use this same yield context to write the progress
    // response to the frontend socket. call fetch_remote_obj() synchronously so
        unmod_ptr, high_precision_time,
        if_match, if_nomatch, attrs_mod, copy_if_newer, attrs, category,
        olh_epoch, delete_at, ptag, petag, progress_cb, progress_data, rctx,
-        nullptr /* filter */, stat_follow_olh, stat_dest_obj, source_trace_entry);
+        nullptr /* filter */, stat_follow_olh, stat_dest_obj, std::nullopt);
  }
  map<string, bufferlist> src_attrs;
CreateBucketConfiguration={'LocationConstraint': zg.name})
assert e.response['ResponseMetadata']['HTTPStatusCode'] == 400
+def run_per_zonegroup(func):
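+    # run the decorated test once per zonegroup in the realm, passing the
+    # zonegroup as the first argument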
+    def wrapper(*args, **kwargs):
+        for zonegroup in realm.current_period.zonegroups:
+            func(zonegroup, *args, **kwargs)
+
+    return wrapper
+
def allow_bucket_replication(function):
def wrapper(*args, **kwargs):
zonegroup = realm.master_zonegroup()
remove_sync_policy_group(c1, "sync-group")
return
+
+@run_per_zonegroup
+def test_copy_obj_between_zonegroups(zonegroup):
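+    # upload an object in this zonegroup, then server-side copy it into a
+    # bucket in every other zonegroup and verify the copied contents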
+    if len(realm.current_period.zonegroups) < 2:
+        raise SkipTest('need at least 2 zonegroups to run this test')
+
+    source_zone = ZonegroupConns(zonegroup).rw_zones[0]
+    source_bucket = source_zone.create_bucket(gen_bucket_name())
+
+    objname = 'dummy'
+    k = new_key(source_zone, source_bucket.name, objname)
+    k.set_contents_from_string('foo')
+
+    for zg in realm.current_period.zonegroups:
+        if zg.name == zonegroup.name:
+            continue
+
+        dest_zone = ZonegroupConns(zg).rw_zones[0]
+        dest_bucket = dest_zone.create_bucket(gen_bucket_name())
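+        # wait for bucket metadata to sync across the realm before copying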
+        realm_meta_checkpoint(realm)
+
+        # copy object
+        dest_zone.s3_client.copy_object(
+            Bucket=dest_bucket.name,
+            CopySource=f'{source_bucket.name}/{objname}',
+            Key=objname
+        )
+
+        # check that object exists in destination bucket
+        k = get_key(dest_zone, dest_bucket.name, objname)
+        assert_equal(k.get_contents_as_string().decode('utf-8'), 'foo')