While restoring non-current object versions, ensure they remain non-current.
Read `olh_epoch` from the restored object's metadata into a new attr,
"RGW_ATTR_RESTORE_VERSIONED_EPOCH". This attr/olh_epoch is used while
updating the bucket index entry and also to reset the HEAD object after
temporary copies expire.
Signed-off-by: Soumya Koduri <skoduri@redhat.com>
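In outline, the change round-trips the versioned epoch through a bufferlist-encoded xattr. Below is a minimal sketch of that round trip, mirroring the encode/decode sites in this patch; the attr map (rgw::sal::Attrs, a std::map<std::string, bufferlist>) and the Ceph encode/decode helpers are assumed to be in scope, and the epoch value is illustrative:

    // At restore time: persist the OLH epoch of the version being restored.
    {
      bufferlist bl;
      uint64_t olh_epoch = 42; // illustrative; the patch takes it from the bucket index entry
      encode(olh_epoch, bl);
      attrs[RGW_ATTR_RESTORE_VERSIONED_EPOCH] = std::move(bl);
    }

    // Later, when updating the bucket index entry or resetting the HEAD
    // object after a temporary copy expires: recover the epoch so the
    // restored version stays non-current.
    auto it = attrs.find(RGW_ATTR_RESTORE_VERSIONED_EPOCH);
    if (it != attrs.end()) {
      uint64_t epoch = 0;
      decode(epoch, it->second); // throws buffer::error on malformed data
    }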
rgw_bucket_dir_entry& o,
CephContext* cct,
RGWObjTier& tier_config,
- real_time& mtime,
uint64_t olh_epoch,
std::optional<uint64_t> days,
const DoutPrefixProvider* dpp,
rgw_bucket_dir_entry& o,
CephContext* cct,
RGWObjTier& tier_config,
- real_time& mtime,
uint64_t olh_epoch,
std::optional<uint64_t> days,
const DoutPrefixProvider* dpp,
rgw_bucket_dir_entry& o,
CephContext* cct,
RGWObjTier& tier_config,
- real_time& mtime,
uint64_t olh_epoch,
std::optional<uint64_t> days,
const DoutPrefixProvider* dpp,
rgw_bucket_dir_entry& o,
CephContext* cct,
RGWObjTier& tier_config,
- real_time& mtime,
uint64_t olh_epoch,
std::optional<uint64_t> days,
const DoutPrefixProvider* dpp,
const auto aiter = generic_attrs_map.find(name);
if (aiter != std::end(generic_attrs_map)) {
- ldpp_dout(tier_ctx.dpp, 20) << __func__ << " Received attrs aiter->first = " << aiter->first << ", aiter->second = " << aiter->second << ret << dendl;
- attrs[aiter->second] = bl;
+ attrs[aiter->second] = bl;
+ } else {
+ std::string s1 = boost::algorithm::to_lower_copy(header.first);
+ std::replace(s1.begin(), s1.end(), '_', '-');
+
+ // copy versioned epoch
+ if (s1 == "x-amz-meta-rgwx-versioned-epoch") {
+ attrs[s1] = bl;
+ }
}
if (header.first == "CONTENT_LENGTH") {
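For context: on this path the header name arrives in RGW's internal env-style form, so the normalization above just lowercases it and maps '_' to '-' before comparing against the S3 metadata key. Assuming the header arrives as X_AMZ_META_RGWX_VERSIONED_EPOCH (the exact input form is an assumption here):

    // needs <algorithm> and <boost/algorithm/string.hpp>
    std::string s1 = boost::algorithm::to_lower_copy(
        std::string("X_AMZ_META_RGWX_VERSIONED_EPOCH"));
    std::replace(s1.begin(), s1.end(), '_', '-');
    // s1 == "x-amz-meta-rgwx-versioned-epoch"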
* cloudtier config info read from the attrs.
* Since these attrs are used internally for only replication, do not store them
* in the head object.
+ *
+ * Update the versioned epoch in case the object is being restored.
*/
-void read_cloudtier_info_from_attrs(rgw::sal::Attrs& attrs, RGWObjCategory& category,
- RGWObjManifest& manifest) {
+int read_cloudtier_info_from_attrs(rgw::sal::Attrs& attrs, RGWObjCategory& category,
+ std::optional<uint64_t>& olh_epoch, RGWObjManifest& manifest) {
auto attr_iter = attrs.find(RGW_ATTR_CLOUD_TIER_TYPE);
if (attr_iter != attrs.end()) {
auto i = attr_iter->second;
manifest.set_tier_config(tier_config);
attrs.erase(config_iter);
} catch (buffer::error& err) {
+ return -EIO;
}
}
}
attrs.erase(attr_iter);
}
+ attr_iter = attrs.find(RGW_ATTR_RESTORE_VERSIONED_EPOCH);
+ if (attr_iter != attrs.end()) {
+ try {
+ using ceph::decode;
+ uint64_t v_epoch = 0;
+ decode(v_epoch, attr_iter->second);
+ olh_epoch = v_epoch;
+ /*
+ * Keep this attr only for Temp restored copies as it's needed while
+ * resetting the head object post expiry.
+ */
+ auto r_iter = attrs.find(RGW_ATTR_RESTORE_TYPE);
+ if (r_iter != attrs.end()) {
+ rgw::sal::RGWRestoreType restore_type;
+ using ceph::decode;
+ decode(restore_type, r_iter->second);
+ if (restore_type != rgw::sal::RGWRestoreType::Temporary) {
+ attrs.erase(attr_iter);
+ }
+ }
+ } catch (buffer::error& err) {
+ return -EIO;
+ }
+ }
+ return 0;
}
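Note the contract change above: read_cloudtier_info_from_attrs() now returns int instead of void, surfacing decode failures of the tier config or restore attrs as -EIO rather than silently continuing; the caller shown next is updated to propagate that error.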
int HeadObjectProcessor::process(bufferlist&& data, uint64_t logical_offset)
obj_op.meta.zones_trace = zones_trace;
obj_op.meta.modify_tail = true;
- read_cloudtier_info_from_attrs(attrs, obj_op.meta.category, manifest);
+ r = read_cloudtier_info_from_attrs(attrs, obj_op.meta.category, obj_op.meta.olh_epoch, manifest);
+
+ if (r < 0) { // in case of any errors while decoding tier_config/restore attrs
+ return r;
+ }
r = obj_op.write_meta(actual_size, accounted_size, attrs, rctx,
writer.get_trace(), flags & rgw::sal::FLAG_LOG_OP);
const rgw_obj& dest_obj,
rgw_placement_rule& dest_placement,
RGWObjTier& tier_config,
- real_time& mtime,
uint64_t olh_epoch,
std::optional<uint64_t> days,
const DoutPrefixProvider *dpp,
//XXX: read below from attrs .. check transition_obj()
ACLOwner owner;
rgw::sal::Attrs attrs;
+ real_time mtime;
const req_context rctx{dpp, y, nullptr};
int ret = 0;
bufferlist t, t_tier;
return 0;
});
+ // fetch the mtime and other attrs of the object
+ // to check for restore_status
+ RGWRados::Object op_target(this, dest_bucket_info, obj_ctx, dest_obj);
+ RGWRados::Object::Read read_op(&op_target);
+ read_op.params.lastmod = &mtime;
+ read_op.params.attrs = &attrs;
+
+ ret = read_op.prepare(y, dpp);
+ if (ret < 0) {
+ ldpp_dout(dpp, 0) << "Restoring object(" << dest_obj << "), read_op failed ret=" << ret << dendl;
+ return ret;
+ }
+
uint64_t accounted_size = 0;
string etag;
real_time set_mtime;
return ret;
}
+ {
+ if (!olh_epoch) {
+ const auto aiter = attrs.find("x-amz-meta-rgwx-versioned-epoch");
+ if (aiter != std::end(attrs)) {
+ std::optional<uint64_t> olh_ep = ceph::parse<uint64_t>(rgw_bl_str(aiter->second));
+ if (olh_ep) {
+ olh_epoch = *olh_ep;
+ }
+ attrs.erase(aiter);
+ }
+ }
+ if (olh_epoch) { // needed only for versioned objects
+ bufferlist bl;
+ encode(olh_epoch, bl);
+ attrs[RGW_ATTR_RESTORE_VERSIONED_EPOCH] = std::move(bl);
+ }
+ }
+
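Note the asymmetry in the block above: the epoch arrives as plain text (it was carried as x-amz-meta-rgwx-versioned-epoch user metadata, hence ceph::parse on the raw string), but it is re-stored in the binary encoding that the decode() sites expect. An illustrative round trip, with a made-up value:

    bufferlist in;
    in.append("7"); // text form, as received in the x-amz-meta header
    std::optional<uint64_t> ep = ceph::parse<uint64_t>(rgw_bl_str(in));
    // *ep == 7

    bufferlist out;
    encode(*ep, out); // binary form stored in RGW_ATTR_RESTORE_VERSIONED_EPOCH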
{
bufferlist bl;
encode(rgw::sal::RGWRestoreStatus::CloudRestored, bl);
attrs[RGW_ATTR_STORAGE_CLASS] = std::move(bl);
}
+ for (auto& iter: attrs) {
+ ldpp_dout(dpp, 30) << "Restore attrs set: " << iter.first << dendl;
+ }
// XXX: handle COMPLETE_RETRY like in fetch_remote_obj
bool canceled = false;
rgw_zone_set zone_set{};
return ret;
}
- // XXX: handle olh_epoch for versioned objects like in fetch_remote_obj
return ret;
}
const rgw_obj& dest_obj,
rgw_placement_rule& dest_placement,
RGWObjTier& tier_config,
- real_time& mtime,
uint64_t olh_epoch,
std::optional<uint64_t> days,
const DoutPrefixProvider *dpp,
rgw_bucket_dir_entry& o,
CephContext* cct,
RGWObjTier& tier_config,
- real_time& mtime,
uint64_t olh_epoch,
std::optional<uint64_t> days,
const DoutPrefixProvider* dpp,
int ret = 0;
string src_storage_class = o.meta.storage_class; // or take src_placement also as input
- // fetch mtime of the object
- std::unique_ptr<rgw::sal::Object::ReadOp> read_op(get_read_op());
- read_op->params.lastmod = &mtime;
-
- ret = read_op->prepare(y, dpp);
- if (ret < 0) {
- ldpp_dout(dpp, 0) << "Restoring object(" << o.key << "): read_op failed ret=" << ret << dendl;
- return ret;
- }
-
if (bucket_name.empty()) {
bucket_name = "rgwx-" + zonegroup.get_name() + "-" + tier->get_storage_class() +
"-cloud-bucket";
ret = store->getRados()->restore_obj_from_cloud(tier_ctx, *rados_ctx,
bucket->get_info(), get_obj(), placement_rule,
tier_config,
- mtime, olh_epoch, days, dpp, y, flags & FLAG_LOG_OP);
+ olh_epoch, days, dpp, y, flags & FLAG_LOG_OP);
if (ret < 0) { //failed to restore
ldpp_dout(dpp, 0) << "Restoring object(" << o.key << ") from the cloud endpoint(" << endpoint << ") failed, ret=" << ret << dendl;
RGWObjManifest *pmanifest;
pmanifest = &m;
- Object* head_obj = (Object*)this;
+ Object* head_obj = (Object*)this;
RGWObjTier tier_config;
m.get_tier_config(&tier_config);
pmanifest->set_obj_size(0);
obj_op.meta.manifest = pmanifest;
+ auto v_iter = attrs.find(RGW_ATTR_RESTORE_VERSIONED_EPOCH);
+ if (v_iter != attrs.end()) {
+ uint64_t versioned_epoch;
+ using ceph::decode;
+ decode(versioned_epoch, v_iter->second);
+ obj_op.meta.olh_epoch = versioned_epoch;
+ }
+
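This is the "reset HEAD object post expiry" path from the commit message: the epoch saved at restore time is replayed into obj_op.meta.olh_epoch so that the rewritten stub is relinked to the same non-current version instead of becoming the current one.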
// erase restore attrs
attrs.erase(RGW_ATTR_RESTORE_STATUS);
attrs.erase(RGW_ATTR_RESTORE_TYPE);
attrs.erase(RGW_ATTR_RESTORE_TIME);
attrs.erase(RGW_ATTR_RESTORE_EXPIRY_DATE);
attrs.erase(RGW_ATTR_CLOUDTIER_STORAGE_CLASS);
+ attrs.erase(RGW_ATTR_RESTORE_VERSIONED_EPOCH);
bufferlist bl;
bl.append(tier_config.name);
rgw_bucket_dir_entry& o,
CephContext* cct,
RGWObjTier& tier_config,
- real_time& mtime,
uint64_t olh_epoch,
std::optional<uint64_t> days,
const DoutPrefixProvider* dpp,
#define RGW_ATTR_RESTORE_TIME RGW_ATTR_PREFIX "restored-at"
#define RGW_ATTR_RESTORE_EXPIRY_DATE RGW_ATTR_PREFIX "restore-expiry-date"
#define RGW_ATTR_TRANSITION_TIME RGW_ATTR_PREFIX "transition-at"
+#define RGW_ATTR_RESTORE_VERSIONED_EPOCH RGW_ATTR_PREFIX "restore-versioned-epoch"
#define RGW_ATTR_TEMPURL_KEY1 RGW_ATTR_META_PREFIX "temp-url-key"
#define RGW_ATTR_TEMPURL_KEY2 RGW_ATTR_META_PREFIX "temp-url-key-2"
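Since RGW_ATTR_PREFIX expands to "user.rgw.", the new xattr is stored as user.rgw.restore-versioned-epoch, next to the other restore-* attrs defined above.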
// fill in the entry. XXX: Maybe we can avoid it by passing only necessary params
rgw_bucket_dir_entry ent;
ent.key.name = s->object->get_key().name;
+ ent.key.instance = s->object->get_key().instance;
ent.meta.accounted_size = ent.meta.size = s->obj_size;
ent.meta.etag = "" ;
- ceph::real_time mtime = s->object->get_mtime();
uint64_t epoch = 0;
op_ret = get_system_versioning_params(s, &epoch, NULL);
+ if (!ent.key.instance.empty()) { // non-current versioned object
+ ent.flags |= rgw_bucket_dir_entry::FLAG_VER;
+ }
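FLAG_VER marks the bucket index entry as a versioned object instance; it is set here whenever the key carries an instance so the restored entry is processed as a versioned one rather than as a plain current-object write.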
ldpp_dout(dpp, 20) << "getting versioning params tier placement handle cloud tier" << op_ret << dendl;
if (op_ret < 0) {
ldpp_dout(dpp, 20) << "failed to get versioning params, op_ret = " << op_ret << dendl;
s->err.message = "failed to restore object";
return op_ret;
}
- op_ret = s->object->restore_obj_from_cloud(pbucket, tier.get(), target_placement, ent, s->cct, tier_config,
- mtime, epoch, days, dpp, y, s->bucket->get_info().flags);
+ op_ret = s->object->restore_obj_from_cloud(pbucket, tier.get(), target_placement, ent,
+ s->cct, tier_config, epoch,
+ days, dpp, y, s->bucket->get_info().flags);
if (op_ret < 0) {
ldpp_dout(dpp, 0) << "object " << ent.key.name << " fetching failed" << op_ret << dendl;
s->err.message = "failed to restore object";
rgw_bucket_dir_entry& o,
CephContext* cct,
RGWObjTier& tier_config,
- real_time& mtime,
uint64_t olh_epoch,
std::optional<uint64_t> days,
const DoutPrefixProvider* dpp,
rgw_bucket_dir_entry& o,
CephContext* cct,
RGWObjTier& tier_config,
- real_time& mtime,
uint64_t olh_epoch,
std::optional<uint64_t> days,
const DoutPrefixProvider* dpp,
uint32_t flags)
{
return next->restore_obj_from_cloud(nextBucket(bucket), nextPlacementTier(tier),
- placement_rule, o, cct, tier_config, mtime, olh_epoch, days, dpp, y, flags);
+ placement_rule, o, cct, tier_config, olh_epoch, days, dpp, y, flags);
}
bool FilterObject::placement_rules_match(rgw_placement_rule& r1, rgw_placement_rule& r2)
rgw_bucket_dir_entry& o,
CephContext* cct,
RGWObjTier& tier_config,
- real_time& mtime,
uint64_t olh_epoch,
std::optional<uint64_t> days,
const DoutPrefixProvider* dpp,
rgw_bucket_dir_entry& o,
CephContext* cct,
RGWObjTier& tier_config,
- real_time& mtime,
uint64_t olh_epoch,
std::optional<uint64_t> days,
const DoutPrefixProvider* dpp,