]> git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
rgw/multisite: on reaching incremental sync, write rest of the shards of a
authorShilpa Jagannath <smanjara@redhat.com>
Fri, 3 Jun 2022 20:29:36 +0000 (16:29 -0400)
committerAdam C. Emerson <aemerson@redhat.com>
Mon, 8 Aug 2022 19:44:15 +0000 (15:44 -0400)
bucket to error repo instead of triggering a bucket sync on them.

Signed-off-by: Shilpa Jagannath <smanjara@redhat.com>
src/rgw/rgw_data_sync.cc
src/rgw/rgw_data_sync.h
src/rgw/rgw_rest_log.cc

index 88cb9db899fd04a5077371667a2da9f389fbb5d7..49efc9c96028a53e005cebafd2e8e114ccb2026f 100644 (file)
@@ -1477,6 +1477,15 @@ class RGWDataSyncShardCR : public RGWCoroutine {
     return rgw_bucket_parse_bucket_key(sync_env->cct, key,
                                        &bs.bucket, &bs.shard_id);
   }
+
+  rgw_raw_obj datalog_oid_for_error_repo(rgw_bucket_shard& bs) {
+    auto shard_shift = (bs.shard_id > 0 ? bs.shard_id : 0);
+    auto datalog_shard = (ceph_str_hash_linux(bs.bucket.name.data(), bs.bucket.name.size()) +
+        shard_shift) % cct->_conf->rgw_data_log_num_shards;
+    string oid = RGWDataSyncStatusManager::shard_obj_name(sc->source_zone, datalog_shard);
+    return rgw_raw_obj(pool, oid + ".retry");
+  }
+
   RGWCoroutine* sync_single_entry(const rgw_bucket_shard& src,
                                   std::optional<uint64_t> gen,
                                   const std::string& marker,
@@ -1749,6 +1758,32 @@ public:
             marker_tracker->try_update_high_marker(log_iter->log_id, 0, log_iter->log_timestamp);
             continue;
           }
+          if (!log_iter->entry.gen) {
+            yield {
+              rgw_bucket_index_marker_info remote_info;
+              BucketIndexShardsManager remote_markers;
+              retcode = rgw_read_remote_bilog_info(sync_env->dpp, sc->conn, source_bs.bucket,
+                            remote_info, remote_markers, null_yield);
+
+              if (retcode < 0) {
+                tn->log(1, SSTR(" rgw_read_remote_bilog_info failed with retcode=" << retcode));
+                return retcode;
+              }
+              for (const auto& each : remote_info.gen_numshards) {
+                for (int sid = 0; sid < each.second; sid++) {
+                  rgw_bucket_shard bs(source_bs.bucket, sid);
+                  error_repo = datalog_oid_for_error_repo(bs);
+                  tn->log(10, SSTR("writing shard_id " << sid << " of gen " << each.first << " to error repo for retry"));
+                  call(rgw::error_repo::write_cr(sync_env->store->svc()->rados, error_repo,
+                                                      rgw::error_repo::encode_key(bs, each.first),
+                                                      ceph::real_time{}));
+                  if (retcode < 0) {
+                    tn->log(0, SSTR("ERROR: failed to log sync failure in error repo: retcode=" << retcode));
+                  }
+                }
+              }
+            }
+          }
           if (!marker_tracker->start(log_iter->log_id, 0, log_iter->log_timestamp)) {
             tn->log(0, SSTR("ERROR: cannot start syncing " << log_iter->log_id << ". Duplicate entry?"));
           } else {
index 3eb809f08f69076b5473caff891c7553edd079f2..07814a7ec8736b62a1cc309f3e38dba7d5fd0c2e 100644 (file)
@@ -674,6 +674,7 @@ struct rgw_bucket_index_marker_info {
   bool syncstopped{false};
   uint64_t oldest_gen = 0;
   uint64_t latest_gen = 0;
+  std::vector<std::pair<int, int>> gen_numshards;
 
   void decode_json(JSONObj *obj) {
     JSONDecoder::decode_json("bucket_ver", bucket_ver, obj);
index fe91a16de60bb97a4dad02f6b5fcd8dc9a80e489..500c85bac6f95883e55266c23883dcb0d10f9eb4 100644 (file)
@@ -565,6 +565,18 @@ void RGWOp_BILog_Info::execute(optional_yield y) {
 
   oldest_gen = logs.front().gen;
   latest_gen = logs.back().gen;
+
+  std::vector<std::pair<int, int>> gen_numshards;
+  for (auto gen = logs.front().gen; gen <= logs.back().gen; gen++) {
+    auto log = std::find_if(logs.begin(), logs.end(), rgw::matches_gen(gen));
+    if (log == logs.end()) {
+      ldpp_dout(s, 5) << "ERROR: no log layout with gen=" << gen << dendl;
+      op_ret = -ENOENT;
+      return;
+    }
+    const auto& num_shards = log->layout.in_index.layout.num_shards;
+    gen_numshards.push_back(std::make_pair(gen, num_shards));
+  }
 }
 
 void RGWOp_BILog_Info::send_response() {