rgw: configurable multisite sync concurrency windows (45784/head)
author    Casey Bodley <cbodley@redhat.com>
          Tue, 5 Apr 2022 17:45:24 +0000 (13:45 -0400)
committer Casey Bodley <cbodley@redhat.com>
          Wed, 6 Apr 2022 17:08:13 +0000 (13:08 -0400)
Signed-off-by: Casey Bodley <cbodley@redhat.com>
doc/radosgw/config-ref.rst
src/common/options/rgw.yaml.in
src/rgw/rgw_data_sync.cc
src/rgw/rgw_sync.cc

diff --git a/doc/radosgw/config-ref.rst b/doc/radosgw/config-ref.rst
index eb68f8e960c7f8aada6c21d29412108dc4c7ad7b..3520f12abb24ae783858e4e6cb334204b4915e82 100644
@@ -148,6 +148,9 @@ file under each ``[client.radosgw.{instance-name}]`` instance.
 .. confval:: rgw_md_log_max_shards
 .. confval:: rgw_data_sync_poll_interval
 .. confval:: rgw_meta_sync_poll_interval
+.. confval:: rgw_bucket_sync_spawn_window
+.. confval:: rgw_data_sync_spawn_window
+.. confval:: rgw_meta_sync_spawn_window
 
 .. important:: The values of :confval:`rgw_data_log_num_shards` and
    :confval:`rgw_md_log_max_shards` should not be changed after sync has
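
All three new windows default to 20 and are dev-level options, so sync behaviour
is unchanged unless they are explicitly overridden. If tuning is needed, they can
be set like any other RGW option, e.g. in the per-instance section that the
surrounding config-ref text refers to; the instance name and the value 40 below
are purely illustrative:

    [client.radosgw.gateway-1]
        rgw_bucket_sync_spawn_window = 40
        rgw_data_sync_spawn_window = 40
        rgw_meta_sync_spawn_window = 40

or at runtime with something along the lines of
"ceph config set client.rgw rgw_data_sync_spawn_window 40".
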
diff --git a/src/common/options/rgw.yaml.in b/src/common/options/rgw.yaml.in
index 58b3f91f864a23b6e523c01495c472eba6d5c9a1..dd977d4e218d9f66509d7fffbbd0e1a6a694179d 100644
@@ -1979,6 +1979,42 @@ options:
   see_also:
   - rgw_data_sync_poll_interval
   with_legacy: true
+- name: rgw_bucket_sync_spawn_window
+  type: int
+  level: dev
+  default: 20
+  fmt_desc: The maximum number of items that bucket sync is willing to
+    process in parallel (per remote bilog shard).
+  services:
+  - rgw
+  see_also:
+  - rgw_data_sync_spawn_window
+  - rgw_meta_sync_spawn_window
+  with_legacy: true
+- name: rgw_data_sync_spawn_window
+  type: int
+  level: dev
+  default: 20
+  fmt_desc: The maximum number of items that data sync is willing to
+    process in parallel (per remote datalog shard).
+  services:
+  - rgw
+  see_also:
+  - rgw_bucket_sync_spawn_window
+  - rgw_meta_sync_spawn_window
+  with_legacy: true
+- name: rgw_meta_sync_spawn_window
+  type: int
+  level: dev
+  default: 20
+  fmt_desc: The maximum number of items that metadata sync is willing to
+    process in parallel (per remote mdlog shard).
+  services:
+  - rgw
+  see_also:
+  - rgw_bucket_sync_spawn_window
+  - rgw_data_sync_spawn_window
+  with_legacy: true
 - name: rgw_bucket_quota_ttl
   type: int
   level: advanced
diff --git a/src/rgw/rgw_data_sync.cc b/src/rgw/rgw_data_sync.cc
index 2bfb0a10042bf4ad47f4251ffaecac87c02cc767..822ace39c404c460580b6c9be88acf8ce8710111 100644
@@ -1391,7 +1391,6 @@ public:
   }
 };
 
-#define BUCKET_SHARD_SYNC_SPAWN_WINDOW 20
 #define DATA_SYNC_MAX_ERR_ENTRIES 10
 
 class RGWDataSyncShardCR : public RGWCoroutine {
@@ -1429,7 +1428,6 @@ class RGWDataSyncShardCR : public RGWCoroutine {
   set<string>::iterator modified_iter;
 
   uint64_t total_entries = 0;
-  static constexpr int spawn_window = BUCKET_SHARD_SYNC_SPAWN_WINDOW;
   bool *reset_backoff = nullptr;
 
   boost::intrusive_ptr<RGWContinuousLeaseCR> lease_cr;
@@ -1594,7 +1592,7 @@ public:
             // fetch remote and write locally
             yield_spawn_window(sync_single_entry(source_bs, iter->first, iter->first,
                                                  entry_timestamp, false),
-                               spawn_window, std::nullopt);
+                               cct->_conf->rgw_data_sync_spawn_window, std::nullopt);
           }
           sync_marker.marker = iter->first;
         }
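
For context, yield_spawn_window() spawns the given coroutine and then, once more
children than the configured window are in flight, waits for some of them to
finish before continuing. The following is a minimal standalone sketch of that
throttling idea using plain threads via std::async; it is not RGW's coroutine
machinery, and the function names and workload are illustrative only:

// toy_spawn_window.cc -- keep at most `window` work items in flight,
// waiting for the oldest one to finish before starting more. A plain
// std::async stand-in for what yield_spawn_window() does with
// coroutine stacks.
#include <cstddef>
#include <deque>
#include <future>
#include <iostream>

void sync_single_entry(int key)   // placeholder for per-entry sync work
{
  (void)key;                      // fetch remote and write locally ...
}

int main()
{
  const std::size_t window = 20;  // cf. rgw_data_sync_spawn_window
  std::deque<std::future<void>> inflight;

  for (int key = 0; key < 100; ++key) {
    inflight.push_back(std::async(std::launch::async, sync_single_entry, key));
    while (inflight.size() > window) {   // window full: wait for the oldest
      inflight.front().get();
      inflight.pop_front();
    }
  }
  for (auto& f : inflight)               // drain whatever is still running
    f.get();
  std::cout << "processed 100 entries with a window of " << window << "\n";
}
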
@@ -1743,7 +1741,7 @@ public:
           } else {
             yield_spawn_window(sync_single_entry(source_bs, log_iter->entry.key, log_iter->log_id,
                                                  log_iter->log_timestamp, false),
-                               spawn_window, std::nullopt);
+                               cct->_conf->rgw_data_sync_spawn_window, std::nullopt);
           }
         }
 
@@ -3806,7 +3804,7 @@ int RGWBucketShardFullSyncCR::operate(const DoutPrefixProvider *dpp)
                                  entry->key, &marker_tracker, zones_trace, tn),
                       false);
         }
-        drain_with_cb(BUCKET_SYNC_SPAWN_WINDOW,
+        drain_with_cb(cct->_conf->rgw_bucket_sync_spawn_window,
                       [&](uint64_t stack_id, int ret) {
                 if (ret < 0) {
                   tn->log(10, "a sync operation returned error");
@@ -4103,7 +4101,7 @@ int RGWBucketShardIncrementalSyncCR::operate(const DoutPrefixProvider *dpp)
                   false);
           }
         // }
-        drain_with_cb(BUCKET_SYNC_SPAWN_WINDOW,
+        drain_with_cb(cct->_conf->rgw_bucket_sync_spawn_window,
                       [&](uint64_t stack_id, int ret) {
                 if (ret < 0) {
                   tn->log(10, "a sync operation returned error");
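
drain_with_cb(), used above for bucket sync, is the companion helper: once more
children than the window are in flight, it collects completed ones and hands each
return code to the callback, so the loop can note that a sync operation returned
an error without aborting the whole shard. Below is a hedged sketch of the same
shape in the same standalone thread-based model as the previous example; the
helper signature and error policy here are illustrative, not RGW's actual API:

// toy_drain_with_cb.cc -- completed work is handed to a callback so the
// caller can record errors and keep going.
#include <cstddef>
#include <deque>
#include <functional>
#include <future>
#include <iostream>

int sync_object(int key)              // placeholder: < 0 means the entry failed
{
  return (key % 13 == 0) ? -5 : 0;
}

// Pop finished tasks until we are back under `window`, passing each
// return code to `cb`.
void drain_with_cb(std::deque<std::future<int>>& inflight, std::size_t window,
                   const std::function<void(int)>& cb)
{
  while (inflight.size() > window) {
    cb(inflight.front().get());
    inflight.pop_front();
  }
}

int main()
{
  const std::size_t window = 20;      // cf. rgw_bucket_sync_spawn_window
  std::deque<std::future<int>> inflight;
  int sync_status = 0;                // remember the first error, keep going

  for (int key = 0; key < 100; ++key) {
    inflight.push_back(std::async(std::launch::async, sync_object, key));
    drain_with_cb(inflight, window, [&](int ret) {
      if (ret < 0) {
        std::cerr << "a sync operation returned error " << ret << "\n";
        if (sync_status == 0)
          sync_status = ret;
      }
    });
  }
  drain_with_cb(inflight, 0, [&](int ret) {   // window of 0: drain everything
    if (ret < 0 && sync_status == 0)
      sync_status = ret;
  });
  return sync_status < 0 ? 1 : 0;
}
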
diff --git a/src/rgw/rgw_sync.cc b/src/rgw/rgw_sync.cc
index 7fec1d5ba154ce611f1051446c7b54a2d9895fc0..db314b9a61053bf17afb0c25f610838ef460cea8 100644
@@ -1394,8 +1394,6 @@ public:
   int state_store_mdlog_entries_complete();
 };
 
-#define META_SYNC_SPAWN_WINDOW 20
-
 class RGWMetaSyncShardCR : public RGWCoroutine {
   RGWMetaSyncEnv *sync_env;
 
@@ -1626,7 +1624,7 @@ public:
               pos_to_prev[marker] = marker;
             }
             // limit spawn window
-            while (num_spawned() > META_SYNC_SPAWN_WINDOW) {
+            while (num_spawned() > cct->_conf->rgw_meta_sync_spawn_window) {
               yield wait_for_child();
               collect_children();
             }
@@ -1824,7 +1822,7 @@ public:
                 pos_to_prev[log_iter->id] = marker;
               }
               // limit spawn window
-              while (num_spawned() > META_SYNC_SPAWN_WINDOW) {
+              while (num_spawned() > cct->_conf->rgw_meta_sync_spawn_window) {
                 yield wait_for_child();
                 collect_children();
               }