qa/rgw: Include rgw_restore_processor_period in s3tests
author     Soumya Koduri <skoduri@redhat.com>
           Thu, 28 Aug 2025 14:13:45 +0000 (19:43 +0530)
committer  Soumya Koduri <skoduri@redhat.com>
           Mon, 22 Sep 2025 18:12:50 +0000 (23:42 +0530)
Signed-off-by: Soumya Koduri <skoduri@redhat.com>
qa/suites/rgw/cloud-transition/overrides.yaml
qa/suites/rgw/cloud-transition/tasks/restore/cloud_restore_s3tests.yaml
qa/tasks/s3tests.py
src/rgw/rgw_restore.cc
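
Taken together, the qa changes expose the restore processor period to s3-tests in the same way as the existing restore debug interval: the override and task values below are copied by qa/tasks/s3tests.py into the generated s3tests.conf. A sketch of the resulting [s3 main] entries, using the values from the hunks below (other keys omitted):

    [s3 main]
    rgw_restore_debug_interval = 20
    rgw_restore_processor_period = 10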

diff --git a/qa/suites/rgw/cloud-transition/overrides.yaml b/qa/suites/rgw/cloud-transition/overrides.yaml
index b35c134125e6e13dd5817b4ea8c955e0dbc45db6..1e25002ad79caa5e198af6e70ea0335f840a8507 100644
--- a/qa/suites/rgw/cloud-transition/overrides.yaml
+++ b/qa/suites/rgw/cloud-transition/overrides.yaml
@@ -12,6 +12,7 @@ overrides:
         rgw s3 auth use sts: true
         rgw lc debug interval: 10
         rgw_restore_debug_interval: 20
+        rgw_restore_processor_period: 10
   rgw:
     storage classes:
       LUKEWARM:
diff --git a/qa/suites/rgw/cloud-transition/tasks/restore/cloud_restore_s3tests.yaml b/qa/suites/rgw/cloud-transition/tasks/restore/cloud_restore_s3tests.yaml
index 26b3e70e012fb92bd34ccd9f45f3a625f782070f..7517720c9924b1638792ceafa29bd689d515d9d8 100644
--- a/qa/suites/rgw/cloud-transition/tasks/restore/cloud_restore_s3tests.yaml
+++ b/qa/suites/rgw/cloud-transition/tasks/restore/cloud_restore_s3tests.yaml
@@ -27,5 +27,6 @@ tasks:
       extra_attrs: ["cloud_restore"]
       lc_debug_interval: 10
       rgw_restore_debug_interval: 20
+      rgw_restore_processor_period: 10
       lifecycle_tests: True
       cloudtier_tests: True
diff --git a/qa/tasks/s3tests.py b/qa/tasks/s3tests.py
index 8de299171d767d0e7f02475abe25566541ccd27c..93d370251c14c7e38789836b132dbc02e1d55213 100644
--- a/qa/tasks/s3tests.py
+++ b/qa/tasks/s3tests.py
@@ -372,6 +372,10 @@ def configure(ctx, config):
         if rgw_restore_debug_interval:
             s3tests_conf['s3 main']['rgw_restore_debug_interval'] = rgw_restore_debug_interval
 
+        rgw_restore_processor_period = properties.get('rgw_restore_processor_period')
+        if rgw_restore_processor_period:
+            s3tests_conf['s3 main']['rgw_restore_processor_period'] = rgw_restore_processor_period
+
         if ctx.rgw_cloudtier is not None:
             log.info(' ctx.rgw_cloudtier config  is %s ...', ctx.rgw_cloudtier.config)
             client_rgw_config = ctx.rgw_cloudtier.config.get(client)
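
With the key present in [s3 main], a cloud-restore test can scale its wait time to the processor period instead of hard-coding it. A hypothetical, stand-alone sketch (not part of this commit or of s3-tests) that reads the generated s3tests.conf with Python's configparser; the file path and the fallback values are assumptions, the fallbacks simply mirroring the yaml overrides above:

    import configparser

    def restore_wait_budget(conf_path='s3tests.conf'):
        """Derive a polling budget (seconds) for restore completion checks.

        Reads the [s3 main] keys written by qa/tasks/s3tests.py configure().
        """
        cfg = configparser.ConfigParser()
        cfg.read(conf_path)
        period = cfg.getint('s3 main', 'rgw_restore_processor_period', fallback=10)
        debug_interval = cfg.getint('s3 main', 'rgw_restore_debug_interval', fallback=20)
        # Allow a few processor passes plus one debug interval before timing out.
        return 3 * period + debug_interval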
diff --git a/src/rgw/rgw_restore.cc b/src/rgw/rgw_restore.cc
index 87f805259ee72a3e16efdb7d165a69ec7ab99966..2d9a61d077758657643fe447cf1ce39508b6a67d 100644
--- a/src/rgw/rgw_restore.cc
+++ b/src/rgw/rgw_restore.cc
@@ -341,13 +341,14 @@ int Restore::process(int index, int max_secs, optional_yield y)
       ret = process_restore_entry(entry, y);
 
       if (!ret && entry.status == rgw::sal::RGWRestoreStatus::RestoreAlreadyInProgress) {
-        r_entries.push_back(entry);
+        r_entries.push_back(entry);
          ldpp_dout(this, 20) << __PRETTY_FUNCTION__ << ": re-pushing entry: '" << entry
                         << "' on shard:"
                         << obj_names[index] << dendl;   
       }
 
-      if (ret < 0)
+      // Skip the entry of object/bucket which no longer exists
+      if (ret < 0 && (ret != -ENOENT))
         goto done;
 
       ///process all entries, trim and re-add