git.apps.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
qa: Edit test files to incorporate unset arguments, crush_rule, size, min_size

author    Kamoltat Sirivadhna <ksirivad@redhat.com>
          Fri, 22 Nov 2024 20:33:17 +0000 (20:33 +0000)
committer Kamoltat Sirivadhna <ksirivad@redhat.com>
          Tue, 18 Mar 2025 14:52:36 +0000 (14:52 +0000)

Fixes: https://tracker.ceph.com/issues/68842
Signed-off-by: Kamoltat Sirivadhna <ksirivad@redhat.com>
qa/tasks/stretch_cluster.py
qa/tasks/test_netsplit_3az_stretch_pool.py
qa/workunits/mon/mon-stretch-pool.sh
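
The diffs below exercise the revised `ceph osd pool stretch unset` form, which now takes the replacement crush_rule, size, and min_size in addition to the pool name. A minimal sketch of that call as the tests drive it, assuming a reachable cluster and the stock ceph CLI (the pool, rule, size, and min_size values shown here are illustrative, not part of this commit):

# Sketch only: issues the same unset command the updated tests run through
# raw_cluster_cmd; pool, rule, size, and min_size values are assumptions.
import subprocess

def stretch_unset(pool, crush_rule, size, min_size):
    """Revert a stretch pool to a plain replicated rule with the given size/min_size."""
    subprocess.run(
        ['ceph', 'osd', 'pool', 'stretch', 'unset',
         pool, crush_rule, str(size), str(min_size)],
        check=True)

# Matches the arguments the updated tests pass:
# stretch_unset('pool_stretch', 'replicated_rule', 6, 3)
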

qa/tasks/stretch_cluster.py
index 48acf0025ee95e9f06b2e9448d7a1de8fda5dcb9..0038b9d4dcb8f81884373e1ca44ce4cac8657f84 100644 (file)
@@ -58,6 +58,7 @@ class TestStretchCluster(MgrTestCase):
     PEERING_CRUSH_BUCKET_TARGET = 3
     PEERING_CRUSH_BUCKET_BARRIER = 'datacenter'
     CRUSH_RULE = 'replicated_rule_custom'
+    DEFAULT_CRUSH_RULE = 'replicated_rule'
     SIZE = 6
     MIN_SIZE = 3
     BUCKET_MAX = SIZE // PEERING_CRUSH_BUCKET_TARGET
@@ -594,6 +595,17 @@ class TestStretchCluster(MgrTestCase):
             success_hold_time=self.SUCCESS_HOLD_TIME
         )
 
+        # Unset the pool back to the default replicated rule; expect PGs to be 100% active+clean
+        self.mgr_cluster.mon_manager.raw_cluster_cmd(
+            'osd', 'pool', 'stretch', 'unset',
+            self.POOL, self.DEFAULT_CRUSH_RULE,
+            str(self.SIZE), str(self.MIN_SIZE))
+        self.wait_until_true_and_hold(
+            lambda: self._pg_all_active_clean(),
+            timeout=self.RECOVERY_PERIOD,
+            success_hold_time=self.SUCCESS_HOLD_TIME
+        )
+
     def test_set_stretch_pool_no_active_pgs(self):
         """
         Test setting a pool to stretch cluster and checks whether
@@ -686,10 +698,20 @@ class TestStretchCluster(MgrTestCase):
             timeout=self.RECOVERY_PERIOD,
             success_hold_time=self.SUCCESS_HOLD_TIME)
 
-        # Bring back osds iin DC2 expects PGs to be 100% active+clean
+        # Bring back osds in DC2 expects PGs to be 100% active+clean
         self._bring_back_all_osds_in_dc('dc2')
         self.wait_until_true_and_hold(
             lambda: self._pg_all_active_clean(),
             timeout=self.RECOVERY_PERIOD,
             success_hold_time=self.SUCCESS_HOLD_TIME
         )
+        # Unset the pool back to the default replicated rule; expect PGs to be 100% active+clean
+        self.mgr_cluster.mon_manager.raw_cluster_cmd(
+            'osd', 'pool', 'stretch', 'unset',
+            self.POOL, self.DEFAULT_CRUSH_RULE,
+            str(self.SIZE), str(self.MIN_SIZE))
+        self.wait_until_true_and_hold(
+            lambda: self._pg_all_active_clean(),
+            timeout=self.RECOVERY_PERIOD,
+            success_hold_time=self.SUCCESS_HOLD_TIME
+        )
\ No newline at end of file
qa/tasks/test_netsplit_3az_stretch_pool.py
index 11f1d8d273e41efb897f2cb54c347bf60426dd05..195eab5fe1420d99bfc27623cfa949fb11a53a04 100755 (executable)
@@ -21,6 +21,7 @@ class TestNetSplit(CephTestCase):
     PEERING_CRUSH_BUCKET_BARRIER = 'datacenter'
     POOL = 'pool_stretch'
     CRUSH_RULE = 'replicated_rule_custom'
+    DEFAULT_CRUSH_RULE = 'replicated_rule'
     SIZE = 6
     MIN_SIZE = 3
     BUCKET_MAX = SIZE // PEERING_CRUSH_BUCKET_TARGET
@@ -278,4 +279,14 @@ class TestNetSplit(CephTestCase):
             timeout=self.RECOVERY_PERIOD,
             success_hold_time=self.SUCCESS_HOLD_TIME
         )
+        # Unset the pool back to the default replicated rule; expect PGs to be 100% active+clean
+        self.mgr_cluster.mon_manager.raw_cluster_cmd(
+            'osd', 'pool', 'stretch', 'unset',
+            self.POOL, self.DEFAULT_CRUSH_RULE,
+            str(self.SIZE), str(self.MIN_SIZE))
+        self.wait_until_true_and_hold(
+            lambda: self._pg_all_active_clean(),
+            timeout=self.RECOVERY_PERIOD,
+            success_hold_time=self.SUCCESS_HOLD_TIME
+        )
         log.info("test_mon_netsplit passed!")
qa/workunits/mon/mon-stretch-pool.sh
index 2c62082db50991e36aade89b066a3a87f9b4e470..066db81eac0b31cdf407ba3e8b6a7905c5ea5704 100755 (executable)
@@ -97,10 +97,12 @@ expect_false ceph osd pool stretch set non_exist_pool 2 3 datacenter $TEST_CRUSH
 expect_false ceph osd pool stretch set $TEST_POOL_STRETCH 2 3 non_exist_barrier $TEST_CRUSH_RULE 6 3
 # Non existence crush_rule should return appropriate error
 expect_false ceph osd pool stretch set $TEST_POOL_STRETCH 2 3 datacenter $TEST_CRUSH_RULE 6 3
+# Unsetting a pool with missing arguments should return appropriate error
+expect_false ceph osd pool stretch unset $TEST_POOL_STRETCH
 # Unsetting a non existence pool should return error
-expect_false ceph osd pool stretch unset non_exist_pool
+expect_false ceph osd pool stretch unset non_exist_pool replicated_rule 6 3
 # Unsetting a non-stretch pool should return error
-expect_false ceph osd pool stretch unset $TEST_POOL_STRETCH
+expect_false ceph osd pool stretch unset $TEST_POOL_STRETCH replicated_rule 6 3
 
 # Create a custom crush rule
 ceph osd getcrushmap > crushmap
@@ -139,7 +141,7 @@ expect_true ceph osd pool stretch set $TEST_POOL_STRETCH 2 3 datacenter $TEST_CR
 expect_true ceph osd pool stretch show $TEST_POOL_STRETCH
 
 # Unset the stretch pool and expects it to work
-expect_true ceph osd pool stretch unset $TEST_POOL_STRETCH
+expect_true ceph osd pool stretch unset $TEST_POOL_STRETCH replicated_rule 6 3
 # try to show the stretch pool values again, should return error since
 # the pool is not a stretch pool anymore.
 expect_false ceph osd pool stretch show $TEST_POOL_STRETCH
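
The expect_false/expect_true checks above capture the new argument contract for unset: calls without the crush_rule, size, and min_size are rejected, the full four-argument form succeeds, and show stops reporting stretch values afterwards. A standalone sketch of the same checks, assuming the stock ceph CLI and an existing stretch pool named pool_stretch (names and values are illustrative):

# Sketch only: standalone version of the workunit's expect_false/expect_true
# checks for the new unset arguments; pool and rule names are assumptions.
import subprocess

def ceph_ok(*args):
    """Return True if the ceph command exits 0, False otherwise."""
    return subprocess.run(['ceph', *args], capture_output=True).returncode == 0

# Missing crush_rule/size/min_size should be rejected.
assert not ceph_ok('osd', 'pool', 'stretch', 'unset', 'pool_stretch')
# The full argument list should succeed on a stretch pool.
assert ceph_ok('osd', 'pool', 'stretch', 'unset',
               'pool_stretch', 'replicated_rule', '6', '3')
# After unsetting, the pool is no longer a stretch pool, so show should fail.
assert not ceph_ok('osd', 'pool', 'stretch', 'show', 'pool_stretch')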