From ecce75132a525b7e80ad1a0acda2c2bf5554f66e Mon Sep 17 00:00:00 2001
From: Kamoltat Sirivadhna
Date: Fri, 22 Nov 2024 20:33:17 +0000
Subject: [PATCH] qa: Edit test files to incorporate unset arguments,
 crush_rule, size, min_size

Fixes: https://tracker.ceph.com/issues/68842
Signed-off-by: Kamoltat Sirivadhna
---
 qa/tasks/stretch_cluster.py                | 24 +++++++++++++++++++++-
 qa/tasks/test_netsplit_3az_stretch_pool.py | 11 ++++++++++
 qa/workunits/mon/mon-stretch-pool.sh       |  8 +++++---
 3 files changed, 39 insertions(+), 4 deletions(-)

diff --git a/qa/tasks/stretch_cluster.py b/qa/tasks/stretch_cluster.py
index 48acf0025ee..0038b9d4dcb 100644
--- a/qa/tasks/stretch_cluster.py
+++ b/qa/tasks/stretch_cluster.py
@@ -58,6 +58,7 @@ class TestStretchCluster(MgrTestCase):
     PEERING_CRUSH_BUCKET_TARGET = 3
     PEERING_CRUSH_BUCKET_BARRIER = 'datacenter'
     CRUSH_RULE = 'replicated_rule_custom'
+    DEFAULT_CRUSH_RULE = 'replicated_rule'
     SIZE = 6
     MIN_SIZE = 3
     BUCKET_MAX = SIZE // PEERING_CRUSH_BUCKET_TARGET
@@ -594,6 +595,17 @@ class TestStretchCluster(MgrTestCase):
             success_hold_time=self.SUCCESS_HOLD_TIME
         )
 
+        # Unset the pool back to the default replicated rule; expect PGs to be 100% active+clean
+        self.mgr_cluster.mon_manager.raw_cluster_cmd(
+            'osd', 'pool', 'stretch', 'unset',
+            self.POOL, self.DEFAULT_CRUSH_RULE,
+            str(self.SIZE), str(self.MIN_SIZE))
+        self.wait_until_true_and_hold(
+            lambda: self._pg_all_active_clean(),
+            timeout=self.RECOVERY_PERIOD,
+            success_hold_time=self.SUCCESS_HOLD_TIME
+        )
+
     def test_set_stretch_pool_no_active_pgs(self):
         """
         Test setting a pool to stretch cluster and checks whether
@@ -686,10 +698,20 @@ class TestStretchCluster(MgrTestCase):
             timeout=self.RECOVERY_PERIOD,
             success_hold_time=self.SUCCESS_HOLD_TIME)
 
-        # Bring back osds iin DC2 expects PGs to be 100% active+clean
+        # Bring back osds in DC2; expect PGs to be 100% active+clean
         self._bring_back_all_osds_in_dc('dc2')
         self.wait_until_true_and_hold(
             lambda: self._pg_all_active_clean(),
             timeout=self.RECOVERY_PERIOD,
             success_hold_time=self.SUCCESS_HOLD_TIME
         )
+        # Unset the pool back to the default replicated rule; expect PGs to be 100% active+clean
+        self.mgr_cluster.mon_manager.raw_cluster_cmd(
+            'osd', 'pool', 'stretch', 'unset',
+            self.POOL, self.DEFAULT_CRUSH_RULE,
+            str(self.SIZE), str(self.MIN_SIZE))
+        self.wait_until_true_and_hold(
+            lambda: self._pg_all_active_clean(),
+            timeout=self.RECOVERY_PERIOD,
+            success_hold_time=self.SUCCESS_HOLD_TIME
+        )
\ No newline at end of file
diff --git a/qa/tasks/test_netsplit_3az_stretch_pool.py b/qa/tasks/test_netsplit_3az_stretch_pool.py
index 11f1d8d273e..195eab5fe14 100755
--- a/qa/tasks/test_netsplit_3az_stretch_pool.py
+++ b/qa/tasks/test_netsplit_3az_stretch_pool.py
@@ -21,6 +21,7 @@ class TestNetSplit(CephTestCase):
     PEERING_CRUSH_BUCKET_BARRIER = 'datacenter'
     POOL = 'pool_stretch'
     CRUSH_RULE = 'replicated_rule_custom'
+    DEFAULT_CRUSH_RULE = 'replicated_rule'
     SIZE = 6
     MIN_SIZE = 3
     BUCKET_MAX = SIZE // PEERING_CRUSH_BUCKET_TARGET
@@ -278,4 +279,14 @@ class TestNetSplit(CephTestCase):
             timeout=self.RECOVERY_PERIOD,
             success_hold_time=self.SUCCESS_HOLD_TIME
         )
+        # Unset the pool back to the default replicated rule; expect PGs to be 100% active+clean
+        self.mgr_cluster.mon_manager.raw_cluster_cmd(
+            'osd', 'pool', 'stretch', 'unset',
+            self.POOL, self.DEFAULT_CRUSH_RULE,
+            str(self.SIZE), str(self.MIN_SIZE))
+        self.wait_until_true_and_hold(
+            lambda: self._pg_all_active_clean(),
+            timeout=self.RECOVERY_PERIOD,
+            success_hold_time=self.SUCCESS_HOLD_TIME
+        )
         log.info("test_mon_netsplit passed!")
diff --git a/qa/workunits/mon/mon-stretch-pool.sh b/qa/workunits/mon/mon-stretch-pool.sh
index 2c62082db50..066db81eac0 100755
--- a/qa/workunits/mon/mon-stretch-pool.sh
+++ b/qa/workunits/mon/mon-stretch-pool.sh
@@ -97,10 +97,12 @@ expect_false ceph osd pool stretch set non_exist_pool 2 3 datacenter $TEST_CRUSH
 expect_false ceph osd pool stretch set $TEST_POOL_STRETCH 2 3 non_exist_barrier $TEST_CRUSH_RULE 6 3
 # Non existence crush_rule should return appropriate error
 expect_false ceph osd pool stretch set $TEST_POOL_STRETCH 2 3 datacenter $TEST_CRUSH_RULE 6 3
+# Unsetting a pool with missing arguments should return error
+expect_false ceph osd pool stretch unset $TEST_POOL_STRETCH
 # Unsetting a non existence pool should return error
-expect_false ceph osd pool stretch unset non_exist_pool
+expect_false ceph osd pool stretch unset non_exist_pool replicated_rule 6 3
 # Unsetting a non-stretch pool should return error
-expect_false ceph osd pool stretch unset $TEST_POOL_STRETCH
+expect_false ceph osd pool stretch unset $TEST_POOL_STRETCH replicated_rule 6 3
 
 # Create a custom crush rule
 ceph osd getcrushmap > crushmap
@@ -139,7 +141,7 @@ expect_true ceph osd pool stretch set $TEST_POOL_STRETCH 2 3 datacenter $TEST_CR
 
 expect_true ceph osd pool stretch show $TEST_POOL_STRETCH
 # Unset the stretch pool and expects it to work
-expect_true ceph osd pool stretch unset $TEST_POOL_STRETCH
+expect_true ceph osd pool stretch unset $TEST_POOL_STRETCH replicated_rule 6 3
 # try to show the stretch pool values again, should return error since
 # the pool is not a stretch pool anymore.
 expect_false ceph osd pool stretch show $TEST_POOL_STRETCH
-- 
2.39.5
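
For reference, a minimal sketch of the set/unset flow these tests now exercise,
using the pool and rule names from the patch. This is an illustration, not part
of the commit: it assumes a running cluster that already has a 'datacenter'
CRUSH hierarchy and a custom rule named replicated_rule_custom, as the
workunit creates above.

    #!/usr/bin/env bash
    set -ex

    POOL=pool_stretch
    CUSTOM_RULE=replicated_rule_custom   # stretch rule created by the test
    DEFAULT_RULE=replicated_rule         # rule the pool is restored to
    SIZE=6
    MIN_SIZE=3

    # Make the pool a stretch pool: 2 buckets per PG acting set across
    # 3 'datacenter' buckets, using the custom rule.
    ceph osd pool stretch set $POOL 2 3 datacenter $CUSTOM_RULE $SIZE $MIN_SIZE
    ceph osd pool stretch show $POOL

    # New unset form: crush_rule, size, and min_size are now required,
    # so the pool is restored to explicit, known values.
    ceph osd pool stretch unset $POOL $DEFAULT_RULE $SIZE $MIN_SIZE

    # The pool is no longer a stretch pool, so 'show' should now fail.
    ceph osd pool stretch show $POOL && exit 1 || true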