PEERING_CRUSH_BUCKET_TARGET = 3
PEERING_CRUSH_BUCKET_BARRIER = 'datacenter'
CRUSH_RULE = 'replicated_rule_custom'
+ DEFAULT_CRUSH_RULE = 'replicated_rule'
SIZE = 6
MIN_SIZE = 3
BUCKET_MAX = SIZE // PEERING_CRUSH_BUCKET_TARGET
success_hold_time=self.SUCCESS_HOLD_TIME
)
+ # Unset the pool back to the default replicated rule and expect PGs to be 100% active+clean
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ 'osd', 'pool', 'stretch', 'unset',
+ self.POOL, self.DEFAULT_CRUSH_RULE,
+ str(self.SIZE), str(self.MIN_SIZE))
+ self.wait_until_true_and_hold(
+ lambda: self._pg_all_active_clean(),
+ timeout=self.RECOVERY_PERIOD,
+ success_hold_time=self.SUCCESS_HOLD_TIME
+ )
+
def test_set_stretch_pool_no_active_pgs(self):
"""
Test setting a pool to stretch cluster and checks whether
timeout=self.RECOVERY_PERIOD,
success_hold_time=self.SUCCESS_HOLD_TIME)
- # Bring back osds iin DC2 expects PGs to be 100% active+clean
+ # Bring back osds in DC2 and expect PGs to be 100% active+clean
self._bring_back_all_osds_in_dc('dc2')
self.wait_until_true_and_hold(
lambda: self._pg_all_active_clean(),
timeout=self.RECOVERY_PERIOD,
success_hold_time=self.SUCCESS_HOLD_TIME
)
+ # Unset the pool back to the default replicated rule and expect PGs to be 100% active+clean
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ 'osd', 'pool', 'stretch', 'unset',
+ self.POOL, self.DEFAULT_CRUSH_RULE,
+ str(self.SIZE), str(self.MIN_SIZE))
+ self.wait_until_true_and_hold(
+ lambda: self._pg_all_active_clean(),
+ timeout=self.RECOVERY_PERIOD,
+ success_hold_time=self.SUCCESS_HOLD_TIME
+ )
\ No newline at end of file
PEERING_CRUSH_BUCKET_BARRIER = 'datacenter'
POOL = 'pool_stretch'
CRUSH_RULE = 'replicated_rule_custom'
+ DEFAULT_CRUSH_RULE = 'replicated_rule'
SIZE = 6
MIN_SIZE = 3
BUCKET_MAX = SIZE // PEERING_CRUSH_BUCKET_TARGET
timeout=self.RECOVERY_PERIOD,
success_hold_time=self.SUCCESS_HOLD_TIME
)
+ # Unset the pool back to the default replicated rule and expect PGs to be 100% active+clean
+ self.mgr_cluster.mon_manager.raw_cluster_cmd(
+ 'osd', 'pool', 'stretch', 'unset',
+ self.POOL, self.DEFAULT_CRUSH_RULE,
+ str(self.SIZE), str(self.MIN_SIZE))
+ self.wait_until_true_and_hold(
+ lambda: self._pg_all_active_clean(),
+ timeout=self.RECOVERY_PERIOD,
+ success_hold_time=self.SUCCESS_HOLD_TIME
+ )
log.info("test_mon_netsplit passed!")
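The unset-and-wait sequence added above is repeated verbatim in each test. A minimal sketch of a shared helper that the copies could call instead (hypothetical name `_unset_stretch_pool`; it assumes only the class constants and wait helpers already used in these tests):

    def _unset_stretch_pool(self):
        # Sketch only (not part of this patch): revert the pool to the
        # default replicated rule and wait until all PGs are active+clean.
        self.mgr_cluster.mon_manager.raw_cluster_cmd(
            'osd', 'pool', 'stretch', 'unset',
            self.POOL, self.DEFAULT_CRUSH_RULE,
            str(self.SIZE), str(self.MIN_SIZE))
        self.wait_until_true_and_hold(
            lambda: self._pg_all_active_clean(),
            timeout=self.RECOVERY_PERIOD,
            success_hold_time=self.SUCCESS_HOLD_TIME
        )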
expect_false ceph osd pool stretch set $TEST_POOL_STRETCH 2 3 non_exist_barrier $TEST_CRUSH_RULE 6 3
# Non-existent crush_rule should return appropriate error
expect_false ceph osd pool stretch set $TEST_POOL_STRETCH 2 3 datacenter $TEST_CRUSH_RULE 6 3
+# Unsetting a pool with missing arguments should return error
+expect_false ceph osd pool stretch unset $TEST_POOL_STRETCH
# Unsetting a non-existent pool should return error
-expect_false ceph osd pool stretch unset non_exist_pool
+expect_false ceph osd pool stretch unset non_exist_pool replicated_rule 6 3
# Unsetting a non-stretch pool should return error
-expect_false ceph osd pool stretch unset $TEST_POOL_STRETCH
+expect_false ceph osd pool stretch unset $TEST_POOL_STRETCH replicated_rule 6 3
# Create a custom crush rule
ceph osd getcrushmap > crushmap
expect_true ceph osd pool stretch show $TEST_POOL_STRETCH
# Unset the stretch pool and expects it to work
-expect_true ceph osd pool stretch unset $TEST_POOL_STRETCH
+expect_true ceph osd pool stretch unset $TEST_POOL_STRETCH replicated_rule 6 3
# try to show the stretch pool values again, should return error since
# the pool is not a stretch pool anymore.
expect_false ceph osd pool stretch show $TEST_POOL_STRETCH
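A possible follow-up check (a sketch, not part of this change; it assumes the workunit runs under `set -e`, and uses the standard `ceph osd pool get` query) to confirm the unset actually restored the default rule, size, and min_size:

# Sketch: after unset, the pool should report the default rule and sizes again
ceph osd pool get $TEST_POOL_STRETCH crush_rule | grep replicated_rule
ceph osd pool get $TEST_POOL_STRETCH size | grep '^size: 6'
ceph osd pool get $TEST_POOL_STRETCH min_size | grep '^min_size: 3'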