From: Kamoltat
Date: Fri, 11 Feb 2022 19:01:29 +0000 (+0000)
Subject: qa/cephadm: Added workunit test for orch cli mon
X-Git-Tag: v18.0.0~148^2
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=24e48f9425097bd01167bd5f1b3edbbc7e9f9bc1;p=ceph-ci.git

qa/cephadm: Added workunit test for orch cli mon

Added a test where we have 5 MONs on 5 different
hosts and try to reduce the number of MONs from
5 to 3 using the command ``ceph orch apply mon 3``.
Also, increasing the number of MONs from 3 to 5
using the command: ``ceph orch apply mon 5``.
Evaluating the correctness of the commands and
inspecting if there are crashes.

This test was motivated by the bug:
https://tracker.ceph.com/issues/50089

Signed-off-by: Kamoltat
---

diff --git a/qa/suites/orch/cephadm/workunits/task/test_orch_cli_mon.yaml b/qa/suites/orch/cephadm/workunits/task/test_orch_cli_mon.yaml
new file mode 100644
index 00000000000..2a33dc8399c
--- /dev/null
+++ b/qa/suites/orch/cephadm/workunits/task/test_orch_cli_mon.yaml
@@ -0,0 +1,45 @@
+roles:
+- - host.a
+  - osd.0
+  - osd.1
+  - osd.2
+  - mon.a
+  - mgr.a
+  - client.0
+- - host.b
+  - osd.3
+  - osd.4
+  - osd.5
+  - mon.b
+  - mgr.b
+  - client.1
+- - host.c
+  - osd.6
+  - osd.7
+  - osd.8
+  - mon.c
+  - mgr.c
+  - client.2
+- - host.d
+  - osd.9
+  - osd.10
+  - osd.11
+  - mon.d
+  - mgr.d
+  - client.3
+- - host.e
+  - osd.12
+  - osd.13
+  - osd.14
+  - mon.e
+  - mgr.e
+  - client.4
+tasks:
+- install:
+- cephadm:
+- cephadm.shell:
+    host.a:
+      - ceph orch apply mds a
+- cephfs_test_runner:
+    modules:
+      - tasks.cephadm_cases.test_cli_mon
diff --git a/qa/tasks/cephadm_cases/test_cli_mon.py b/qa/tasks/cephadm_cases/test_cli_mon.py
new file mode 100644
index 00000000000..72aee094ef9
--- /dev/null
+++ b/qa/tasks/cephadm_cases/test_cli_mon.py
@@ -0,0 +1,71 @@
+import json
+import logging
+
+from tasks.mgr.mgr_test_case import MgrTestCase
+
+log = logging.getLogger(__name__)
+
+
+class TestCephadmCLI(MgrTestCase):
+
+    APPLY_MON_PERIOD = 60
+
+    def _cmd(self, *args) -> str:
+        assert self.mgr_cluster is not None
+        return self.mgr_cluster.mon_manager.raw_cluster_cmd(*args)
+
+    def _orch_cmd(self, *args) -> str:
+        return self._cmd("orch", *args)
+
+    def setUp(self):
+        super(TestCephadmCLI, self).setUp()
+
+    def _create_and_write_pool(self, pool_name):
+        # Create new pool and write to it, simulating a small workload.
+        self.mgr_cluster.mon_manager.create_pool(pool_name)
+        args = [
+            "rados", "-p", pool_name, "bench", "30", "write", "-t", "16"]
+        self.mgr_cluster.admin_remote.run(args=args, wait=True)
+
+    def _get_quorum_size(self) -> int:
+        # Return the current quorum size, logging the parsed
+        # `quorum_status` output for debugging.
+        retstr = self._cmd('quorum_status')
+        status = json.loads(retstr)
+        log.info("test_apply_mon._check_quorum_size: %s", json.dumps(status, indent=2))
+        return len(status['quorum'])
+
+    def _check_no_crashes(self):
+        # Assert that `ceph crash ls` reports no crashes,
+        # logging its raw output for debugging.
+        retstr = self.mgr_cluster.mon_manager.raw_cluster_cmd(
+            'crash', 'ls',
+        )
+        log.info("test_apply_mon._check_no_crashes: %s", retstr)
+        self.assertEqual(0, len(retstr))  # empty output == no crashes
+
+    def test_apply_mon_three(self):
+        # Evaluating the process of reducing the number of
+        # monitors from 5 to 3 and increasing the number of
+        # monitors from 3 to 5, using the ``ceph orch apply mon <num>`` command.
+
+        self.wait_until_equal(self._get_quorum_size, 5,
+            timeout=self.APPLY_MON_PERIOD, period=10)
+
+        self._orch_cmd('apply', 'mon', '3')  # reduce the monitors from 5 -> 3
+
+        self._create_and_write_pool('test_pool1')
+
+        self.wait_until_equal(self._get_quorum_size, 3,
+            timeout=self.APPLY_MON_PERIOD, period=10)
+
+        self._check_no_crashes()
+
+        self._orch_cmd('apply', 'mon', '5')  # increase the monitors from 3 -> 5
+
+        self._create_and_write_pool('test_pool2')
+
+        self.wait_until_equal(self._get_quorum_size, 5,
+            timeout=self.APPLY_MON_PERIOD, period=10)
+
+        self._check_no_crashes()