From: Rishabh Dave
Date: Fri, 11 Oct 2024 19:08:09 +0000 (+0530)
Subject: qa/cephfs: add tests for mgr/vol config pause_cloning
X-Git-Tag: v20.3.0~370^2~2
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=e7eb36e4393c8401c7cf1aa1f714b52c1ced9ca0;p=ceph.git

qa/cephfs: add tests for mgr/vol config pause_cloning

The mgr/volumes config option pause_cloning allows pausing of the cloner
threads. Add tests for this.

Signed-off-by: Rishabh Dave
---

diff --git a/qa/suites/fs/volumes/tasks/volumes/test/async.yaml b/qa/suites/fs/volumes/tasks/volumes/test/async.yaml
index f82e179bf142..bbb523ac40c9 100644
--- a/qa/suites/fs/volumes/tasks/volumes/test/async.yaml
+++ b/qa/suites/fs/volumes/tasks/volumes/test/async.yaml
@@ -3,3 +3,4 @@ tasks:
       fail_on_skip: false
       modules:
         - tasks.cephfs.test_volumes.TestPausePurging
+        - tasks.cephfs.test_volumes.TestPauseCloning
diff --git a/qa/tasks/cephfs/test_volumes.py b/qa/tasks/cephfs/test_volumes.py
index 09da5a672453..adaf0a06b907 100644
--- a/qa/tasks/cephfs/test_volumes.py
+++ b/qa/tasks/cephfs/test_volumes.py
@@ -4627,6 +4627,140 @@ class TestPausePurging(TestVolumesHelper):
         self._wait_for_trash_empty()
 
 
+class TestPauseCloning(TestVolumesHelper):
+    '''
+    Tests related to config "mgr/volumes/pause_cloning".
+    '''
+
+    CLIENTS_REQUIRED = 1
+    MDSS_REQUIRED = 1
+
+    CONF_OPT = 'mgr/volumes/pause_cloning'
+
+    def setUp(self):
+        super().setUp()
+
+        self.NUM_OF_CLONER_THREADS = 4
+        self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', self.NUM_OF_CLONER_THREADS)
+        self.config_set('mgr', 'mgr/volumes/snapshot_clone_no_wait', 'false')
+
+    def tearDown(self):
+        # every test changes the value of this config option as per its
+        # needs. ensure that the option's default value is restored during
+        # tearDown() so that there's zero chance of it interfering with the
+        # next test.
+        self.config_set('mgr', self.CONF_OPT, False)
+
+        # ensure purge threads have no jobs left over from the previous test
+        # so that the next test doesn't face unnecessary complications.
+        self._wait_for_trash_empty()
+
+        super().tearDown()
+
+    def test_pausing_prevents_new_clones_from_starting(self):
+        v = self.volname
+        sv = 'sv1'
+        ss = 'ss1'
+        c = 'ss1c1'
+
+        self.run_ceph_cmd(f'fs subvolume create {v} {sv} --mode=777')
+        self._do_subvolume_io(sv, None, None, 1, 10)
+        sv_path = self.get_ceph_cmd_stdout(f'fs subvolume getpath {v} '
+                                           f'{sv}')[1:].strip()
+
+        self.run_ceph_cmd(f'fs subvolume snapshot create {v} {sv} {ss}')
+        self.run_ceph_cmd(f'config set mgr {self.CONF_OPT} true')
+        self.run_ceph_cmd(f'fs subvolume snapshot clone {v} {sv} {ss} {c}')
+        time.sleep(10)
+
+        # the clone subvolume sits next to the source subvolume under the
+        # same group directory
+        path = os.path.dirname(os.path.dirname(sv_path))
+        uuid = self.mount_a.get_shell_stdout(f'ls {path}/{c}').strip()
+        # n = num of files, value returned by "wc -l"
+        n = self.mount_a.get_shell_stdout(f'ls {path}/{c}/{uuid} | wc -l')
+        # num of files should be 0; cloning shouldn't have begun
+        self.assertEqual(int(n), 0)
+
+    def test_pausing_halts_ongoing_cloning(self):
+        v = self.volname
+        sv = 'sv1'
+        ss = 'ss1'
+        c = 'ss1c1'
+
+        NUM_OF_FILES = 3
+        self.run_ceph_cmd(f'fs subvolume create {v} {sv} --mode=777')
+        self._do_subvolume_io(sv, None, None, NUM_OF_FILES, 1024)
+        sv_path = self.get_ceph_cmd_stdout(f'fs subvolume getpath {v} '
+                                           f'{sv}')[1:].strip()
+
+        self.run_ceph_cmd(f'fs subvolume snapshot create {v} {sv} {ss}')
+        self.run_ceph_cmd(f'fs subvolume snapshot clone {v} {sv} {ss} {c}')
+        # let cloning begin and make a little progress...
+        time.sleep(2)
+        # ...and now let's pause cloning
+        self.run_ceph_cmd(f'config set mgr {self.CONF_OPT} true')
+
+        path = os.path.dirname(os.path.dirname(sv_path))
+        uuid = self.mount_a.get_shell_stdout(f'ls {path}/{c}').strip()
+        # n = num of files, value returned by "wc -l"
+        n = self.mount_a.get_shell_stdout(f'ls {path}/{c}/{uuid} | wc -l')
+        # num of files should be less than or equal to the num of cloner
+        # threads
+        self.assertLessEqual(int(n), self.NUM_OF_CLONER_THREADS)
+
+    def test_resuming_begins_pending_cloning(self):
+        v = self.volname
+        sv = 'sv1'
+        ss = 'ss1'
+        c = 'ss1c1'
+
+        NUM_OF_FILES = 3
+        self.run_ceph_cmd(f'fs subvolume create {v} {sv} --mode=777')
+        self._do_subvolume_io(sv, None, None, NUM_OF_FILES, 1024)
+        sv_path = self.get_ceph_cmd_stdout(f'fs subvolume getpath {v} '
+                                           f'{sv}')[1:].strip()
+
+        self.run_ceph_cmd(f'fs subvolume snapshot create {v} {sv} {ss}')
+        self.run_ceph_cmd(f'config set mgr {self.CONF_OPT} true')
+        self.run_ceph_cmd(f'fs subvolume snapshot clone {v} {sv} {ss} {c}')
+        time.sleep(2)
+
+        # the clone subvolume sits next to the source subvolume under the
+        # same group directory
+        path = os.path.dirname(os.path.dirname(sv_path))
+        uuid = self.mount_a.get_shell_stdout(f'ls {path}/{c}').strip()
+        # n = num of files, value returned by "wc -l"
+        n = self.mount_a.get_shell_stdout(f'ls {path}/{c}/{uuid} | wc -l')
+        # num of files should be 0; cloning shouldn't have begun
+        self.assertEqual(int(n), 0)
+
+        self.run_ceph_cmd(f'config set mgr {self.CONF_OPT} false')
+        # test that cloning began and reached completion
+        with safe_while(tries=3, sleep=10) as proceed:
+            while proceed():
+                n = self.mount_a.get_shell_stdout(f'ls {path}/{c}/{uuid} | wc -l')
+                if int(n) == NUM_OF_FILES:
+                    break
+
+    def test_resuming_causes_partly_cloned_subvol_to_clone_fully(self):
+        v = self.volname
+        sv = 'sv1'
+        ss = 'ss1'
+        c = 'ss1c1'
+
+        NUM_OF_FILES = 3
+        self.run_ceph_cmd(f'fs subvolume create {v} {sv} --mode=777')
+        self._do_subvolume_io(sv, None, None, NUM_OF_FILES, 1024)
+        sv_path = self.get_ceph_cmd_stdout(f'fs subvolume getpath {v} '
+                                           f'{sv}')[1:].strip()
+
+        self.run_ceph_cmd(f'fs subvolume snapshot create {v} {sv} {ss}')
+        self.run_ceph_cmd(f'fs subvolume snapshot clone {v} {sv} {ss} {c}')
+        time.sleep(2)
+        self.run_ceph_cmd(f'config set mgr {self.CONF_OPT} true')
+        time.sleep(2)
+
+        self.run_ceph_cmd(f'config set mgr {self.CONF_OPT} false')
+        # the clone subvolume sits next to the source subvolume under the
+        # same group directory
+        path = os.path.dirname(os.path.dirname(sv_path))
+        uuid = self.mount_a.get_shell_stdout(f'ls {path}/{c}').strip()
+        # test that cloning was resumed and reached completion
+        with safe_while(tries=3, sleep=10) as proceed:
+            while proceed():
+                n = self.mount_a.get_shell_stdout(f'ls {path}/{c}/{uuid} | wc -l')
+                if int(n) == NUM_OF_FILES:
+                    break
+
+
 class TestSubvolumeGroupSnapshots(TestVolumesHelper):
     """Tests for FS subvolume group snapshot operations."""
     @unittest.skip("skipping subvolumegroup snapshot tests")
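For a quick manual run of the same pause/resume cycle outside of teuthology, the commands the tests issue via run_ceph_cmd() reduce to the following minimal CLI sketch; the volume, subvolume, snapshot and clone names are placeholders, not taken from the patch:

    # pause the cloner threads; a clone issued now stays pending
    ceph config set mgr mgr/volumes/pause_cloning true
    ceph fs subvolume snapshot clone <vol> <subvol> <snap> <clone>
    # resume the cloner threads; pending or partly-done clones run to completion
    ceph config set mgr mgr/volumes/pause_cloning false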