From 71fd9a34a911745957b8dbc4a904ab37616678b8 Mon Sep 17 00:00:00 2001
From: Patrick Donnelly
Date: Thu, 14 Mar 2024 10:37:12 -0400
Subject: [PATCH] qa: use kwarg for rank parameter

Otherwise it gets included in the *args list. This is necessary after
commit `qa: simplify calls to (rank|mds)_(tell|asok)`.

Signed-off-by: Patrick Donnelly
---
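Note (placed after the --- cut line, so `git am` ignores it): a minimal
sketch of the failure mode this patch fixes, assuming the refactored
helpers now have a signature roughly like
`def rank_tell(self, *args, rank=0, status=None)`. The names below are
illustrative, not the actual filesystem.py code:

    # Assumed post-refactor shape: the command words are gathered by
    # *args, so the rank must be passed by keyword.
    def rank_tell(*args, rank=0):
        print("args:", args, "rank:", rank)

    # Positional rank is silently swallowed into *args and the default
    # rank (0) is used instead of the one the caller intended:
    rank_tell(["flush", "journal"], 1)
    # args: (['flush', 'journal'], 1) rank: 0

    # Keyword rank reaches the intended parameter:
    rank_tell(["flush", "journal"], rank=1)
    # args: (['flush', 'journal'],) rank: 1
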
 qa/tasks/cephfs/cephfs_test_case.py   |  2 +-
 qa/tasks/cephfs/filesystem.py         |  2 +-
 qa/tasks/cephfs/test_damage.py        |  4 ++--
 qa/tasks/cephfs/test_data_scan.py     | 18 +++++++++---------
 qa/tasks/cephfs/test_forward_scrub.py |  2 +-
 qa/tasks/cephfs/test_multimds_misc.py |  6 +++---
 qa/tasks/cephfs/test_scrub_checks.py  |  8 ++++----
 qa/tasks/cephfs/test_strays.py        |  8 ++++----
 8 files changed, 25 insertions(+), 25 deletions(-)

diff --git a/qa/tasks/cephfs/cephfs_test_case.py b/qa/tasks/cephfs/cephfs_test_case.py
index 7334a59721735..6f46bb7734e9c 100644
--- a/qa/tasks/cephfs/cephfs_test_case.py
+++ b/qa/tasks/cephfs/cephfs_test_case.py
@@ -194,7 +194,7 @@ class CephFSTestCase(CephTestCase):
         # Load an config settings of interest
         for setting in self.LOAD_SETTINGS:
             setattr(self, setting, float(self.fs.mds_asok(
-                ['config', 'get', setting], list(self.mds_cluster.mds_ids)[0]
+                ['config', 'get', setting], mds_id=list(self.mds_cluster.mds_ids)[0]
             )[setting]))
 
         self.configs_set = set()
diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py
index 9a231c65f3544..0e57973b61248 100644
--- a/qa/tasks/cephfs/filesystem.py
+++ b/qa/tasks/cephfs/filesystem.py
@@ -604,7 +604,7 @@ class Filesystem(MDSCluster):
 
     def set_config(self, opt, val, rank=0, status=None):
         command = ["config", "set", opt, val]
-        self.rank_asok(command, rank, status=status)
+        self.rank_asok(command, rank=rank, status=status)
 
     def set_allow_multifs(self, yes=True):
         self.set_flag("enable_multiple", yes)
diff --git a/qa/tasks/cephfs/test_damage.py b/qa/tasks/cephfs/test_damage.py
index 339b0e6c05565..ebf6dc03b6490 100644
--- a/qa/tasks/cephfs/test_damage.py
+++ b/qa/tasks/cephfs/test_damage.py
@@ -51,7 +51,7 @@ class TestDamage(CephFSTestCase):
         # to avoid waiting through reconnect on every MDS start.
         self.mount_a.umount_wait()
         for mds_name in self.fs.get_active_names():
-            self.fs.mds_asok(["flush", "journal"], mds_name)
+            self.fs.mds_asok(["flush", "journal"], mds_id=mds_name)
 
         self.fs.fail()
 
@@ -387,7 +387,7 @@ class TestDamage(CephFSTestCase):
 
         self.mount_a.umount_wait()
         for mds_name in self.fs.get_active_names():
-            self.fs.mds_asok(["flush", "journal"], mds_name)
+            self.fs.mds_asok(["flush", "journal"], mds_id=mds_name)
 
         self.fs.fail()
diff --git a/qa/tasks/cephfs/test_data_scan.py b/qa/tasks/cephfs/test_data_scan.py
index 49c609c78af41..6533ac98a2d68 100644
--- a/qa/tasks/cephfs/test_data_scan.py
+++ b/qa/tasks/cephfs/test_data_scan.py
@@ -549,11 +549,11 @@ class TestDataScan(CephFSTestCase):
 
         # Ensure that one directory is fragmented
         mds_id = self.fs.get_active_names()[0]
-        self.fs.mds_asok(["dirfrag", "split", "/subdir", "0/0", "1"], mds_id)
+        self.fs.mds_asok(["dirfrag", "split", "/subdir", "0/0", "1"], mds_id=mds_id)
 
         # Flush journal and stop MDS
         self.mount_a.umount_wait()
-        self.fs.mds_asok(["flush", "journal"], mds_id)
+        self.fs.mds_asok(["flush", "journal"], mds_id=mds_id)
         self.fs.fail()
 
         # Pick a dentry and wipe out its key
@@ -596,8 +596,8 @@ class TestDataScan(CephFSTestCase):
         # Finally, close the loop by checking our injected dentry survives a merge
         mds_id = self.fs.get_active_names()[0]
         self.mount_a.ls("subdir")  # Do an ls to ensure both frags are in cache so the merge will work
-        self.fs.mds_asok(["dirfrag", "merge", "/subdir", "0/0"], mds_id)
-        self.fs.mds_asok(["flush", "journal"], mds_id)
+        self.fs.mds_asok(["dirfrag", "merge", "/subdir", "0/0"], mds_id=mds_id)
+        self.fs.mds_asok(["flush", "journal"], mds_id=mds_id)
         frag_obj_id = "{0:x}.00000000".format(dir_ino)
         keys = self._dirfrag_keys(frag_obj_id)
         self.assertListEqual(sorted(keys), sorted(["%s_head" % f for f in file_names]))
@@ -667,7 +667,7 @@ class TestDataScan(CephFSTestCase):
         self.mount_a.run_shell(["ln", "testdir1/file1", "testdir2/link2"])
 
         mds_id = self.fs.get_active_names()[0]
-        self.fs.mds_asok(["flush", "journal"], mds_id)
+        self.fs.mds_asok(["flush", "journal"], mds_id=mds_id)
 
         dirfrag1_keys = self._dirfrag_keys(dirfrag1_oid)
 
@@ -687,7 +687,7 @@ class TestDataScan(CephFSTestCase):
         self.mount_a.run_shell(["touch", "testdir1/file1"])
         self.mount_a.umount_wait()
 
-        self.fs.mds_asok(["flush", "journal"], mds_id)
+        self.fs.mds_asok(["flush", "journal"], mds_id=mds_id)
         self.fs.fail()
 
         # repair linkage errors
@@ -738,8 +738,8 @@ class TestDataScan(CephFSTestCase):
 
         self.mount_a.umount_wait()
 
-        self.fs.mds_asok(["flush", "journal"], mds0_id)
-        self.fs.mds_asok(["flush", "journal"], mds1_id)
+        self.fs.mds_asok(["flush", "journal"], mds_id=mds0_id)
+        self.fs.mds_asok(["flush", "journal"], mds_id=mds1_id)
         self.fs.fail()
 
         self.fs.radosm(["rm", "mds0_inotable"])
@@ -777,7 +777,7 @@ class TestDataScan(CephFSTestCase):
         self.mount_a.umount_wait()
 
         mds0_id = self.fs.get_active_names()[0]
-        self.fs.mds_asok(["flush", "journal"], mds0_id)
+        self.fs.mds_asok(["flush", "journal"], mds_id=mds0_id)
 
         # wait for mds to update removed snaps
         time.sleep(10)
diff --git a/qa/tasks/cephfs/test_forward_scrub.py b/qa/tasks/cephfs/test_forward_scrub.py
index 334a73e1cda88..12a0fa6dafaf2 100644
--- a/qa/tasks/cephfs/test_forward_scrub.py
+++ b/qa/tasks/cephfs/test_forward_scrub.py
@@ -325,7 +325,7 @@ class TestForwardScrub(CephFSTestCase):
         self.mount_a.umount_wait()
 
         for mds_name in self.fs.get_active_names():
-            self.fs.mds_asok(["flush", "journal"], mds_name)
+            self.fs.mds_asok(["flush", "journal"], mds_id=mds_name)
 
         self.fs.fail()
diff --git a/qa/tasks/cephfs/test_multimds_misc.py b/qa/tasks/cephfs/test_multimds_misc.py
index e0e46fb24c09d..66dcbceddc967 100644
--- a/qa/tasks/cephfs/test_multimds_misc.py
+++ b/qa/tasks/cephfs/test_multimds_misc.py
@@ -70,14 +70,14 @@ class TestScrub2(CephFSTestCase):
         self._wait_subtrees([('/d1/d2/d3/d4', 1), ('/d1/d2/d3/d4/d5/d6', 2)], status, 2)
 
         for rank in range(3):
-            self.fs.rank_tell(["flush", "journal"], rank)
+            self.fs.rank_tell(["flush", "journal"], rank=rank)
 
     def test_apply_tag(self):
         self._setup_subtrees()
         inos = self._find_path_inos('d1/d2/d3/')
 
         tag = "tag123"
-        out_json = self.fs.rank_tell(["tag", "path", "/d1/d2/d3", tag], 0)
+        out_json = self.fs.rank_tell(["tag", "path", "/d1/d2/d3", tag], rank=0)
         self.assertNotEqual(out_json, None)
         self.assertEqual(out_json["return_code"], 0)
         self.assertEqual(self.fs.wait_until_scrub_complete(tag=out_json["scrub_tag"]), True)
@@ -103,7 +103,7 @@ class TestScrub2(CephFSTestCase):
         self.assertEqual(self.fs.wait_until_scrub_complete(tag=out_json["scrub_tag"]), True)
 
         def _check_damage(mds_rank, inos):
-            all_damage = self.fs.rank_tell(["damage", "ls"], mds_rank)
+            all_damage = self.fs.rank_tell(["damage", "ls"], rank=mds_rank)
             damage = [d for d in all_damage if d['ino'] in inos and d['damage_type'] == "backtrace"]
             return len(damage) >= len(inos)
 
diff --git a/qa/tasks/cephfs/test_scrub_checks.py b/qa/tasks/cephfs/test_scrub_checks.py
index f17a6ceb11537..473b8bc11b085 100644
--- a/qa/tasks/cephfs/test_scrub_checks.py
+++ b/qa/tasks/cephfs/test_scrub_checks.py
@@ -278,7 +278,7 @@ class TestScrubChecks(CephFSTestCase):
         command = "scrub start {file}".format(file=test_new_file)
 
         def _check_and_clear_damage(ino, dtype):
-            all_damage = self.fs.rank_tell(["damage", "ls"], mds_rank)
+            all_damage = self.fs.rank_tell(["damage", "ls"], rank=mds_rank)
             damage = [d for d in all_damage if d['ino'] == ino and d['damage_type'] == dtype]
             for d in damage:
                 self.run_ceph_cmd(
@@ -308,7 +308,7 @@ class TestScrubChecks(CephFSTestCase):
         mnt.run_shell(["mkdir", f"{client_path}/.snap/snap1-{test_dir}"])
         mnt.run_shell(f"find {client_path}/ -type f -delete")
         mnt.run_shell(["rmdir", f"{client_path}/.snap/snap1-{test_dir}"])
-        perf_dump = fs.rank_tell(["perf", "dump"], 0)
+        perf_dump = fs.rank_tell(["perf", "dump"], rank=0)
         self.assertNotEqual(perf_dump.get('mds_cache').get('num_strays'), 0,
                             "mdcache.num_strays is zero")
 
@@ -322,7 +322,7 @@ class TestScrubChecks(CephFSTestCase):
         self.assertEqual(
             fs.wait_until_scrub_complete(tag=out_json["scrub_tag"]), True)
 
-        perf_dump = fs.rank_tell(["perf", "dump"], 0)
+        perf_dump = fs.rank_tell(["perf", "dump"], rank=0)
         self.assertEqual(int(perf_dump.get('mds_cache').get('num_strays')), 0,
                          "mdcache.num_strays is non-zero")
 
@@ -390,7 +390,7 @@ class TestScrubChecks(CephFSTestCase):
         log.info("Running command '{command}'".format(command=command))
 
         command_list = command.split()
-        jout = self.fs.rank_tell(command_list, mds_rank)
+        jout = self.fs.rank_tell(command_list, rank=mds_rank)
 
         log.info("command '{command}' returned '{jout}'".format(
             command=command, jout=jout))
diff --git a/qa/tasks/cephfs/test_strays.py b/qa/tasks/cephfs/test_strays.py
index 11701dc28368e..274cc238c3253 100644
--- a/qa/tasks/cephfs/test_strays.py
+++ b/qa/tasks/cephfs/test_strays.py
@@ -681,8 +681,8 @@ ln dir_1/original dir_2/linkto
         # empty mds cache. otherwise mds reintegrates stray when unlink finishes
         self.mount_a.umount_wait()
-        self.fs.mds_asok(['flush', 'journal'], rank_1_id)
-        self.fs.mds_asok(['cache', 'drop'], rank_1_id)
+        self.fs.mds_asok(['flush', 'journal'], mds_id=rank_1_id)
+        self.fs.mds_asok(['cache', 'drop'], mds_id=rank_1_id)
 
         self.mount_a.mount_wait()
         self.mount_a.run_shell(["rm", "-f", "dir_1/original"])
 
@@ -726,8 +726,8 @@ touch pin/placeholder
         self.assertEqual(self.get_mdc_stat("strays_enqueued", mds_id=rank_1_id), 0)
 
         # Test loading unlinked dir into cache
-        self.fs.mds_asok(['flush', 'journal'], rank_1_id)
-        self.fs.mds_asok(['cache', 'drop'], rank_1_id)
+        self.fs.mds_asok(['flush', 'journal'], mds_id=rank_1_id)
+        self.fs.mds_asok(['cache', 'drop'], mds_id=rank_1_id)
 
         # Shut down rank 1
         self.fs.set_max_mds(1)
-- 
2.39.5