qa: use kwarg for rank parameter
author    Patrick Donnelly <pdonnell@redhat.com>
Thu, 14 Mar 2024 14:37:12 +0000 (10:37 -0400)
committer Patrick Donnelly <pdonnell@redhat.com>
Wed, 20 Mar 2024 14:56:58 +0000 (10:56 -0400)
Otherwise the rank/mds_id argument gets included in the *args list. This is
necessary after commit `qa: simplify calls to (rank|mds)_(tell|asok)`.
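
A minimal, hypothetical sketch of the failure mode (the real helpers live in
qa/tasks/cephfs/filesystem.py; only the presence of *args is taken from that
commit, the helper name and return value below are illustrative stand-ins):

    def rank_asok(*args, rank=0, status=None):
        # With *args, every positional argument becomes part of the command.
        return {"command": list(args), "rank": rank}

    # Old call style: a positionally-passed rank is absorbed into *args and
    # the default rank=0 is used instead.
    assert rank_asok(["flush", "journal"], 1) == {
        "command": [["flush", "journal"], 1], "rank": 0}

    # Call style from this commit: the keyword keeps the rank out of *args.
    assert rank_asok(["flush", "journal"], rank=1) == {
        "command": [["flush", "journal"]], "rank": 1}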

Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
qa/tasks/cephfs/cephfs_test_case.py
qa/tasks/cephfs/filesystem.py
qa/tasks/cephfs/test_damage.py
qa/tasks/cephfs/test_data_scan.py
qa/tasks/cephfs/test_forward_scrub.py
qa/tasks/cephfs/test_multimds_misc.py
qa/tasks/cephfs/test_scrub_checks.py
qa/tasks/cephfs/test_strays.py

diff --git a/qa/tasks/cephfs/cephfs_test_case.py b/qa/tasks/cephfs/cephfs_test_case.py
index 7334a597217353d3e0f41d79dc6c4a7b1b45e959..6f46bb7734e9cd97ea790eafbc986ad1f5d971c4 100644
--- a/qa/tasks/cephfs/cephfs_test_case.py
+++ b/qa/tasks/cephfs/cephfs_test_case.py
@@ -194,7 +194,7 @@ class CephFSTestCase(CephTestCase):
         # Load an config settings of interest
         for setting in self.LOAD_SETTINGS:
             setattr(self, setting, float(self.fs.mds_asok(
-                ['config', 'get', setting], list(self.mds_cluster.mds_ids)[0]
+                ['config', 'get', setting], mds_id=list(self.mds_cluster.mds_ids)[0]
             )[setting]))
 
         self.configs_set = set()
diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py
index 9a231c65f35441a8ae2e8a58fe79e0150a4ffd37..0e57973b612482e3b4875e87ad1f97da70d31f3f 100644
--- a/qa/tasks/cephfs/filesystem.py
+++ b/qa/tasks/cephfs/filesystem.py
@@ -604,7 +604,7 @@ class Filesystem(MDSCluster):
 
     def set_config(self, opt, val, rank=0, status=None):
         command = ["config", "set", opt, val]
-        self.rank_asok(command, rank, status=status)
+        self.rank_asok(command, rank=rank, status=status)
 
     def set_allow_multifs(self, yes=True):
         self.set_flag("enable_multiple", yes)
diff --git a/qa/tasks/cephfs/test_damage.py b/qa/tasks/cephfs/test_damage.py
index 339b0e6c05565871c5e8cd06da4daa771b72ae34..ebf6dc03b64905c8324dda56cc2cf492c58fc402 100644
--- a/qa/tasks/cephfs/test_damage.py
+++ b/qa/tasks/cephfs/test_damage.py
@@ -51,7 +51,7 @@ class TestDamage(CephFSTestCase):
         # to avoid waiting through reconnect on every MDS start.
         self.mount_a.umount_wait()
         for mds_name in self.fs.get_active_names():
-            self.fs.mds_asok(["flush", "journal"], mds_name)
+            self.fs.mds_asok(["flush", "journal"], mds_id=mds_name)
 
         self.fs.fail()
 
@@ -387,7 +387,7 @@ class TestDamage(CephFSTestCase):
 
         self.mount_a.umount_wait()
         for mds_name in self.fs.get_active_names():
-            self.fs.mds_asok(["flush", "journal"], mds_name)
+            self.fs.mds_asok(["flush", "journal"], mds_id=mds_name)
 
         self.fs.fail()
 
diff --git a/qa/tasks/cephfs/test_data_scan.py b/qa/tasks/cephfs/test_data_scan.py
index 49c609c78af4109ec0c432f8d2e4590afbf96605..6533ac98a2d686f05bd971ff71336098d89346cd 100644
--- a/qa/tasks/cephfs/test_data_scan.py
+++ b/qa/tasks/cephfs/test_data_scan.py
@@ -549,11 +549,11 @@ class TestDataScan(CephFSTestCase):
 
         # Ensure that one directory is fragmented
         mds_id = self.fs.get_active_names()[0]
-        self.fs.mds_asok(["dirfrag", "split", "/subdir", "0/0", "1"], mds_id)
+        self.fs.mds_asok(["dirfrag", "split", "/subdir", "0/0", "1"], mds_id=mds_id)
 
         # Flush journal and stop MDS
         self.mount_a.umount_wait()
-        self.fs.mds_asok(["flush", "journal"], mds_id)
+        self.fs.mds_asok(["flush", "journal"], mds_id=mds_id)
         self.fs.fail()
 
         # Pick a dentry and wipe out its key
@@ -596,8 +596,8 @@ class TestDataScan(CephFSTestCase):
         # Finally, close the loop by checking our injected dentry survives a merge
         mds_id = self.fs.get_active_names()[0]
         self.mount_a.ls("subdir")  # Do an ls to ensure both frags are in cache so the merge will work
-        self.fs.mds_asok(["dirfrag", "merge", "/subdir", "0/0"], mds_id)
-        self.fs.mds_asok(["flush", "journal"], mds_id)
+        self.fs.mds_asok(["dirfrag", "merge", "/subdir", "0/0"], mds_id=mds_id)
+        self.fs.mds_asok(["flush", "journal"], mds_id=mds_id)
         frag_obj_id = "{0:x}.00000000".format(dir_ino)
         keys = self._dirfrag_keys(frag_obj_id)
         self.assertListEqual(sorted(keys), sorted(["%s_head" % f for f in file_names]))
@@ -667,7 +667,7 @@ class TestDataScan(CephFSTestCase):
         self.mount_a.run_shell(["ln", "testdir1/file1", "testdir2/link2"])
 
         mds_id = self.fs.get_active_names()[0]
-        self.fs.mds_asok(["flush", "journal"], mds_id)
+        self.fs.mds_asok(["flush", "journal"], mds_id=mds_id)
 
         dirfrag1_keys = self._dirfrag_keys(dirfrag1_oid)
 
@@ -687,7 +687,7 @@ class TestDataScan(CephFSTestCase):
         self.mount_a.run_shell(["touch", "testdir1/file1"])
         self.mount_a.umount_wait()
 
-        self.fs.mds_asok(["flush", "journal"], mds_id)
+        self.fs.mds_asok(["flush", "journal"], mds_id=mds_id)
         self.fs.fail()
 
         # repair linkage errors
@@ -738,8 +738,8 @@ class TestDataScan(CephFSTestCase):
 
         self.mount_a.umount_wait()
 
-        self.fs.mds_asok(["flush", "journal"], mds0_id)
-        self.fs.mds_asok(["flush", "journal"], mds1_id)
+        self.fs.mds_asok(["flush", "journal"], mds_id=mds0_id)
+        self.fs.mds_asok(["flush", "journal"], mds_id=mds1_id)
         self.fs.fail()
 
         self.fs.radosm(["rm", "mds0_inotable"])
@@ -777,7 +777,7 @@ class TestDataScan(CephFSTestCase):
         self.mount_a.umount_wait()
 
         mds0_id = self.fs.get_active_names()[0]
-        self.fs.mds_asok(["flush", "journal"], mds0_id)
+        self.fs.mds_asok(["flush", "journal"], mds_id=mds0_id)
 
         # wait for mds to update removed snaps
         time.sleep(10)
diff --git a/qa/tasks/cephfs/test_forward_scrub.py b/qa/tasks/cephfs/test_forward_scrub.py
index 334a73e1cda8840805ea19618648c91bd9761556..12a0fa6dafaf2ca1986cd12a00916198ea0c51d7 100644
--- a/qa/tasks/cephfs/test_forward_scrub.py
+++ b/qa/tasks/cephfs/test_forward_scrub.py
@@ -325,7 +325,7 @@ class TestForwardScrub(CephFSTestCase):
 
         self.mount_a.umount_wait()
         for mds_name in self.fs.get_active_names():
-            self.fs.mds_asok(["flush", "journal"], mds_name)
+            self.fs.mds_asok(["flush", "journal"], mds_id=mds_name)
 
         self.fs.fail()
 
diff --git a/qa/tasks/cephfs/test_multimds_misc.py b/qa/tasks/cephfs/test_multimds_misc.py
index e0e46fb24c09d19a7885134059fe25fea4f771e1..66dcbceddc96779617a6676334e720d3cb08d555 100644
--- a/qa/tasks/cephfs/test_multimds_misc.py
+++ b/qa/tasks/cephfs/test_multimds_misc.py
@@ -70,14 +70,14 @@ class TestScrub2(CephFSTestCase):
         self._wait_subtrees([('/d1/d2/d3/d4', 1), ('/d1/d2/d3/d4/d5/d6', 2)], status, 2)
 
         for rank in range(3):
-            self.fs.rank_tell(["flush", "journal"], rank)
+            self.fs.rank_tell(["flush", "journal"], rank=rank)
 
     def test_apply_tag(self):
         self._setup_subtrees()
         inos = self._find_path_inos('d1/d2/d3/')
 
         tag = "tag123"
-        out_json = self.fs.rank_tell(["tag", "path", "/d1/d2/d3", tag], 0)
+        out_json = self.fs.rank_tell(["tag", "path", "/d1/d2/d3", tag], rank=0)
         self.assertNotEqual(out_json, None)
         self.assertEqual(out_json["return_code"], 0)
         self.assertEqual(self.fs.wait_until_scrub_complete(tag=out_json["scrub_tag"]), True)
@@ -103,7 +103,7 @@ class TestScrub2(CephFSTestCase):
         self.assertEqual(self.fs.wait_until_scrub_complete(tag=out_json["scrub_tag"]), True)
 
         def _check_damage(mds_rank, inos):
-            all_damage = self.fs.rank_tell(["damage", "ls"], mds_rank)
+            all_damage = self.fs.rank_tell(["damage", "ls"], rank=mds_rank)
             damage = [d for d in all_damage if d['ino'] in inos and d['damage_type'] == "backtrace"]
             return len(damage) >= len(inos)
 
diff --git a/qa/tasks/cephfs/test_scrub_checks.py b/qa/tasks/cephfs/test_scrub_checks.py
index f17a6ceb11537878b193239817b259505007a550..473b8bc11b0851f2fa2048e564022f1fadb59376 100644
--- a/qa/tasks/cephfs/test_scrub_checks.py
+++ b/qa/tasks/cephfs/test_scrub_checks.py
@@ -278,7 +278,7 @@ class TestScrubChecks(CephFSTestCase):
         command = "scrub start {file}".format(file=test_new_file)
 
         def _check_and_clear_damage(ino, dtype):
-            all_damage = self.fs.rank_tell(["damage", "ls"], mds_rank)
+            all_damage = self.fs.rank_tell(["damage", "ls"], rank=mds_rank)
             damage = [d for d in all_damage if d['ino'] == ino and d['damage_type'] == dtype]
             for d in damage:
                 self.run_ceph_cmd(
@@ -308,7 +308,7 @@ class TestScrubChecks(CephFSTestCase):
         mnt.run_shell(["mkdir", f"{client_path}/.snap/snap1-{test_dir}"])
         mnt.run_shell(f"find {client_path}/ -type f -delete")
         mnt.run_shell(["rmdir", f"{client_path}/.snap/snap1-{test_dir}"])
-        perf_dump = fs.rank_tell(["perf", "dump"], 0)
+        perf_dump = fs.rank_tell(["perf", "dump"], rank=0)
         self.assertNotEqual(perf_dump.get('mds_cache').get('num_strays'),
                             0, "mdcache.num_strays is zero")
 
@@ -322,7 +322,7 @@ class TestScrubChecks(CephFSTestCase):
         self.assertEqual(
             fs.wait_until_scrub_complete(tag=out_json["scrub_tag"]), True)
 
-        perf_dump = fs.rank_tell(["perf", "dump"], 0)
+        perf_dump = fs.rank_tell(["perf", "dump"], rank=0)
         self.assertEqual(int(perf_dump.get('mds_cache').get('num_strays')),
                          0, "mdcache.num_strays is non-zero")
 
@@ -390,7 +390,7 @@ class TestScrubChecks(CephFSTestCase):
         log.info("Running command '{command}'".format(command=command))
 
         command_list = command.split()
-        jout = self.fs.rank_tell(command_list, mds_rank)
+        jout = self.fs.rank_tell(command_list, rank=mds_rank)
 
         log.info("command '{command}' returned '{jout}'".format(
                      command=command, jout=jout))
diff --git a/qa/tasks/cephfs/test_strays.py b/qa/tasks/cephfs/test_strays.py
index 11701dc28368e0e9d165d12154975dc93720371a..274cc238c325383a91996c438bf7f7efef40aea1 100644
--- a/qa/tasks/cephfs/test_strays.py
+++ b/qa/tasks/cephfs/test_strays.py
@@ -681,8 +681,8 @@ ln dir_1/original dir_2/linkto
 
         # empty mds cache. otherwise mds reintegrates stray when unlink finishes
         self.mount_a.umount_wait()
-        self.fs.mds_asok(['flush', 'journal'], rank_1_id)
-        self.fs.mds_asok(['cache', 'drop'], rank_1_id)
+        self.fs.mds_asok(['flush', 'journal'], mds_id=rank_1_id)
+        self.fs.mds_asok(['cache', 'drop'], mds_id=rank_1_id)
 
         self.mount_a.mount_wait()
         self.mount_a.run_shell(["rm", "-f", "dir_1/original"])
@@ -726,8 +726,8 @@ touch pin/placeholder
         self.assertEqual(self.get_mdc_stat("strays_enqueued", mds_id=rank_1_id), 0)
 
         # Test loading unlinked dir into cache
-        self.fs.mds_asok(['flush', 'journal'], rank_1_id)
-        self.fs.mds_asok(['cache', 'drop'], rank_1_id)
+        self.fs.mds_asok(['flush', 'journal'], mds_id=rank_1_id)
+        self.fs.mds_asok(['cache', 'drop'], mds_id=rank_1_id)
 
         # Shut down rank 1
         self.fs.set_max_mds(1)