qa/cephfs: add and use run_ceph_cmd()
author    Rishabh Dave <ridave@redhat.com>
          Thu, 16 Mar 2023 09:41:08 +0000 (15:11 +0530)
committer Rishabh Dave <ridave@redhat.com>
          Wed, 13 Mar 2024 13:42:30 +0000 (19:12 +0530)
Instead of writing something as long as
"self.mds_cluster.mon_manager.run_cluster_cmd()" to execute a command,
let's add a helper method to class CephFSTestCase and use it instead.

With this, running a command becomes as simple as "self.run_ceph_cmd()".
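
As a rough, self-contained sketch of the call styles the new helper accepts
(the FakeMonManager stub below only stands in for the real teuthology
mon_manager so the snippet can run on its own; it is not part of this change):

    class FakeMonManager:
        def run_cluster_cmd(self, **kwargs):
            # The real CephManager runs `ceph <args>` on a remote host and
            # returns a process object; here we just echo what was requested.
            print("ceph", kwargs.get('args'))
            class Proc:
                exitstatus = 0
            return Proc()

    class RunCephCmd:
        def run_ceph_cmd(self, *args, **kwargs):
            # Accept positional arguments or an explicit args= keyword; a
            # single positional (e.g. one command string) is passed through
            # unchanged, multiple positionals are kept as a tuple.
            if kwargs.get('args') is None and args:
                if len(args) == 1:
                    args = args[0]
                kwargs['args'] = args
            return self.mon_manager.run_cluster_cmd(**kwargs)

        def get_ceph_cmd_result(self, *args, **kwargs):
            if kwargs.get('args') is None and args:
                if len(args) == 1:
                    args = args[0]
                kwargs['args'] = args
            return self.run_ceph_cmd(**kwargs).exitstatus

    class Demo(RunCephCmd):
        mon_manager = FakeMonManager()

    t = Demo()
    t.run_ceph_cmd("osd blocklist clear")                  # one command string
    t.run_ceph_cmd("osd", "blocklist", "rm", "1.2.3.4")    # separate arguments
    t.run_ceph_cmd(args=["fs", "ls"], check_status=False)  # explicit args= kwarg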

Signed-off-by: Rishabh Dave <ridave@redhat.com>
(cherry picked from commit f8f2154e54e202996b24904162da6022081e9d93)

Conflicts:
qa/tasks/cephfs/test_damage.py
The conflict arose because this file on the quincy branch differs
slightly from the main branch version at the time the cherry-picked
commit was merged.

qa/tasks/cephfs/cephfs_test_case.py
qa/tasks/cephfs/test_admin.py
qa/tasks/cephfs/test_damage.py
qa/tasks/cephfs/test_multifs_auth.py
qa/tasks/cephfs/test_nfs.py
qa/tasks/cephfs/test_volumes.py

qa/tasks/cephfs/cephfs_test_case.py
index 240c9e224f4a1638bc1b277ddd83da41057e0ee2..80797be421382e17b8f143a41f7f811c0f4680c8 100644 (file)
@@ -3,8 +3,6 @@ import logging
 import os
 import re
 
-from shlex import split as shlex_split
-
 from tasks.ceph_test_case import CephTestCase
 
 from teuthology import contextutil
@@ -57,12 +55,19 @@ class MountDetails():
 
 class RunCephCmd:
 
+    def run_ceph_cmd(self, *args, **kwargs):
+        if kwargs.get('args') is None and args:
+            if len(args) == 1:
+                args = args[0]
+            kwargs['args'] = args
+        return self.mon_manager.run_cluster_cmd(**kwargs)
+
     def get_ceph_cmd_result(self, *args, **kwargs):
         if kwargs.get('args') is None and args:
             if len(args) == 1:
                 args = args[0]
             kwargs['args'] = args
-        return self.mon_manager.run_cluster_cmd(**kwargs).exitstatus
+        return self.run_ceph_cmd(**kwargs).exitstatus
 
 
 class CephFSTestCase(CephTestCase, RunCephCmd):
@@ -106,7 +111,7 @@ class CephFSTestCase(CephTestCase, RunCephCmd):
         # In case anything is in the OSD blocklist list, clear it out.  This is to avoid
         # the OSD map changing in the background (due to blocklist expiry) while tests run.
         try:
-            self.mds_cluster.mon_manager.run_cluster_cmd(args="osd blocklist clear")
+            self.run_ceph_cmd("osd blocklist clear")
         except CommandFailedError:
             # Fallback for older Ceph cluster
             try:
@@ -114,14 +119,14 @@ class CephFSTestCase(CephTestCase, RunCephCmd):
                                       "dump", "--format=json-pretty"))['blocklist']
                 log.info(f"Removing {len(blocklist)} blocklist entries")
                 for addr, blocklisted_at in blocklist.items():
-                    self.mds_cluster.mon_manager.raw_cluster_cmd("osd", "blocklist", "rm", addr)
+                    self.run_ceph_cmd("osd", "blocklist", "rm", addr)
             except KeyError:
                 # Fallback for more older Ceph clusters, who will use 'blacklist' instead.
                 blacklist = json.loads(self.mds_cluster.mon_manager.raw_cluster_cmd("osd",
                                       "dump", "--format=json-pretty"))['blacklist']
                 log.info(f"Removing {len(blacklist)} blacklist entries")
                 for addr, blocklisted_at in blacklist.items():
-                    self.mds_cluster.mon_manager.raw_cluster_cmd("osd", "blacklist", "rm", addr)
+                    self.run_ceph_cmd("osd", "blacklist", "rm", addr)
 
     def _init_mon_manager(self):
         # if vstart_runner.py has invoked this code
@@ -183,7 +188,7 @@ class CephFSTestCase(CephTestCase, RunCephCmd):
         for entry in self.auth_list():
             ent_type, ent_id = entry['entity'].split(".")
             if ent_type == "client" and ent_id not in client_mount_ids and not (ent_id == "admin" or ent_id[:6] == 'mirror'):
-                self.mds_cluster.mon_manager.raw_cluster_cmd("auth", "del", entry['entity'])
+                self.run_ceph_cmd("auth", "del", entry['entity'])
 
         if self.REQUIRE_FILESYSTEM:
             self.fs = self.mds_cluster.newfs(create=True)
@@ -211,9 +216,8 @@ class CephFSTestCase(CephTestCase, RunCephCmd):
         if self.REQUIRE_BACKUP_FILESYSTEM:
             if not self.REQUIRE_FILESYSTEM:
                 self.skipTest("backup filesystem requires a primary filesystem as well")
-            self.fs.mon_manager.raw_cluster_cmd('fs', 'flag', 'set',
-                                                'enable_multiple', 'true',
-                                                '--yes-i-really-mean-it')
+            self.run_ceph_cmd('fs', 'flag', 'set', 'enable_multiple', 'true',
+                              '--yes-i-really-mean-it')
             self.backup_fs = self.mds_cluster.newfs(name="backup_fs")
             self.backup_fs.wait_for_daemons()
 
@@ -434,11 +438,6 @@ class CephFSTestCase(CephTestCase, RunCephCmd):
         except contextutil.MaxWhileTries as e:
             raise RuntimeError("rank {0} failed to reach desired subtree state".format(rank)) from e
 
-    def run_cluster_cmd(self, cmd):
-        if isinstance(cmd, str):
-            cmd = shlex_split(cmd)
-        return self.fs.mon_manager.raw_cluster_cmd(*cmd)
-
     def create_client(self, client_id, moncap=None, osdcap=None, mdscap=None):
         if not (moncap or osdcap or mdscap):
             if self.fs:
@@ -456,5 +455,5 @@ class CephFSTestCase(CephTestCase, RunCephCmd):
         if mdscap:
             cmd += ['mds', mdscap]
 
-        self.run_cluster_cmd(cmd)
-        return self.run_cluster_cmd(f'auth get {self.client_name}')
+        self.run_ceph_cmd(*cmd)
+        return self.run_ceph_cmd(f'auth get {self.client_name}')
qa/tasks/cephfs/test_admin.py
index baf9873b497510ec3c5bdeb79f886c65ce0f8aef..6a4794854ec8c7650279f9c8fffefc1973ba0479 100644 (file)
@@ -158,8 +158,8 @@ class TestFsNew(TestAdminCommands):
 
         # test that fsname not with "goodchars" fails
         args = ['fs', 'new', badname, metapoolname, datapoolname]
-        proc = self.fs.mon_manager.run_cluster_cmd(args=args,stderr=StringIO(),
-                                                   check_status=False)
+        proc = self.run_ceph_cmd(args=args, stderr=StringIO(),
+                                 check_status=False)
         self.assertIn('invalid chars', proc.stderr.getvalue().lower())
 
         self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'rm', metapoolname,
@@ -255,8 +255,8 @@ class TestFsNew(TestAdminCommands):
         keys = ['metadata', 'data']
         pool_names = [fs_name+'-'+key for key in keys]
         for p in pool_names:
-            self.run_cluster_cmd(f'osd pool create {p}')
-        self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid  {fscid} --force')
+            self.run_ceph_cmd(f'osd pool create {p}')
+        self.run_ceph_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid  {fscid} --force')
         self.fs.status().get_fsmap(fscid)
         for i in range(2):
             self.check_pool_application_metadata_key_value(pool_names[i], 'cephfs', keys[i], fs_name)
@@ -270,9 +270,9 @@ class TestFsNew(TestAdminCommands):
         keys = ['metadata', 'data']
         pool_names = [fs_name+'-'+key for key in keys]
         for p in pool_names:
-            self.run_cluster_cmd(f'osd pool create {p}')
-        self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid  {fscid} --force')
-        self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid  {fscid} --force')
+            self.run_ceph_cmd(f'osd pool create {p}')
+        self.run_ceph_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid  {fscid} --force')
+        self.run_ceph_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid  {fscid} --force')
         self.fs.status().get_fsmap(fscid)
 
     def test_fs_new_with_specific_id_fails_without_force_flag(self):
@@ -284,9 +284,9 @@ class TestFsNew(TestAdminCommands):
         keys = ['metadata', 'data']
         pool_names = [fs_name+'-'+key for key in keys]
         for p in pool_names:
-            self.run_cluster_cmd(f'osd pool create {p}')
+            self.run_ceph_cmd(f'osd pool create {p}')
         try:
-            self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid  {fscid}')
+            self.run_ceph_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid  {fscid}')
         except CommandFailedError as ce:
             self.assertEqual(ce.exitstatus, errno.EINVAL,
                 "invalid error code on creating a file system with specifc ID without --force flag")
@@ -303,9 +303,9 @@ class TestFsNew(TestAdminCommands):
         keys = ['metadata', 'data']
         pool_names = [fs_name+'-'+key for key in keys]
         for p in pool_names:
-            self.run_cluster_cmd(f'osd pool create {p}')
+            self.run_ceph_cmd(f'osd pool create {p}')
         try:
-            self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid  {fscid} --force')
+            self.run_ceph_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid  {fscid} --force')
         except CommandFailedError as ce:
             self.assertEqual(ce.exitstatus, errno.EINVAL,
                 "invalid error code on creating a file system with specifc ID that is already in use")
@@ -335,7 +335,7 @@ class TestRenameCommand(TestAdminCommands):
         new_fs_name = 'new_cephfs'
         client_id = 'test_new_cephfs'
 
-        self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
+        self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
 
         # authorize a cephx ID access to the renamed file system.
         # use the ID to write to the file system.
@@ -356,7 +356,7 @@ class TestRenameCommand(TestAdminCommands):
 
         # cleanup
         self.mount_a.umount_wait()
-        self.run_cluster_cmd(f'auth rm client.{client_id}')
+        self.run_ceph_cmd(f'auth rm client.{client_id}')
 
     def test_fs_rename_idempotency(self):
         """
@@ -368,8 +368,8 @@ class TestRenameCommand(TestAdminCommands):
         orig_fs_name = self.fs.name
         new_fs_name = 'new_cephfs'
 
-        self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
-        self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
+        self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
+        self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
 
         # original file system name does not appear in `fs ls` command
         self.assertFalse(self.fs.exists())
@@ -388,10 +388,10 @@ class TestRenameCommand(TestAdminCommands):
         new_fs_name = 'new_cephfs'
         data_pool = self.fs.get_data_pool_name()
         metadata_pool = self.fs.get_metadata_pool_name()
-        self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
+        self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
 
         try:
-            self.run_cluster_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool}")
+            self.run_ceph_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool}")
         except CommandFailedError as ce:
             self.assertEqual(ce.exitstatus, errno.EINVAL,
                 "invalid error code on creating a new file system with old "
@@ -401,7 +401,7 @@ class TestRenameCommand(TestAdminCommands):
                       "existing pools to fail.")
 
         try:
-            self.run_cluster_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool} --force")
+            self.run_ceph_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool} --force")
         except CommandFailedError as ce:
             self.assertEqual(ce.exitstatus, errno.EEXIST,
                 "invalid error code on creating a new file system with old "
@@ -411,7 +411,7 @@ class TestRenameCommand(TestAdminCommands):
                       "existing pools, and --force flag to fail.")
 
         try:
-            self.run_cluster_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool} "
+            self.run_ceph_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool} "
                                  "--allow-dangerous-metadata-overlay")
         except CommandFailedError as ce:
             self.assertEqual(ce.exitstatus, errno.EINVAL,
@@ -426,7 +426,7 @@ class TestRenameCommand(TestAdminCommands):
         That renaming a file system without '--yes-i-really-mean-it' flag fails.
         """
         try:
-            self.run_cluster_cmd(f"fs rename {self.fs.name} new_fs")
+            self.run_ceph_cmd(f"fs rename {self.fs.name} new_fs")
         except CommandFailedError as ce:
             self.assertEqual(ce.exitstatus, errno.EPERM,
                 "invalid error code on renaming a file system without the  "
@@ -440,7 +440,7 @@ class TestRenameCommand(TestAdminCommands):
         That renaming a non-existent file system fails.
         """
         try:
-            self.run_cluster_cmd("fs rename non_existent_fs new_fs --yes-i-really-mean-it")
+            self.run_ceph_cmd("fs rename non_existent_fs new_fs --yes-i-really-mean-it")
         except CommandFailedError as ce:
             self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on renaming a non-existent fs")
         else:
@@ -453,7 +453,7 @@ class TestRenameCommand(TestAdminCommands):
         self.fs2 = self.mds_cluster.newfs(name='cephfs2', create=True)
 
         try:
-            self.run_cluster_cmd(f"fs rename {self.fs.name} {self.fs2.name} --yes-i-really-mean-it")
+            self.run_ceph_cmd(f"fs rename {self.fs.name} {self.fs2.name} --yes-i-really-mean-it")
         except CommandFailedError as ce:
             self.assertEqual(ce.exitstatus, errno.EINVAL,
                              "invalid error code on renaming to a fs name that is already in use")
@@ -467,14 +467,14 @@ class TestRenameCommand(TestAdminCommands):
         orig_fs_name = self.fs.name
         new_fs_name = 'new_cephfs'
 
-        self.run_cluster_cmd(f'fs mirror enable {orig_fs_name}')
+        self.run_ceph_cmd(f'fs mirror enable {orig_fs_name}')
         try:
-            self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
+            self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
         except CommandFailedError as ce:
             self.assertEqual(ce.exitstatus, errno.EPERM, "invalid error code on renaming a mirrored file system")
         else:
             self.fail("expected renaming of a mirrored file system to fail")
-        self.run_cluster_cmd(f'fs mirror disable {orig_fs_name}')
+        self.run_ceph_cmd(f'fs mirror disable {orig_fs_name}')
 
 
 class TestDump(CephFSTestCase):
@@ -981,10 +981,10 @@ class TestFsAuthorize(CapsHelper):
         fs_name = "cephfs-_."
         self.fs = self.mds_cluster.newfs(name=fs_name)
         self.fs.wait_for_daemons()
-        self.run_cluster_cmd(f'auth caps client.{self.mount_a.client_id} '
-                             f'mon "allow r" '
-                             f'osd "allow rw pool={self.fs.get_data_pool_name()}" '
-                             f'mds allow')
+        self.run_ceph_cmd(f'auth caps client.{self.mount_a.client_id} '
+                          f'mon "allow r" '
+                          f'osd "allow rw pool={self.fs.get_data_pool_name()}" '
+                          f'mds allow')
         self.mount_a.remount(cephfs_name=self.fs.name)
         perm = 'rw'
         filepaths, filedata, mounts, keyring = self.setup_test_env(perm)
@@ -1024,7 +1024,7 @@ class TestFsAuthorize(CapsHelper):
 
     def tearDown(self):
         self.mount_a.umount_wait()
-        self.run_cluster_cmd(f'auth rm {self.client_name}')
+        self.run_ceph_cmd(f'auth rm {self.client_name}')
 
         super(type(self), self).tearDown()
 
qa/tasks/cephfs/test_damage.py
index bfaa23453b0fa5f1c8dcaa08663f51a39d859758..682b81bd730f929a17d50ab57bd36dd86b5d773e 100644 (file)
@@ -645,7 +645,7 @@ class TestDamage(CephFSTestCase):
         # so now we want to trigger commit but this will crash, so:
         with self.assert_cluster_log("MDS abort because newly corrupt dentry"):
             c = ['--connect-timeout=60', 'tell', f"mds.{fscid}:0", "flush", "journal"]
-            p = self.ceph_cluster.mon_manager.run_cluster_cmd(args=c, wait=False, timeoutcmd=30)
+            p = self.run_ceph_cmd(args=c, wait=False, timeoutcmd=30)
             self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(), timeout=self.fs.beacon_timeout)
         self.config_rm("mds", "mds_inject_journal_corrupt_dentry_first")
         self.fs.rank_freeze(False, rank=0)
qa/tasks/cephfs/test_multifs_auth.py
index 6768d57cfc0afc0f7def2fbcd531fb8d24e7c61a..f289ad8b9ab07ecd2474514f07b1aff0a6786e4c 100644 (file)
@@ -26,15 +26,15 @@ class TestMultiFS(CapsHelper):
 
         # we might have it - the client - if the same cluster was used for a
         # different vstart_runner.py run.
-        self.run_cluster_cmd(f'auth rm {self.client_name}')
+        self.run_ceph_cmd(f'auth rm {self.client_name}')
 
         self.fs1 = self.fs
         self.fs2 = self.mds_cluster.newfs(name='cephfs2', create=True)
 
         # we'll reassign caps to client.1 so that it can operate with cephfs2
-        self.run_cluster_cmd(f'auth caps client.{self.mount_b.client_id} mon '
-                             f'"allow r" osd "allow rw '
-                             f'pool={self.fs2.get_data_pool_name()}" mds allow')
+        self.run_ceph_cmd(f'auth caps client.{self.mount_b.client_id} mon '
+                          f'"allow r" osd "allow rw '
+                          f'pool={self.fs2.get_data_pool_name()}" mds allow')
         self.mount_b.remount(cephfs_name=self.fs2.name)
 
 
qa/tasks/cephfs/test_nfs.py
index c1b4cf5db985531235ca92c35d5d6c428554815b..8813d7f5d9735dcb959c7462e7c7d51daf627194 100644 (file)
@@ -22,10 +22,8 @@ class TestNFS(MgrTestCase):
         return self._cmd("nfs", *args)
 
     def _nfs_complete_cmd(self, cmd):
-        return self.mgr_cluster.mon_manager.run_cluster_cmd(args=f"nfs {cmd}",
-                                                            stdout=StringIO(),
-                                                            stderr=StringIO(),
-                                                            check_status=False)
+        return self.run_ceph_cmd(args=f"nfs {cmd}", stdout=StringIO(),
+                                 stderr=StringIO(), check_status=False)
 
     def _orch_cmd(self, *args):
         return self._cmd("orch", *args)
qa/tasks/cephfs/test_volumes.py
index 68208383ea88e6e9124850c98ee13e6b46dcbf94..fd777e8267adaf73db1a277f43024d2598ad825e 100644 (file)
@@ -5559,9 +5559,9 @@ class TestSubvolumeSnapshots(TestVolumesHelper):
 
         # try to get metadata after removing snapshot.
         # Expecting error ENOENT with error message of snapshot does not exist
-        cmd_ret = self.mgr_cluster.mon_manager.run_cluster_cmd(
-                args=["fs", "subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group],
-                check_status=False, stdout=StringIO(), stderr=StringIO())
+        cmd_ret = self.run_ceph_cmd(
+            args=["fs", "subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group], check_status=False, stdout=StringIO(),
+            stderr=StringIO())
         self.assertEqual(cmd_ret.returncode, errno.ENOENT, "Expecting ENOENT error")
         self.assertIn(f"snapshot '{snapshot}' does not exist", cmd_ret.stderr.getvalue(),
                 f"Expecting message: snapshot '{snapshot}' does not exist ")