git.apps.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
qa/cephfs: add and use run_ceph_cmd()
author Rishabh Dave <ridave@redhat.com>
Thu, 16 Mar 2023 09:41:08 +0000 (15:11 +0530)
committer Rishabh Dave <ridave@redhat.com>
Wed, 3 Apr 2024 17:21:44 +0000 (22:51 +0530)
Instead of writing something as long as
"self.mds_cluster.mon_manager.run_cluster_cmd()" every time a command has
to be executed, let's add a helper method to class CephFSTestCase (via the
new RunCephCmd mixin it inherits) and use that everywhere.

With this, running a command becomes as simple as "self.run_ceph_cmd()".
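
For illustration, a minimal before/after sketch taken from one of the call
sites changed in the diff below (both forms appear verbatim in the patch;
nothing here is new API):

    # before: reach through the MDS cluster's mon manager every time
    self.mds_cluster.mon_manager.run_cluster_cmd(args="osd blocklist clear")

    # after: the helper forwards to the same mon_manager method
    self.run_ceph_cmd("osd blocklist clear")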

Signed-off-by: Rishabh Dave <ridave@redhat.com>
(cherry picked from commit f8f2154e54e202996b24904162da6022081e9d93)

Conflicts:
qa/tasks/cephfs/test_damage.py
This file differs slightly because commit
c8f8324ee2fae48e8d3c2bbdbf45cc9ffe46fd4c was merged into main and then
backported after the commit being cherry-picked here had already been
merged into main.

qa/tasks/cephfs/cephfs_test_case.py
qa/tasks/cephfs/test_admin.py
qa/tasks/cephfs/test_multifs_auth.py
qa/tasks/cephfs/test_nfs.py
qa/tasks/cephfs/test_volumes.py

qa/tasks/cephfs/cephfs_test_case.py
index 240c9e224f4a1638bc1b277ddd83da41057e0ee2..80797be421382e17b8f143a41f7f811c0f4680c8 100644 (file)
@@ -3,8 +3,6 @@ import logging
 import os
 import re
 
-from shlex import split as shlex_split
-
 from tasks.ceph_test_case import CephTestCase
 
 from teuthology import contextutil
@@ -57,12 +55,19 @@ class MountDetails():
 
 class RunCephCmd:
 
+    def run_ceph_cmd(self, *args, **kwargs):
+        if kwargs.get('args') is None and args:
+            if len(args) == 1:
+                args = args[0]
+            kwargs['args'] = args
+        return self.mon_manager.run_cluster_cmd(**kwargs)
+
     def get_ceph_cmd_result(self, *args, **kwargs):
         if kwargs.get('args') is None and args:
             if len(args) == 1:
                 args = args[0]
             kwargs['args'] = args
-        return self.mon_manager.run_cluster_cmd(**kwargs).exitstatus
+        return self.run_ceph_cmd(**kwargs).exitstatus
 
 
 class CephFSTestCase(CephTestCase, RunCephCmd):
@@ -106,7 +111,7 @@ class CephFSTestCase(CephTestCase, RunCephCmd):
         # In case anything is in the OSD blocklist list, clear it out.  This is to avoid
         # the OSD map changing in the background (due to blocklist expiry) while tests run.
         try:
-            self.mds_cluster.mon_manager.run_cluster_cmd(args="osd blocklist clear")
+            self.run_ceph_cmd("osd blocklist clear")
         except CommandFailedError:
             # Fallback for older Ceph cluster
             try:
@@ -114,14 +119,14 @@ class CephFSTestCase(CephTestCase, RunCephCmd):
                                       "dump", "--format=json-pretty"))['blocklist']
                 log.info(f"Removing {len(blocklist)} blocklist entries")
                 for addr, blocklisted_at in blocklist.items():
-                    self.mds_cluster.mon_manager.raw_cluster_cmd("osd", "blocklist", "rm", addr)
+                    self.run_ceph_cmd("osd", "blocklist", "rm", addr)
             except KeyError:
                 # Fallback for more older Ceph clusters, who will use 'blacklist' instead.
                 blacklist = json.loads(self.mds_cluster.mon_manager.raw_cluster_cmd("osd",
                                       "dump", "--format=json-pretty"))['blacklist']
                 log.info(f"Removing {len(blacklist)} blacklist entries")
                 for addr, blocklisted_at in blacklist.items():
-                    self.mds_cluster.mon_manager.raw_cluster_cmd("osd", "blacklist", "rm", addr)
+                    self.run_ceph_cmd("osd", "blacklist", "rm", addr)
 
     def _init_mon_manager(self):
         # if vstart_runner.py has invoked this code
@@ -183,7 +188,7 @@ class CephFSTestCase(CephTestCase, RunCephCmd):
         for entry in self.auth_list():
             ent_type, ent_id = entry['entity'].split(".")
             if ent_type == "client" and ent_id not in client_mount_ids and not (ent_id == "admin" or ent_id[:6] == 'mirror'):
-                self.mds_cluster.mon_manager.raw_cluster_cmd("auth", "del", entry['entity'])
+                self.run_ceph_cmd("auth", "del", entry['entity'])
 
         if self.REQUIRE_FILESYSTEM:
             self.fs = self.mds_cluster.newfs(create=True)
@@ -211,9 +216,8 @@ class CephFSTestCase(CephTestCase, RunCephCmd):
         if self.REQUIRE_BACKUP_FILESYSTEM:
             if not self.REQUIRE_FILESYSTEM:
                 self.skipTest("backup filesystem requires a primary filesystem as well")
-            self.fs.mon_manager.raw_cluster_cmd('fs', 'flag', 'set',
-                                                'enable_multiple', 'true',
-                                                '--yes-i-really-mean-it')
+            self.run_ceph_cmd('fs', 'flag', 'set', 'enable_multiple', 'true',
+                              '--yes-i-really-mean-it')
             self.backup_fs = self.mds_cluster.newfs(name="backup_fs")
             self.backup_fs.wait_for_daemons()
 
@@ -434,11 +438,6 @@ class CephFSTestCase(CephTestCase, RunCephCmd):
         except contextutil.MaxWhileTries as e:
             raise RuntimeError("rank {0} failed to reach desired subtree state".format(rank)) from e
 
-    def run_cluster_cmd(self, cmd):
-        if isinstance(cmd, str):
-            cmd = shlex_split(cmd)
-        return self.fs.mon_manager.raw_cluster_cmd(*cmd)
-
     def create_client(self, client_id, moncap=None, osdcap=None, mdscap=None):
         if not (moncap or osdcap or mdscap):
             if self.fs:
@@ -456,5 +455,5 @@ class CephFSTestCase(CephTestCase, RunCephCmd):
         if mdscap:
             cmd += ['mds', mdscap]
 
-        self.run_cluster_cmd(cmd)
-        return self.run_cluster_cmd(f'auth get {self.client_name}')
+        self.run_ceph_cmd(*cmd)
+        return self.run_ceph_cmd(f'auth get {self.client_name}')
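
Note on the helper introduced above: run_ceph_cmd() collects its positional
arguments (or a single list/string argument) into the 'args' keyword and
delegates to mon_manager.run_cluster_cmd(), get_ceph_cmd_result() now returns
that call's exit status, and the old CephFSTestCase.run_cluster_cmd(), which
shlex-split string commands itself, is dropped. A hedged sketch of the call
styles the rest of this diff relies on (assuming, as the f-string call sites
below imply, that mon_manager.run_cluster_cmd() also accepts the whole
command as one string):

    # separate words, collected into a tuple and passed as args
    self.run_ceph_cmd("osd", "blocklist", "rm", addr)

    # the whole command as one string
    self.run_ceph_cmd(f"fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it")

    # explicit keyword form when extra run() options are needed
    self.run_ceph_cmd(args=["fs", "new", badname, metapoolname, datapoolname],
                      stderr=StringIO(), check_status=False)

    # exit status only (no such call site is shown in this diff; illustrative)
    retval = self.get_ceph_cmd_result("fs", "ls")
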
qa/tasks/cephfs/test_admin.py
index 3659b3144a1132229321fd0fe2469f68d18cb0e2..5a794d0776ace7af5f2b3c2bca709d7b4eeb957a 100644 (file)
@@ -271,8 +271,8 @@ class TestFsNew(TestAdminCommands):
 
         # test that fsname not with "goodchars" fails
         args = ['fs', 'new', badname, metapoolname, datapoolname]
-        proc = self.fs.mon_manager.run_cluster_cmd(args=args,stderr=StringIO(),
-                                                   check_status=False)
+        proc = self.run_ceph_cmd(args=args, stderr=StringIO(),
+                                 check_status=False)
         self.assertIn('invalid chars', proc.stderr.getvalue().lower())
 
         self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'rm', metapoolname,
@@ -368,8 +368,8 @@ class TestFsNew(TestAdminCommands):
         keys = ['metadata', 'data']
         pool_names = [fs_name+'-'+key for key in keys]
         for p in pool_names:
-            self.run_cluster_cmd(f'osd pool create {p}')
-        self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid  {fscid} --force')
+            self.run_ceph_cmd(f'osd pool create {p}')
+        self.run_ceph_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid  {fscid} --force')
         self.fs.status().get_fsmap(fscid)
         for i in range(2):
             self.check_pool_application_metadata_key_value(pool_names[i], 'cephfs', keys[i], fs_name)
@@ -383,9 +383,9 @@ class TestFsNew(TestAdminCommands):
         keys = ['metadata', 'data']
         pool_names = [fs_name+'-'+key for key in keys]
         for p in pool_names:
-            self.run_cluster_cmd(f'osd pool create {p}')
-        self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid  {fscid} --force')
-        self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid  {fscid} --force')
+            self.run_ceph_cmd(f'osd pool create {p}')
+        self.run_ceph_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid  {fscid} --force')
+        self.run_ceph_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid  {fscid} --force')
         self.fs.status().get_fsmap(fscid)
 
     def test_fs_new_with_specific_id_fails_without_force_flag(self):
@@ -397,9 +397,9 @@ class TestFsNew(TestAdminCommands):
         keys = ['metadata', 'data']
         pool_names = [fs_name+'-'+key for key in keys]
         for p in pool_names:
-            self.run_cluster_cmd(f'osd pool create {p}')
+            self.run_ceph_cmd(f'osd pool create {p}')
         try:
-            self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid  {fscid}')
+            self.run_ceph_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid  {fscid}')
         except CommandFailedError as ce:
             self.assertEqual(ce.exitstatus, errno.EINVAL,
                 "invalid error code on creating a file system with specifc ID without --force flag")
@@ -416,9 +416,9 @@ class TestFsNew(TestAdminCommands):
         keys = ['metadata', 'data']
         pool_names = [fs_name+'-'+key for key in keys]
         for p in pool_names:
-            self.run_cluster_cmd(f'osd pool create {p}')
+            self.run_ceph_cmd(f'osd pool create {p}')
         try:
-            self.run_cluster_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid  {fscid} --force')
+            self.run_ceph_cmd(f'fs new {fs_name} {pool_names[0]} {pool_names[1]} --fscid  {fscid} --force')
         except CommandFailedError as ce:
             self.assertEqual(ce.exitstatus, errno.EINVAL,
                 "invalid error code on creating a file system with specifc ID that is already in use")
@@ -681,7 +681,7 @@ class TestRenameCommand(TestAdminCommands):
         new_fs_name = 'new_cephfs'
         client_id = 'test_new_cephfs'
 
-        self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
+        self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
 
         # authorize a cephx ID access to the renamed file system.
         # use the ID to write to the file system.
@@ -702,7 +702,7 @@ class TestRenameCommand(TestAdminCommands):
 
         # cleanup
         self.mount_a.umount_wait()
-        self.run_cluster_cmd(f'auth rm client.{client_id}')
+        self.run_ceph_cmd(f'auth rm client.{client_id}')
 
     def test_fs_rename_idempotency(self):
         """
@@ -714,8 +714,8 @@ class TestRenameCommand(TestAdminCommands):
         orig_fs_name = self.fs.name
         new_fs_name = 'new_cephfs'
 
-        self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
-        self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
+        self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
+        self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
 
         # original file system name does not appear in `fs ls` command
         self.assertFalse(self.fs.exists())
@@ -734,10 +734,10 @@ class TestRenameCommand(TestAdminCommands):
         new_fs_name = 'new_cephfs'
         data_pool = self.fs.get_data_pool_name()
         metadata_pool = self.fs.get_metadata_pool_name()
-        self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
+        self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
 
         try:
-            self.run_cluster_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool}")
+            self.run_ceph_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool}")
         except CommandFailedError as ce:
             self.assertEqual(ce.exitstatus, errno.EINVAL,
                 "invalid error code on creating a new file system with old "
@@ -747,7 +747,7 @@ class TestRenameCommand(TestAdminCommands):
                       "existing pools to fail.")
 
         try:
-            self.run_cluster_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool} --force")
+            self.run_ceph_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool} --force")
         except CommandFailedError as ce:
             self.assertEqual(ce.exitstatus, errno.EINVAL,
                 "invalid error code on creating a new file system with old "
@@ -757,7 +757,7 @@ class TestRenameCommand(TestAdminCommands):
                       "existing pools, and --force flag to fail.")
 
         try:
-            self.run_cluster_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool} "
+            self.run_ceph_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool} "
                                  "--allow-dangerous-metadata-overlay")
         except CommandFailedError as ce:
             self.assertEqual(ce.exitstatus, errno.EINVAL,
@@ -772,7 +772,7 @@ class TestRenameCommand(TestAdminCommands):
         That renaming a file system without '--yes-i-really-mean-it' flag fails.
         """
         try:
-            self.run_cluster_cmd(f"fs rename {self.fs.name} new_fs")
+            self.run_ceph_cmd(f"fs rename {self.fs.name} new_fs")
         except CommandFailedError as ce:
             self.assertEqual(ce.exitstatus, errno.EPERM,
                 "invalid error code on renaming a file system without the  "
@@ -786,7 +786,7 @@ class TestRenameCommand(TestAdminCommands):
         That renaming a non-existent file system fails.
         """
         try:
-            self.run_cluster_cmd("fs rename non_existent_fs new_fs --yes-i-really-mean-it")
+            self.run_ceph_cmd("fs rename non_existent_fs new_fs --yes-i-really-mean-it")
         except CommandFailedError as ce:
             self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on renaming a non-existent fs")
         else:
@@ -799,7 +799,7 @@ class TestRenameCommand(TestAdminCommands):
         self.fs2 = self.mds_cluster.newfs(name='cephfs2', create=True)
 
         try:
-            self.run_cluster_cmd(f"fs rename {self.fs.name} {self.fs2.name} --yes-i-really-mean-it")
+            self.run_ceph_cmd(f"fs rename {self.fs.name} {self.fs2.name} --yes-i-really-mean-it")
         except CommandFailedError as ce:
             self.assertEqual(ce.exitstatus, errno.EINVAL,
                              "invalid error code on renaming to a fs name that is already in use")
@@ -813,14 +813,14 @@ class TestRenameCommand(TestAdminCommands):
         orig_fs_name = self.fs.name
         new_fs_name = 'new_cephfs'
 
-        self.run_cluster_cmd(f'fs mirror enable {orig_fs_name}')
+        self.run_ceph_cmd(f'fs mirror enable {orig_fs_name}')
         try:
-            self.run_cluster_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
+            self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
         except CommandFailedError as ce:
             self.assertEqual(ce.exitstatus, errno.EPERM, "invalid error code on renaming a mirrored file system")
         else:
             self.fail("expected renaming of a mirrored file system to fail")
-        self.run_cluster_cmd(f'fs mirror disable {orig_fs_name}')
+        self.run_ceph_cmd(f'fs mirror disable {orig_fs_name}')
 
 
 class TestDump(CephFSTestCase):
@@ -1357,10 +1357,10 @@ class TestFsAuthorize(CephFSTestCase):
         fs_name = "cephfs-_."
         self.fs = self.mds_cluster.newfs(name=fs_name)
         self.fs.wait_for_daemons()
-        self.run_cluster_cmd(f'auth caps client.{self.mount_a.client_id} '
-                             f'mon "allow r" '
-                             f'osd "allow rw pool={self.fs.get_data_pool_name()}" '
-                             f'mds allow')
+        self.run_ceph_cmd(f'auth caps client.{self.mount_a.client_id} '
+                          f'mon "allow r" '
+                          f'osd "allow rw pool={self.fs.get_data_pool_name()}" '
+                          f'mds allow')
         self.mount_a.remount(cephfs_name=self.fs.name)
         PERM = 'rw'
         FS_AUTH_CAPS = (('/', PERM),)
@@ -1401,7 +1401,7 @@ class TestFsAuthorize(CephFSTestCase):
 
     def tearDown(self):
         self.mount_a.umount_wait()
-        self.run_cluster_cmd(f'auth rm {self.client_name}')
+        self.run_ceph_cmd(f'auth rm {self.client_name}')
 
         super(type(self), self).tearDown()
 
qa/tasks/cephfs/test_multifs_auth.py
index 966744a42cfef00d86e62496cc9211f2d416f5c8..592a84164532e4dd976b3b6cd748a2f773a391e0 100644 (file)
@@ -26,15 +26,15 @@ class TestMultiFS(CephFSTestCase):
 
         # we might have it - the client - if the same cluster was used for a
         # different vstart_runner.py run.
-        self.run_cluster_cmd(f'auth rm {self.client_name}')
+        self.run_ceph_cmd(f'auth rm {self.client_name}')
 
         self.fs1 = self.fs
         self.fs2 = self.mds_cluster.newfs(name='cephfs2', create=True)
 
         # we'll reassign caps to client.1 so that it can operate with cephfs2
-        self.run_cluster_cmd(f'auth caps client.{self.mount_b.client_id} mon '
-                             f'"allow r" osd "allow rw '
-                             f'pool={self.fs2.get_data_pool_name()}" mds allow')
+        self.run_ceph_cmd(f'auth caps client.{self.mount_b.client_id} mon '
+                          f'"allow r" osd "allow rw '
+                          f'pool={self.fs2.get_data_pool_name()}" mds allow')
         self.mount_b.remount(cephfs_name=self.fs2.name)
 
 
qa/tasks/cephfs/test_nfs.py
index a43f206e7059e85545da597ba88567d4c1772288..49bd38d9e4fbc9d7b631c5cc5da3c213a4dcba1e 100644 (file)
@@ -22,10 +22,8 @@ class TestNFS(MgrTestCase):
         return self._cmd("nfs", *args)
 
     def _nfs_complete_cmd(self, cmd):
-        return self.mgr_cluster.mon_manager.run_cluster_cmd(args=f"nfs {cmd}",
-                                                            stdout=StringIO(),
-                                                            stderr=StringIO(),
-                                                            check_status=False)
+        return self.run_ceph_cmd(args=f"nfs {cmd}", stdout=StringIO(),
+                                 stderr=StringIO(), check_status=False)
 
     def _orch_cmd(self, *args):
         return self._cmd("orch", *args)
qa/tasks/cephfs/test_volumes.py
index 02705a9abc2264930de9e07e398439261af8c3a3..9046da94945e4f9090fb2526b781ba25a2f03f0b 100644 (file)
@@ -5644,9 +5644,9 @@ class TestSubvolumeSnapshots(TestVolumesHelper):
 
         # try to get metadata after removing snapshot.
         # Expecting error ENOENT with error message of snapshot does not exist
-        cmd_ret = self.mgr_cluster.mon_manager.run_cluster_cmd(
-                args=["fs", "subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group],
-                check_status=False, stdout=StringIO(), stderr=StringIO())
+        cmd_ret = self.run_ceph_cmd(
+            args=["fs", "subvolume", "snapshot", "metadata", "get", self.volname, subvolname, snapshot, key, group], check_status=False, stdout=StringIO(),
+            stderr=StringIO())
         self.assertEqual(cmd_ret.returncode, errno.ENOENT, "Expecting ENOENT error")
         self.assertIn(f"snapshot '{snapshot}' does not exist", cmd_ret.stderr.getvalue(),
                 f"Expecting message: snapshot '{snapshot}' does not exist ")