qa/cephfs: when cmd output is not needed call run_ceph_cmd()
author     Rishabh Dave <ridave@redhat.com>
           Thu, 16 Mar 2023 16:28:22 +0000 (21:58 +0530)
committer  Rishabh Dave <ridave@redhat.com>
           Wed, 13 Mar 2024 13:42:30 +0000 (19:12 +0530)
instead of get_ceph_cmd_stdout().

Signed-off-by: Rishabh Dave <ridave@redhat.com>
(cherry picked from commit c7c38ba558e089c51d71e0b0713b3e0a368b9699)

Conflicts:
qa/tasks/cephfs/test_admin.py
The commit on the main branch contained more changes, since test_admin.py
there has many more test functions.
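
For reference, here is a rough sketch of the helper split this cleanup relies
on, assuming wrappers along the lines of the cephfs qa test-case helpers. The
method names mirror the ones used in the hunks below, but the bodies are
illustrative only (the real helpers go through the teuthology mon manager,
not subprocess):

    # Illustrative sketch; the real qa helpers differ in plumbing.
    import subprocess

    class CephCmdHelpersSketch:
        def run_ceph_cmd(self, *args, **kwargs):
            # Run 'ceph <args>' and return the finished process. Nothing
            # is captured by default -- the caller only cares whether the
            # command succeeded.
            kwargs.setdefault('check', True)
            return subprocess.run(('ceph',) + args, **kwargs)

        def get_ceph_cmd_stdout(self, *args):
            # Same command, but additionally capture stdout and return it
            # as a string -- wasted effort when the caller throws the
            # output away, which is the pattern this commit removes.
            return self.run_ceph_cmd(*args, capture_output=True,
                                     text=True).stdout

The mechanical rule of the patch is therefore: keep get_ceph_cmd_stdout()
only where the output is actually read (for example, the
'osd dump --format=json' call in test_full.py below stays untouched) and
switch every call whose output is ignored to run_ceph_cmd().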

17 files changed:
qa/tasks/cephfs/test_admin.py
qa/tasks/cephfs/test_damage.py
qa/tasks/cephfs/test_data_scan.py
qa/tasks/cephfs/test_failover.py
qa/tasks/cephfs/test_fragment.py
qa/tasks/cephfs/test_fstop.py
qa/tasks/cephfs/test_full.py
qa/tasks/cephfs/test_journal_repair.py
qa/tasks/cephfs/test_mds_metrics.py
qa/tasks/cephfs/test_mirroring.py
qa/tasks/cephfs/test_misc.py
qa/tasks/cephfs/test_multimds_misc.py
qa/tasks/cephfs/test_recovery_fs.py
qa/tasks/cephfs/test_recovery_pool.py
qa/tasks/cephfs/test_scrub_checks.py
qa/tasks/cephfs/test_strays.py
qa/tasks/cephfs/test_volumes.py

diff --git a/qa/tasks/cephfs/test_admin.py b/qa/tasks/cephfs/test_admin.py
index b90b0d7bf8de92a3ecf367ed34647910c1bd7341..280044d9cb1881e39ea17c26790d5b0dfaadfb92 100644
@@ -31,12 +31,12 @@ class TestAdminCommands(CephFSTestCase):
 
     def setup_ec_pools(self, n, metadata=True, overwrites=True):
         if metadata:
-            self.get_ceph_cmd_stdout('osd', 'pool', 'create', n+"-meta", "8")
+            self.run_ceph_cmd('osd', 'pool', 'create', n+"-meta", "8")
         cmd = ['osd', 'erasure-code-profile', 'set', n+"-profile", "m=2", "k=2", "crush-failure-domain=osd"]
-        self.get_ceph_cmd_stdout(cmd)
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', n+"-data", "8", "erasure", n+"-profile")
+        self.run_ceph_cmd(cmd)
+        self.run_ceph_cmd('osd', 'pool', 'create', n+"-data", "8", "erasure", n+"-profile")
         if overwrites:
-            self.get_ceph_cmd_stdout('osd', 'pool', 'set', n+"-data", 'allow_ec_overwrites', 'true')
+            self.run_ceph_cmd('osd', 'pool', 'set', n+"-data", 'allow_ec_overwrites', 'true')
 
 @classhook('_add_valid_tell')
 class TestValidTell(TestAdminCommands):
@@ -151,8 +151,8 @@ class TestFsNew(TestAdminCommands):
         metapoolname, datapoolname = n+'-testmetapool', n+'-testdatapool'
         badname = n+'badname@#'
 
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', n+metapoolname)
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', n+datapoolname)
+        self.run_ceph_cmd('osd', 'pool', 'create', n+metapoolname)
+        self.run_ceph_cmd('osd', 'pool', 'create', n+datapoolname)
 
         # test that fsname not with "goodchars" fails
         args = ['fs', 'new', badname, metapoolname, datapoolname]
@@ -160,12 +160,12 @@ class TestFsNew(TestAdminCommands):
                                  check_status=False)
         self.assertIn('invalid chars', proc.stderr.getvalue().lower())
 
-        self.get_ceph_cmd_stdout('osd', 'pool', 'rm', metapoolname,
-                                 metapoolname,
-                                 '--yes-i-really-really-mean-it-not-faking')
-        self.get_ceph_cmd_stdout('osd', 'pool', 'rm', datapoolname,
-                                 datapoolname,
-                                 '--yes-i-really-really-mean-it-not-faking')
+        self.run_ceph_cmd('osd', 'pool', 'rm', metapoolname,
+                          metapoolname,
+                          '--yes-i-really-really-mean-it-not-faking')
+        self.run_ceph_cmd('osd', 'pool', 'rm', datapoolname,
+                          datapoolname,
+                          '--yes-i-really-really-mean-it-not-faking')
 
     def test_new_default_ec(self):
         """
@@ -177,7 +177,7 @@ class TestFsNew(TestAdminCommands):
         n = "test_new_default_ec"
         self.setup_ec_pools(n)
         try:
-            self.get_ceph_cmd_stdout('fs', 'new', n, n+"-meta", n+"-data")
+            self.run_ceph_cmd('fs', 'new', n, n+"-meta", n+"-data")
         except CommandFailedError as e:
             if e.exitstatus == 22:
                 pass
@@ -195,7 +195,7 @@ class TestFsNew(TestAdminCommands):
         self.mds_cluster.delete_all_filesystems()
         n = "test_new_default_ec_force"
         self.setup_ec_pools(n)
-        self.get_ceph_cmd_stdout('fs', 'new', n, n+"-meta", n+"-data", "--force")
+        self.run_ceph_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
 
     def test_new_default_ec_no_overwrite(self):
         """
@@ -207,7 +207,7 @@ class TestFsNew(TestAdminCommands):
         n = "test_new_default_ec_no_overwrite"
         self.setup_ec_pools(n, overwrites=False)
         try:
-            self.get_ceph_cmd_stdout('fs', 'new', n, n+"-meta", n+"-data")
+            self.run_ceph_cmd('fs', 'new', n, n+"-meta", n+"-data")
         except CommandFailedError as e:
             if e.exitstatus == 22:
                 pass
@@ -217,7 +217,7 @@ class TestFsNew(TestAdminCommands):
             raise RuntimeError("expected failure")
         # and even with --force !
         try:
-            self.get_ceph_cmd_stdout('fs', 'new', n, n+"-meta", n+"-data", "--force")
+            self.run_ceph_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
         except CommandFailedError as e:
             if e.exitstatus == 22:
                 pass
@@ -818,17 +818,17 @@ class TestMirroringCommands(CephFSTestCase):
     MDSS_REQUIRED = 1
 
     def _enable_mirroring(self, fs_name):
-        self.get_ceph_cmd_stdout("fs", "mirror", "enable", fs_name)
+        self.run_ceph_cmd("fs", "mirror", "enable", fs_name)
 
     def _disable_mirroring(self, fs_name):
-        self.get_ceph_cmd_stdout("fs", "mirror", "disable", fs_name)
+        self.run_ceph_cmd("fs", "mirror", "disable", fs_name)
 
     def _add_peer(self, fs_name, peer_spec, remote_fs_name):
         peer_uuid = str(uuid.uuid4())
-        self.get_ceph_cmd_stdout("fs", "mirror", "peer_add", fs_name, peer_uuid, peer_spec, remote_fs_name)
+        self.run_ceph_cmd("fs", "mirror", "peer_add", fs_name, peer_uuid, peer_spec, remote_fs_name)
 
     def _remove_peer(self, fs_name, peer_uuid):
-        self.get_ceph_cmd_stdout("fs", "mirror", "peer_remove", fs_name, peer_uuid)
+        self.run_ceph_cmd("fs", "mirror", "peer_remove", fs_name, peer_uuid)
 
     def _verify_mirroring(self, fs_name, flag_str):
         status = self.fs.status()
diff --git a/qa/tasks/cephfs/test_damage.py b/qa/tasks/cephfs/test_damage.py
index 6c66ff5f59217786bd5d9977f1590b3a258d413f..339b0e6c05565871c5e8cd06da4daa771b72ae34 100644
@@ -244,7 +244,7 @@ class TestDamage(CephFSTestCase):
             # Reset MDS state
             self.mount_a.umount_wait(force=True)
             self.fs.fail()
-            self.get_ceph_cmd_stdout('mds', 'repaired', '0')
+            self.run_ceph_cmd('mds', 'repaired', '0')
 
             # Reset RADOS pool state
             self.fs.radosm(['import', '-'], stdin=BytesIO(serialized))
@@ -467,7 +467,7 @@ class TestDamage(CephFSTestCase):
         self.fs.radosm(["setomapval", dirfrag_obj, "file_to_be_damaged_head", junk])
 
         # Clean up the damagetable entry
-        self.get_ceph_cmd_stdout(
+        self.run_ceph_cmd(
             'tell', f'mds.{self.fs.get_active_names()[0]}',
             "damage", "rm", f"{damage_id}")
 
@@ -528,7 +528,7 @@ class TestDamage(CephFSTestCase):
         self.assertEqual(damage[0]['damage_type'], "backtrace")
         self.assertEqual(damage[0]['ino'], file1_ino)
 
-        self.get_ceph_cmd_stdout(
+        self.run_ceph_cmd(
             'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
             "damage", "rm", str(damage[0]['id']))
 
@@ -561,7 +561,7 @@ class TestDamage(CephFSTestCase):
             self.assertEqual(damage[1]['ino'], file2_ino)
 
         for entry in damage:
-            self.get_ceph_cmd_stdout(
+            self.run_ceph_cmd(
                 'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
                 "damage", "rm", str(entry['id']))
 
diff --git a/qa/tasks/cephfs/test_data_scan.py b/qa/tasks/cephfs/test_data_scan.py
index f6e40c99bc2803fe443805b6cd68eae3af72f8ba..5a55e4fe9ba789a07bafacf188413b699a4173ef 100644
@@ -428,7 +428,7 @@ class TestDataScan(CephFSTestCase):
         self.fs.data_scan(["scan_links"])
 
         # Mark the MDS repaired
-        self.get_ceph_cmd_stdout('mds', 'repaired', '0')
+        self.run_ceph_cmd('mds', 'repaired', '0')
 
         # Start the MDS
         self.fs.mds_restart()
diff --git a/qa/tasks/cephfs/test_failover.py b/qa/tasks/cephfs/test_failover.py
index 590a4d929d0170cae5e09df3e068518a0bd5878d..4948e77d0ff1e118cec3045c8af39e8766803e36 100644
@@ -414,7 +414,7 @@ class TestFailover(CephFSTestCase):
 
         standbys = self.mds_cluster.get_standby_daemons()
         self.assertGreaterEqual(len(standbys), 1)
-        self.get_ceph_cmd_stdout('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)))
+        self.run_ceph_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)))
 
         # Kill a standby and check for warning
         victim = standbys.pop()
@@ -432,11 +432,11 @@ class TestFailover(CephFSTestCase):
         # Set it one greater than standbys ever seen
         standbys = self.mds_cluster.get_standby_daemons()
         self.assertGreaterEqual(len(standbys), 1)
-        self.get_ceph_cmd_stdout('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)+1))
+        self.run_ceph_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)+1))
         self.wait_for_health("MDS_INSUFFICIENT_STANDBY", self.fs.beacon_timeout)
 
         # Set it to 0
-        self.get_ceph_cmd_stdout('fs', 'set', self.fs.name, 'standby_count_wanted', '0')
+        self.run_ceph_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', '0')
         self.wait_for_health_clear(timeout=30)
 
     def test_discontinuous_mdsmap(self):
@@ -685,8 +685,8 @@ class TestMultiFilesystems(CephFSTestCase):
 
     def setUp(self):
         super(TestMultiFilesystems, self).setUp()
-        self.get_ceph_cmd_stdout("fs", "flag", "set", "enable_multiple",
-                                 "true", "--yes-i-really-mean-it")
+        self.run_ceph_cmd("fs", "flag", "set", "enable_multiple",
+                          "true", "--yes-i-really-mean-it")
 
     def _setup_two(self):
         fs_a = self.mds_cluster.newfs(name="alpha")
@@ -768,7 +768,7 @@ class TestMultiFilesystems(CephFSTestCase):
 
         # Kill fs_a's active MDS, see a standby take over
         self.mds_cluster.mds_stop(original_a)
-        self.get_ceph_cmd_stdout("mds", "fail", original_a)
+        self.run_ceph_cmd("mds", "fail", original_a)
         self.wait_until_equal(lambda: len(fs_a.get_active_names()), 1, 30,
                               reject_fn=lambda v: v > 1)
         # Assert that it's a *different* daemon that has now appeared in the map for fs_a
@@ -776,7 +776,7 @@ class TestMultiFilesystems(CephFSTestCase):
 
         # Kill fs_b's active MDS, see a standby take over
         self.mds_cluster.mds_stop(original_b)
-        self.get_ceph_cmd_stdout("mds", "fail", original_b)
+        self.run_ceph_cmd("mds", "fail", original_b)
         self.wait_until_equal(lambda: len(fs_b.get_active_names()), 1, 30,
                               reject_fn=lambda v: v > 1)
         # Assert that it's a *different* daemon that has now appeared in the map for fs_a
diff --git a/qa/tasks/cephfs/test_fragment.py b/qa/tasks/cephfs/test_fragment.py
index 42df5138f7f88b3a19b14c8244e8513058e7c074..902a53e79f7d01f367fa83a85cb74e1a453c4263 100644
@@ -160,14 +160,13 @@ class TestFragmentation(CephFSTestCase):
             target_files = branch_factor**depth * int(split_size * 1.5)
             create_files = target_files - files_written
 
-            self.get_ceph_cmd_stdout("log",
+            self.run_ceph_cmd("log",
                 "{0} Writing {1} files (depth={2})".format(
                     self.__class__.__name__, create_files, depth
                 ))
             self.mount_a.create_n_files("splitdir/file_{0}".format(depth),
                                         create_files)
-            self.get_ceph_cmd_stdout("log",
-                "{0} Done".format(self.__class__.__name__))
+            self.run_ceph_cmd("log","{0} Done".format(self.__class__.__name__))
 
             files_written += create_files
             log.info("Now have {0} files".format(files_written))
diff --git a/qa/tasks/cephfs/test_fstop.py b/qa/tasks/cephfs/test_fstop.py
index 7d17a4f268c3fba61b24cd33c38359eac4b87965..09896703d81f91616c9fa7c63c124491a1c65dab 100644
@@ -93,8 +93,8 @@ class TestFSTop(CephFSTestCase):
         # umount mount_b, mount another filesystem on it and use --dumpfs filter
         self.mount_b.umount_wait()
 
-        self.get_ceph_cmd_stdout("fs", "flag", "set", "enable_multiple",
-                                 "true", "--yes-i-really-mean-it")
+        self.run_ceph_cmd("fs", "flag", "set", "enable_multiple", "true",
+                          "--yes-i-really-mean-it")
 
         # create a new filesystem
         fs_b = self.mds_cluster.newfs(name=newfs_name)
diff --git a/qa/tasks/cephfs/test_full.py b/qa/tasks/cephfs/test_full.py
index b5d2cbca9b2f0688d505077c857387f9b36ebab1..90a65f069ef615b401e6659310bcaa55575e8c5f 100644
@@ -61,8 +61,8 @@ class FullnessTestCase(CephFSTestCase):
         self.assertGreaterEqual(mount_a_initial_epoch, self.initial_osd_epoch)
 
         # Set and unset a flag to cause OSD epoch to increment
-        self.get_ceph_cmd_stdout("osd", "set", "pause")
-        self.get_ceph_cmd_stdout("osd", "unset", "pause")
+        self.run_ceph_cmd("osd", "set", "pause")
+        self.run_ceph_cmd("osd", "unset", "pause")
 
         out = self.get_ceph_cmd_stdout("osd", "dump", "--format=json").strip()
         new_epoch = json.loads(out)['epoch']
@@ -376,8 +376,8 @@ class TestQuotaFull(FullnessTestCase):
         super(TestQuotaFull, self).setUp()
 
         pool_name = self.fs.get_data_pool_name()
-        self.get_ceph_cmd_stdout("osd", "pool", "set-quota", pool_name,
-                                 "max_bytes", f"{self.pool_capacity}")
+        self.run_ceph_cmd("osd", "pool", "set-quota", pool_name,
+                          "max_bytes", f"{self.pool_capacity}")
 
 
 class TestClusterFull(FullnessTestCase):
diff --git a/qa/tasks/cephfs/test_journal_repair.py b/qa/tasks/cephfs/test_journal_repair.py
index 7561ddee974028002f51209865c47e67d62b1192..365140fd9f60a9f33164c7fedf916ee4934e5f07 100644
@@ -233,8 +233,8 @@ class TestJournalRepair(CephFSTestCase):
         self.fs.table_tool(["0", "reset", "session"])
         self.fs.journal_tool(["journal", "reset"], 0)
         self.fs.erase_mds_objects(1)
-        self.get_ceph_cmd_stdout('fs', 'reset', self.fs.name,
-                                 '--yes-i-really-mean-it')
+        self.run_ceph_cmd('fs', 'reset', self.fs.name,
+                          '--yes-i-really-mean-it')
 
         # Bring an MDS back online, mount a client, and see that we can walk the full
         # filesystem tree again
diff --git a/qa/tasks/cephfs/test_mds_metrics.py b/qa/tasks/cephfs/test_mds_metrics.py
index 296b33859806fdb18f26b437c8c183500036661e..0e824d3d278dc4392c550ce661ee6d513bc90c5d 100644
@@ -404,7 +404,7 @@ class TestMDSMetrics(CephFSTestCase):
         invalid_mds_rank = "1,"
         # try, 'fs perf stat' command with invalid mds_rank
         try:
-            self.get_ceph_cmd_stdout("fs", "perf", "stats", "--mds_rank", invalid_mds_rank)
+            self.run_ceph_cmd("fs", "perf", "stats", "--mds_rank", invalid_mds_rank)
         except CommandFailedError as ce:
             if ce.exitstatus != errno.EINVAL:
                 raise
@@ -415,7 +415,7 @@ class TestMDSMetrics(CephFSTestCase):
         invalid_client_id = "abcd"
         # try, 'fs perf stat' command with invalid client_id
         try:
-            self.get_ceph_cmd_stdout("fs", "perf", "stats", "--client_id", invalid_client_id)
+            self.run_ceph_cmd("fs", "perf", "stats", "--client_id", invalid_client_id)
         except CommandFailedError as ce:
             if ce.exitstatus != errno.EINVAL:
                 raise
@@ -426,7 +426,7 @@ class TestMDSMetrics(CephFSTestCase):
         invalid_client_ip = "1.2.3"
         # try, 'fs perf stat' command with invalid client_ip
         try:
-            self.get_ceph_cmd_stdout("fs", "perf", "stats", "--client_ip", invalid_client_ip)
+            self.run_ceph_cmd("fs", "perf", "stats", "--client_ip", invalid_client_ip)
         except CommandFailedError as ce:
             if ce.exitstatus != errno.EINVAL:
                 raise
@@ -501,8 +501,8 @@ class TestMDSMetrics(CephFSTestCase):
         self.mount_b.umount_wait()
         self.fs.delete_all_filesystems()
 
-        self.get_ceph_cmd_stdout("fs", "flag", "set", "enable_multiple",
-                                 "true", "--yes-i-really-mean-it")
+        self.run_ceph_cmd("fs", "flag", "set", "enable_multiple",
+            "true", "--yes-i-really-mean-it")
 
         # creating filesystem
         fs_a = self._setup_fs(fs_name="fs1")
@@ -569,8 +569,8 @@ class TestMDSMetrics(CephFSTestCase):
         self.mount_a.umount_wait()
         self.mount_b.umount_wait()
 
-        self.get_ceph_cmd_stdout("fs", "flag", "set", "enable_multiple",
-                                 "true", "--yes-i-really-mean-it")
+        self.run_ceph_cmd("fs", "flag", "set", "enable_multiple",
+            "true", "--yes-i-really-mean-it")
 
         # creating filesystem
         fs_b = self._setup_fs(fs_name="fs2")
diff --git a/qa/tasks/cephfs/test_mirroring.py b/qa/tasks/cephfs/test_mirroring.py
index c344dad8101f379cab51278cf9113a7229b9cfe8..24dbd61b17fbbf4bcad794e055fb5d99aed95aae 100644
@@ -34,13 +34,13 @@ class TestMirroring(CephFSTestCase):
         super(TestMirroring, self).tearDown()
 
     def enable_mirroring_module(self):
-        self.get_ceph_cmd_stdout("mgr", "module", "enable", TestMirroring.MODULE_NAME)
+        self.run_ceph_cmd("mgr", "module", "enable", TestMirroring.MODULE_NAME)
 
     def disable_mirroring_module(self):
-        self.get_ceph_cmd_stdout("mgr", "module", "disable", TestMirroring.MODULE_NAME)
+        self.run_ceph_cmd("mgr", "module", "disable", TestMirroring.MODULE_NAME)
 
     def enable_mirroring(self, fs_name, fs_id):
-        self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "enable", fs_name)
+        self.run_ceph_cmd("fs", "snapshot", "mirror", "enable", fs_name)
         time.sleep(10)
         # verify via asok
         res = self.mirror_daemon_command(f'mirror status for fs: {fs_name}',
@@ -49,7 +49,7 @@ class TestMirroring(CephFSTestCase):
         self.assertTrue(res['snap_dirs']['dir_count'] == 0)
 
     def disable_mirroring(self, fs_name, fs_id):
-        self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "disable", fs_name)
+        self.run_ceph_cmd("fs", "snapshot", "mirror", "disable", fs_name)
         time.sleep(10)
         # verify via asok
         try:
@@ -76,15 +76,15 @@ class TestMirroring(CephFSTestCase):
 
     def peer_add(self, fs_name, fs_id, peer_spec, remote_fs_name=None):
         if remote_fs_name:
-            self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "peer_add", fs_name, peer_spec, remote_fs_name)
+            self.run_ceph_cmd("fs", "snapshot", "mirror", "peer_add", fs_name, peer_spec, remote_fs_name)
         else:
-            self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "peer_add", fs_name, peer_spec)
+            self.run_ceph_cmd("fs", "snapshot", "mirror", "peer_add", fs_name, peer_spec)
         time.sleep(10)
         self.verify_peer_added(fs_name, fs_id, peer_spec, remote_fs_name)
 
     def peer_remove(self, fs_name, fs_id, peer_spec):
         peer_uuid = self.get_peer_uuid(peer_spec)
-        self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "peer_remove", fs_name, peer_uuid)
+        self.run_ceph_cmd("fs", "snapshot", "mirror", "peer_remove", fs_name, peer_uuid)
         time.sleep(10)
         # verify via asok
         res = self.mirror_daemon_command(f'mirror status for fs: {fs_name}',
@@ -98,8 +98,8 @@ class TestMirroring(CephFSTestCase):
         return outj['token']
 
     def import_peer(self, fs_name, token):
-        self.get_ceph_cmd_stdout("fs", "snapshot", "mirror",
-                                 "peer_bootstrap", "import", fs_name, token)
+        self.run_ceph_cmd("fs", "snapshot", "mirror", "peer_bootstrap",
+                          "import", fs_name, token)
 
     def add_directory(self, fs_name, fs_id, dir_name):
         # get initial dir count
@@ -108,7 +108,7 @@ class TestMirroring(CephFSTestCase):
         dir_count = res['snap_dirs']['dir_count']
         log.debug(f'initial dir_count={dir_count}')
 
-        self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "add", fs_name, dir_name)
+        self.run_ceph_cmd("fs", "snapshot", "mirror", "add", fs_name, dir_name)
 
         time.sleep(10)
         # verify via asok
@@ -125,7 +125,7 @@ class TestMirroring(CephFSTestCase):
         dir_count = res['snap_dirs']['dir_count']
         log.debug(f'initial dir_count={dir_count}')
 
-        self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "remove", fs_name, dir_name)
+        self.run_ceph_cmd("fs", "snapshot", "mirror", "remove", fs_name, dir_name)
 
         time.sleep(10)
         # verify via asok
@@ -322,7 +322,7 @@ class TestMirroring(CephFSTestCase):
 
         # try removing peer
         try:
-            self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "peer_remove", self.primary_fs_name, 'dummy-uuid')
+            self.run_ceph_cmd("fs", "snapshot", "mirror", "peer_remove", self.primary_fs_name, 'dummy-uuid')
         except CommandFailedError as ce:
             if ce.exitstatus != errno.EINVAL:
                 raise RuntimeError(-errno.EINVAL, 'incorrect error code when removing a peer')
@@ -702,7 +702,7 @@ class TestMirroring(CephFSTestCase):
 
         # enable mirroring through mon interface -- this should result in the mirror daemon
         # failing to enable mirroring due to absence of `cephfs_mirror` index object.
-        self.get_ceph_cmd_stdout("fs", "mirror", "enable", self.primary_fs_name)
+        self.run_ceph_cmd("fs", "mirror", "enable", self.primary_fs_name)
 
         with safe_while(sleep=5, tries=10, action='wait for failed state') as proceed:
             while proceed():
@@ -717,7 +717,7 @@ class TestMirroring(CephFSTestCase):
                 except:
                     pass
 
-        self.get_ceph_cmd_stdout("fs", "mirror", "disable", self.primary_fs_name)
+        self.run_ceph_cmd("fs", "mirror", "disable", self.primary_fs_name)
         time.sleep(10)
         # verify via asok
         try:
@@ -739,7 +739,7 @@ class TestMirroring(CephFSTestCase):
         # enable mirroring through mon interface -- this should result in the mirror daemon
         # failing to enable mirroring due to absence of `cephfs_mirror` index object.
 
-        self.get_ceph_cmd_stdout("fs", "mirror", "enable", self.primary_fs_name)
+        self.run_ceph_cmd("fs", "mirror", "enable", self.primary_fs_name)
         # need safe_while since non-failed status pops up as mirroring is restarted
         # internally in mirror daemon.
         with safe_while(sleep=5, tries=20, action='wait for failed state') as proceed:
@@ -770,7 +770,7 @@ class TestMirroring(CephFSTestCase):
         self.assertTrue(res['peers'] == {})
         self.assertTrue(res['snap_dirs']['dir_count'] == 0)
 
-        self.get_ceph_cmd_stdout("fs", "mirror", "disable", self.primary_fs_name)
+        self.run_ceph_cmd("fs", "mirror", "disable", self.primary_fs_name)
         time.sleep(10)
         # verify via asok
         try:
@@ -898,7 +898,7 @@ class TestMirroring(CephFSTestCase):
         dir_path_p = "/d0/d1"
         dir_path = "/d0/d1/d2"
 
-        self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "add", self.primary_fs_name, dir_path)
+        self.run_ceph_cmd("fs", "snapshot", "mirror", "add", self.primary_fs_name, dir_path)
 
         time.sleep(10)
         # this uses an undocumented interface to get dirpath map state
@@ -907,11 +907,11 @@ class TestMirroring(CephFSTestCase):
         # there are no mirror daemons
         self.assertTrue(res['state'], 'stalled')
 
-        self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "remove", self.primary_fs_name, dir_path)
+        self.run_ceph_cmd("fs", "snapshot", "mirror", "remove", self.primary_fs_name, dir_path)
 
         time.sleep(10)
         try:
-            self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "dirmap", self.primary_fs_name, dir_path)
+            self.run_ceph_cmd("fs", "snapshot", "mirror", "dirmap", self.primary_fs_name, dir_path)
         except CommandFailedError as ce:
             if ce.exitstatus != errno.ENOENT:
                 raise RuntimeError('invalid errno when checking dirmap status for non-existent directory')
@@ -919,7 +919,7 @@ class TestMirroring(CephFSTestCase):
             raise RuntimeError('incorrect errno when checking dirmap state for non-existent directory')
 
         # adding a parent directory should be allowed
-        self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "add", self.primary_fs_name, dir_path_p)
+        self.run_ceph_cmd("fs", "snapshot", "mirror", "add", self.primary_fs_name, dir_path_p)
 
         time.sleep(10)
         # however, this directory path should get stalled too
diff --git a/qa/tasks/cephfs/test_misc.py b/qa/tasks/cephfs/test_misc.py
index 51c9bd204ce47d9204ccea6b573a17ba315bb3d8..40a71002486dff17ccd953bce59ea7ccb4728e0e 100644
@@ -95,16 +95,15 @@ class TestMisc(CephFSTestCase):
 
         self.fs.fail()
 
-        self.get_ceph_cmd_stdout('fs', 'rm', self.fs.name,
-                                 '--yes-i-really-mean-it')
+        self.run_ceph_cmd('fs', 'rm', self.fs.name, '--yes-i-really-mean-it')
 
-        self.get_ceph_cmd_stdout('osd', 'pool', 'delete',
-                                 self.fs.metadata_pool_name,
-                                 self.fs.metadata_pool_name,
-                                 '--yes-i-really-really-mean-it')
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create',
-                                 self.fs.metadata_pool_name,
-                                 '--pg_num_min', str(self.fs.pg_num_min))
+        self.run_ceph_cmd('osd', 'pool', 'delete',
+                          self.fs.metadata_pool_name,
+                          self.fs.metadata_pool_name,
+                          '--yes-i-really-really-mean-it')
+        self.run_ceph_cmd('osd', 'pool', 'create',
+                          self.fs.metadata_pool_name,
+                          '--pg_num_min', str(self.fs.pg_num_min))
 
         # insert a garbage object
         self.fs.radosm(["put", "foo", "-"], stdin=StringIO("bar"))
@@ -118,34 +117,34 @@ class TestMisc(CephFSTestCase):
         self.wait_until_true(lambda: get_pool_df(self.fs, self.fs.metadata_pool_name), timeout=30)
 
         try:
-            self.get_ceph_cmd_stdout('fs', 'new', self.fs.name,
-                                     self.fs.metadata_pool_name,
-                                     data_pool_name)
+            self.run_ceph_cmd('fs', 'new', self.fs.name,
+                              self.fs.metadata_pool_name,
+                              data_pool_name)
         except CommandFailedError as e:
             self.assertEqual(e.exitstatus, errno.EINVAL)
         else:
             raise AssertionError("Expected EINVAL")
 
-        self.get_ceph_cmd_stdout('fs', 'new', self.fs.name,
-                                 self.fs.metadata_pool_name,
-                                 data_pool_name, "--force")
-
-        self.get_ceph_cmd_stdout('fs', 'fail', self.fs.name)
-
-        self.get_ceph_cmd_stdout('fs', 'rm', self.fs.name,
-                                 '--yes-i-really-mean-it'])
-
-        self.get_ceph_cmd_stdout('osd', 'pool', 'delete',
-                                 self.fs.metadata_pool_name,
-                                 self.fs.metadata_pool_name,
-                                 '--yes-i-really-really-mean-it')
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create',
-                                 self.fs.metadata_pool_name,
-                                 '--pg_num_min', str(self.fs.pg_num_min))
-        self.get_ceph_cmd_stdout('fs', 'new', self.fs.name,
-                                 self.fs.metadata_pool_name,
-                                 data_pool_name,
-                                 '--allow_dangerous_metadata_overlay')
+        self.run_ceph_cmd('fs', 'new', self.fs.name,
+                          self.fs.metadata_pool_name,
+                          data_pool_name, "--force")
+
+        self.run_ceph_cmd('fs', 'fail', self.fs.name)
+
+        self.run_ceph_cmd('fs', 'rm', self.fs.name,
+                          '--yes-i-really-mean-it')
+
+        self.run_ceph_cmd('osd', 'pool', 'delete',
+                          self.fs.metadata_pool_name,
+                          self.fs.metadata_pool_name,
+                          '--yes-i-really-really-mean-it')
+        self.run_ceph_cmd('osd', 'pool', 'create',
+                          self.fs.metadata_pool_name,
+                          '--pg_num_min', str(self.fs.pg_num_min))
+        self.run_ceph_cmd('fs', 'new', self.fs.name,
+                          self.fs.metadata_pool_name,
+                          data_pool_name,
+                          '--allow_dangerous_metadata_overlay')
 
     def test_cap_revoke_nonresponder(self):
         """
diff --git a/qa/tasks/cephfs/test_multimds_misc.py b/qa/tasks/cephfs/test_multimds_misc.py
index 09847b6ea6d10ee44d43ff0a53740687de800077..e0e46fb24c09d19a7885134059fe25fea4f771e1 100644
@@ -116,7 +116,7 @@ class TestScrub2(CephFSTestCase):
 
         def expect_exdev(cmd, mds):
             try:
-                self.get_ceph_cmd_stdout('tell', 'mds.{0}'.format(mds), *cmd)
+                self.run_ceph_cmd('tell', 'mds.{0}'.format(mds), *cmd)
             except CommandFailedError as e:
                 if e.exitstatus == errno.EXDEV:
                     pass
diff --git a/qa/tasks/cephfs/test_recovery_fs.py b/qa/tasks/cephfs/test_recovery_fs.py
index e91a3f1913d67b4fd8dc70c3760d3f2f9856f07b..17669c0f2a881ddb0e41c9c23449dd25ac66ff8b 100644
@@ -27,7 +27,7 @@ class TestFSRecovery(CephFSTestCase):
         # recovered/intact
         self.fs.rm()
         # Recreate file system with pool and previous fscid
-        self.get_ceph_cmd_stdout(
+        self.run_ceph_cmd(
             'fs', 'new', self.fs.name, metadata_pool, data_pool,
             '--recover', '--force', '--fscid', f'{self.fs.id}')
         self.fs.set_joinable()
diff --git a/qa/tasks/cephfs/test_recovery_pool.py b/qa/tasks/cephfs/test_recovery_pool.py
index bf815547488c3662947f4ba36331c04d46345837..7aef2822985907b24368f8e70cba6772a0db5daa 100644
@@ -119,7 +119,7 @@ class TestRecoveryPool(CephFSTestCase):
         recovery_fs.create(recover=True, metadata_overlay=True)
 
         recovery_pool = recovery_fs.get_metadata_pool_name()
-        self.get_ceph_cmd_stdout('-s')
+        self.run_ceph_cmd('-s')
 
         # Reset the MDS map in case multiple ranks were in play: recovery procedure
         # only understands how to rebuild metadata under rank 0
diff --git a/qa/tasks/cephfs/test_scrub_checks.py b/qa/tasks/cephfs/test_scrub_checks.py
index bae048444f303f1bb8d3e3e5febcaee29720e348..f17a6ceb11537878b193239817b259505007a550 100644
@@ -281,7 +281,7 @@ class TestScrubChecks(CephFSTestCase):
             all_damage = self.fs.rank_tell(["damage", "ls"], mds_rank)
             damage = [d for d in all_damage if d['ino'] == ino and d['damage_type'] == dtype]
             for d in damage:
-                self.get_ceph_cmd_stdout(
+                self.run_ceph_cmd(
                     'tell', f'mds.{self.fs.get_active_names()[mds_rank]}',
                     "damage", "rm", str(d['id']))
             return len(damage) > 0
diff --git a/qa/tasks/cephfs/test_strays.py b/qa/tasks/cephfs/test_strays.py
index 2b31d00c541387e950ffc73c889cea8736d7c6bf..11701dc28368e0e9d165d12154975dc93720371a 100644
@@ -651,8 +651,8 @@ class TestStrays(CephFSTestCase):
         self.assertFalse(self._is_stopped(1))
 
         # Permit the daemon to start purging again
-        self.get_ceph_cmd_stdout('tell', 'mds.{0}'.format(rank_1_id),
-                                 'injectargs', "--mds_max_purge_files 100")
+        self.run_ceph_cmd('tell', 'mds.{0}'.format(rank_1_id),
+                          'injectargs', "--mds_max_purge_files 100")
 
         # It should now proceed through shutdown
         self.fs.wait_for_daemons(timeout=120)
diff --git a/qa/tasks/cephfs/test_volumes.py b/qa/tasks/cephfs/test_volumes.py
index 6629bcbb460c91afd35ffa42c1f34e7ea2a15da5..0fabf97a0044fa46551b27b1e19cf12d8df5425c 100644
@@ -2795,11 +2795,11 @@ class TestSubvolumes(TestVolumesHelper):
         group = self._generate_random_group_name()
 
         # Create auth_id
-        self.get_ceph_cmd_stdout(
+        self.run_ceph_cmd(
             "auth", "get-or-create", "client.guest1",
             "mds", "allow *",
             "osd", "allow rw",
-            "mon", "allow *"]
+            "mon", "allow *"
         )
 
         auth_id = "guest1"
@@ -2824,7 +2824,7 @@ class TestSubvolumes(TestVolumesHelper):
             self.fail("expected the 'fs subvolume authorize' command to fail")
 
         # clean up
-        self.get_ceph_cmd_stdout("auth", "rm", "client.guest1")
+        self.run_ceph_cmd("auth", "rm", "client.guest1")
         self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
         self._fs_cmd("subvolumegroup", "rm", self.volname, group)
 
@@ -2839,7 +2839,7 @@ class TestSubvolumes(TestVolumesHelper):
         group = self._generate_random_group_name()
 
         # Create auth_id
-        self.get_ceph_cmd_stdout(
+        self.run_ceph_cmd(
             "auth", "get-or-create", "client.guest1",
             "mds", "allow *",
             "osd", "allow rw",
@@ -2867,7 +2867,7 @@ class TestSubvolumes(TestVolumesHelper):
         # clean up
         self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id,
                      "--group_name", group)
-        self.get_ceph_cmd_stdout("auth", "rm", "client.guest1")
+        self.run_ceph_cmd("auth", "rm", "client.guest1")
         self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
         self._fs_cmd("subvolumegroup", "rm", self.volname, group)
 
@@ -2974,7 +2974,7 @@ class TestSubvolumes(TestVolumesHelper):
         # clean up
         self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id, "--group_name", group)
         guest_mount.umount_wait()
-        self.get_ceph_cmd_stdout("auth", "rm", "client.guest1")
+        self.run_ceph_cmd("auth", "rm", "client.guest1")
         self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
         self._fs_cmd("subvolumegroup", "rm", self.volname, group)
 
@@ -3030,7 +3030,7 @@ class TestSubvolumes(TestVolumesHelper):
         # clean up
         self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, "guest1", "--group_name", group)
         guest_mount.umount_wait()
-        self.get_ceph_cmd_stdout("auth", "rm", "client.guest1")
+        self.run_ceph_cmd("auth", "rm", "client.guest1")
         self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
         self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
         self._fs_cmd("subvolumegroup", "rm", self.volname, group)
@@ -3105,7 +3105,7 @@ class TestSubvolumes(TestVolumesHelper):
         self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, auth_id, "--group_name", group)
         self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, auth_id, "--group_name", group)
         guest_mount.umount_wait()
-        self.get_ceph_cmd_stdout("auth", "rm", "client.guest1")
+        self.run_ceph_cmd("auth", "rm", "client.guest1")
         self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
         self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
         self._fs_cmd("subvolumegroup", "rm", self.volname, group)
@@ -3177,7 +3177,7 @@ class TestSubvolumes(TestVolumesHelper):
         # clean up
         self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, auth_id, "--group_name", group)
         guest_mount.umount_wait()
-        self.get_ceph_cmd_stdout("auth", "rm", "client.guest1")
+        self.run_ceph_cmd("auth", "rm", "client.guest1")
         self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
         self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
         self._fs_cmd("subvolumegroup", "rm", self.volname, group)
@@ -7073,8 +7073,8 @@ class TestSubvolumeSnapshotClones(TestVolumesHelper):
         new_pool = "new_pool"
         self.fs.add_data_pool(new_pool)
 
-        self.get_ceph_cmd_stdout("osd", "pool", "set-quota", new_pool,
-                                 "max_bytes", f"{pool_capacity // 4}")
+        self.run_ceph_cmd("osd", "pool", "set-quota", new_pool,
+                          "max_bytes", f"{pool_capacity // 4}")
 
         # schedule a clone
         self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1, "--pool_layout", new_pool)