qa/cephfs: when cmd output is not needed call run_ceph_cmd()
author    Rishabh Dave <ridave@redhat.com>
          Thu, 16 Mar 2023 16:28:22 +0000 (21:58 +0530)
committer Rishabh Dave <ridave@redhat.com>
          Wed, 3 Apr 2024 17:32:08 +0000 (23:02 +0530)
instead of get_ceph_cmd_stdout().

Signed-off-by: Rishabh Dave <ridave@redhat.com>
(cherry picked from commit c7c38ba558e089c51d71e0b0713b3e0a368b9699)

Conflicts:
qa/tasks/cephfs/test_mirroring.py
- Commit e4dd0e41a3a0 was not present on main when the original patch
  was written, but it is now present on both main and Reef, which
  leads to a conflict.
- The line immediately before one of the hunks in this commit was
  modified in the latest Reef branch, creating a conflict when the PR
  branch was rebased onto the latest Reef.
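
For readers unfamiliar with the two QA helpers, the sketch below illustrates the convention this commit enforces: call run_ceph_cmd() when a ceph command is executed only for its side effect, and keep get_ceph_cmd_stdout() only where the test actually parses the output (for example, the untouched 'osd dump --format=json' call in test_full.py below). The classes here are illustrative stand-ins, not the framework's real helpers, which are provided by the CephFS QA test infrastructure.

import json
import subprocess


class CephCmdHelpersSketch:
    """Illustrative stand-ins only; not the real QA framework methods."""

    def run_ceph_cmd(self, *args):
        # Run the command purely for its side effect; a non-zero exit
        # status raises an exception and stdout is discarded.
        # Assumes a 'ceph' CLI is reachable in PATH (an assumption of this sketch).
        subprocess.run(('ceph',) + args, check=True, stdout=subprocess.DEVNULL)

    def get_ceph_cmd_stdout(self, *args):
        # Run the command and return its stdout for the caller to parse.
        return subprocess.run(('ceph',) + args, check=True,
                              capture_output=True, text=True).stdout


class Demo(CephCmdHelpersSketch):
    def bump_osd_epoch(self):
        # Output is not needed -> run_ceph_cmd()
        self.run_ceph_cmd('osd', 'set', 'pause')
        self.run_ceph_cmd('osd', 'unset', 'pause')
        # Output is needed -> get_ceph_cmd_stdout(), then parse it
        out = self.get_ceph_cmd_stdout('osd', 'dump', '--format=json')
        return json.loads(out)['epoch']

Note that the real helpers raise CommandFailedError on a non-zero exit status, as the try/except blocks in the hunks below show, whereas the subprocess-based stubs above raise CalledProcessError.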

17 files changed:
qa/tasks/cephfs/test_admin.py
qa/tasks/cephfs/test_damage.py
qa/tasks/cephfs/test_data_scan.py
qa/tasks/cephfs/test_failover.py
qa/tasks/cephfs/test_fragment.py
qa/tasks/cephfs/test_fstop.py
qa/tasks/cephfs/test_full.py
qa/tasks/cephfs/test_journal_repair.py
qa/tasks/cephfs/test_mds_metrics.py
qa/tasks/cephfs/test_mirroring.py
qa/tasks/cephfs/test_misc.py
qa/tasks/cephfs/test_multimds_misc.py
qa/tasks/cephfs/test_recovery_fs.py
qa/tasks/cephfs/test_recovery_pool.py
qa/tasks/cephfs/test_scrub_checks.py
qa/tasks/cephfs/test_strays.py
qa/tasks/cephfs/test_volumes.py

qa/tasks/cephfs/test_admin.py
index 862aeef5bdf8426bb75072ac007b66f687adffd4..123d913026624e22637d55ce6eb5f1b9d6e34c76 100644
@@ -83,12 +83,12 @@ class TestAdminCommands(CephFSTestCase):
 
     def setup_ec_pools(self, n, metadata=True, overwrites=True):
         if metadata:
-            self.get_ceph_cmd_stdout('osd', 'pool', 'create', n+"-meta", "8")
+            self.run_ceph_cmd('osd', 'pool', 'create', n+"-meta", "8")
         cmd = ['osd', 'erasure-code-profile', 'set', n+"-profile", "m=2", "k=2", "crush-failure-domain=osd"]
-        self.get_ceph_cmd_stdout(cmd)
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', n+"-data", "8", "erasure", n+"-profile")
+        self.run_ceph_cmd(cmd)
+        self.run_ceph_cmd('osd', 'pool', 'create', n+"-data", "8", "erasure", n+"-profile")
         if overwrites:
-            self.get_ceph_cmd_stdout('osd', 'pool', 'set', n+"-data", 'allow_ec_overwrites', 'true')
+            self.run_ceph_cmd('osd', 'pool', 'set', n+"-data", 'allow_ec_overwrites', 'true')
 
 @classhook('_add_valid_tell')
 class TestValidTell(TestAdminCommands):
@@ -201,22 +201,22 @@ class TestAddDataPool(TestAdminCommands):
         first_fs = "first_fs"
         first_metadata_pool = "first_metadata_pool"
         first_data_pool = "first_data_pool"
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_metadata_pool)
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_data_pool)
-        self.get_ceph_cmd_stdout('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+        self.run_ceph_cmd('osd', 'pool', 'create', first_metadata_pool)
+        self.run_ceph_cmd('osd', 'pool', 'create', first_data_pool)
+        self.run_ceph_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
 
         # create second data pool, metadata pool and add with filesystem
         second_fs = "second_fs"
         second_metadata_pool = "second_metadata_pool"
         second_data_pool = "second_data_pool"
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_metadata_pool)
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_data_pool)
-        self.get_ceph_cmd_stdout('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
+        self.run_ceph_cmd('osd', 'pool', 'create', second_metadata_pool)
+        self.run_ceph_cmd('osd', 'pool', 'create', second_data_pool)
+        self.run_ceph_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
 
         # try to add 'first_data_pool' with 'second_fs'
         # Expecting EINVAL exit status because 'first_data_pool' is already in use with 'first_fs'
         try:
-            self.get_ceph_cmd_stdout('fs', 'add_data_pool', second_fs, first_data_pool)
+            self.run_ceph_cmd('fs', 'add_data_pool', second_fs, first_data_pool)
         except CommandFailedError as e:
             self.assertEqual(e.exitstatus, errno.EINVAL)
         else:
@@ -231,23 +231,23 @@ class TestAddDataPool(TestAdminCommands):
         first_fs = "first_fs"
         first_metadata_pool = "first_metadata_pool"
         first_data_pool = "first_data_pool"
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_metadata_pool)
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_data_pool)
-        self.get_ceph_cmd_stdout('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+        self.run_ceph_cmd('osd', 'pool', 'create', first_metadata_pool)
+        self.run_ceph_cmd('osd', 'pool', 'create', first_data_pool)
+        self.run_ceph_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
 
         # create second data pool, metadata pool and add with filesystem
         second_fs = "second_fs"
         second_metadata_pool = "second_metadata_pool"
         second_data_pool = "second_data_pool"
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_metadata_pool)
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_data_pool)
-        self.get_ceph_cmd_stdout('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
+        self.run_ceph_cmd('osd', 'pool', 'create', second_metadata_pool)
+        self.run_ceph_cmd('osd', 'pool', 'create', second_data_pool)
+        self.run_ceph_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
 
         # try to add 'second_metadata_pool' with 'first_fs' as a data pool
         # Expecting EINVAL exit status because 'second_metadata_pool'
         # is already in use with 'second_fs' as a metadata pool
         try:
-            self.get_ceph_cmd_stdout('fs', 'add_data_pool', first_fs, second_metadata_pool)
+            self.run_ceph_cmd('fs', 'add_data_pool', first_fs, second_metadata_pool)
         except CommandFailedError as e:
             self.assertEqual(e.exitstatus, errno.EINVAL)
         else:
@@ -264,8 +264,8 @@ class TestFsNew(TestAdminCommands):
         metapoolname, datapoolname = n+'-testmetapool', n+'-testdatapool'
         badname = n+'badname@#'
 
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', n+metapoolname)
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', n+datapoolname)
+        self.run_ceph_cmd('osd', 'pool', 'create', n+metapoolname)
+        self.run_ceph_cmd('osd', 'pool', 'create', n+datapoolname)
 
         # test that fsname not with "goodchars" fails
         args = ['fs', 'new', badname, metapoolname, datapoolname]
@@ -273,12 +273,12 @@ class TestFsNew(TestAdminCommands):
                                  check_status=False)
         self.assertIn('invalid chars', proc.stderr.getvalue().lower())
 
-        self.get_ceph_cmd_stdout('osd', 'pool', 'rm', metapoolname,
-                                 metapoolname,
-                                 '--yes-i-really-really-mean-it-not-faking')
-        self.get_ceph_cmd_stdout('osd', 'pool', 'rm', datapoolname,
-                                 datapoolname,
-                                 '--yes-i-really-really-mean-it-not-faking')
+        self.run_ceph_cmd('osd', 'pool', 'rm', metapoolname,
+                          metapoolname,
+                          '--yes-i-really-really-mean-it-not-faking')
+        self.run_ceph_cmd('osd', 'pool', 'rm', datapoolname,
+                          datapoolname,
+                          '--yes-i-really-really-mean-it-not-faking')
 
     def test_new_default_ec(self):
         """
@@ -290,7 +290,7 @@ class TestFsNew(TestAdminCommands):
         n = "test_new_default_ec"
         self.setup_ec_pools(n)
         try:
-            self.get_ceph_cmd_stdout('fs', 'new', n, n+"-meta", n+"-data")
+            self.run_ceph_cmd('fs', 'new', n, n+"-meta", n+"-data")
         except CommandFailedError as e:
             if e.exitstatus == 22:
                 pass
@@ -308,7 +308,7 @@ class TestFsNew(TestAdminCommands):
         self.mds_cluster.delete_all_filesystems()
         n = "test_new_default_ec_force"
         self.setup_ec_pools(n)
-        self.get_ceph_cmd_stdout('fs', 'new', n, n+"-meta", n+"-data", "--force")
+        self.run_ceph_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
 
     def test_new_default_ec_no_overwrite(self):
         """
@@ -320,7 +320,7 @@ class TestFsNew(TestAdminCommands):
         n = "test_new_default_ec_no_overwrite"
         self.setup_ec_pools(n, overwrites=False)
         try:
-            self.get_ceph_cmd_stdout('fs', 'new', n, n+"-meta", n+"-data")
+            self.run_ceph_cmd('fs', 'new', n, n+"-meta", n+"-data")
         except CommandFailedError as e:
             if e.exitstatus == 22:
                 pass
@@ -330,7 +330,7 @@ class TestFsNew(TestAdminCommands):
             raise RuntimeError("expected failure")
         # and even with --force !
         try:
-            self.get_ceph_cmd_stdout('fs', 'new', n, n+"-meta", n+"-data", "--force")
+            self.run_ceph_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
         except CommandFailedError as e:
             if e.exitstatus == 22:
                 pass
@@ -432,13 +432,13 @@ class TestFsNew(TestAdminCommands):
         first_fs = "first_fs"
         first_metadata_pool = "first_metadata_pool"
         first_data_pool = "first_data_pool"
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_metadata_pool)
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_data_pool)
-        self.get_ceph_cmd_stdout('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+        self.run_ceph_cmd('osd', 'pool', 'create', first_metadata_pool)
+        self.run_ceph_cmd('osd', 'pool', 'create', first_data_pool)
+        self.run_ceph_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
 
         second_fs = "second_fs"
         second_data_pool = "second_data_pool"
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_data_pool)
+        self.run_ceph_cmd('osd', 'pool', 'create', second_data_pool)
 
         # try to create new fs 'second_fs' with following configuration
         # metadata pool -> 'first_metadata_pool'
@@ -446,7 +446,7 @@ class TestFsNew(TestAdminCommands):
         # Expecting EINVAL exit status because 'first_metadata_pool'
         # is already in use with 'first_fs'
         try:
-            self.get_ceph_cmd_stdout('fs', 'new', second_fs, first_metadata_pool, second_data_pool)
+            self.run_ceph_cmd('fs', 'new', second_fs, first_metadata_pool, second_data_pool)
         except CommandFailedError as e:
             self.assertEqual(e.exitstatus, errno.EINVAL)
         else:
@@ -461,13 +461,13 @@ class TestFsNew(TestAdminCommands):
         first_fs = "first_fs"
         first_metadata_pool = "first_metadata_pool"
         first_data_pool = "first_data_pool"
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_metadata_pool)
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_data_pool)
-        self.get_ceph_cmd_stdout('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+        self.run_ceph_cmd('osd', 'pool', 'create', first_metadata_pool)
+        self.run_ceph_cmd('osd', 'pool', 'create', first_data_pool)
+        self.run_ceph_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
 
         second_fs = "second_fs"
         second_metadata_pool = "second_metadata_pool"
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_metadata_pool)
+        self.run_ceph_cmd('osd', 'pool', 'create', second_metadata_pool)
 
         # try to create new fs 'second_fs' with following configuration
         # metadata pool -> 'second_metadata_pool'
@@ -475,7 +475,7 @@ class TestFsNew(TestAdminCommands):
         # Expecting EINVAL exit status because 'first_data_pool'
         # is already in use with 'first_fs'
         try:
-            self.get_ceph_cmd_stdout('fs', 'new', second_fs, second_metadata_pool, first_data_pool)
+            self.run_ceph_cmd('fs', 'new', second_fs, second_metadata_pool, first_data_pool)
         except CommandFailedError as e:
             self.assertEqual(e.exitstatus, errno.EINVAL)
         else:
@@ -490,9 +490,9 @@ class TestFsNew(TestAdminCommands):
         first_fs = "first_fs"
         first_metadata_pool = "first_metadata_pool"
         first_data_pool = "first_data_pool"
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_metadata_pool)
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_data_pool)
-        self.get_ceph_cmd_stdout('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+        self.run_ceph_cmd('osd', 'pool', 'create', first_metadata_pool)
+        self.run_ceph_cmd('osd', 'pool', 'create', first_data_pool)
+        self.run_ceph_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
 
         second_fs = "second_fs"
 
@@ -502,7 +502,7 @@ class TestFsNew(TestAdminCommands):
         # Expecting EINVAL exit status because 'first_metadata_pool' and 'first_data_pool'
         # is already in use with 'first_fs'
         try:
-            self.get_ceph_cmd_stdout('fs', 'new', second_fs, first_metadata_pool, first_data_pool)
+            self.run_ceph_cmd('fs', 'new', second_fs, first_metadata_pool, first_data_pool)
         except CommandFailedError as e:
             self.assertEqual(e.exitstatus, errno.EINVAL)
         else:
@@ -517,17 +517,17 @@ class TestFsNew(TestAdminCommands):
         first_fs = "first_fs"
         first_metadata_pool = "first_metadata_pool"
         first_data_pool = "first_data_pool"
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_metadata_pool)
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_data_pool)
-        self.get_ceph_cmd_stdout('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+        self.run_ceph_cmd('osd', 'pool', 'create', first_metadata_pool)
+        self.run_ceph_cmd('osd', 'pool', 'create', first_data_pool)
+        self.run_ceph_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
 
         # create second data pool, metadata pool and add with filesystem
         second_fs = "second_fs"
         second_metadata_pool = "second_metadata_pool"
         second_data_pool = "second_data_pool"
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_metadata_pool)
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_data_pool)
-        self.get_ceph_cmd_stdout('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
+        self.run_ceph_cmd('osd', 'pool', 'create', second_metadata_pool)
+        self.run_ceph_cmd('osd', 'pool', 'create', second_data_pool)
+        self.run_ceph_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
 
         third_fs = "third_fs"
 
@@ -537,7 +537,7 @@ class TestFsNew(TestAdminCommands):
         # Expecting EINVAL exit status because 'first_metadata_pool' and 'second_data_pool'
         # is already in use with 'first_fs' and 'second_fs'
         try:
-            self.get_ceph_cmd_stdout('fs', 'new', third_fs, first_metadata_pool, second_data_pool)
+            self.run_ceph_cmd('fs', 'new', third_fs, first_metadata_pool, second_data_pool)
         except CommandFailedError as e:
             self.assertEqual(e.exitstatus, errno.EINVAL)
         else:
@@ -552,9 +552,9 @@ class TestFsNew(TestAdminCommands):
         first_fs = "first_fs"
         first_metadata_pool = "first_metadata_pool"
         first_data_pool = "first_data_pool"
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_metadata_pool)
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_data_pool)
-        self.get_ceph_cmd_stdout('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+        self.run_ceph_cmd('osd', 'pool', 'create', first_metadata_pool)
+        self.run_ceph_cmd('osd', 'pool', 'create', first_data_pool)
+        self.run_ceph_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
 
         second_fs = "second_fs"
 
@@ -564,7 +564,7 @@ class TestFsNew(TestAdminCommands):
         # Expecting EINVAL exit status because 'first_data_pool' and 'first_metadata_pool'
         # is already in use with 'first_fs'
         try:
-            self.get_ceph_cmd_stdout('fs', 'new', second_fs, first_data_pool, first_metadata_pool)
+            self.run_ceph_cmd('fs', 'new', second_fs, first_data_pool, first_metadata_pool)
         except CommandFailedError as e:
             self.assertEqual(e.exitstatus, errno.EINVAL)
         else:
@@ -579,17 +579,17 @@ class TestFsNew(TestAdminCommands):
         first_fs = "first_fs"
         first_metadata_pool = "first_metadata_pool"
         first_data_pool = "first_data_pool"
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_metadata_pool)
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_data_pool)
-        self.get_ceph_cmd_stdout('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+        self.run_ceph_cmd('osd', 'pool', 'create', first_metadata_pool)
+        self.run_ceph_cmd('osd', 'pool', 'create', first_data_pool)
+        self.run_ceph_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
 
         # create second data pool, metadata pool and add with filesystem
         second_fs = "second_fs"
         second_metadata_pool = "second_metadata_pool"
         second_data_pool = "second_data_pool"
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_metadata_pool)
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_data_pool)
-        self.get_ceph_cmd_stdout('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
+        self.run_ceph_cmd('osd', 'pool', 'create', second_metadata_pool)
+        self.run_ceph_cmd('osd', 'pool', 'create', second_data_pool)
+        self.run_ceph_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
 
         third_fs = "third_fs"
 
@@ -599,7 +599,7 @@ class TestFsNew(TestAdminCommands):
         # Expecting EINVAL exit status because 'first_data_pool' and 'second_metadata_pool'
         # is already in use with 'first_fs' and 'second_fs'
         try:
-            self.get_ceph_cmd_stdout('fs', 'new', third_fs, first_data_pool, second_metadata_pool)
+            self.run_ceph_cmd('fs', 'new', third_fs, first_data_pool, second_metadata_pool)
         except CommandFailedError as e:
             self.assertEqual(e.exitstatus, errno.EINVAL)
         else:
@@ -612,20 +612,20 @@ class TestFsNew(TestAdminCommands):
 
         # create pool and initialise with rbd
         new_pool = "new_pool"
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', new_pool)
+        self.run_ceph_cmd('osd', 'pool', 'create', new_pool)
         self.ctx.cluster.run(args=['rbd', 'pool', 'init', new_pool])
 
         new_fs = "new_fs"
         new_data_pool = "new_data_pool"
 
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', new_data_pool)
+        self.run_ceph_cmd('osd', 'pool', 'create', new_data_pool)
 
         # try to create new fs 'new_fs' with following configuration
         # metadata pool -> 'new_pool' (already used by rbd app)
         # data pool -> 'new_data_pool'
         # Expecting EINVAL exit status because 'new_pool' is already in use with 'rbd' app
         try:
-            self.get_ceph_cmd_stdout('fs', 'new', new_fs, new_pool, new_data_pool)
+            self.run_ceph_cmd('fs', 'new', new_fs, new_pool, new_data_pool)
         except CommandFailedError as e:
             self.assertEqual(e.exitstatus, errno.EINVAL)
         else:
@@ -638,20 +638,20 @@ class TestFsNew(TestAdminCommands):
 
         # create pool and initialise with rbd
         new_pool = "new_pool"
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', new_pool)
+        self.run_ceph_cmd('osd', 'pool', 'create', new_pool)
         self.ctx.cluster.run(args=['rbd', 'pool', 'init', new_pool])
 
         new_fs = "new_fs"
         new_metadata_pool = "new_metadata_pool"
 
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create', new_metadata_pool)
+        self.run_ceph_cmd('osd', 'pool', 'create', new_metadata_pool)
 
         # try to create new fs 'new_fs' with following configuration
         # metadata pool -> 'new_metadata_pool'
         # data pool -> 'new_pool' (already used by rbd app)
         # Expecting EINVAL exit status because 'new_pool' is already in use with 'rbd' app
         try:
-            self.get_ceph_cmd_stdout('fs', 'new', new_fs, new_metadata_pool, new_pool)
+            self.run_ceph_cmd('fs', 'new', new_fs, new_metadata_pool, new_pool)
         except CommandFailedError as e:
             self.assertEqual(e.exitstatus, errno.EINVAL)
         else:
@@ -1164,17 +1164,17 @@ class TestMirroringCommands(CephFSTestCase):
     MDSS_REQUIRED = 1
 
     def _enable_mirroring(self, fs_name):
-        self.get_ceph_cmd_stdout("fs", "mirror", "enable", fs_name)
+        self.run_ceph_cmd("fs", "mirror", "enable", fs_name)
 
     def _disable_mirroring(self, fs_name):
-        self.get_ceph_cmd_stdout("fs", "mirror", "disable", fs_name)
+        self.run_ceph_cmd("fs", "mirror", "disable", fs_name)
 
     def _add_peer(self, fs_name, peer_spec, remote_fs_name):
         peer_uuid = str(uuid.uuid4())
-        self.get_ceph_cmd_stdout("fs", "mirror", "peer_add", fs_name, peer_uuid, peer_spec, remote_fs_name)
+        self.run_ceph_cmd("fs", "mirror", "peer_add", fs_name, peer_uuid, peer_spec, remote_fs_name)
 
     def _remove_peer(self, fs_name, peer_uuid):
-        self.get_ceph_cmd_stdout("fs", "mirror", "peer_remove", fs_name, peer_uuid)
+        self.run_ceph_cmd("fs", "mirror", "peer_remove", fs_name, peer_uuid)
 
     def _verify_mirroring(self, fs_name, flag_str):
         status = self.fs.status()
qa/tasks/cephfs/test_damage.py
index ce6d7fccbc25a8c0a9eb56450b6417ca1786f87d..a39ccaa9f2960954aa159cf81dbe4ff3538e06b4 100644
@@ -244,7 +244,7 @@ class TestDamage(CephFSTestCase):
             # Reset MDS state
             self.mount_a.umount_wait(force=True)
             self.fs.fail()
-            self.get_ceph_cmd_stdout('mds', 'repaired', '0')
+            self.run_ceph_cmd('mds', 'repaired', '0')
 
             # Reset RADOS pool state
             self.fs.radosm(['import', '-'], stdin=BytesIO(serialized))
@@ -467,7 +467,7 @@ class TestDamage(CephFSTestCase):
         self.fs.radosm(["setomapval", dirfrag_obj, "file_to_be_damaged_head", junk])
 
         # Clean up the damagetable entry
-        self.get_ceph_cmd_stdout(
+        self.run_ceph_cmd(
             'tell', f'mds.{self.fs.get_active_names()[0]}',
             "damage", "rm", f"{damage_id}")
 
@@ -528,7 +528,7 @@ class TestDamage(CephFSTestCase):
         self.assertEqual(damage[0]['damage_type'], "backtrace")
         self.assertEqual(damage[0]['ino'], file1_ino)
 
-        self.get_ceph_cmd_stdout(
+        self.run_ceph_cmd(
             'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
             "damage", "rm", str(damage[0]['id']))
 
@@ -561,7 +561,7 @@ class TestDamage(CephFSTestCase):
             self.assertEqual(damage[1]['ino'], file2_ino)
 
         for entry in damage:
-            self.get_ceph_cmd_stdout(
+            self.run_ceph_cmd(
                 'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
                 "damage", "rm", str(entry['id']))
 
qa/tasks/cephfs/test_data_scan.py
index 404fcc3936567b860ee7a5449015925819a3ec92..63ac60415475ca4b6e6d0eb07e528f2c1f47b352 100644
@@ -428,7 +428,7 @@ class TestDataScan(CephFSTestCase):
         self.fs.data_scan(["scan_links"])
 
         # Mark the MDS repaired
-        self.get_ceph_cmd_stdout('mds', 'repaired', '0')
+        self.run_ceph_cmd('mds', 'repaired', '0')
 
         # Start the MDS
         self.fs.mds_restart()
qa/tasks/cephfs/test_failover.py
index bd4efa2eb1fd675d07e0adb64aa41d70d27136c6..ba2c3f76f323e57851be4b2b6e142d498c107fc5 100644
@@ -445,7 +445,7 @@ class TestFailover(CephFSTestCase):
 
         standbys = self.mds_cluster.get_standby_daemons()
         self.assertGreaterEqual(len(standbys), 1)
-        self.get_ceph_cmd_stdout('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)))
+        self.run_ceph_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)))
 
         # Kill a standby and check for warning
         victim = standbys.pop()
@@ -463,11 +463,11 @@ class TestFailover(CephFSTestCase):
         # Set it one greater than standbys ever seen
         standbys = self.mds_cluster.get_standby_daemons()
         self.assertGreaterEqual(len(standbys), 1)
-        self.get_ceph_cmd_stdout('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)+1))
+        self.run_ceph_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)+1))
         self.wait_for_health("MDS_INSUFFICIENT_STANDBY", self.fs.beacon_timeout)
 
         # Set it to 0
-        self.get_ceph_cmd_stdout('fs', 'set', self.fs.name, 'standby_count_wanted', '0')
+        self.run_ceph_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', '0')
         self.wait_for_health_clear(timeout=30)
 
     def test_discontinuous_mdsmap(self):
@@ -716,8 +716,8 @@ class TestMultiFilesystems(CephFSTestCase):
 
     def setUp(self):
         super(TestMultiFilesystems, self).setUp()
-        self.get_ceph_cmd_stdout("fs", "flag", "set", "enable_multiple",
-                                 "true", "--yes-i-really-mean-it")
+        self.run_ceph_cmd("fs", "flag", "set", "enable_multiple",
+                          "true", "--yes-i-really-mean-it")
 
     def _setup_two(self):
         fs_a = self.mds_cluster.newfs(name="alpha")
@@ -799,7 +799,7 @@ class TestMultiFilesystems(CephFSTestCase):
 
         # Kill fs_a's active MDS, see a standby take over
         self.mds_cluster.mds_stop(original_a)
-        self.get_ceph_cmd_stdout("mds", "fail", original_a)
+        self.run_ceph_cmd("mds", "fail", original_a)
         self.wait_until_equal(lambda: len(fs_a.get_active_names()), 1, 30,
                               reject_fn=lambda v: v > 1)
         # Assert that it's a *different* daemon that has now appeared in the map for fs_a
@@ -807,7 +807,7 @@ class TestMultiFilesystems(CephFSTestCase):
 
         # Kill fs_b's active MDS, see a standby take over
         self.mds_cluster.mds_stop(original_b)
-        self.get_ceph_cmd_stdout("mds", "fail", original_b)
+        self.run_ceph_cmd("mds", "fail", original_b)
         self.wait_until_equal(lambda: len(fs_b.get_active_names()), 1, 30,
                               reject_fn=lambda v: v > 1)
         # Assert that it's a *different* daemon that has now appeared in the map for fs_a
qa/tasks/cephfs/test_fragment.py
index 42df5138f7f88b3a19b14c8244e8513058e7c074..902a53e79f7d01f367fa83a85cb74e1a453c4263 100644
@@ -160,14 +160,13 @@ class TestFragmentation(CephFSTestCase):
             target_files = branch_factor**depth * int(split_size * 1.5)
             create_files = target_files - files_written
 
-            self.get_ceph_cmd_stdout("log",
+            self.run_ceph_cmd("log",
                 "{0} Writing {1} files (depth={2})".format(
                     self.__class__.__name__, create_files, depth
                 ))
             self.mount_a.create_n_files("splitdir/file_{0}".format(depth),
                                         create_files)
-            self.get_ceph_cmd_stdout("log",
-                "{0} Done".format(self.__class__.__name__))
+            self.run_ceph_cmd("log","{0} Done".format(self.__class__.__name__))
 
             files_written += create_files
             log.info("Now have {0} files".format(files_written))
qa/tasks/cephfs/test_fstop.py
index 7d17a4f268c3fba61b24cd33c38359eac4b87965..09896703d81f91616c9fa7c63c124491a1c65dab 100644
@@ -93,8 +93,8 @@ class TestFSTop(CephFSTestCase):
         # umount mount_b, mount another filesystem on it and use --dumpfs filter
         self.mount_b.umount_wait()
 
-        self.get_ceph_cmd_stdout("fs", "flag", "set", "enable_multiple",
-                                 "true", "--yes-i-really-mean-it")
+        self.run_ceph_cmd("fs", "flag", "set", "enable_multiple", "true",
+                          "--yes-i-really-mean-it")
 
         # create a new filesystem
         fs_b = self.mds_cluster.newfs(name=newfs_name)
qa/tasks/cephfs/test_full.py
index b5d2cbca9b2f0688d505077c857387f9b36ebab1..90a65f069ef615b401e6659310bcaa55575e8c5f 100644
@@ -61,8 +61,8 @@ class FullnessTestCase(CephFSTestCase):
         self.assertGreaterEqual(mount_a_initial_epoch, self.initial_osd_epoch)
 
         # Set and unset a flag to cause OSD epoch to increment
-        self.get_ceph_cmd_stdout("osd", "set", "pause")
-        self.get_ceph_cmd_stdout("osd", "unset", "pause")
+        self.run_ceph_cmd("osd", "set", "pause")
+        self.run_ceph_cmd("osd", "unset", "pause")
 
         out = self.get_ceph_cmd_stdout("osd", "dump", "--format=json").strip()
         new_epoch = json.loads(out)['epoch']
@@ -376,8 +376,8 @@ class TestQuotaFull(FullnessTestCase):
         super(TestQuotaFull, self).setUp()
 
         pool_name = self.fs.get_data_pool_name()
-        self.get_ceph_cmd_stdout("osd", "pool", "set-quota", pool_name,
-                                 "max_bytes", f"{self.pool_capacity}")
+        self.run_ceph_cmd("osd", "pool", "set-quota", pool_name,
+                          "max_bytes", f"{self.pool_capacity}")
 
 
 class TestClusterFull(FullnessTestCase):
qa/tasks/cephfs/test_journal_repair.py
index 7561ddee974028002f51209865c47e67d62b1192..365140fd9f60a9f33164c7fedf916ee4934e5f07 100644
@@ -233,8 +233,8 @@ class TestJournalRepair(CephFSTestCase):
         self.fs.table_tool(["0", "reset", "session"])
         self.fs.journal_tool(["journal", "reset"], 0)
         self.fs.erase_mds_objects(1)
-        self.get_ceph_cmd_stdout('fs', 'reset', self.fs.name,
-                                 '--yes-i-really-mean-it')
+        self.run_ceph_cmd('fs', 'reset', self.fs.name,
+                          '--yes-i-really-mean-it')
 
         # Bring an MDS back online, mount a client, and see that we can walk the full
         # filesystem tree again
qa/tasks/cephfs/test_mds_metrics.py
index 296b33859806fdb18f26b437c8c183500036661e..0e824d3d278dc4392c550ce661ee6d513bc90c5d 100644
@@ -404,7 +404,7 @@ class TestMDSMetrics(CephFSTestCase):
         invalid_mds_rank = "1,"
         # try, 'fs perf stat' command with invalid mds_rank
         try:
-            self.get_ceph_cmd_stdout("fs", "perf", "stats", "--mds_rank", invalid_mds_rank)
+            self.run_ceph_cmd("fs", "perf", "stats", "--mds_rank", invalid_mds_rank)
         except CommandFailedError as ce:
             if ce.exitstatus != errno.EINVAL:
                 raise
@@ -415,7 +415,7 @@ class TestMDSMetrics(CephFSTestCase):
         invalid_client_id = "abcd"
         # try, 'fs perf stat' command with invalid client_id
         try:
-            self.get_ceph_cmd_stdout("fs", "perf", "stats", "--client_id", invalid_client_id)
+            self.run_ceph_cmd("fs", "perf", "stats", "--client_id", invalid_client_id)
         except CommandFailedError as ce:
             if ce.exitstatus != errno.EINVAL:
                 raise
@@ -426,7 +426,7 @@ class TestMDSMetrics(CephFSTestCase):
         invalid_client_ip = "1.2.3"
         # try, 'fs perf stat' command with invalid client_ip
         try:
-            self.get_ceph_cmd_stdout("fs", "perf", "stats", "--client_ip", invalid_client_ip)
+            self.run_ceph_cmd("fs", "perf", "stats", "--client_ip", invalid_client_ip)
         except CommandFailedError as ce:
             if ce.exitstatus != errno.EINVAL:
                 raise
@@ -501,8 +501,8 @@ class TestMDSMetrics(CephFSTestCase):
         self.mount_b.umount_wait()
         self.fs.delete_all_filesystems()
 
-        self.get_ceph_cmd_stdout("fs", "flag", "set", "enable_multiple",
-                                 "true", "--yes-i-really-mean-it")
+        self.run_ceph_cmd("fs", "flag", "set", "enable_multiple",
+            "true", "--yes-i-really-mean-it")
 
         # creating filesystem
         fs_a = self._setup_fs(fs_name="fs1")
@@ -569,8 +569,8 @@ class TestMDSMetrics(CephFSTestCase):
         self.mount_a.umount_wait()
         self.mount_b.umount_wait()
 
-        self.get_ceph_cmd_stdout("fs", "flag", "set", "enable_multiple",
-                                 "true", "--yes-i-really-mean-it")
+        self.run_ceph_cmd("fs", "flag", "set", "enable_multiple",
+            "true", "--yes-i-really-mean-it")
 
         # creating filesystem
         fs_b = self._setup_fs(fs_name="fs2")
qa/tasks/cephfs/test_mirroring.py
index 9fb6bb4bc47ff939531bfd5bb8a6772d23e9a9f1..6e57df5d0a846bd92611d39c46303a2de2f6805f 100644
@@ -38,16 +38,16 @@ class TestMirroring(CephFSTestCase):
         super(TestMirroring, self).tearDown()
 
     def enable_mirroring_module(self):
-        self.get_ceph_cmd_stdout("mgr", "module", "enable", TestMirroring.MODULE_NAME)
+        self.run_ceph_cmd("mgr", "module", "enable", TestMirroring.MODULE_NAME)
 
     def disable_mirroring_module(self):
-        self.get_ceph_cmd_stdout("mgr", "module", "disable", TestMirroring.MODULE_NAME)
+        self.run_ceph_cmd("mgr", "module", "disable", TestMirroring.MODULE_NAME)
 
     def enable_mirroring(self, fs_name, fs_id):
         res = self.mirror_daemon_command(f'counter dump for fs: {fs_name}', 'counter', 'dump')
         vbefore = res[TestMirroring.PERF_COUNTER_KEY_NAME_CEPHFS_MIRROR][0]
 
-        self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "enable", fs_name)
+        self.run_ceph_cmd("fs", "snapshot", "mirror", "enable", fs_name)
         time.sleep(10)
         # verify via asok
         res = self.mirror_daemon_command(f'mirror status for fs: {fs_name}',
@@ -68,7 +68,7 @@ class TestMirroring(CephFSTestCase):
         res = self.mirror_daemon_command(f'counter dump for fs: {fs_name}', 'counter', 'dump')
         vbefore = res[TestMirroring.PERF_COUNTER_KEY_NAME_CEPHFS_MIRROR][0]
 
-        self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "disable", fs_name)
+        self.run_ceph_cmd("fs", "snapshot", "mirror", "disable", fs_name)
         time.sleep(10)
         # verify via asok
         try:
@@ -106,9 +106,9 @@ class TestMirroring(CephFSTestCase):
             vbefore = res[TestMirroring.PERF_COUNTER_KEY_NAME_CEPHFS_MIRROR_FS][0]
 
         if remote_fs_name:
-            self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "peer_add", fs_name, peer_spec, remote_fs_name)
+            self.run_ceph_cmd("fs", "snapshot", "mirror", "peer_add", fs_name, peer_spec, remote_fs_name)
         else:
-            self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "peer_add", fs_name, peer_spec)
+            self.run_ceph_cmd("fs", "snapshot", "mirror", "peer_add", fs_name, peer_spec)
         time.sleep(10)
         self.verify_peer_added(fs_name, fs_id, peer_spec, remote_fs_name)
 
@@ -122,7 +122,7 @@ class TestMirroring(CephFSTestCase):
         vbefore = res[TestMirroring.PERF_COUNTER_KEY_NAME_CEPHFS_MIRROR_FS][0]
 
         peer_uuid = self.get_peer_uuid(peer_spec)
-        self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "peer_remove", fs_name, peer_uuid)
+        self.run_ceph_cmd("fs", "snapshot", "mirror", "peer_remove", fs_name, peer_uuid)
         time.sleep(10)
         # verify via asok
         res = self.mirror_daemon_command(f'mirror status for fs: {fs_name}',
@@ -141,8 +141,8 @@ class TestMirroring(CephFSTestCase):
         return outj['token']
 
     def import_peer(self, fs_name, token):
-        self.get_ceph_cmd_stdout("fs", "snapshot", "mirror",
-                                 "peer_bootstrap", "import", fs_name, token)
+        self.run_ceph_cmd("fs", "snapshot", "mirror", "peer_bootstrap",
+                          "import", fs_name, token)
 
     def add_directory(self, fs_name, fs_id, dir_name, check_perf_counter=True):
         if check_perf_counter:
@@ -155,7 +155,7 @@ class TestMirroring(CephFSTestCase):
         dir_count = res['snap_dirs']['dir_count']
         log.debug(f'initial dir_count={dir_count}')
 
-        self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "add", fs_name, dir_name)
+        self.run_ceph_cmd("fs", "snapshot", "mirror", "add", fs_name, dir_name)
 
         time.sleep(10)
         # verify via asok
@@ -179,7 +179,7 @@ class TestMirroring(CephFSTestCase):
         dir_count = res['snap_dirs']['dir_count']
         log.debug(f'initial dir_count={dir_count}')
 
-        self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "remove", fs_name, dir_name)
+        self.run_ceph_cmd("fs", "snapshot", "mirror", "remove", fs_name, dir_name)
 
         time.sleep(10)
         # verify via asok
@@ -381,7 +381,7 @@ class TestMirroring(CephFSTestCase):
 
         # try removing peer
         try:
-            self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "peer_remove", self.primary_fs_name, 'dummy-uuid')
+            self.run_ceph_cmd("fs", "snapshot", "mirror", "peer_remove", self.primary_fs_name, 'dummy-uuid')
         except CommandFailedError as ce:
             if ce.exitstatus != errno.EINVAL:
                 raise RuntimeError(-errno.EINVAL, 'incorrect error code when removing a peer')
@@ -805,7 +805,7 @@ class TestMirroring(CephFSTestCase):
 
         # enable mirroring through mon interface -- this should result in the mirror daemon
         # failing to enable mirroring due to absence of `cephfs_mirror` index object.
-        self.get_ceph_cmd_stdout("fs", "mirror", "enable", self.primary_fs_name)
+        self.run_ceph_cmd("fs", "mirror", "enable", self.primary_fs_name)
 
         with safe_while(sleep=5, tries=10, action='wait for failed state') as proceed:
             while proceed():
@@ -820,7 +820,7 @@ class TestMirroring(CephFSTestCase):
                 except:
                     pass
 
-        self.get_ceph_cmd_stdout("fs", "mirror", "disable", self.primary_fs_name)
+        self.run_ceph_cmd("fs", "mirror", "disable", self.primary_fs_name)
         time.sleep(10)
         # verify via asok
         try:
@@ -842,7 +842,7 @@ class TestMirroring(CephFSTestCase):
         # enable mirroring through mon interface -- this should result in the mirror daemon
         # failing to enable mirroring due to absence of `cephfs_mirror` index object.
 
-        self.get_ceph_cmd_stdout("fs", "mirror", "enable", self.primary_fs_name)
+        self.run_ceph_cmd("fs", "mirror", "enable", self.primary_fs_name)
         # need safe_while since non-failed status pops up as mirroring is restarted
         # internally in mirror daemon.
         with safe_while(sleep=5, tries=20, action='wait for failed state') as proceed:
@@ -873,7 +873,7 @@ class TestMirroring(CephFSTestCase):
         self.assertTrue(res['peers'] == {})
         self.assertTrue(res['snap_dirs']['dir_count'] == 0)
 
-        self.get_ceph_cmd_stdout("fs", "mirror", "disable", self.primary_fs_name)
+        self.run_ceph_cmd("fs", "mirror", "disable", self.primary_fs_name)
         time.sleep(10)
         # verify via asok
         try:
@@ -1024,7 +1024,7 @@ class TestMirroring(CephFSTestCase):
         dir_path_p = "/d0/d1"
         dir_path = "/d0/d1/d2"
 
-        self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "add", self.primary_fs_name, dir_path)
+        self.run_ceph_cmd("fs", "snapshot", "mirror", "add", self.primary_fs_name, dir_path)
 
         time.sleep(10)
         # this uses an undocumented interface to get dirpath map state
@@ -1033,11 +1033,11 @@ class TestMirroring(CephFSTestCase):
         # there are no mirror daemons
         self.assertTrue(res['state'], 'stalled')
 
-        self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "remove", self.primary_fs_name, dir_path)
+        self.run_ceph_cmd("fs", "snapshot", "mirror", "remove", self.primary_fs_name, dir_path)
 
         time.sleep(10)
         try:
-            self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "dirmap", self.primary_fs_name, dir_path)
+            self.run_ceph_cmd("fs", "snapshot", "mirror", "dirmap", self.primary_fs_name, dir_path)
         except CommandFailedError as ce:
             if ce.exitstatus != errno.ENOENT:
                 raise RuntimeError('invalid errno when checking dirmap status for non-existent directory')
@@ -1045,7 +1045,7 @@ class TestMirroring(CephFSTestCase):
             raise RuntimeError('incorrect errno when checking dirmap state for non-existent directory')
 
         # adding a parent directory should be allowed
-        self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "add", self.primary_fs_name, dir_path_p)
+        self.run_ceph_cmd("fs", "snapshot", "mirror", "add", self.primary_fs_name, dir_path_p)
 
         time.sleep(10)
         # however, this directory path should get stalled too
qa/tasks/cephfs/test_misc.py
index e087a328523dff5f5f6344ff45d6272d9e7f3bf7..72468a81361ae6a4c6afb80b648db9eb271b1f1e 100644
@@ -96,16 +96,15 @@ class TestMisc(CephFSTestCase):
 
         self.fs.fail()
 
-        self.get_ceph_cmd_stdout('fs', 'rm', self.fs.name,
-                                 '--yes-i-really-mean-it')
+        self.run_ceph_cmd('fs', 'rm', self.fs.name, '--yes-i-really-mean-it')
 
-        self.get_ceph_cmd_stdout('osd', 'pool', 'delete',
-                                 self.fs.metadata_pool_name,
-                                 self.fs.metadata_pool_name,
-                                 '--yes-i-really-really-mean-it')
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create',
-                                 self.fs.metadata_pool_name,
-                                 '--pg_num_min', str(self.fs.pg_num_min))
+        self.run_ceph_cmd('osd', 'pool', 'delete',
+                          self.fs.metadata_pool_name,
+                          self.fs.metadata_pool_name,
+                          '--yes-i-really-really-mean-it')
+        self.run_ceph_cmd('osd', 'pool', 'create',
+                          self.fs.metadata_pool_name,
+                          '--pg_num_min', str(self.fs.pg_num_min))
 
         # insert a garbage object
         self.fs.radosm(["put", "foo", "-"], stdin=StringIO("bar"))
@@ -119,34 +118,34 @@ class TestMisc(CephFSTestCase):
         self.wait_until_true(lambda: get_pool_df(self.fs, self.fs.metadata_pool_name), timeout=30)
 
         try:
-            self.get_ceph_cmd_stdout('fs', 'new', self.fs.name,
-                                     self.fs.metadata_pool_name,
-                                     data_pool_name)
+            self.run_ceph_cmd('fs', 'new', self.fs.name,
+                              self.fs.metadata_pool_name,
+                              data_pool_name)
         except CommandFailedError as e:
             self.assertEqual(e.exitstatus, errno.EINVAL)
         else:
             raise AssertionError("Expected EINVAL")
 
-        self.get_ceph_cmd_stdout('fs', 'new', self.fs.name,
-                                 self.fs.metadata_pool_name,
-                                 data_pool_name, "--force")
-
-        self.get_ceph_cmd_stdout('fs', 'fail', self.fs.name)
-
-        self.get_ceph_cmd_stdout('fs', 'rm', self.fs.name,
-                                 '--yes-i-really-mean-it'])
-
-        self.get_ceph_cmd_stdout('osd', 'pool', 'delete',
-                                 self.fs.metadata_pool_name,
-                                 self.fs.metadata_pool_name,
-                                 '--yes-i-really-really-mean-it')
-        self.get_ceph_cmd_stdout('osd', 'pool', 'create',
-                                 self.fs.metadata_pool_name,
-                                 '--pg_num_min', str(self.fs.pg_num_min))
-        self.get_ceph_cmd_stdout('fs', 'new', self.fs.name,
-                                 self.fs.metadata_pool_name,
-                                 data_pool_name,
-                                 '--allow_dangerous_metadata_overlay')
+        self.run_ceph_cmd('fs', 'new', self.fs.name,
+                          self.fs.metadata_pool_name,
+                          data_pool_name, "--force")
+
+        self.run_ceph_cmd('fs', 'fail', self.fs.name)
+
+        self.run_ceph_cmd('fs', 'rm', self.fs.name,
+                          '--yes-i-really-mean-it')
+
+        self.run_ceph_cmd('osd', 'pool', 'delete',
+                          self.fs.metadata_pool_name,
+                          self.fs.metadata_pool_name,
+                          '--yes-i-really-really-mean-it')
+        self.run_ceph_cmd('osd', 'pool', 'create',
+                          self.fs.metadata_pool_name,
+                          '--pg_num_min', str(self.fs.pg_num_min))
+        self.run_ceph_cmd('fs', 'new', self.fs.name,
+                          self.fs.metadata_pool_name,
+                          data_pool_name,
+                          '--allow_dangerous_metadata_overlay')
 
     def test_cap_revoke_nonresponder(self):
         """
qa/tasks/cephfs/test_multimds_misc.py
index 09847b6ea6d10ee44d43ff0a53740687de800077..e0e46fb24c09d19a7885134059fe25fea4f771e1 100644
@@ -116,7 +116,7 @@ class TestScrub2(CephFSTestCase):
 
         def expect_exdev(cmd, mds):
             try:
-                self.get_ceph_cmd_stdout('tell', 'mds.{0}'.format(mds), *cmd)
+                self.run_ceph_cmd('tell', 'mds.{0}'.format(mds), *cmd)
             except CommandFailedError as e:
                 if e.exitstatus == errno.EXDEV:
                     pass
qa/tasks/cephfs/test_recovery_fs.py
index e91a3f1913d67b4fd8dc70c3760d3f2f9856f07b..17669c0f2a881ddb0e41c9c23449dd25ac66ff8b 100644
@@ -27,7 +27,7 @@ class TestFSRecovery(CephFSTestCase):
         # recovered/intact
         self.fs.rm()
         # Recreate file system with pool and previous fscid
-        self.get_ceph_cmd_stdout(
+        self.run_ceph_cmd(
             'fs', 'new', self.fs.name, metadata_pool, data_pool,
             '--recover', '--force', '--fscid', f'{self.fs.id}')
         self.fs.set_joinable()
qa/tasks/cephfs/test_recovery_pool.py
index bf815547488c3662947f4ba36331c04d46345837..7aef2822985907b24368f8e70cba6772a0db5daa 100644
@@ -119,7 +119,7 @@ class TestRecoveryPool(CephFSTestCase):
         recovery_fs.create(recover=True, metadata_overlay=True)
 
         recovery_pool = recovery_fs.get_metadata_pool_name()
-        self.get_ceph_cmd_stdout('-s')
+        self.run_ceph_cmd('-s')
 
         # Reset the MDS map in case multiple ranks were in play: recovery procedure
         # only understands how to rebuild metadata under rank 0
qa/tasks/cephfs/test_scrub_checks.py
index bae048444f303f1bb8d3e3e5febcaee29720e348..f17a6ceb11537878b193239817b259505007a550 100644
@@ -281,7 +281,7 @@ class TestScrubChecks(CephFSTestCase):
             all_damage = self.fs.rank_tell(["damage", "ls"], mds_rank)
             damage = [d for d in all_damage if d['ino'] == ino and d['damage_type'] == dtype]
             for d in damage:
-                self.get_ceph_cmd_stdout(
+                self.run_ceph_cmd(
                     'tell', f'mds.{self.fs.get_active_names()[mds_rank]}',
                     "damage", "rm", str(d['id']))
             return len(damage) > 0
qa/tasks/cephfs/test_strays.py
index 2b31d00c541387e950ffc73c889cea8736d7c6bf..11701dc28368e0e9d165d12154975dc93720371a 100644
@@ -651,8 +651,8 @@ class TestStrays(CephFSTestCase):
         self.assertFalse(self._is_stopped(1))
 
         # Permit the daemon to start purging again
-        self.get_ceph_cmd_stdout('tell', 'mds.{0}'.format(rank_1_id),
-                                 'injectargs', "--mds_max_purge_files 100")
+        self.run_ceph_cmd('tell', 'mds.{0}'.format(rank_1_id),
+                          'injectargs', "--mds_max_purge_files 100")
 
         # It should now proceed through shutdown
         self.fs.wait_for_daemons(timeout=120)
qa/tasks/cephfs/test_volumes.py
index 0700fc39d9afadcdf9ab1ea9d956065831b868cb..612a4ef41d4bb66736eb75d7169b78fc23d77bd0 100644
@@ -2877,11 +2877,11 @@ class TestSubvolumes(TestVolumesHelper):
         group = self._gen_subvol_grp_name()
 
         # Create auth_id
-        self.get_ceph_cmd_stdout(
+        self.run_ceph_cmd(
             "auth", "get-or-create", "client.guest1",
             "mds", "allow *",
             "osd", "allow rw",
-            "mon", "allow *"]
+            "mon", "allow *"
         )
 
         auth_id = "guest1"
@@ -2906,7 +2906,7 @@ class TestSubvolumes(TestVolumesHelper):
             self.fail("expected the 'fs subvolume authorize' command to fail")
 
         # clean up
-        self.get_ceph_cmd_stdout("auth", "rm", "client.guest1")
+        self.run_ceph_cmd("auth", "rm", "client.guest1")
         self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
         self._fs_cmd("subvolumegroup", "rm", self.volname, group)
 
@@ -2921,7 +2921,7 @@ class TestSubvolumes(TestVolumesHelper):
         group = self._gen_subvol_grp_name()
 
         # Create auth_id
-        self.get_ceph_cmd_stdout(
+        self.run_ceph_cmd(
             "auth", "get-or-create", "client.guest1",
             "mds", "allow *",
             "osd", "allow rw",
@@ -2949,7 +2949,7 @@ class TestSubvolumes(TestVolumesHelper):
         # clean up
         self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id,
                      "--group_name", group)
-        self.get_ceph_cmd_stdout("auth", "rm", "client.guest1")
+        self.run_ceph_cmd("auth", "rm", "client.guest1")
         self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
         self._fs_cmd("subvolumegroup", "rm", self.volname, group)
 
@@ -3056,7 +3056,7 @@ class TestSubvolumes(TestVolumesHelper):
         # clean up
         self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id, "--group_name", group)
         guest_mount.umount_wait()
-        self.get_ceph_cmd_stdout("auth", "rm", "client.guest1")
+        self.run_ceph_cmd("auth", "rm", "client.guest1")
         self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
         self._fs_cmd("subvolumegroup", "rm", self.volname, group)
 
@@ -3112,7 +3112,7 @@ class TestSubvolumes(TestVolumesHelper):
         # clean up
         self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, "guest1", "--group_name", group)
         guest_mount.umount_wait()
-        self.get_ceph_cmd_stdout("auth", "rm", "client.guest1")
+        self.run_ceph_cmd("auth", "rm", "client.guest1")
         self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
         self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
         self._fs_cmd("subvolumegroup", "rm", self.volname, group)
@@ -3187,7 +3187,7 @@ class TestSubvolumes(TestVolumesHelper):
         self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, auth_id, "--group_name", group)
         self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, auth_id, "--group_name", group)
         guest_mount.umount_wait()
-        self.get_ceph_cmd_stdout("auth", "rm", "client.guest1")
+        self.run_ceph_cmd("auth", "rm", "client.guest1")
         self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
         self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
         self._fs_cmd("subvolumegroup", "rm", self.volname, group)
@@ -3259,7 +3259,7 @@ class TestSubvolumes(TestVolumesHelper):
         # clean up
         self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, auth_id, "--group_name", group)
         guest_mount.umount_wait()
-        self.get_ceph_cmd_stdout("auth", "rm", "client.guest1")
+        self.run_ceph_cmd("auth", "rm", "client.guest1")
         self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
         self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
         self._fs_cmd("subvolumegroup", "rm", self.volname, group)
@@ -7172,8 +7172,8 @@ class TestSubvolumeSnapshotClones(TestVolumesHelper):
         new_pool = "new_pool"
         self.fs.add_data_pool(new_pool)
 
-        self.get_ceph_cmd_stdout("osd", "pool", "set-quota", new_pool,
-                                 "max_bytes", f"{pool_capacity // 4}")
+        self.run_ceph_cmd("osd", "pool", "set-quota", new_pool,
+                          "max_bytes", f"{pool_capacity // 4}")
 
         # schedule a clone
         self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1, "--pool_layout", new_pool)