git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
qa/cephfs: add and use get_ceph_cmd_stdout()
author Rishabh Dave <ridave@redhat.com>
Thu, 16 Mar 2023 10:02:39 +0000 (15:32 +0530)
committer Rishabh Dave <ridave@redhat.com>
Wed, 3 Apr 2024 17:31:48 +0000 (23:01 +0530)
Add the method get_ceph_cmd_stdout() to class CephFSTestCase so that one
doesn't have to type something as long as
"self.mds_cluster.mon_manager.raw_cluster_cmd()" just to execute a
command and get its output. Also delete CephFSTestCase.run_cluster_cmd()
and replace its uses.
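
A minimal usage sketch (mirroring the calls converted below in, for
example, qa/tasks/cephfs/test_admin.py):

    # old: reach through the cluster objects and parse the stdout it returns
    mdsmap = json.loads(self.fs.mon_manager.raw_cluster_cmd(
        "fs", "status", "--format=json"))["mdsmap"]

    # new: the RunCephCmd mixin captures stdout in a StringIO and returns
    # its value; arguments may be passed as separate strings or as one list
    mdsmap = json.loads(self.get_ceph_cmd_stdout(
        "fs", "status", "--format=json"))["mdsmap"]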

Signed-off-by: Rishabh Dave <ridave@redhat.com>
(cherry picked from commit 13168834e374120d9c319a120a35768ddbc70de5)

Conflicts:
qa/tasks/cephfs/caps_helper.py
- This file is very different in Reef.
qa/tasks/cephfs/test_mirroring.py
- Commit e4dd0e41a3a0 was not present on main when this change was
  written, but it is now present on main as well as on Reef, which
  leads to a conflict.
- On the Reef branch, the line before the one patched by this commit
  therefore differed, creating a conflict when the PR branch for this
  commit series was rebased on the latest Reef.

23 files changed:
qa/tasks/cephfs/cephfs_test_case.py
qa/tasks/cephfs/test_admin.py
qa/tasks/cephfs/test_damage.py
qa/tasks/cephfs/test_data_scan.py
qa/tasks/cephfs/test_failover.py
qa/tasks/cephfs/test_fragment.py
qa/tasks/cephfs/test_fstop.py
qa/tasks/cephfs/test_full.py
qa/tasks/cephfs/test_journal_repair.py
qa/tasks/cephfs/test_mantle.py
qa/tasks/cephfs/test_mds_metrics.py
qa/tasks/cephfs/test_mirroring.py
qa/tasks/cephfs/test_misc.py
qa/tasks/cephfs/test_multimds_misc.py
qa/tasks/cephfs/test_nfs.py
qa/tasks/cephfs/test_recovery_fs.py
qa/tasks/cephfs/test_recovery_pool.py
qa/tasks/cephfs/test_scrub_checks.py
qa/tasks/cephfs/test_sessionmap.py
qa/tasks/cephfs/test_snap_schedules.py
qa/tasks/cephfs/test_strays.py
qa/tasks/cephfs/test_volumes.py
qa/tasks/cephfs/xfstests_dev.py

qa/tasks/cephfs/cephfs_test_case.py
index 80797be421382e17b8f143a41f7f811c0f4680c8..7c9cb6aae472ef0c55f5068ddd4e78a7b91c9722 100644 (file)
@@ -2,6 +2,7 @@ import json
 import logging
 import os
 import re
+from io import StringIO
 
 from tasks.ceph_test_case import CephTestCase
 
@@ -69,6 +70,14 @@ class RunCephCmd:
             kwargs['args'] = args
         return self.run_ceph_cmd(**kwargs).exitstatus
 
+    def get_ceph_cmd_stdout(self, *args, **kwargs):
+        if kwargs.get('args') is None and args:
+            if len(args) == 1:
+                args = args[0]
+            kwargs['args'] = args
+        kwargs['stdout'] = kwargs.pop('stdout', StringIO())
+        return self.run_ceph_cmd(**kwargs).stdout.getvalue()
+
 
 class CephFSTestCase(CephTestCase, RunCephCmd):
     """
@@ -115,15 +124,15 @@ class CephFSTestCase(CephTestCase, RunCephCmd):
         except CommandFailedError:
             # Fallback for older Ceph cluster
             try:
-                blocklist = json.loads(self.mds_cluster.mon_manager.raw_cluster_cmd("osd",
-                                      "dump", "--format=json-pretty"))['blocklist']
+                blocklist = json.loads(self.get_ceph_cmd_stdout("osd",
+                    "dump", "--format=json-pretty"))['blocklist']
                 log.info(f"Removing {len(blocklist)} blocklist entries")
                 for addr, blocklisted_at in blocklist.items():
                     self.run_ceph_cmd("osd", "blocklist", "rm", addr)
             except KeyError:
                # Fallback for even older Ceph clusters, which use 'blacklist' instead.
-                blacklist = json.loads(self.mds_cluster.mon_manager.raw_cluster_cmd("osd",
-                                      "dump", "--format=json-pretty"))['blacklist']
+                blacklist = json.loads(self.get_ceph_cmd_stdout("osd",
+                    "dump", "--format=json-pretty"))['blacklist']
                 log.info(f"Removing {len(blacklist)} blacklist entries")
                 for addr, blocklisted_at in blacklist.items():
                     self.run_ceph_cmd("osd", "blacklist", "rm", addr)
@@ -253,9 +262,8 @@ class CephFSTestCase(CephTestCase, RunCephCmd):
         """
         Convenience wrapper on "ceph auth ls"
         """
-        return json.loads(self.mds_cluster.mon_manager.raw_cluster_cmd(
-            "auth", "ls", "--format=json-pretty"
-        ))['auth_dump']
+        return json.loads(self.get_ceph_cmd_stdout("auth", "ls",
+            "--format=json-pretty"))['auth_dump']
 
     def assert_session_count(self, expected, ls_data=None, mds_id=None):
         if ls_data is None:
qa/tasks/cephfs/test_admin.py
index 5a794d0776ace7af5f2b3c2bca709d7b4eeb957a..862aeef5bdf8426bb75072ac007b66f687adffd4 100644 (file)
@@ -77,18 +77,18 @@ class TestAdminCommands(CephFSTestCase):
     MDSS_REQUIRED = 1
 
     def check_pool_application_metadata_key_value(self, pool, app, key, value):
-        output = self.fs.mon_manager.raw_cluster_cmd(
+        output = self.get_ceph_cmd_stdout(
             'osd', 'pool', 'application', 'get', pool, app, key)
         self.assertEqual(str(output.strip()), value)
 
     def setup_ec_pools(self, n, metadata=True, overwrites=True):
         if metadata:
-            self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', n+"-meta", "8")
+            self.get_ceph_cmd_stdout('osd', 'pool', 'create', n+"-meta", "8")
         cmd = ['osd', 'erasure-code-profile', 'set', n+"-profile", "m=2", "k=2", "crush-failure-domain=osd"]
-        self.fs.mon_manager.raw_cluster_cmd(*cmd)
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', n+"-data", "8", "erasure", n+"-profile")
+        self.get_ceph_cmd_stdout(cmd)
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create', n+"-data", "8", "erasure", n+"-profile")
         if overwrites:
-            self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'set', n+"-data", 'allow_ec_overwrites', 'true')
+            self.get_ceph_cmd_stdout('osd', 'pool', 'set', n+"-data", 'allow_ec_overwrites', 'true')
 
 @classhook('_add_valid_tell')
 class TestValidTell(TestAdminCommands):
@@ -129,13 +129,13 @@ class TestFsStatus(TestAdminCommands):
         That `ceph fs status` command functions.
         """
 
-        s = self.fs.mon_manager.raw_cluster_cmd("fs", "status")
+        s = self.get_ceph_cmd_stdout("fs", "status")
         self.assertTrue("active" in s)
 
-        mdsmap = json.loads(self.fs.mon_manager.raw_cluster_cmd("fs", "status", "--format=json-pretty"))["mdsmap"]
+        mdsmap = json.loads(self.get_ceph_cmd_stdout("fs", "status", "--format=json-pretty"))["mdsmap"]
         self.assertEqual(mdsmap[0]["state"], "active")
 
-        mdsmap = json.loads(self.fs.mon_manager.raw_cluster_cmd("fs", "status", "--format=json"))["mdsmap"]
+        mdsmap = json.loads(self.get_ceph_cmd_stdout("fs", "status", "--format=json"))["mdsmap"]
         self.assertEqual(mdsmap[0]["state"], "active")
 
 
@@ -157,7 +157,7 @@ class TestAddDataPool(TestAdminCommands):
         That the application metadata set on a newly added data pool is as expected.
         """
         pool_name = "foo"
-        mon_cmd = self.fs.mon_manager.raw_cluster_cmd
+        mon_cmd = self.get_ceph_cmd_stdout
         mon_cmd('osd', 'pool', 'create', pool_name, '--pg_num_min',
                 str(self.fs.pg_num_min))
         # Check whether https://tracker.ceph.com/issues/43061 is fixed
@@ -201,22 +201,22 @@ class TestAddDataPool(TestAdminCommands):
         first_fs = "first_fs"
         first_metadata_pool = "first_metadata_pool"
         first_data_pool = "first_data_pool"
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
-        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_metadata_pool)
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_data_pool)
+        self.get_ceph_cmd_stdout('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
 
         # create second data pool, metadata pool and add with filesystem
         second_fs = "second_fs"
         second_metadata_pool = "second_metadata_pool"
         second_data_pool = "second_data_pool"
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)
-        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_metadata_pool)
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_data_pool)
+        self.get_ceph_cmd_stdout('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
 
         # try to add 'first_data_pool' with 'second_fs'
         # Expecting EINVAL exit status because 'first_data_pool' is already in use with 'first_fs'
         try:
-            self.fs.mon_manager.raw_cluster_cmd('fs', 'add_data_pool', second_fs, first_data_pool)
+            self.get_ceph_cmd_stdout('fs', 'add_data_pool', second_fs, first_data_pool)
         except CommandFailedError as e:
             self.assertEqual(e.exitstatus, errno.EINVAL)
         else:
@@ -231,23 +231,23 @@ class TestAddDataPool(TestAdminCommands):
         first_fs = "first_fs"
         first_metadata_pool = "first_metadata_pool"
         first_data_pool = "first_data_pool"
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
-        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_metadata_pool)
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_data_pool)
+        self.get_ceph_cmd_stdout('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
 
         # create second data pool, metadata pool and add with filesystem
         second_fs = "second_fs"
         second_metadata_pool = "second_metadata_pool"
         second_data_pool = "second_data_pool"
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)
-        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_metadata_pool)
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_data_pool)
+        self.get_ceph_cmd_stdout('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
 
         # try to add 'second_metadata_pool' with 'first_fs' as a data pool
         # Expecting EINVAL exit status because 'second_metadata_pool'
         # is already in use with 'second_fs' as a metadata pool
         try:
-            self.fs.mon_manager.raw_cluster_cmd('fs', 'add_data_pool', first_fs, second_metadata_pool)
+            self.get_ceph_cmd_stdout('fs', 'add_data_pool', first_fs, second_metadata_pool)
         except CommandFailedError as e:
             self.assertEqual(e.exitstatus, errno.EINVAL)
         else:
@@ -264,10 +264,8 @@ class TestFsNew(TestAdminCommands):
         metapoolname, datapoolname = n+'-testmetapool', n+'-testdatapool'
         badname = n+'badname@#'
 
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
-                                            n+metapoolname)
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
-                                            n+datapoolname)
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create', n+metapoolname)
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create', n+datapoolname)
 
         # test that fsname not with "goodchars" fails
         args = ['fs', 'new', badname, metapoolname, datapoolname]
@@ -275,12 +273,12 @@ class TestFsNew(TestAdminCommands):
                                  check_status=False)
         self.assertIn('invalid chars', proc.stderr.getvalue().lower())
 
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'rm', metapoolname,
-                                            metapoolname,
-                                            '--yes-i-really-really-mean-it-not-faking')
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'rm', datapoolname,
-                                            datapoolname,
-                                            '--yes-i-really-really-mean-it-not-faking')
+        self.get_ceph_cmd_stdout('osd', 'pool', 'rm', metapoolname,
+                                 metapoolname,
+                                 '--yes-i-really-really-mean-it-not-faking')
+        self.get_ceph_cmd_stdout('osd', 'pool', 'rm', datapoolname,
+                                 datapoolname,
+                                 '--yes-i-really-really-mean-it-not-faking')
 
     def test_new_default_ec(self):
         """
@@ -292,7 +290,7 @@ class TestFsNew(TestAdminCommands):
         n = "test_new_default_ec"
         self.setup_ec_pools(n)
         try:
-            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data")
+            self.get_ceph_cmd_stdout('fs', 'new', n, n+"-meta", n+"-data")
         except CommandFailedError as e:
             if e.exitstatus == 22:
                 pass
@@ -310,7 +308,7 @@ class TestFsNew(TestAdminCommands):
         self.mds_cluster.delete_all_filesystems()
         n = "test_new_default_ec_force"
         self.setup_ec_pools(n)
-        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
+        self.get_ceph_cmd_stdout('fs', 'new', n, n+"-meta", n+"-data", "--force")
 
     def test_new_default_ec_no_overwrite(self):
         """
@@ -322,7 +320,7 @@ class TestFsNew(TestAdminCommands):
         n = "test_new_default_ec_no_overwrite"
         self.setup_ec_pools(n, overwrites=False)
         try:
-            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data")
+            self.get_ceph_cmd_stdout('fs', 'new', n, n+"-meta", n+"-data")
         except CommandFailedError as e:
             if e.exitstatus == 22:
                 pass
@@ -332,7 +330,7 @@ class TestFsNew(TestAdminCommands):
             raise RuntimeError("expected failure")
         # and even with --force !
         try:
-            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
+            self.get_ceph_cmd_stdout('fs', 'new', n, n+"-meta", n+"-data", "--force")
         except CommandFailedError as e:
             if e.exitstatus == 22:
                 pass
@@ -350,7 +348,7 @@ class TestFsNew(TestAdminCommands):
         fs_name = "test_fs_new_pool_application"
         keys = ['metadata', 'data']
         pool_names = [fs_name+'-'+key for key in keys]
-        mon_cmd = self.fs.mon_manager.raw_cluster_cmd
+        mon_cmd = self.get_ceph_cmd_stdout
         for p in pool_names:
             mon_cmd('osd', 'pool', 'create', p, '--pg_num_min', str(self.fs.pg_num_min))
             mon_cmd('osd', 'pool', 'application', 'enable', p, 'cephfs')
@@ -434,13 +432,13 @@ class TestFsNew(TestAdminCommands):
         first_fs = "first_fs"
         first_metadata_pool = "first_metadata_pool"
         first_data_pool = "first_data_pool"
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
-        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_metadata_pool)
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_data_pool)
+        self.get_ceph_cmd_stdout('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
 
         second_fs = "second_fs"
         second_data_pool = "second_data_pool"
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_data_pool)
 
         # try to create new fs 'second_fs' with following configuration
         # metadata pool -> 'first_metadata_pool'
@@ -448,7 +446,7 @@ class TestFsNew(TestAdminCommands):
         # Expecting EINVAL exit status because 'first_metadata_pool'
         # is already in use with 'first_fs'
         try:
-            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, first_metadata_pool, second_data_pool)
+            self.get_ceph_cmd_stdout('fs', 'new', second_fs, first_metadata_pool, second_data_pool)
         except CommandFailedError as e:
             self.assertEqual(e.exitstatus, errno.EINVAL)
         else:
@@ -463,13 +461,13 @@ class TestFsNew(TestAdminCommands):
         first_fs = "first_fs"
         first_metadata_pool = "first_metadata_pool"
         first_data_pool = "first_data_pool"
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
-        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_metadata_pool)
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_data_pool)
+        self.get_ceph_cmd_stdout('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
 
         second_fs = "second_fs"
         second_metadata_pool = "second_metadata_pool"
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_metadata_pool)
 
         # try to create new fs 'second_fs' with following configuration
         # metadata pool -> 'second_metadata_pool'
@@ -477,7 +475,7 @@ class TestFsNew(TestAdminCommands):
         # Expecting EINVAL exit status because 'first_data_pool'
         # is already in use with 'first_fs'
         try:
-            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, first_data_pool)
+            self.get_ceph_cmd_stdout('fs', 'new', second_fs, second_metadata_pool, first_data_pool)
         except CommandFailedError as e:
             self.assertEqual(e.exitstatus, errno.EINVAL)
         else:
@@ -492,9 +490,9 @@ class TestFsNew(TestAdminCommands):
         first_fs = "first_fs"
         first_metadata_pool = "first_metadata_pool"
         first_data_pool = "first_data_pool"
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
-        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_metadata_pool)
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_data_pool)
+        self.get_ceph_cmd_stdout('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
 
         second_fs = "second_fs"
 
@@ -504,7 +502,7 @@ class TestFsNew(TestAdminCommands):
         # Expecting EINVAL exit status because 'first_metadata_pool' and 'first_data_pool'
         # is already in use with 'first_fs'
         try:
-            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, first_metadata_pool, first_data_pool)
+            self.get_ceph_cmd_stdout('fs', 'new', second_fs, first_metadata_pool, first_data_pool)
         except CommandFailedError as e:
             self.assertEqual(e.exitstatus, errno.EINVAL)
         else:
@@ -519,17 +517,17 @@ class TestFsNew(TestAdminCommands):
         first_fs = "first_fs"
         first_metadata_pool = "first_metadata_pool"
         first_data_pool = "first_data_pool"
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
-        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_metadata_pool)
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_data_pool)
+        self.get_ceph_cmd_stdout('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
 
         # create second data pool, metadata pool and add with filesystem
         second_fs = "second_fs"
         second_metadata_pool = "second_metadata_pool"
         second_data_pool = "second_data_pool"
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)
-        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_metadata_pool)
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_data_pool)
+        self.get_ceph_cmd_stdout('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
 
         third_fs = "third_fs"
 
@@ -539,7 +537,7 @@ class TestFsNew(TestAdminCommands):
         # Expecting EINVAL exit status because 'first_metadata_pool' and 'second_data_pool'
         # is already in use with 'first_fs' and 'second_fs'
         try:
-            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', third_fs, first_metadata_pool, second_data_pool)
+            self.get_ceph_cmd_stdout('fs', 'new', third_fs, first_metadata_pool, second_data_pool)
         except CommandFailedError as e:
             self.assertEqual(e.exitstatus, errno.EINVAL)
         else:
@@ -554,9 +552,9 @@ class TestFsNew(TestAdminCommands):
         first_fs = "first_fs"
         first_metadata_pool = "first_metadata_pool"
         first_data_pool = "first_data_pool"
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
-        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_metadata_pool)
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_data_pool)
+        self.get_ceph_cmd_stdout('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
 
         second_fs = "second_fs"
 
@@ -566,7 +564,7 @@ class TestFsNew(TestAdminCommands):
         # Expecting EINVAL exit status because 'first_data_pool' and 'first_metadata_pool'
         # is already in use with 'first_fs'
         try:
-            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, first_data_pool, first_metadata_pool)
+            self.get_ceph_cmd_stdout('fs', 'new', second_fs, first_data_pool, first_metadata_pool)
         except CommandFailedError as e:
             self.assertEqual(e.exitstatus, errno.EINVAL)
         else:
@@ -581,17 +579,17 @@ class TestFsNew(TestAdminCommands):
         first_fs = "first_fs"
         first_metadata_pool = "first_metadata_pool"
         first_data_pool = "first_data_pool"
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_metadata_pool)
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', first_data_pool)
-        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_metadata_pool)
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create', first_data_pool)
+        self.get_ceph_cmd_stdout('fs', 'new', first_fs, first_metadata_pool, first_data_pool)
 
         # create second data pool, metadata pool and add with filesystem
         second_fs = "second_fs"
         second_metadata_pool = "second_metadata_pool"
         second_data_pool = "second_data_pool"
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_metadata_pool)
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', second_data_pool)
-        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_metadata_pool)
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create', second_data_pool)
+        self.get_ceph_cmd_stdout('fs', 'new', second_fs, second_metadata_pool, second_data_pool)
 
         third_fs = "third_fs"
 
@@ -601,7 +599,7 @@ class TestFsNew(TestAdminCommands):
         # Expecting EINVAL exit status because 'first_data_pool' and 'second_metadata_pool'
         # is already in use with 'first_fs' and 'second_fs'
         try:
-            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', third_fs, first_data_pool, second_metadata_pool)
+            self.get_ceph_cmd_stdout('fs', 'new', third_fs, first_data_pool, second_metadata_pool)
         except CommandFailedError as e:
             self.assertEqual(e.exitstatus, errno.EINVAL)
         else:
@@ -614,20 +612,20 @@ class TestFsNew(TestAdminCommands):
 
         # create pool and initialise with rbd
         new_pool = "new_pool"
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', new_pool)
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create', new_pool)
         self.ctx.cluster.run(args=['rbd', 'pool', 'init', new_pool])
 
         new_fs = "new_fs"
         new_data_pool = "new_data_pool"
 
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', new_data_pool)
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create', new_data_pool)
 
         # try to create new fs 'new_fs' with following configuration
         # metadata pool -> 'new_pool' (already used by rbd app)
         # data pool -> 'new_data_pool'
         # Expecting EINVAL exit status because 'new_pool' is already in use with 'rbd' app
         try:
-            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', new_fs, new_pool, new_data_pool)
+            self.get_ceph_cmd_stdout('fs', 'new', new_fs, new_pool, new_data_pool)
         except CommandFailedError as e:
             self.assertEqual(e.exitstatus, errno.EINVAL)
         else:
@@ -640,20 +638,20 @@ class TestFsNew(TestAdminCommands):
 
         # create pool and initialise with rbd
         new_pool = "new_pool"
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', new_pool)
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create', new_pool)
         self.ctx.cluster.run(args=['rbd', 'pool', 'init', new_pool])
 
         new_fs = "new_fs"
         new_metadata_pool = "new_metadata_pool"
 
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', new_metadata_pool)
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create', new_metadata_pool)
 
         # try to create new fs 'new_fs' with following configuration
         # metadata pool -> 'new_metadata_pool'
         # data pool -> 'new_pool' (already used by rbd app)
         # Expecting EINVAL exit status because 'new_pool' is already in use with 'rbd' app
         try:
-            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', new_fs, new_metadata_pool, new_pool)
+            self.get_ceph_cmd_stdout('fs', 'new', new_fs, new_metadata_pool, new_pool)
         except CommandFailedError as e:
             self.assertEqual(e.exitstatus, errno.EINVAL)
         else:
@@ -904,13 +902,13 @@ class TestRequiredClientFeatures(CephFSTestCase):
         """
 
         def is_required(index):
-            out = self.fs.mon_manager.raw_cluster_cmd('fs', 'get', self.fs.name, '--format=json-pretty')
+            out = self.get_ceph_cmd_stdout('fs', 'get', self.fs.name, '--format=json-pretty')
             features = json.loads(out)['mdsmap']['required_client_features']
             if "feature_{0}".format(index) in features:
                 return True;
             return False;
 
-        features = json.loads(self.fs.mon_manager.raw_cluster_cmd('fs', 'feature', 'ls', '--format=json-pretty'))
+        features = json.loads(self.get_ceph_cmd_stdout('fs', 'feature', 'ls', '--format=json-pretty'))
         self.assertGreater(len(features), 0);
 
         for f in features:
@@ -1116,7 +1114,7 @@ class TestConfigCommands(CephFSTestCase):
 
         names = self.fs.get_rank_names()
         for n in names:
-            s = self.fs.mon_manager.raw_cluster_cmd("config", "show", "mds."+n)
+            s = self.get_ceph_cmd_stdout("config", "show", "mds."+n)
             self.assertTrue("NAME" in s)
             self.assertTrue("mon_host" in s)
 
@@ -1166,17 +1164,17 @@ class TestMirroringCommands(CephFSTestCase):
     MDSS_REQUIRED = 1
 
     def _enable_mirroring(self, fs_name):
-        self.fs.mon_manager.raw_cluster_cmd("fs", "mirror", "enable", fs_name)
+        self.get_ceph_cmd_stdout("fs", "mirror", "enable", fs_name)
 
     def _disable_mirroring(self, fs_name):
-        self.fs.mon_manager.raw_cluster_cmd("fs", "mirror", "disable", fs_name)
+        self.get_ceph_cmd_stdout("fs", "mirror", "disable", fs_name)
 
     def _add_peer(self, fs_name, peer_spec, remote_fs_name):
         peer_uuid = str(uuid.uuid4())
-        self.fs.mon_manager.raw_cluster_cmd("fs", "mirror", "peer_add", fs_name, peer_uuid, peer_spec, remote_fs_name)
+        self.get_ceph_cmd_stdout("fs", "mirror", "peer_add", fs_name, peer_uuid, peer_spec, remote_fs_name)
 
     def _remove_peer(self, fs_name, peer_uuid):
-        self.fs.mon_manager.raw_cluster_cmd("fs", "mirror", "peer_remove", fs_name, peer_uuid)
+        self.get_ceph_cmd_stdout("fs", "mirror", "peer_remove", fs_name, peer_uuid)
 
     def _verify_mirroring(self, fs_name, flag_str):
         status = self.fs.status()
qa/tasks/cephfs/test_damage.py
index bfaa23453b0fa5f1c8dcaa08663f51a39d859758..ce6d7fccbc25a8c0a9eb56450b6417ca1786f87d 100644 (file)
@@ -244,7 +244,7 @@ class TestDamage(CephFSTestCase):
             # Reset MDS state
             self.mount_a.umount_wait(force=True)
             self.fs.fail()
-            self.fs.mon_manager.raw_cluster_cmd('mds', 'repaired', '0')
+            self.get_ceph_cmd_stdout('mds', 'repaired', '0')
 
             # Reset RADOS pool state
             self.fs.radosm(['import', '-'], stdin=BytesIO(serialized))
@@ -355,8 +355,9 @@ class TestDamage(CephFSTestCase):
                 # EIOs mean something handled by DamageTable: assert that it has
                 # been populated
                 damage = json.loads(
-                    self.fs.mon_manager.raw_cluster_cmd(
-                        'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]), "damage", "ls", '--format=json-pretty'))
+                    self.get_ceph_cmd_stdout(
+                        'tell', f'mds.{self.fs.get_active_names()[0]}',
+                        "damage", "ls", '--format=json-pretty'))
                 if len(damage) == 0:
                     results[mutation] = EIO_NO_DAMAGE
 
@@ -416,8 +417,8 @@ class TestDamage(CephFSTestCase):
 
         # The fact that there is damage should have been recorded
         damage = json.loads(
-            self.fs.mon_manager.raw_cluster_cmd(
-                'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
+            self.get_ceph_cmd_stdout(
+                'tell', f'mds.{self.fs.get_active_names()[0]}',
                 "damage", "ls", '--format=json-pretty'))
         self.assertEqual(len(damage), 1)
         damage_id = damage[0]['id']
@@ -466,9 +467,9 @@ class TestDamage(CephFSTestCase):
         self.fs.radosm(["setomapval", dirfrag_obj, "file_to_be_damaged_head", junk])
 
         # Clean up the damagetable entry
-        self.fs.mon_manager.raw_cluster_cmd(
-            'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
-            "damage", "rm", "{did}".format(did=damage_id))
+        self.get_ceph_cmd_stdout(
+            'tell', f'mds.{self.fs.get_active_names()[0]}',
+            "damage", "rm", f"{damage_id}")
 
         # Now I should be able to create a file with the same name as the
         # damaged guy if I want.
@@ -520,14 +521,14 @@ class TestDamage(CephFSTestCase):
 
         # Check that an entry is created in the damage table
         damage = json.loads(
-            self.fs.mon_manager.raw_cluster_cmd(
+            self.get_ceph_cmd_stdout(
                 'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
                 "damage", "ls", '--format=json-pretty'))
         self.assertEqual(len(damage), 1)
         self.assertEqual(damage[0]['damage_type'], "backtrace")
         self.assertEqual(damage[0]['ino'], file1_ino)
 
-        self.fs.mon_manager.raw_cluster_cmd(
+        self.get_ceph_cmd_stdout(
             'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
             "damage", "rm", str(damage[0]['id']))
 
@@ -545,7 +546,7 @@ class TestDamage(CephFSTestCase):
 
         # Check that an entry is created in the damage table
         damage = json.loads(
-            self.fs.mon_manager.raw_cluster_cmd(
+            self.get_ceph_cmd_stdout(
                 'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
                 "damage", "ls", '--format=json-pretty'))
         self.assertEqual(len(damage), 2)
@@ -560,7 +561,7 @@ class TestDamage(CephFSTestCase):
             self.assertEqual(damage[1]['ino'], file2_ino)
 
         for entry in damage:
-            self.fs.mon_manager.raw_cluster_cmd(
+            self.get_ceph_cmd_stdout(
                 'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]),
                 "damage", "rm", str(entry['id']))
 
qa/tasks/cephfs/test_data_scan.py
index 9a93bd622126d46fb326f236deeecb8b49850aa9..404fcc3936567b860ee7a5449015925819a3ec92 100644 (file)
@@ -428,7 +428,7 @@ class TestDataScan(CephFSTestCase):
         self.fs.data_scan(["scan_links"])
 
         # Mark the MDS repaired
-        self.fs.mon_manager.raw_cluster_cmd('mds', 'repaired', '0')
+        self.get_ceph_cmd_stdout('mds', 'repaired', '0')
 
         # Start the MDS
         self.fs.mds_restart()
@@ -603,7 +603,7 @@ class TestDataScan(CephFSTestCase):
             file_path = "mydir/myfile_{0}".format(i)
             ino = self.mount_a.path_to_ino(file_path)
             obj = "{0:x}.{1:08x}".format(ino, 0)
-            pgid = json.loads(self.fs.mon_manager.raw_cluster_cmd(
+            pgid = json.loads(self.get_ceph_cmd_stdout(
                 "osd", "map", self.fs.get_data_pool_name(), obj,
                 "--format=json-pretty"
             ))['pgid']
qa/tasks/cephfs/test_failover.py
index c9ac08e9b29e39ef8b4734491087a33224dd1b1c..bd4efa2eb1fd675d07e0adb64aa41d70d27136c6 100644 (file)
@@ -445,7 +445,7 @@ class TestFailover(CephFSTestCase):
 
         standbys = self.mds_cluster.get_standby_daemons()
         self.assertGreaterEqual(len(standbys), 1)
-        self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)))
+        self.get_ceph_cmd_stdout('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)))
 
         # Kill a standby and check for warning
         victim = standbys.pop()
@@ -463,11 +463,11 @@ class TestFailover(CephFSTestCase):
         # Set it one greater than standbys ever seen
         standbys = self.mds_cluster.get_standby_daemons()
         self.assertGreaterEqual(len(standbys), 1)
-        self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)+1))
+        self.get_ceph_cmd_stdout('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)+1))
         self.wait_for_health("MDS_INSUFFICIENT_STANDBY", self.fs.beacon_timeout)
 
         # Set it to 0
-        self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', '0')
+        self.get_ceph_cmd_stdout('fs', 'set', self.fs.name, 'standby_count_wanted', '0')
         self.wait_for_health_clear(timeout=30)
 
     def test_discontinuous_mdsmap(self):
@@ -716,9 +716,8 @@ class TestMultiFilesystems(CephFSTestCase):
 
     def setUp(self):
         super(TestMultiFilesystems, self).setUp()
-        self.mds_cluster.mon_manager.raw_cluster_cmd("fs", "flag", "set",
-            "enable_multiple", "true",
-            "--yes-i-really-mean-it")
+        self.get_ceph_cmd_stdout("fs", "flag", "set", "enable_multiple",
+                                 "true", "--yes-i-really-mean-it")
 
     def _setup_two(self):
         fs_a = self.mds_cluster.newfs(name="alpha")
@@ -800,7 +799,7 @@ class TestMultiFilesystems(CephFSTestCase):
 
         # Kill fs_a's active MDS, see a standby take over
         self.mds_cluster.mds_stop(original_a)
-        self.mds_cluster.mon_manager.raw_cluster_cmd("mds", "fail", original_a)
+        self.get_ceph_cmd_stdout("mds", "fail", original_a)
         self.wait_until_equal(lambda: len(fs_a.get_active_names()), 1, 30,
                               reject_fn=lambda v: v > 1)
         # Assert that it's a *different* daemon that has now appeared in the map for fs_a
@@ -808,7 +807,7 @@ class TestMultiFilesystems(CephFSTestCase):
 
         # Kill fs_b's active MDS, see a standby take over
         self.mds_cluster.mds_stop(original_b)
-        self.mds_cluster.mon_manager.raw_cluster_cmd("mds", "fail", original_b)
+        self.get_ceph_cmd_stdout("mds", "fail", original_b)
         self.wait_until_equal(lambda: len(fs_b.get_active_names()), 1, 30,
                               reject_fn=lambda v: v > 1)
         # Assert that it's a *different* daemon that has now appeared in the map for fs_a
qa/tasks/cephfs/test_fragment.py
index 7d35ec0dfd53ec25c4301ec2ccb38043a2da7797..42df5138f7f88b3a19b14c8244e8513058e7c074 100644 (file)
@@ -160,13 +160,13 @@ class TestFragmentation(CephFSTestCase):
             target_files = branch_factor**depth * int(split_size * 1.5)
             create_files = target_files - files_written
 
-            self.ceph_cluster.mon_manager.raw_cluster_cmd("log",
+            self.get_ceph_cmd_stdout("log",
                 "{0} Writing {1} files (depth={2})".format(
                     self.__class__.__name__, create_files, depth
                 ))
             self.mount_a.create_n_files("splitdir/file_{0}".format(depth),
                                         create_files)
-            self.ceph_cluster.mon_manager.raw_cluster_cmd("log",
+            self.get_ceph_cmd_stdout("log",
                 "{0} Done".format(self.__class__.__name__))
 
             files_written += create_files
qa/tasks/cephfs/test_fstop.py
index 8294fceec926b51706413a40a4d61a73e3073f2b..7d17a4f268c3fba61b24cd33c38359eac4b87965 100644 (file)
@@ -20,10 +20,10 @@ class TestFSTop(CephFSTestCase):
         super(TestFSTop, self).tearDown()
 
     def _enable_mgr_stats_plugin(self):
-        return self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "enable", "stats")
+        return self.get_ceph_cmd_stdout("mgr", "module", "enable", "stats")
 
     def _disable_mgr_stats_plugin(self):
-        return self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "disable", "stats")
+        return self.get_ceph_cmd_stdout("mgr", "module", "disable", "stats")
 
     def _fstop_dump(self, *args):
         return self.mount_a.run_shell(['cephfs-top',
@@ -93,8 +93,8 @@ class TestFSTop(CephFSTestCase):
         # umount mount_b, mount another filesystem on it and use --dumpfs filter
         self.mount_b.umount_wait()
 
-        self.mds_cluster.mon_manager.raw_cluster_cmd("fs", "flag", "set", "enable_multiple", "true",
-                                                     "--yes-i-really-mean-it")
+        self.get_ceph_cmd_stdout("fs", "flag", "set", "enable_multiple",
+                                 "true", "--yes-i-really-mean-it")
 
         # create a new filesystem
         fs_b = self.mds_cluster.newfs(name=newfs_name)
qa/tasks/cephfs/test_full.py
index 2b3a7d5f95c748360489da4b09d877a70cf57d64..b5d2cbca9b2f0688d505077c857387f9b36ebab1 100644 (file)
@@ -61,10 +61,10 @@ class FullnessTestCase(CephFSTestCase):
         self.assertGreaterEqual(mount_a_initial_epoch, self.initial_osd_epoch)
 
         # Set and unset a flag to cause OSD epoch to increment
-        self.fs.mon_manager.raw_cluster_cmd("osd", "set", "pause")
-        self.fs.mon_manager.raw_cluster_cmd("osd", "unset", "pause")
+        self.get_ceph_cmd_stdout("osd", "set", "pause")
+        self.get_ceph_cmd_stdout("osd", "unset", "pause")
 
-        out = self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json").strip()
+        out = self.get_ceph_cmd_stdout("osd", "dump", "--format=json").strip()
         new_epoch = json.loads(out)['epoch']
         self.assertNotEqual(self.initial_osd_epoch, new_epoch)
 
@@ -138,7 +138,7 @@ class FullnessTestCase(CephFSTestCase):
         # Wait for the MDS to see the latest OSD map so that it will reliably
         # be applying the policy of rejecting non-deletion metadata operations
         # while in the full state.
-        osd_epoch = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['epoch']
+        osd_epoch = json.loads(self.get_ceph_cmd_stdout("osd", "dump", "--format=json-pretty"))['epoch']
         self.wait_until_true(
             lambda: self.fs.rank_asok(['status'])['osdmap_epoch'] >= osd_epoch,
             timeout=10)
@@ -167,7 +167,7 @@ class FullnessTestCase(CephFSTestCase):
 
         # Wait for the MDS to see the latest OSD map so that it will reliably
         # be applying the free space policy
-        osd_epoch = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['epoch']
+        osd_epoch = json.loads(self.get_ceph_cmd_stdout("osd", "dump", "--format=json-pretty"))['epoch']
         self.wait_until_true(
             lambda: self.fs.rank_asok(['status'])['osdmap_epoch'] >= osd_epoch,
             timeout=10)
@@ -376,8 +376,8 @@ class TestQuotaFull(FullnessTestCase):
         super(TestQuotaFull, self).setUp()
 
         pool_name = self.fs.get_data_pool_name()
-        self.fs.mon_manager.raw_cluster_cmd("osd", "pool", "set-quota", pool_name,
-                                            "max_bytes", "{0}".format(self.pool_capacity))
+        self.get_ceph_cmd_stdout("osd", "pool", "set-quota", pool_name,
+                                 "max_bytes", f"{self.pool_capacity}")
 
 
 class TestClusterFull(FullnessTestCase):
qa/tasks/cephfs/test_journal_repair.py
index c5769784d51d899927cbe56732aa3e90da228572..7561ddee974028002f51209865c47e67d62b1192 100644 (file)
@@ -233,8 +233,8 @@ class TestJournalRepair(CephFSTestCase):
         self.fs.table_tool(["0", "reset", "session"])
         self.fs.journal_tool(["journal", "reset"], 0)
         self.fs.erase_mds_objects(1)
-        self.fs.mon_manager.raw_cluster_cmd('fs', 'reset', self.fs.name,
-                '--yes-i-really-mean-it')
+        self.get_ceph_cmd_stdout('fs', 'reset', self.fs.name,
+                                 '--yes-i-really-mean-it')
 
         # Bring an MDS back online, mount a client, and see that we can walk the full
         # filesystem tree again
qa/tasks/cephfs/test_mantle.py
index 6a3c17d4360f694f1d95f4bf40c5f3c59384ed7a..92583b502d430f01649d599c29719553ad28a20d 100644 (file)
@@ -94,7 +94,7 @@ class TestMantle(CephFSTestCase):
         expect = " : (110) Connection timed out"
 
         # kill the OSDs so that the balancer pull from RADOS times out
-        osd_map = json.loads(self.fs.mon_manager.raw_cluster_cmd('osd', 'dump', '--format=json-pretty'))
+        osd_map = json.loads(self.get_ceph_cmd_stdout('osd', 'dump', '--format=json-pretty'))
         for i in range(0, len(osd_map['osds'])):
           self.get_ceph_cmd_result('osd', 'down', str(i))
           self.get_ceph_cmd_result('osd', 'out', str(i))
qa/tasks/cephfs/test_mds_metrics.py
index 4fb2f969b382579d8ea993fc33bd072c0ea1b491..296b33859806fdb18f26b437c8c183500036661e 100644 (file)
@@ -57,13 +57,13 @@ class TestMDSMetrics(CephFSTestCase):
         return verify_metrics_cbk
 
     def _fs_perf_stats(self, *args):
-        return self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "perf", "stats", *args)
+        return self.get_ceph_cmd_stdout("fs", "perf", "stats", *args)
 
     def _enable_mgr_stats_plugin(self):
-        return self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "enable", "stats")
+        return self.get_ceph_cmd_stdout("mgr", "module", "enable", "stats")
 
     def _disable_mgr_stats_plugin(self):
-        return self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "disable", "stats")
+        return self.get_ceph_cmd_stdout("mgr", "module", "disable", "stats")
 
     def _spread_directory_on_all_ranks(self, fscid):
         fs_status = self.fs.status()
@@ -404,7 +404,7 @@ class TestMDSMetrics(CephFSTestCase):
         invalid_mds_rank = "1,"
         # try, 'fs perf stat' command with invalid mds_rank
         try:
-            self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "perf", "stats", "--mds_rank", invalid_mds_rank)
+            self.get_ceph_cmd_stdout("fs", "perf", "stats", "--mds_rank", invalid_mds_rank)
         except CommandFailedError as ce:
             if ce.exitstatus != errno.EINVAL:
                 raise
@@ -415,7 +415,7 @@ class TestMDSMetrics(CephFSTestCase):
         invalid_client_id = "abcd"
         # try, 'fs perf stat' command with invalid client_id
         try:
-            self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "perf", "stats", "--client_id", invalid_client_id)
+            self.get_ceph_cmd_stdout("fs", "perf", "stats", "--client_id", invalid_client_id)
         except CommandFailedError as ce:
             if ce.exitstatus != errno.EINVAL:
                 raise
@@ -426,7 +426,7 @@ class TestMDSMetrics(CephFSTestCase):
         invalid_client_ip = "1.2.3"
         # try, 'fs perf stat' command with invalid client_ip
         try:
-            self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "perf", "stats", "--client_ip", invalid_client_ip)
+            self.get_ceph_cmd_stdout("fs", "perf", "stats", "--client_ip", invalid_client_ip)
         except CommandFailedError as ce:
             if ce.exitstatus != errno.EINVAL:
                 raise
@@ -501,8 +501,8 @@ class TestMDSMetrics(CephFSTestCase):
         self.mount_b.umount_wait()
         self.fs.delete_all_filesystems()
 
-        self.mds_cluster.mon_manager.raw_cluster_cmd("fs", "flag", "set",
-            "enable_multiple", "true", "--yes-i-really-mean-it")
+        self.get_ceph_cmd_stdout("fs", "flag", "set", "enable_multiple",
+                                 "true", "--yes-i-really-mean-it")
 
         # creating filesystem
         fs_a = self._setup_fs(fs_name="fs1")
@@ -569,8 +569,8 @@ class TestMDSMetrics(CephFSTestCase):
         self.mount_a.umount_wait()
         self.mount_b.umount_wait()
 
-        self.mds_cluster.mon_manager.raw_cluster_cmd("fs", "flag", "set",
-                    "enable_multiple", "true", "--yes-i-really-mean-it")
+        self.get_ceph_cmd_stdout("fs", "flag", "set", "enable_multiple",
+                                 "true", "--yes-i-really-mean-it")
 
         # creating filesystem
         fs_b = self._setup_fs(fs_name="fs2")
qa/tasks/cephfs/test_mirroring.py
index 45479cbc9df69de15a1ddd798cbf063fae2f643d..9fb6bb4bc47ff939531bfd5bb8a6772d23e9a9f1 100644 (file)
@@ -38,16 +38,16 @@ class TestMirroring(CephFSTestCase):
         super(TestMirroring, self).tearDown()
 
     def enable_mirroring_module(self):
-        self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "enable", TestMirroring.MODULE_NAME)
+        self.get_ceph_cmd_stdout("mgr", "module", "enable", TestMirroring.MODULE_NAME)
 
     def disable_mirroring_module(self):
-        self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "disable", TestMirroring.MODULE_NAME)
+        self.get_ceph_cmd_stdout("mgr", "module", "disable", TestMirroring.MODULE_NAME)
 
     def enable_mirroring(self, fs_name, fs_id):
         res = self.mirror_daemon_command(f'counter dump for fs: {fs_name}', 'counter', 'dump')
         vbefore = res[TestMirroring.PERF_COUNTER_KEY_NAME_CEPHFS_MIRROR][0]
 
-        self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "enable", fs_name)
+        self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "enable", fs_name)
         time.sleep(10)
         # verify via asok
         res = self.mirror_daemon_command(f'mirror status for fs: {fs_name}',
@@ -68,7 +68,7 @@ class TestMirroring(CephFSTestCase):
         res = self.mirror_daemon_command(f'counter dump for fs: {fs_name}', 'counter', 'dump')
         vbefore = res[TestMirroring.PERF_COUNTER_KEY_NAME_CEPHFS_MIRROR][0]
 
-        self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "disable", fs_name)
+        self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "disable", fs_name)
         time.sleep(10)
         # verify via asok
         try:
@@ -106,9 +106,9 @@ class TestMirroring(CephFSTestCase):
             vbefore = res[TestMirroring.PERF_COUNTER_KEY_NAME_CEPHFS_MIRROR_FS][0]
 
         if remote_fs_name:
-            self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "peer_add", fs_name, peer_spec, remote_fs_name)
+            self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "peer_add", fs_name, peer_spec, remote_fs_name)
         else:
-            self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "peer_add", fs_name, peer_spec)
+            self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "peer_add", fs_name, peer_spec)
         time.sleep(10)
         self.verify_peer_added(fs_name, fs_id, peer_spec, remote_fs_name)
 
@@ -122,7 +122,7 @@ class TestMirroring(CephFSTestCase):
         vbefore = res[TestMirroring.PERF_COUNTER_KEY_NAME_CEPHFS_MIRROR_FS][0]
 
         peer_uuid = self.get_peer_uuid(peer_spec)
-        self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "peer_remove", fs_name, peer_uuid)
+        self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "peer_remove", fs_name, peer_uuid)
         time.sleep(10)
         # verify via asok
         res = self.mirror_daemon_command(f'mirror status for fs: {fs_name}',
@@ -135,13 +135,14 @@ class TestMirroring(CephFSTestCase):
         self.assertLess(vafter["counters"]["mirroring_peers"], vbefore["counters"]["mirroring_peers"])
 
     def bootstrap_peer(self, fs_name, client_name, site_name):
-        outj = json.loads(self.mgr_cluster.mon_manager.raw_cluster_cmd(
-            "fs", "snapshot", "mirror", "peer_bootstrap", "create", fs_name, client_name, site_name))
+        outj = json.loads(self.get_ceph_cmd_stdout(
+            "fs", "snapshot", "mirror", "peer_bootstrap", "create", fs_name,
+            client_name, site_name))
         return outj['token']
 
     def import_peer(self, fs_name, token):
-        self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "peer_bootstrap", "import",
-                                                     fs_name, token)
+        self.get_ceph_cmd_stdout("fs", "snapshot", "mirror",
+                                 "peer_bootstrap", "import", fs_name, token)
 
     def add_directory(self, fs_name, fs_id, dir_name, check_perf_counter=True):
         if check_perf_counter:
@@ -154,7 +155,7 @@ class TestMirroring(CephFSTestCase):
         dir_count = res['snap_dirs']['dir_count']
         log.debug(f'initial dir_count={dir_count}')
 
-        self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "add", fs_name, dir_name)
+        self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "add", fs_name, dir_name)
 
         time.sleep(10)
         # verify via asok
@@ -178,7 +179,7 @@ class TestMirroring(CephFSTestCase):
         dir_count = res['snap_dirs']['dir_count']
         log.debug(f'initial dir_count={dir_count}')
 
-        self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "remove", fs_name, dir_name)
+        self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "remove", fs_name, dir_name)
 
         time.sleep(10)
         # verify via asok
@@ -293,7 +294,7 @@ class TestMirroring(CephFSTestCase):
         return json.loads(res)
 
     def get_mirror_daemon_status(self):
-        daemon_status = json.loads(self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "daemon", "status"))
+        daemon_status = json.loads(self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "daemon", "status"))
         log.debug(f'daemon_status: {daemon_status}')
         # running a single mirror daemon is supported
         status = daemon_status[0]
@@ -380,7 +381,7 @@ class TestMirroring(CephFSTestCase):
 
         # try removing peer
         try:
-            self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "peer_remove", self.primary_fs_name, 'dummy-uuid')
+            self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "peer_remove", self.primary_fs_name, 'dummy-uuid')
         except CommandFailedError as ce:
             if ce.exitstatus != errno.EINVAL:
                 raise RuntimeError(-errno.EINVAL, 'incorrect error code when removing a peer')
@@ -804,7 +805,7 @@ class TestMirroring(CephFSTestCase):
 
         # enable mirroring through mon interface -- this should result in the mirror daemon
         # failing to enable mirroring due to absence of `cephfs_mirror` index object.
-        self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "mirror", "enable", self.primary_fs_name)
+        self.get_ceph_cmd_stdout("fs", "mirror", "enable", self.primary_fs_name)
 
         with safe_while(sleep=5, tries=10, action='wait for failed state') as proceed:
             while proceed():
@@ -819,7 +820,7 @@ class TestMirroring(CephFSTestCase):
                 except:
                     pass
 
-        self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "mirror", "disable", self.primary_fs_name)
+        self.get_ceph_cmd_stdout("fs", "mirror", "disable", self.primary_fs_name)
         time.sleep(10)
         # verify via asok
         try:
@@ -841,7 +842,7 @@ class TestMirroring(CephFSTestCase):
         # enable mirroring through mon interface -- this should result in the mirror daemon
         # failing to enable mirroring due to absence of `cephfs_mirror` index object.
 
-        self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "mirror", "enable", self.primary_fs_name)
+        self.get_ceph_cmd_stdout("fs", "mirror", "enable", self.primary_fs_name)
         # need safe_while since non-failed status pops up as mirroring is restarted
         # internally in mirror daemon.
         with safe_while(sleep=5, tries=20, action='wait for failed state') as proceed:
@@ -872,7 +873,7 @@ class TestMirroring(CephFSTestCase):
         self.assertTrue(res['peers'] == {})
         self.assertTrue(res['snap_dirs']['dir_count'] == 0)
 
-        self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "mirror", "disable", self.primary_fs_name)
+        self.get_ceph_cmd_stdout("fs", "mirror", "disable", self.primary_fs_name)
         time.sleep(10)
         # verify via asok
         try:
@@ -898,7 +899,7 @@ class TestMirroring(CephFSTestCase):
 
         # verify via peer_list interface
         peer_uuid = self.get_peer_uuid("client.mirror_peer_bootstrap@site-remote")
-        res = json.loads(self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "peer_list", self.primary_fs_name))
+        res = json.loads(self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "peer_list", self.primary_fs_name))
         self.assertTrue(peer_uuid in res)
 
         # remove peer
@@ -1023,20 +1024,20 @@ class TestMirroring(CephFSTestCase):
         dir_path_p = "/d0/d1"
         dir_path = "/d0/d1/d2"
 
-        self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "add", self.primary_fs_name, dir_path)
+        self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "add", self.primary_fs_name, dir_path)
 
         time.sleep(10)
         # this uses an undocumented interface to get dirpath map state
-        res_json = self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "dirmap", self.primary_fs_name, dir_path)
+        res_json = self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "dirmap", self.primary_fs_name, dir_path)
         res = json.loads(res_json)
         # there are no mirror daemons
         self.assertTrue(res['state'], 'stalled')
 
-        self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "remove", self.primary_fs_name, dir_path)
+        self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "remove", self.primary_fs_name, dir_path)
 
         time.sleep(10)
         try:
-            self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "dirmap", self.primary_fs_name, dir_path)
+            self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "dirmap", self.primary_fs_name, dir_path)
         except CommandFailedError as ce:
             if ce.exitstatus != errno.ENOENT:
                 raise RuntimeError('invalid errno when checking dirmap status for non-existent directory')
@@ -1044,11 +1045,11 @@ class TestMirroring(CephFSTestCase):
             raise RuntimeError('incorrect errno when checking dirmap state for non-existent directory')
 
         # adding a parent directory should be allowed
-        self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "add", self.primary_fs_name, dir_path_p)
+        self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "add", self.primary_fs_name, dir_path_p)
 
         time.sleep(10)
         # however, this directory path should get stalled too
-        res_json = self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "dirmap", self.primary_fs_name, dir_path_p)
+        res_json = self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "dirmap", self.primary_fs_name, dir_path_p)
         res = json.loads(res_json)
         # there are no mirror daemons
         self.assertTrue(res['state'], 'stalled')
@@ -1060,7 +1061,7 @@ class TestMirroring(CephFSTestCase):
 
         # wait for restart mirror on blocklist
         time.sleep(60)
-        res_json = self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", "snapshot", "mirror", "dirmap", self.primary_fs_name, dir_path_p)
+        res_json = self.get_ceph_cmd_stdout("fs", "snapshot", "mirror", "dirmap", self.primary_fs_name, dir_path_p)
         res = json.loads(res_json)
         # there are no mirror daemons
         self.assertTrue(res['state'], 'mapped')
index 084005f19a4f7fcb0907cf1ccd2ddf0f6bd72bb2..e087a328523dff5f5f6344ff45d6272d9e7f3bf7 100644 (file)
@@ -96,16 +96,16 @@ class TestMisc(CephFSTestCase):
 
         self.fs.fail()
 
-        self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
-                                            '--yes-i-really-mean-it')
+        self.get_ceph_cmd_stdout('fs', 'rm', self.fs.name,
+                                 '--yes-i-really-mean-it')
 
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
-                                            self.fs.metadata_pool_name,
-                                            self.fs.metadata_pool_name,
-                                            '--yes-i-really-really-mean-it')
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
-                                            self.fs.metadata_pool_name,
-                                            '--pg_num_min', str(self.fs.pg_num_min))
+        self.get_ceph_cmd_stdout('osd', 'pool', 'delete',
+                                 self.fs.metadata_pool_name,
+                                 self.fs.metadata_pool_name,
+                                 '--yes-i-really-really-mean-it')
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create',
+                                 self.fs.metadata_pool_name,
+                                 '--pg_num_min', str(self.fs.pg_num_min))
 
         # insert a garbage object
         self.fs.radosm(["put", "foo", "-"], stdin=StringIO("bar"))
@@ -119,34 +119,34 @@ class TestMisc(CephFSTestCase):
         self.wait_until_true(lambda: get_pool_df(self.fs, self.fs.metadata_pool_name), timeout=30)
 
         try:
-            self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
-                                                self.fs.metadata_pool_name,
-                                                data_pool_name)
+            self.get_ceph_cmd_stdout('fs', 'new', self.fs.name,
+                                     self.fs.metadata_pool_name,
+                                     data_pool_name)
         except CommandFailedError as e:
             self.assertEqual(e.exitstatus, errno.EINVAL)
         else:
             raise AssertionError("Expected EINVAL")
 
-        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
-                                            self.fs.metadata_pool_name,
-                                            data_pool_name, "--force")
+        self.get_ceph_cmd_stdout('fs', 'new', self.fs.name,
+                                 self.fs.metadata_pool_name,
+                                 data_pool_name, "--force")
 
-        self.fs.mon_manager.raw_cluster_cmd('fs', 'fail', self.fs.name)
+        self.get_ceph_cmd_stdout('fs', 'fail', self.fs.name)
 
-        self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name,
-                                            '--yes-i-really-mean-it')
+        self.get_ceph_cmd_stdout('fs', 'rm', self.fs.name,
+                                 '--yes-i-really-mean-it')
 
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
-                                            self.fs.metadata_pool_name,
-                                            self.fs.metadata_pool_name,
-                                            '--yes-i-really-really-mean-it')
-        self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
-                                            self.fs.metadata_pool_name,
-                                            '--pg_num_min', str(self.fs.pg_num_min))
-        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
-                                            self.fs.metadata_pool_name,
-                                            data_pool_name,
-                                            '--allow_dangerous_metadata_overlay')
+        self.get_ceph_cmd_stdout('osd', 'pool', 'delete',
+                                 self.fs.metadata_pool_name,
+                                 self.fs.metadata_pool_name,
+                                 '--yes-i-really-really-mean-it')
+        self.get_ceph_cmd_stdout('osd', 'pool', 'create',
+                                 self.fs.metadata_pool_name,
+                                 '--pg_num_min', str(self.fs.pg_num_min))
+        self.get_ceph_cmd_stdout('fs', 'new', self.fs.name,
+                                 self.fs.metadata_pool_name,
+                                 data_pool_name,
+                                 '--allow_dangerous_metadata_overlay')
 
     def test_cap_revoke_nonresponder(self):
         """
@@ -199,9 +199,8 @@ class TestMisc(CephFSTestCase):
         pool_name = self.fs.get_data_pool_name()
         raw_df = self.fs.get_pool_df(pool_name)
         raw_avail = float(raw_df["max_avail"])
-        out = self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'get',
-                                                  pool_name, 'size',
-                                                  '-f', 'json-pretty')
+        out = self.get_ceph_cmd_stdout('osd', 'pool', 'get', pool_name,
+                                       'size', '-f', 'json-pretty')
         _ = json.loads(out)
 
         proc = self.mount_a.run_shell(['df', '.'])
@@ -253,9 +252,8 @@ class TestMisc(CephFSTestCase):
         self.fs.set_allow_new_snaps(False)
         self.fs.set_allow_standby_replay(True)
 
-        lsflags = json.loads(self.fs.mon_manager.raw_cluster_cmd('fs', 'lsflags',
-                                                                 self.fs.name,
-                                                                 "--format=json-pretty"))
+        lsflags = json.loads(self.get_ceph_cmd_stdout(
+            'fs', 'lsflags', self.fs.name, "--format=json-pretty"))
         self.assertEqual(lsflags["joinable"], False)
         self.assertEqual(lsflags["allow_snaps"], False)
         self.assertEqual(lsflags["allow_multimds_snaps"], True)
@@ -425,7 +423,7 @@ class TestMisc(CephFSTestCase):
         self.fs.mds_asok(['config', 'set', 'debug_mds', '1/10'])
         self.fs.mds_asok(['config', 'set', 'mds_extraordinary_events_dump_interval', '1'])
         try:
-            mons = json.loads(self.fs.mon_manager.raw_cluster_cmd('mon', 'dump', '-f', 'json'))['mons']
+            mons = json.loads(self.get_ceph_cmd_stdout('mon', 'dump', '-f', 'json'))['mons']
         except:
             self.assertTrue(False, "Error fetching monitors")
 
@@ -468,7 +466,7 @@ class TestMisc(CephFSTestCase):
         self.fs.mds_asok(['config', 'set', 'mds_heartbeat_grace', '1'])
         self.fs.mds_asok(['config', 'set', 'mds_extraordinary_events_dump_interval', '1'])
         try:
-            mons = json.loads(self.fs.mon_manager.raw_cluster_cmd('mon', 'dump', '-f', 'json'))['mons']
+            mons = json.loads(self.get_ceph_cmd_stdout('mon', 'dump', '-f', 'json'))['mons']
         except:
             self.assertTrue(False, "Error fetching monitors")
 
index 2bb6257c7eb8ab06a948268e2bec345a9c9f3c5a..09847b6ea6d10ee44d43ff0a53740687de800077 100644 (file)
@@ -116,7 +116,7 @@ class TestScrub2(CephFSTestCase):
 
         def expect_exdev(cmd, mds):
             try:
-                self.fs.mon_manager.raw_cluster_cmd('tell', 'mds.{0}'.format(mds), *cmd)
+                self.get_ceph_cmd_stdout('tell', 'mds.{0}'.format(mds), *cmd)
             except CommandFailedError as e:
                 if e.exitstatus == errno.EXDEV:
                     pass
index 49bd38d9e4fbc9d7b631c5cc5da3c213a4dcba1e..2d06cbac7baa1ca251febecb75c87d74de8b5cd6 100644 (file)
@@ -16,7 +16,7 @@ NFS_POOL_NAME = '.nfs'  # should match mgr_module.py
 # TODO Add test for cluster update when ganesha can be deployed on multiple ports.
 class TestNFS(MgrTestCase):
     def _cmd(self, *args):
-        return self.mgr_cluster.mon_manager.raw_cluster_cmd(*args)
+        return self.get_ceph_cmd_stdout(args)
 
     def _nfs_cmd(self, *args):
         return self._cmd("nfs", *args)
index bbcdf97697277023feaf8fb960afdb7335f4919e..e91a3f1913d67b4fd8dc70c3760d3f2f9856f07b 100644 (file)
@@ -27,7 +27,7 @@ class TestFSRecovery(CephFSTestCase):
         # recovered/intact
         self.fs.rm()
         # Recreate file system with pool and previous fscid
-        self.fs.mon_manager.raw_cluster_cmd(
+        self.get_ceph_cmd_stdout(
             'fs', 'new', self.fs.name, metadata_pool, data_pool,
             '--recover', '--force', '--fscid', f'{self.fs.id}')
         self.fs.set_joinable()
index 8c4e1967d359662c85a93dba519e80a720d3733e..bf815547488c3662947f4ba36331c04d46345837 100644 (file)
@@ -119,7 +119,7 @@ class TestRecoveryPool(CephFSTestCase):
         recovery_fs.create(recover=True, metadata_overlay=True)
 
         recovery_pool = recovery_fs.get_metadata_pool_name()
-        recovery_fs.mon_manager.raw_cluster_cmd('-s')
+        self.get_ceph_cmd_stdout('-s')
 
         # Reset the MDS map in case multiple ranks were in play: recovery procedure
         # only understands how to rebuild metadata under rank 0
index e41b997a6eebc0a693eca7c30bfd0f49d7ffcc24..bae048444f303f1bb8d3e3e5febcaee29720e348 100644 (file)
@@ -281,8 +281,8 @@ class TestScrubChecks(CephFSTestCase):
             all_damage = self.fs.rank_tell(["damage", "ls"], mds_rank)
             damage = [d for d in all_damage if d['ino'] == ino and d['damage_type'] == dtype]
             for d in damage:
-                self.fs.mon_manager.raw_cluster_cmd(
-                    'tell', 'mds.{0}'.format(self.fs.get_active_names()[mds_rank]),
+                self.get_ceph_cmd_stdout(
+                    'tell', f'mds.{self.fs.get_active_names()[mds_rank]}',
                     "damage", "rm", str(d['id']))
             return len(damage) > 0
 
index ad6fd1d609cb9b21701caed068a68cde1fa6285d..b3b88af7246e373649da0a88c54a97fa0df8828c 100644 (file)
@@ -158,7 +158,7 @@ class TestSessionMap(CephFSTestCase):
         if mon_caps is None:
             mon_caps = "allow r"
 
-        out = self.fs.mon_manager.raw_cluster_cmd(
+        out = self.get_ceph_cmd_stdout(
             "auth", "get-or-create", "client.{name}".format(name=id_name),
             "mds", mds_caps,
             "osd", osd_caps,
index 14618bc574106f761d4745f7a5863b2639c97f34..8bbd679efa30c1047e254885bc7d4e29760c8d80 100644 (file)
@@ -60,7 +60,7 @@ class TestSnapSchedulesHelper(CephFSTestCase):
         self.assertTrue((delta <= timo + 5) and (delta >= timo - 5))
 
     def _fs_cmd(self, *args):
-        return self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", *args)
+        return self.get_ceph_cmd_stdout("fs", *args)
 
     def fs_snap_schedule_cmd(self, *args, **kwargs):
         if 'fs' in kwargs:
@@ -85,10 +85,10 @@ class TestSnapSchedulesHelper(CephFSTestCase):
             self.volname = result[0]['name']
 
     def _enable_snap_schedule(self):
-        return self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "enable", "snap_schedule")
+        return self.get_ceph_cmd_stdout("mgr", "module", "enable", "snap_schedule")
 
     def _disable_snap_schedule(self):
-        return self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "disable", "snap_schedule")
+        return self.get_ceph_cmd_stdout("mgr", "module", "disable", "snap_schedule")
 
     def _allow_minute_granularity_snapshots(self):
         self.config_set('mgr', 'mgr/snap_schedule/allow_m_granularity', True)
index 8bdc126e2b647c2d7bcf6451bcf78713cc365fa6..2b31d00c541387e950ffc73c889cea8736d7c6bf 100644 (file)
@@ -651,9 +651,8 @@ class TestStrays(CephFSTestCase):
         self.assertFalse(self._is_stopped(1))
 
         # Permit the daemon to start purging again
-        self.fs.mon_manager.raw_cluster_cmd('tell', 'mds.{0}'.format(rank_1_id),
-                                            'injectargs',
-                                            "--mds_max_purge_files 100")
+        self.get_ceph_cmd_stdout('tell', 'mds.{0}'.format(rank_1_id),
+                                 'injectargs', "--mds_max_purge_files 100")
 
         # It should now proceed through shutdown
         self.fs.wait_for_daemons(timeout=120)
@@ -816,7 +815,7 @@ touch pin/placeholder
 
         :param pool_name: Which pool (must exist)
         """
-        out = self.fs.mon_manager.raw_cluster_cmd("df", "--format=json-pretty")
+        out = self.get_ceph_cmd_stdout("df", "--format=json-pretty")
         for p in json.loads(out)['pools']:
             if p['name'] == pool_name:
                 return p['stats']
index 9046da94945e4f9090fb2526b781ba25a2f03f0b..0700fc39d9afadcdf9ab1ea9d956065831b868cb 100644 (file)
@@ -30,10 +30,10 @@ class TestVolumesHelper(CephFSTestCase):
     DEFAULT_NUMBER_OF_FILES = 1024
 
     def _fs_cmd(self, *args):
-        return self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", *args)
+        return self.get_ceph_cmd_stdout("fs", *args)
 
     def _raw_cmd(self, *args):
-        return self.mgr_cluster.mon_manager.raw_cluster_cmd(*args)
+        return self.get_ceph_cmd_stdout(args)
 
     def __check_clone_state(self, state, clone, clone_group=None, timo=120):
         check = 0
@@ -1132,7 +1132,7 @@ class TestSubvolumeGroups(TestVolumesHelper):
 
         # Create auth_id
         authid = "client.guest1"
-        user = json.loads(self.fs.mon_manager.raw_cluster_cmd(
+        user = json.loads(self.get_ceph_cmd_stdout(
             "auth", "get-or-create", authid,
             "mds", "allow rw path=/volumes",
             "mgr", "allow rw",
@@ -1219,7 +1219,7 @@ class TestSubvolumeGroups(TestVolumesHelper):
 
         # Create auth_id
         authid = "client.guest1"
-        user = json.loads(self.fs.mon_manager.raw_cluster_cmd(
+        user = json.loads(self.get_ceph_cmd_stdout(
             "auth", "get-or-create", authid,
             "mds", f"allow rw path={mount_path}",
             "mgr", "allow rw",
@@ -2877,11 +2877,11 @@ class TestSubvolumes(TestVolumesHelper):
         group = self._gen_subvol_grp_name()
 
         # Create auth_id
-        self.fs.mon_manager.raw_cluster_cmd(
+        self.get_ceph_cmd_stdout(
             "auth", "get-or-create", "client.guest1",
             "mds", "allow *",
             "osd", "allow rw",
-            "mon", "allow *"
+            "mon", "allow *"
         )
 
         auth_id = "guest1"
@@ -2906,7 +2906,7 @@ class TestSubvolumes(TestVolumesHelper):
             self.fail("expected the 'fs subvolume authorize' command to fail")
 
         # clean up
-        self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
+        self.get_ceph_cmd_stdout("auth", "rm", "client.guest1")
         self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
         self._fs_cmd("subvolumegroup", "rm", self.volname, group)
 
@@ -2921,7 +2921,7 @@ class TestSubvolumes(TestVolumesHelper):
         group = self._gen_subvol_grp_name()
 
         # Create auth_id
-        self.fs.mon_manager.raw_cluster_cmd(
+        self.get_ceph_cmd_stdout(
             "auth", "get-or-create", "client.guest1",
             "mds", "allow *",
             "osd", "allow rw",
@@ -2949,7 +2949,7 @@ class TestSubvolumes(TestVolumesHelper):
         # clean up
         self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id,
                      "--group_name", group)
-        self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
+        self.get_ceph_cmd_stdout("auth", "rm", "client.guest1")
         self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
         self._fs_cmd("subvolumegroup", "rm", self.volname, group)
 
@@ -2983,7 +2983,7 @@ class TestSubvolumes(TestVolumesHelper):
                                   "--group_name", group).rstrip()
 
         # Update caps for guestclient_1 out of band
-        out = self.fs.mon_manager.raw_cluster_cmd(
+        out = self.get_ceph_cmd_stdout(
             "auth", "caps", "client.guest1",
             "mds", "allow rw path=/volumes/{0}, allow rw path={1}".format(group, subvol_path),
             "osd", "allow rw pool=cephfs_data",
@@ -2996,7 +2996,7 @@ class TestSubvolumes(TestVolumesHelper):
 
         # Validate the caps of guestclient_1 after deauthorize. It should not have deleted
         # guestclient_1. The mgr and mds caps should be present which was updated out of band.
-        out = json.loads(self.fs.mon_manager.raw_cluster_cmd("auth", "get", "client.guest1", "--format=json-pretty"))
+        out = json.loads(self.get_ceph_cmd_stdout("auth", "get", "client.guest1", "--format=json-pretty"))
 
         self.assertEqual("client.guest1", out[0]["entity"])
         self.assertEqual("allow rw path=/volumes/{0}".format(group), out[0]["caps"]["mds"])
@@ -3004,7 +3004,7 @@ class TestSubvolumes(TestVolumesHelper):
         self.assertNotIn("osd", out[0]["caps"])
 
         # clean up
-        out = self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
+        out = self.get_ceph_cmd_stdout("auth", "rm", "client.guest1")
         self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
         self._fs_cmd("subvolumegroup", "rm", self.volname, group)
 
@@ -3056,7 +3056,7 @@ class TestSubvolumes(TestVolumesHelper):
         # clean up
         self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id, "--group_name", group)
         guest_mount.umount_wait()
-        self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
+        self.get_ceph_cmd_stdout("auth", "rm", "client.guest1")
         self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
         self._fs_cmd("subvolumegroup", "rm", self.volname, group)
 
@@ -3112,7 +3112,7 @@ class TestSubvolumes(TestVolumesHelper):
         # clean up
         self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, "guest1", "--group_name", group)
         guest_mount.umount_wait()
-        self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
+        self.get_ceph_cmd_stdout("auth", "rm", "client.guest1")
         self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
         self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
         self._fs_cmd("subvolumegroup", "rm", self.volname, group)
@@ -3187,7 +3187,7 @@ class TestSubvolumes(TestVolumesHelper):
         self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, auth_id, "--group_name", group)
         self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, auth_id, "--group_name", group)
         guest_mount.umount_wait()
-        self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
+        self.get_ceph_cmd_stdout("auth", "rm", "client.guest1")
         self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
         self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
         self._fs_cmd("subvolumegroup", "rm", self.volname, group)
@@ -3259,7 +3259,7 @@ class TestSubvolumes(TestVolumesHelper):
         # clean up
         self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, auth_id, "--group_name", group)
         guest_mount.umount_wait()
-        self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1")
+        self.get_ceph_cmd_stdout("auth", "rm", "client.guest1")
         self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group)
         self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group)
         self._fs_cmd("subvolumegroup", "rm", self.volname, group)
@@ -7172,8 +7172,8 @@ class TestSubvolumeSnapshotClones(TestVolumesHelper):
         new_pool = "new_pool"
         self.fs.add_data_pool(new_pool)
 
-        self.fs.mon_manager.raw_cluster_cmd("osd", "pool", "set-quota", new_pool,
-                                            "max_bytes", "{0}".format(pool_capacity // 4))
+        self.get_ceph_cmd_stdout("osd", "pool", "set-quota", new_pool,
+                                 "max_bytes", f"{pool_capacity // 4}")
 
         # schedule a clone
         self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1, "--pool_layout", new_pool)
@@ -8100,7 +8100,7 @@ class TestMisc(TestVolumesHelper):
         self._fs_cmd("subvolume", "authorize", self.volname, subvol1, authid1)
 
         # Validate that the mds path added is of subvol1 and not of subvol2
-        out = json.loads(self.fs.mon_manager.raw_cluster_cmd("auth", "get", "client.alice", "--format=json-pretty"))
+        out = json.loads(self.get_ceph_cmd_stdout("auth", "get", "client.alice", "--format=json-pretty"))
         self.assertEqual("client.alice", out[0]["entity"])
         self.assertEqual("allow rw path={0}".format(createpath1[1:]), out[0]["caps"]["mds"])
 
index cbb3443059e2d7e4a228db834dbf7c757b8a153b..7d5233f8ff00f5c7c752d000f213f03b344d7796 100644 (file)
@@ -143,8 +143,8 @@ class XFSTestsDev(CephFSTestCase):
         import configparser
 
         cp = configparser.ConfigParser()
-        cp.read_string(self.fs.mon_manager.raw_cluster_cmd(
-            'auth', 'get-or-create', 'client.admin'))
+        cp.read_string(self.get_ceph_cmd_stdout('auth', 'get-or-create',
+                                                'client.admin'))
 
         return cp['client.admin']['key']
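
For reference, a minimal sketch of the call-site pattern these hunks converge on. It is illustrative only and not part of the commit: the class name TestExampleUsage is hypothetical, and it assumes the qa framework's CephFSTestCase (with the new helper available) running against a test cluster.

import json

from tasks.cephfs.cephfs_test_case import CephFSTestCase


class TestExampleUsage(CephFSTestCase):  # hypothetical, for illustration only
    def test_mon_dump(self):
        # get_ceph_cmd_stdout() takes the ceph CLI arguments positionally and
        # returns the command's stdout as a string, so JSON-emitting commands
        # can be decoded directly -- the same pattern used in the hunks above.
        mons = json.loads(self.get_ceph_cmd_stdout('mon', 'dump', '-f', 'json'))['mons']
        self.assertGreater(len(mons), 0)

As in the dirmap and 'fs new' hunks above, a command that fails still raises CommandFailedError, so call sites that expect a specific errno keep their try/except blocks unchanged.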