git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
qa/cephfs: use run_ceph_cmd() when cmd output is not needed 50569/head
author: Rishabh Dave <ridave@redhat.com>
Mon, 27 Mar 2023 06:21:16 +0000 (11:51 +0530)
committer: Rishabh Dave <ridave@redhat.com>
Wed, 28 Jun 2023 12:08:19 +0000 (17:38 +0530)
In filesystem.py and wherever instance of class Filesystem are used, use
run_ceph_cmd() instead of get_ceph_cluster_stdout() when output of Ceph
command is not required.

Signed-off-by: Rishabh Dave <ridave@redhat.com>
qa/tasks/cephfs/filesystem.py

index 7ed8e9bba45bd83b63c5c1df8394e5722c10d206..21804f877700ea1af4fdc2c7ee83a263a03e0e37 100644 (file)
@@ -367,7 +367,7 @@ class MDSCluster(CephCluster):
         """
         def _fail_restart(id_):
             self.mds_daemons[id_].stop()
-            self.get_ceph_cmd_stdout("mds", "fail", id_)
+            self.run_ceph_cmd("mds", "fail", id_)
             self.mds_daemons[id_].restart()
 
         self._one_or_all(mds_id, _fail_restart)
@@ -578,21 +578,21 @@ class Filesystem(MDSCluster):
         assert(mds_map['in'] == list(range(0, mds_map['max_mds'])))
 
     def reset(self):
-        self.get_ceph_cmd_stdout("fs", "reset", str(self.name), '--yes-i-really-mean-it')
+        self.run_ceph_cmd("fs", "reset", str(self.name), '--yes-i-really-mean-it')
 
     def fail(self):
-        self.get_ceph_cmd_stdout("fs", "fail", str(self.name))
+        self.run_ceph_cmd("fs", "fail", str(self.name))
 
     def set_flag(self, var, *args):
         a = map(lambda x: str(x).lower(), args)
-        self.get_ceph_cmd_stdout("fs", "flag", "set", var, *a)
+        self.run_ceph_cmd("fs", "flag", "set", var, *a)
 
     def set_allow_multifs(self, yes=True):
         self.set_flag("enable_multiple", yes)
 
     def set_var(self, var, *args):
         a = map(lambda x: str(x).lower(), args)
-        self.get_ceph_cmd_stdout("fs", "set", self.name, var, *a)
+        self.run_ceph_cmd("fs", "set", self.name, var, *a)
 
     def set_down(self, down=True):
         self.set_var("down", str(down).lower())
@@ -620,7 +620,7 @@ class Filesystem(MDSCluster):
 
     def compat(self, *args):
         a = map(lambda x: str(x).lower(), args)
-        self.get_ceph_cmd_stdout("fs", "compat", self.name, *a)
+        self.run_ceph_cmd("fs", "compat", self.name, *a)
 
     def add_compat(self, *args):
         self.compat("add_compat", *args)
@@ -665,24 +665,23 @@ class Filesystem(MDSCluster):
         log.debug("Creating filesystem '{0}'".format(self.name))
 
         try:
-            self.get_ceph_cmd_stdout('osd', 'pool', 'create',
-                                     self.metadata_pool_name,
-                                     '--pg_num_min', str(self.pg_num_min))
-
-            self.get_ceph_cmd_stdout('osd', 'pool', 'create',
-                                     data_pool_name, str(self.pg_num),
-                                     '--pg_num_min', str(self.pg_num_min),
-                                     '--target_size_ratio',
-                                     str(self.target_size_ratio))
+            self.run_ceph_cmd('osd', 'pool', 'create', self.metadata_pool_name,
+                              '--pg_num_min', str(self.pg_num_min))
+
+            self.run_ceph_cmd('osd', 'pool', 'create', data_pool_name,
+                              str(self.pg_num),
+                              '--pg_num_min', str(self.pg_num_min),
+                              '--target_size_ratio',
+                              str(self.target_size_ratio))
         except CommandFailedError as e:
             if e.exitstatus == 22: # nautilus couldn't specify --pg_num_min option
-                self.get_ceph_cmd_stdout('osd', 'pool', 'create',
-                                         self.metadata_pool_name,
-                                         str(self.pg_num_min))
+                self.run_ceph_cmd('osd', 'pool', 'create',
+                                  self.metadata_pool_name,
+                                  str(self.pg_num_min))
 
-                self.get_ceph_cmd_stdout('osd', 'pool', 'create',
-                                         data_pool_name, str(self.pg_num),
-                                         str(self.pg_num_min))
+                self.run_ceph_cmd('osd', 'pool', 'create',
+                                  data_pool_name, str(self.pg_num),
+                                  str(self.pg_num_min))
             else:
                 raise
 
@@ -691,7 +690,7 @@ class Filesystem(MDSCluster):
             args.append('--recover')
         if metadata_overlay:
             args.append('--allow-dangerous-metadata-overlay')
-        self.get_ceph_cmd_stdout(*args)
+        self.run_ceph_cmd(*args)
 
         if not recover:
             if self.ec_profile and 'disabled' not in self.ec_profile:
@@ -699,23 +698,22 @@ class Filesystem(MDSCluster):
                 log.debug("EC profile is %s", self.ec_profile)
                 cmd = ['osd', 'erasure-code-profile', 'set', ec_data_pool_name]
                 cmd.extend(self.ec_profile)
-                self.get_ceph_cmd_stdout(*cmd)
+                self.run_ceph_cmd(*cmd)
                 try:
-                    self.get_ceph_cmd_stdout(
+                    self.run_ceph_cmd(
                         'osd', 'pool', 'create', ec_data_pool_name,
                         'erasure', ec_data_pool_name,
                         '--pg_num_min', str(self.pg_num_min),
                         '--target_size_ratio', str(self.target_size_ratio_ec))
                 except CommandFailedError as e:
                     if e.exitstatus == 22: # nautilus couldn't specify --pg_num_min option
-                        self.get_ceph_cmd_stdout(
+                        self.run_ceph_cmd(
                             'osd', 'pool', 'create', ec_data_pool_name,
                             str(self.pg_num_min), 'erasure', ec_data_pool_name)
                     else:
                         raise
-                self.get_ceph_cmd_stdout(
-                    'osd', 'pool', 'set',
-                    ec_data_pool_name, 'allow_ec_overwrites', 'true')
+                self.run_ceph_cmd('osd', 'pool', 'set', ec_data_pool_name,
+                                  'allow_ec_overwrites', 'true')
                 self.add_data_pool(ec_data_pool_name, create=False)
                 self.check_pool_application(ec_data_pool_name)
 
@@ -726,7 +724,8 @@ class Filesystem(MDSCluster):
 
         # Turn off spurious standby count warnings from modifying max_mds in tests.
         try:
-            self.get_ceph_cmd_stdout('fs', 'set', self.name, 'standby_count_wanted', '0')
+            self.run_ceph_cmd('fs', 'set', self.name, 'standby_count_wanted',
+                              '0')
         except CommandFailedError as e:
             if e.exitstatus == 22:
                 # standby_count_wanted not available prior to luminous (upgrade tests would fail otherwise)
@@ -761,9 +760,9 @@ class Filesystem(MDSCluster):
 
                 for sv in range(0, subvols['create']):
                     sv_name = f'sv_{sv}'
-                    self.get_ceph_cmd_stdout(
-                        'fs', 'subvolume', 'create', self.name, sv_name,
-                        self.fs_config.get('subvol_options', ''))
+                    self.run_ceph_cmd('fs', 'subvolume', 'create', self.name,
+                                      sv_name,
+                                      self.fs_config.get('subvol_options', ''))
 
                     if self.name not in self._ctx.created_subvols:
                         self._ctx.created_subvols[self.name] = []
@@ -904,15 +903,15 @@ class Filesystem(MDSCluster):
     def add_data_pool(self, name, create=True):
         if create:
             try:
-                self.get_ceph_cmd_stdout('osd', 'pool', 'create', name,
-                                         '--pg_num_min', str(self.pg_num_min))
+                self.run_ceph_cmd('osd', 'pool', 'create', name,
+                                  '--pg_num_min', str(self.pg_num_min))
             except CommandFailedError as e:
                 if e.exitstatus == 22: # nautilus couldn't specify --pg_num_min option
-                  self.get_ceph_cmd_stdout('osd', 'pool', 'create', name,
-                                           str(self.pg_num_min))
+                  self.run_ceph_cmd('osd', 'pool', 'create', name,
+                                    str(self.pg_num_min))
                 else:
                     raise
-        self.get_ceph_cmd_stdout('fs', 'add_data_pool', self.name, name)
+        self.run_ceph_cmd('fs', 'add_data_pool', self.name, name)
         self.get_pool_names(refresh = True)
         for poolid, fs_name in self.data_pools.items():
             if name == fs_name:
@@ -1098,13 +1097,13 @@ class Filesystem(MDSCluster):
         self.mds_signal(name, signal)
 
     def rank_freeze(self, yes, rank=0):
-        self.get_ceph_cmd_stdout("mds", "freeze", "{}:{}".format(self.id, rank), str(yes).lower())
+        self.run_ceph_cmd("mds", "freeze", "{}:{}".format(self.id, rank), str(yes).lower())
 
     def rank_repaired(self, rank):
-        self.get_ceph_cmd_stdout("mds", "repaired", "{}:{}".format(self.id, rank))
+        self.run_ceph_cmd("mds", "repaired", "{}:{}".format(self.id, rank))
 
     def rank_fail(self, rank=0):
-        self.get_ceph_cmd_stdout("mds", "fail", "{}:{}".format(self.id, rank))
+        self.run_ceph_cmd("mds", "fail", "{}:{}".format(self.id, rank))
 
     def rank_is_running(self, rank=0, status=None):
         name = self.get_rank(rank=rank, status=status)['name']