qa/cephfs: remove sudo parameter from run_shell() 46191/head
author Rishabh Dave <ridave@redhat.com>
Fri, 6 May 2022 16:41:40 +0000 (22:11 +0530)
committer Rishabh Dave <ridave@redhat.com>
Mon, 27 Jun 2022 14:26:04 +0000 (19:56 +0530)
Currently, run_shell() in mount.py accepts both "sudo" and "omit_sudo"
as parameters. It is better to accept only one of the two, because a
call to run_shell() where they are set to conflicting values is buggy.
Therefore, methods calling run_shell() must now prepend "sudo" to the
command arguments themselves and pass omit_sudo=False in the call.
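
For illustration, a minimal caller-side sketch of the new convention
(the file path is hypothetical; the call mirrors the test changes in
the diff below):

    # old style: pass sudo=True and let run_shell() rework the command
    self.mount_a.run_shell(["chmod", "600", file_path], sudo=True)

    # new style: prepend "sudo" yourself and pass omit_sudo=False
    self.mount_a.run_shell(["sudo", "chmod", "600", file_path],
                           omit_sudo=False)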

As a result of this change, methods like stat() and run_python() in
mount.py have been modified to add "sudo" to the command arguments and
to set omit_sudo to False within their own definitions.
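
A hedged sketch of that helper-side pattern (a hypothetical helper,
mirroring run_shell_payload() in the diff below):

    def _helper(self, args, **kwargs):
        # fold the sudo flag into the command itself and pass
        # omit_sudo=False so the "sudo" prefix is not dropped downstream
        if kwargs.pop('sudo', False):
            args.insert(0, 'sudo')
            kwargs['omit_sudo'] = False
        return self.run_shell(args, **kwargs)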

Signed-off-by: Rishabh Dave <ridave@redhat.com>
qa/tasks/cephfs/mount.py
qa/tasks/cephfs/test_cap_flush.py
qa/tasks/cephfs/test_data_scan.py
qa/tasks/cephfs/test_volumes.py

diff --git a/qa/tasks/cephfs/mount.py b/qa/tasks/cephfs/mount.py
index 706a60f5fd2c02679d27bebf917ea37a8f62e42d..d3c54e5462465e658a4222eca84f37ddb00789bb 100644
@@ -708,23 +708,20 @@ class CephFSMount(object):
 
     def run_shell(self, args, timeout=300, **kwargs):
         omit_sudo = kwargs.pop('omit_sudo', False)
-        sudo = kwargs.pop('sudo', False)
         cwd = kwargs.pop('cwd', self.mountpoint)
         stdout = kwargs.pop('stdout', StringIO())
         stderr = kwargs.pop('stderr', StringIO())
 
-        if sudo:
-            if isinstance(args, list):
-                args.insert(0, 'sudo')
-            elif isinstance(args, str):
-                args = 'sudo ' + args
-
         return self.client_remote.run(args=args, cwd=cwd, timeout=timeout,
                                       stdout=stdout, stderr=stderr,
                                       omit_sudo=omit_sudo, **kwargs)
 
     def run_shell_payload(self, payload, **kwargs):
-        return self.run_shell(["bash", "-c", Raw(f"'{payload}'")], **kwargs)
+        kwargs['args'] = ["bash", "-c", Raw(f"'{payload}'")]
+        if kwargs.pop('sudo', False):
+            kwargs['args'].insert(0, 'sudo')
+            kwargs['omit_sudo'] = False
+        return self.run_shell(**kwargs)
 
     def run_as_user(self, **kwargs):
         """
@@ -1324,11 +1321,13 @@ class CephFSMount(object):
         """
         Wrap ls: return a list of strings
         """
-        cmd = ["ls"]
+        kwargs['args'] = ["ls"]
         if path:
-            cmd.append(path)
-
-        ls_text = self.run_shell(cmd, **kwargs).stdout.getvalue().strip()
+            kwargs['args'].append(path)
+        if kwargs.pop('sudo', False):
+            kwargs['args'].insert(0, 'sudo')
+            kwargs['omit_sudo'] = False
+        ls_text = self.run_shell(**kwargs).stdout.getvalue().strip()
 
         if ls_text:
             return ls_text.split("\n")
@@ -1346,7 +1345,11 @@ class CephFSMount(object):
         :param val: xattr value
         :return: None
         """
-        self.run_shell(["setfattr", "-n", key, "-v", val, path], **kwargs)
+        kwargs['args'] = ["setfattr", "-n", key, "-v", val, path]
+        if kwargs.pop('sudo', False):
+            kwargs['args'].insert(0, 'sudo')
+            kwargs['omit_sudo'] = False
+        self.run_shell(**kwargs)
 
     def getfattr(self, path, attr, **kwargs):
         """
@@ -1355,7 +1358,12 @@ class CephFSMount(object):
 
         :return: a string
         """
-        p = self.run_shell(["getfattr", "--only-values", "-n", attr, path], wait=False, **kwargs)
+        kwargs['args'] = ["getfattr", "--only-values", "-n", attr, path]
+        if kwargs.pop('sudo', False):
+            kwargs['args'].insert(0, 'sudo')
+            kwargs['omit_sudo'] = False
+        kwargs['wait'] = False
+        p = self.run_shell(**kwargs)
         try:
             p.wait()
         except CommandFailedError as e:
diff --git a/qa/tasks/cephfs/test_cap_flush.py b/qa/tasks/cephfs/test_cap_flush.py
index c472e85bd5dde3cd36208e86b784de19d57de0a2..70fdc38933db933508e143518ff53f0b91a3b031 100644
@@ -44,7 +44,7 @@ class TestCapFlush(CephFSTestCase):
         self.mount_a.run_python(py_script, sudo=True)
 
         # Modify file mode by different user. ceph-fuse will send a setattr request
-        self.mount_a.run_shell(["chmod", "600", file_path], wait=False, sudo=True)
+        self.mount_a.run_shell(["sudo", "chmod", "600", file_path], wait=False, omit_sudo=False)
 
         time.sleep(10)
 
diff --git a/qa/tasks/cephfs/test_data_scan.py b/qa/tasks/cephfs/test_data_scan.py
index 361fa0f027b22585b8a61586d3042ffbc0d05e70..b186cf902b094600817540f268aff1bf4a6c9ccf 100644
@@ -92,7 +92,7 @@ class SimpleWorkload(Workload):
         self._initial_state = self._mount.stat("subdir/sixmegs")
 
     def validate(self):
-        self._mount.run_shell(["ls", "subdir"], sudo=True)
+        self._mount.run_shell(["sudo", "ls", "subdir"], omit_sudo=False)
         st = self._mount.stat("subdir/sixmegs", sudo=True)
         self.assert_equal(st['st_size'], self._initial_state['st_size'])
         return self._errors
@@ -109,7 +109,7 @@ class SymlinkWorkload(Workload):
         self._mount.run_shell(["ln", "-s", "symdir/onemegs", "symlink1_onemegs"])
 
     def validate(self):
-        self._mount.run_shell(["ls", "symdir"], sudo=True)
+        self._mount.run_shell(["sudo", "ls", "symdir"], omit_sudo=False)
         st = self._mount.lstat("symdir/symlink_onemegs")
         self.assert_true(stat.S_ISLNK(st['st_mode']))
         target = self._mount.readlink("symdir/symlink_onemegs")
@@ -529,7 +529,7 @@ class TestDataScan(CephFSTestCase):
         self.mount_a.mount_wait()
         self.mount_a.run_shell(["ls", "-l", "subdir/"]) # debugging
         # Use sudo because cephfs-data-scan will reinsert the dentry with root ownership, it can't know the real owner.
-        out = self.mount_a.run_shell_payload(f"cat subdir/{victim_dentry}", sudo=True).stdout.getvalue().strip()
+        out = self.mount_a.run_shell_payload(f"sudo cat subdir/{victim_dentry}", omit_sudo=False).stdout.getvalue().strip()
         self.assertEqual(out, victim_dentry)
 
         # Finally, close the loop by checking our injected dentry survives a merge
diff --git a/qa/tasks/cephfs/test_volumes.py b/qa/tasks/cephfs/test_volumes.py
index 69206c390fac2b5d9b5e169282314b737375f47f..827521a81eb9f7961599d39b14972ac2d15af037 100644
@@ -260,11 +260,11 @@ class TestVolumesHelper(CephFSTestCase):
         subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group)
 
         # mode
-        self.mount_a.run_shell(['chmod', mode, subvolpath], sudo=True)
+        self.mount_a.run_shell(['sudo', 'chmod', mode, subvolpath], omit_sudo=False)
 
         # ownership
-        self.mount_a.run_shell(['chown', uid, subvolpath], sudo=True)
-        self.mount_a.run_shell(['chgrp', gid, subvolpath], sudo=True)
+        self.mount_a.run_shell(['sudo', 'chown', uid, subvolpath], omit_sudo=False)
+        self.mount_a.run_shell(['sudo', 'chgrp', gid, subvolpath], omit_sudo=False)
 
     def _do_subvolume_io(self, subvolume, subvolume_group=None, create_dir=None,
                          number_of_files=DEFAULT_NUMBER_OF_FILES, file_size=DEFAULT_FILE_SIZE):
@@ -300,7 +300,7 @@ class TestVolumesHelper(CephFSTestCase):
         self.mount_a.run_shell(["ln", "-s", "./{}".format(reg_file), sym_path1])
         self.mount_a.run_shell(["ln", "-s", "./{}".format(reg_file), sym_path2])
         # flip ownership to nobody. assumption: nobody's id is 65534
-        self.mount_a.run_shell(["chown", "-h", "65534:65534", sym_path2], sudo=True, omit_sudo=False)
+        self.mount_a.run_shell(["sudo", "chown", "-h", "65534:65534", sym_path2], omit_sudo=False)
 
     def _wait_for_trash_empty(self, timeout=60):
         # XXX: construct the trash dir path (note that there is no mgr
@@ -329,7 +329,7 @@ class TestVolumesHelper(CephFSTestCase):
             group = subvol_group if subvol_group is not None else '_nogroup'
             metapath = os.path.join(".", "volumes", group, subvol_name, ".meta")
 
-        out = self.mount_a.run_shell(['cat', metapath], sudo=True)
+        out = self.mount_a.run_shell(['sudo', 'cat', metapath], omit_sudo=False)
         lines = out.stdout.getvalue().strip().split('\n')
         sv_version = -1
         for line in lines:
@@ -344,12 +344,12 @@ class TestVolumesHelper(CephFSTestCase):
         basepath = os.path.join("volumes", group, subvol_name)
         uuid_str = str(uuid.uuid4())
         createpath = os.path.join(basepath, uuid_str)
-        self.mount_a.run_shell(['mkdir', '-p', createpath], sudo=True)
+        self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False)
 
         # create a v1 snapshot, to prevent auto upgrades
         if has_snapshot:
             snappath = os.path.join(createpath, ".snap", "fake")
-            self.mount_a.run_shell(['mkdir', '-p', snappath], sudo=True)
+            self.mount_a.run_shell(['sudo', 'mkdir', '-p', snappath], omit_sudo=False)
 
         # add required xattrs to subvolume
         default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
@@ -368,9 +368,9 @@ class TestVolumesHelper(CephFSTestCase):
         group = subvol_group if subvol_group is not None else '_nogroup'
         trashpath = os.path.join("volumes", group, subvol_name, '.trash', trash_name)
         if create:
-            self.mount_a.run_shell(['mkdir', '-p', trashpath], sudo=True)
+            self.mount_a.run_shell(['sudo', 'mkdir', '-p', trashpath], omit_sudo=False)
         else:
-            self.mount_a.run_shell(['rmdir', trashpath], sudo=True)
+            self.mount_a.run_shell(['sudo', 'rmdir', trashpath], omit_sudo=False)
 
     def _configure_guest_auth(self, guest_mount, authid, key):
         """
@@ -1215,7 +1215,7 @@ class TestSubvolumeGroups(TestVolumesHelper):
 
         # emulate a old-fashioned subvolume -- in a custom group
         createpath1 = os.path.join(".", "volumes", group, subvolume)
-        self.mount_a.run_shell(['mkdir', '-p', createpath1], sudo=True)
+        self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath1], omit_sudo=False)
 
         # this would auto-upgrade on access without anyone noticing
         subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume, "--group-name", group)
@@ -2245,7 +2245,7 @@ class TestSubvolumes(TestVolumesHelper):
 
         # emulate a old-fashioned subvolume in a custom group
         createpath = os.path.join(".", "volumes", group, subvolume)
-        self.mount_a.run_shell(['mkdir', '-p', createpath], sudo=True)
+        self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False)
 
         # add required xattrs to subvolume
         default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
@@ -2679,7 +2679,7 @@ class TestSubvolumes(TestVolumesHelper):
 
         # Induce partial auth update state by modifying the auth metadata file,
         # and then run authorize again.
-        guest_mount.run_shell(['sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename)], sudo=True)
+        guest_mount.run_shell(['sudo', 'sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename)], omit_sudo=False)
 
         # Authorize 'guestclient_1' to access the subvolume.
         self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
@@ -2735,7 +2735,7 @@ class TestSubvolumes(TestVolumesHelper):
 
         # Induce partial auth update state by modifying the auth metadata file,
         # and then run de-authorize.
-        guest_mount.run_shell(['sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename)], sudo=True)
+        guest_mount.run_shell(['sudo', 'sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename)], omit_sudo=False)
 
         # Deauthorize 'guestclient_1' to access the subvolume2.
         self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, guestclient_1["auth_id"],
@@ -2788,7 +2788,7 @@ class TestSubvolumes(TestVolumesHelper):
         self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))
 
         # Replace 'subvolumes' to 'volumes', old style auth-metadata file
-        guest_mount.run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)], sudo=True)
+        guest_mount.run_shell(['sudo', 'sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)], omit_sudo=False)
 
         # Authorize 'guestclient_1' to access the subvolume2. This should transparently update 'volumes' to 'subvolumes'
         self._fs_cmd("subvolume", "authorize", self.volname, subvolume2, guestclient_1["auth_id"],
@@ -2866,7 +2866,7 @@ class TestSubvolumes(TestVolumesHelper):
         self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))
 
         # Replace 'subvolumes' to 'volumes', old style auth-metadata file
-        guest_mount.run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)], sudo=True)
+        guest_mount.run_shell(['sudo', 'sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)], omit_sudo=False)
 
         # Deauthorize 'guestclient_1' to access the subvolume2. This should update 'volumes' to subvolumes'
         self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, auth_id, "--group_name", group)
@@ -3748,7 +3748,7 @@ class TestSubvolumes(TestVolumesHelper):
 
         # emulate a old-fashioned subvolume in a custom group
         createpath = os.path.join(".", "volumes", group, subvolname)
-        self.mount_a.run_shell(['mkdir', '-p', createpath], sudo=True)
+        self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False)
 
         # set metadata for subvolume.
         key = "key"
@@ -3782,7 +3782,7 @@ class TestSubvolumes(TestVolumesHelper):
 
         # emulate a old-fashioned subvolume in a custom group
         createpath = os.path.join(".", "volumes", group, subvolname)
-        self.mount_a.run_shell(['mkdir', '-p', createpath], sudo=True)
+        self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath], omit_sudo=False)
 
         # set metadata for subvolume.
         input_metadata_dict =  {f'key_{i}' : f'value_{i}' for i in range(3)}
@@ -4164,13 +4164,13 @@ class TestSubvolumeSnapshots(TestVolumesHelper):
         # Create snapshot at ancestral level
         ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", "ancestral_snap_1")
         ancestral_snappath2 = os.path.join(".", "volumes", group, ".snap", "ancestral_snap_2")
-        self.mount_a.run_shell(['mkdir', '-p', ancestral_snappath1, ancestral_snappath2], sudo=True)
+        self.mount_a.run_shell(['sudo', 'mkdir', '-p', ancestral_snappath1, ancestral_snappath2], omit_sudo=False)
 
         subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume, group))
         self.assertEqual(len(subvolsnapshotls), snap_count)
 
         # remove ancestral snapshots
-        self.mount_a.run_shell(['rmdir', ancestral_snappath1, ancestral_snappath2], sudo=True)
+        self.mount_a.run_shell(['sudo', 'rmdir', ancestral_snappath1, ancestral_snappath2], omit_sudo=False)
 
         # remove snapshot
         for snapshot in snapshots:
@@ -4204,7 +4204,7 @@ class TestSubvolumeSnapshots(TestVolumesHelper):
         # Create snapshot at ancestral level
         ancestral_snap_name = "ancestral_snap_1"
         ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", ancestral_snap_name)
-        self.mount_a.run_shell(['mkdir', '-p', ancestral_snappath1], sudo=True)
+        self.mount_a.run_shell(['sudo', 'mkdir', '-p', ancestral_snappath1], omit_sudo=False)
 
         # Validate existence of inherited snapshot
         group_path = os.path.join(".", "volumes", group)
@@ -4222,7 +4222,7 @@ class TestSubvolumeSnapshots(TestVolumesHelper):
             self.fail("expected snapshot info of inherited snapshot to fail")
 
         # remove ancestral snapshots
-        self.mount_a.run_shell(['rmdir', ancestral_snappath1], sudo=True)
+        self.mount_a.run_shell(['sudo', 'rmdir', ancestral_snappath1], omit_sudo=False)
 
         # remove subvolume
         self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
@@ -4252,7 +4252,7 @@ class TestSubvolumeSnapshots(TestVolumesHelper):
         # Create snapshot at ancestral level
         ancestral_snap_name = "ancestral_snap_1"
         ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", ancestral_snap_name)
-        self.mount_a.run_shell(['mkdir', '-p', ancestral_snappath1], sudo=True)
+        self.mount_a.run_shell(['sudo', 'mkdir', '-p', ancestral_snappath1], omit_sudo=False)
 
         # Validate existence of inherited snap
         group_path = os.path.join(".", "volumes", group)
@@ -4270,7 +4270,7 @@ class TestSubvolumeSnapshots(TestVolumesHelper):
             self.fail("expected removing inheirted snapshot to fail")
 
         # remove ancestral snapshots
-        self.mount_a.run_shell(['rmdir', ancestral_snappath1], sudo=True)
+        self.mount_a.run_shell(['sudo', 'rmdir', ancestral_snappath1], omit_sudo=False)
 
         # remove subvolume
         self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
@@ -4300,7 +4300,7 @@ class TestSubvolumeSnapshots(TestVolumesHelper):
 
         # Create subvolumegroup snapshot
         group_snapshot_path = os.path.join(".", "volumes", group, ".snap", group_snapshot)
-        self.mount_a.run_shell(['mkdir', '-p', group_snapshot_path], sudo=True)
+        self.mount_a.run_shell(['sudo', 'mkdir', '-p', group_snapshot_path], omit_sudo=False)
 
         # Validate existence of subvolumegroup snapshot
         self.mount_a.run_shell(['ls', group_snapshot_path])
@@ -4314,7 +4314,7 @@ class TestSubvolumeSnapshots(TestVolumesHelper):
             self.fail("expected subvolume snapshot creation with same name as subvolumegroup snapshot to fail")
 
         # remove subvolumegroup snapshot
-        self.mount_a.run_shell(['rmdir', group_snapshot_path], sudo=True)
+        self.mount_a.run_shell(['sudo', 'rmdir', group_snapshot_path], omit_sudo=False)
 
         # remove subvolume
         self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
@@ -5954,7 +5954,7 @@ class TestSubvolumeSnapshotClones(TestVolumesHelper):
 
         # remove snapshot from backend to force the clone failure.
         snappath = os.path.join(".", "volumes", "_nogroup", subvolume, ".snap", snapshot)
-        self.mount_a.run_shell(['rmdir', snappath], sudo=True)
+        self.mount_a.run_shell(['sudo', 'rmdir', snappath], omit_sudo=False)
 
         # wait for clone1 to fail.
         self._wait_for_clone_to_fail(clone1)
@@ -6681,7 +6681,7 @@ class TestSubvolumeSnapshotClones(TestVolumesHelper):
 
         # emulate a old-fashioned subvolume
         createpath = os.path.join(".", "volumes", "_nogroup", subvolume)
-        self.mount_a.run_shell_payload(f"mkdir -p -m 777 {createpath}", sudo=True)
+        self.mount_a.run_shell_payload(f"sudo mkdir -p -m 777 {createpath}", omit_sudo=False)
 
         # add required xattrs to subvolume
         default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
@@ -6944,11 +6944,11 @@ class TestMisc(TestVolumesHelper):
         # emulate a old-fashioned subvolume -- one in the default group and
         # the other in a custom group
         createpath1 = os.path.join(".", "volumes", "_nogroup", subvolume1)
-        self.mount_a.run_shell(['mkdir', '-p', createpath1], sudo=True)
+        self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath1], omit_sudo=False)
 
         # create group
         createpath2 = os.path.join(".", "volumes", group, subvolume2)
-        self.mount_a.run_shell(['mkdir', '-p', createpath2], sudo=True)
+        self.mount_a.run_shell(['sudo', 'mkdir', '-p', createpath2], omit_sudo=False)
 
         # this would auto-upgrade on access without anyone noticing
         subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume1)