qa: avoid using sudo for regular test artifacts
author     Patrick Donnelly <pdonnell@redhat.com>
           Tue, 29 Jun 2021 16:47:21 +0000 (09:47 -0700)
committer  Patrick Donnelly <pdonnell@redhat.com>
           Fri, 2 Jul 2021 23:17:34 +0000 (16:17 -0700)
Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
(cherry picked from commit e2b39f6c8c23b54d7e1ff326175b371a7496f85e)
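
In short: the qa mount helpers no longer prepend 'sudo' to every command;
tests that genuinely need root now opt in explicitly. A minimal sketch of
the new calling convention (names taken from the diffs below):

    mount.run_shell(['cat', path])               # runs as the plain test user now
    mount.run_shell(['rmdir', snap], sudo=True)  # opt in to root explicitly
    mount.run_python(pyscript, sudo=True)        # same flag for python scripts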

qa/tasks/cephfs/caps_helper.py
qa/tasks/cephfs/fuse_mount.py
qa/tasks/cephfs/mount.py
qa/tasks/cephfs/test_cap_flush.py
qa/tasks/cephfs/test_cephfs_shell.py
qa/tasks/cephfs/test_client_recovery.py
qa/tasks/cephfs/test_data_scan.py
qa/tasks/cephfs/test_nfs.py
qa/tasks/cephfs/test_scrub_checks.py
qa/tasks/cephfs/test_volumes.py

diff --git a/qa/tasks/cephfs/caps_helper.py b/qa/tasks/cephfs/caps_helper.py
index a6633354623cbac80114af35761e8c61b0a2f9ba..39b5963befa6abb6dd2e5e4aff19b6fe174f3a43 100644
@@ -60,7 +60,7 @@ class CapsHelper(CephFSTestCase):
                     self.assertEqual(data, contents1)
 
     def conduct_neg_test_for_write_caps(self, filepaths, mounts):
-        cmdargs = ['echo', 'some random data', Raw('|'), 'sudo', 'tee']
+        cmdargs = ['echo', 'some random data', Raw('|'), 'tee']
 
         for mount in mounts:
             for path in filepaths:
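
Note on this hunk: Raw('|') keeps the pipe unescaped, so the remote shell
sees a real pipeline (echo 'some random data' | tee <path>). With 'sudo'
dropped, tee runs as the plain test user and fails when the client's caps
deny write access, which is exactly what this negative test asserts:

    cmdargs = ['echo', 'some random data', Raw('|'), 'tee']
    # the target path is supplied per iteration in the loop shown above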
diff --git a/qa/tasks/cephfs/fuse_mount.py b/qa/tasks/cephfs/fuse_mount.py
index 29dcc6d3be660bcb5b48a4611f75d394504b3dba..5c5d1c85c0ca893b72648406510f18c2970a8941 100644
@@ -446,7 +446,7 @@ print(_find_admin_socket("{client_name}"))
             client_name="client.{0}".format(self.client_id),
             mountpoint=self.mountpoint)
 
-        asok_path = self.run_python(pyscript)
+        asok_path = self.run_python(pyscript, sudo=True)
         log.info("Found client admin socket at {0}".format(asok_path))
         return asok_path
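
Why sudo survives here: the ceph-fuse admin socket is created by the
daemon running as root, so locating it still needs privileges even though
regular test artifacts no longer do, hence the new flag:

    asok_path = self.run_python(pyscript, sudo=True)  # root-owned asok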
 
diff --git a/qa/tasks/cephfs/mount.py b/qa/tasks/cephfs/mount.py
index d7e775f64e022164e244f1e45d83470096b03b97..883acb4d8f246beaa3695d83622d0af6d8a0d725 100644
@@ -12,7 +12,7 @@ from textwrap import dedent
 from IPy import IP
 
 from teuthology.contextutil import safe_while
-from teuthology.misc import get_file, sudo_write_file
+from teuthology.misc import get_file, write_file
 from teuthology.orchestra import run
 from teuthology.orchestra.run import CommandFailedError, ConnectionLostError, Raw
 
@@ -590,7 +590,7 @@ class CephFSMount(object):
         for suffix in self.test_files:
             log.info("Creating file {0}".format(suffix))
             self.client_remote.run(args=[
-                'sudo', 'touch', os.path.join(self.hostfs_mntpt, suffix)
+                'touch', os.path.join(self.hostfs_mntpt, suffix)
             ])
 
     def test_create_file(self, filename='testfile', dirname=None, user=None,
@@ -604,7 +604,7 @@ class CephFSMount(object):
         for suffix in self.test_files:
             log.info("Checking file {0}".format(suffix))
             r = self.client_remote.run(args=[
-                'sudo', 'ls', os.path.join(self.hostfs_mntpt, suffix)
+                'ls', os.path.join(self.hostfs_mntpt, suffix)
             ], check_status=False)
             if r.exitstatus != 0:
                 raise RuntimeError("Expected file {0} not found".format(suffix))
@@ -617,7 +617,7 @@ class CephFSMount(object):
         if path.find(self.hostfs_mntpt) == -1:
             path = os.path.join(self.hostfs_mntpt, path)
 
-        sudo_write_file(self.client_remote, path, data)
+        write_file(self.client_remote, path, data)
 
         if perms:
             self.run_shell(args=f'chmod {perms} {path}')
@@ -629,7 +629,7 @@ class CephFSMount(object):
         if path.find(self.hostfs_mntpt) == -1:
             path = os.path.join(self.hostfs_mntpt, path)
 
-        return self.run_shell(args=['sudo', 'cat', path], omit_sudo=False).\
+        return self.run_shell(args=['cat', path]).\
             stdout.getvalue().strip()
 
     def create_destroy(self):
@@ -638,34 +638,36 @@ class CephFSMount(object):
         filename = "{0} {1}".format(datetime.datetime.now(), self.client_id)
         log.debug("Creating test file {0}".format(filename))
         self.client_remote.run(args=[
-            'sudo', 'touch', os.path.join(self.hostfs_mntpt, filename)
+            'touch', os.path.join(self.hostfs_mntpt, filename)
         ])
         log.debug("Deleting test file {0}".format(filename))
         self.client_remote.run(args=[
-            'sudo', 'rm', '-f', os.path.join(self.hostfs_mntpt, filename)
+            'rm', '-f', os.path.join(self.hostfs_mntpt, filename)
         ])
 
-    def _run_python(self, pyscript, py_version='python3'):
-        return self.client_remote.run(
-               args=['sudo', 'adjust-ulimits', 'daemon-helper', 'kill',
-                     py_version, '-c', pyscript], wait=False, stdin=run.PIPE,
-               stdout=StringIO())
+    def _run_python(self, pyscript, py_version='python3', sudo=False):
+        args = []
+        if sudo:
+            args.append('sudo')
+        args += ['adjust-ulimits', 'daemon-helper', 'kill', py_version, '-c', pyscript]
+        return self.client_remote.run(args=args, wait=False, stdin=run.PIPE, stdout=StringIO())
 
-    def run_python(self, pyscript, py_version='python3'):
-        p = self._run_python(pyscript, py_version)
+    def run_python(self, pyscript, py_version='python3', sudo=False):
+        p = self._run_python(pyscript, py_version, sudo=sudo)
         p.wait()
         return p.stdout.getvalue().strip()
 
-    def run_shell(self, args, omit_sudo=True, timeout=900, **kwargs):
+    def run_shell(self, args, timeout=900, **kwargs):
         args = args.split() if isinstance(args, str) else args
-        # XXX: all commands ran with CephFS mount as CWD must be executed with
-        #  superuser privileges when tests are being run using teuthology.
-        if args[0] != 'sudo':
-            args.insert(0, 'sudo')
+        kwargs.pop('omit_sudo', False)
+        sudo = kwargs.pop('sudo', False)
         cwd = kwargs.pop('cwd', self.mountpoint)
         stdout = kwargs.pop('stdout', StringIO())
         stderr = kwargs.pop('stderr', StringIO())
 
+        if sudo:
+            args.insert(0, 'sudo')
+
         return self.client_remote.run(args=args, cwd=cwd, timeout=timeout, stdout=stdout, stderr=stderr, **kwargs)
 
     def run_shell_payload(self, payload, **kwargs):
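
With the refactor above, privilege escalation is explicit: run_shell()
pops 'sudo' from kwargs and prepends it only on request, while the old
omit_sudo keyword is popped and discarded for backward compatibility.
A usage sketch (assuming a CephFSMount instance named 'mount'):

    mount.run_shell('ls subdir')                  # plain test user
    mount.run_shell(['rmdir', d], sudo=True)      # 'sudo' is prepended
    mount.run_shell(['cat', p], omit_sudo=False)  # ignored; old callers still work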
@@ -810,7 +812,7 @@ class CephFSMount(object):
         i = 0
         while i < timeout:
             r = self.client_remote.run(args=[
-                'sudo', 'ls', os.path.join(self.hostfs_mntpt, basename)
+                'stat', os.path.join(self.hostfs_mntpt, basename)
             ], check_status=False)
             if r.exitstatus == 0:
                 log.debug("File {0} became visible from {1} after {2}s".format(
@@ -908,7 +910,7 @@ class CephFSMount(object):
 
         log.info("check lock on file {0}".format(basename))
         self.client_remote.run(args=[
-            'sudo', 'python3', '-c', pyscript
+            'python3', '-c', pyscript
         ])
 
     def write_background(self, basename="background_file", loop=False):
@@ -969,6 +971,7 @@ class CephFSMount(object):
 
     def validate_test_pattern(self, filename, size):
         log.info("Validating {0} bytes from {1}".format(size, filename))
+        # Use sudo because cephfs-data-scan may recreate the file with owner==root
         return self.run_python(dedent("""
             import zlib
             path = "{path}"
@@ -985,7 +988,7 @@ class CephFSMount(object):
         """.format(
             path=os.path.join(self.hostfs_mntpt, filename),
             size=size
-        )))
+        )), sudo=True)
 
     def open_n_background(self, fs_path, count):
         """
@@ -1099,7 +1102,7 @@ class CephFSMount(object):
     def lstat(self, fs_path, follow_symlinks=False, wait=True):
         return self.stat(fs_path, follow_symlinks=False, wait=True)
 
-    def stat(self, fs_path, follow_symlinks=True, wait=True):
+    def stat(self, fs_path, follow_symlinks=True, wait=True, **kwargs):
         """
         stat a file, and return the result as a dictionary like this:
         {
@@ -1139,7 +1142,7 @@ class CephFSMount(object):
                 dict([(a, getattr(s, a)) for a in attrs]),
                 indent=2))
             """).format(stat_call=stat_call)
-        proc = self._run_python(pyscript)
+        proc = self._run_python(pyscript, **kwargs)
         if wait:
             proc.wait()
             return json.loads(proc.stdout.getvalue().strip())
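
stat() now forwards **kwargs to _run_python(), so callers can stat paths
that recovery tools recreate with root ownership, e.g. (the pattern used
by test_data_scan.py below, with 'mount' as above):

    st = mount.stat("lost+found/%x" % ino, sudo=True)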
@@ -1205,7 +1208,7 @@ class CephFSMount(object):
         proc.wait()
         return int(proc.stdout.getvalue().strip())
 
-    def ls(self, path=None):
+    def ls(self, path=None, **kwargs):
         """
         Wrap ls: return a list of strings
         """
@@ -1213,7 +1216,7 @@ class CephFSMount(object):
         if path:
             cmd.append(path)
 
-        ls_text = self.run_shell(cmd).stdout.getvalue().strip()
+        ls_text = self.run_shell(cmd, **kwargs).stdout.getvalue().strip()
 
         if ls_text:
             return ls_text.split("\n")
@@ -1222,7 +1225,7 @@ class CephFSMount(object):
             # gives you [''] instead of []
             return []
 
-    def setfattr(self, path, key, val):
+    def setfattr(self, path, key, val, **kwargs):
         """
         Wrap setfattr.
 
@@ -1231,16 +1234,16 @@ class CephFSMount(object):
         :param val: xattr value
         :return: None
         """
-        self.run_shell(["setfattr", "-n", key, "-v", val, path])
+        self.run_shell(["setfattr", "-n", key, "-v", val, path], **kwargs)
 
-    def getfattr(self, path, attr):
+    def getfattr(self, path, attr, **kwargs):
         """
         Wrap getfattr: return the values of a named xattr on one file, or
         None if the attribute is not found.
 
         :return: a string
         """
-        p = self.run_shell(["getfattr", "--only-values", "-n", attr, path], wait=False)
+        p = self.run_shell(["getfattr", "--only-values", "-n", attr, path], wait=False, **kwargs)
         try:
             p.wait()
         except CommandFailedError as e:
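
The same **kwargs plumbing applies to the xattr wrappers, so layout
attributes on root-owned directories stay reachable (the pattern used by
test_volumes.py below):

    pool = mount.getfattr(".", "ceph.dir.layout.pool")
    mount.setfattr(createpath, "ceph.dir.layout.pool", pool, sudo=True)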
diff --git a/qa/tasks/cephfs/test_cap_flush.py b/qa/tasks/cephfs/test_cap_flush.py
index 2fc9410d13a77bb12d5fffe01ab6c6515eae5e55..c472e85bd5dde3cd36208e86b784de19d57de0a2 100644
@@ -41,10 +41,10 @@ class TestCapFlush(CephFSTestCase):
             fd = os.open("{1}", os.O_CREAT | os.O_RDWR, 0o644)
             os.fchmod(fd, 0o640)
             """).format(dir_path, file_name)
-        self.mount_a.run_python(py_script)
+        self.mount_a.run_python(py_script, sudo=True)
 
         # Modify file mode by different user. ceph-fuse will send a setattr request
-        self.mount_a.run_shell(["chmod", "600", file_path], wait=False)
+        self.mount_a.run_shell(["chmod", "600", file_path], wait=False, sudo=True)
 
         time.sleep(10)
 
diff --git a/qa/tasks/cephfs/test_cephfs_shell.py b/qa/tasks/cephfs/test_cephfs_shell.py
index 174abd1fa3af638730d501dba775136e81dc5b6a..83ee3991196e05a3117a820a5a9c83b1b47bc49f 100644
@@ -512,8 +512,7 @@ class TestDU(TestCephFSShell):
     def test_du_works_for_regfiles(self):
         regfilename = 'some_regfile'
         regfile_abspath = path.join(self.mount_a.mountpoint, regfilename)
-        self.mount_a.client_remote.write_file(regfile_abspath,
-                                              'somedata', sudo=True)
+        self.mount_a.client_remote.write_file(regfile_abspath, 'somedata')
 
         size = humansize(self.mount_a.stat(regfile_abspath)['st_size'])
         expected_output = r'{}{}{}'.format(size, " +", regfilename)
@@ -527,8 +526,7 @@ class TestDU(TestCephFSShell):
         regfilename = 'some_regfile'
         regfile_abspath = path.join(dir_abspath, regfilename)
         self.mount_a.run_shell_payload(f"mkdir {dir_abspath}")
-        self.mount_a.client_remote.write_file(regfile_abspath,
-                                              'somedata', sudo=True)
+        self.mount_a.client_remote.write_file(regfile_abspath, 'somedata')
 
         # XXX: we stat `regfile_abspath` here because ceph du reports a non-empty
         # directory's size as sum of sizes of all files under it.
@@ -553,8 +551,7 @@ class TestDU(TestCephFSShell):
     def test_du_works_for_hardlinks(self):
         regfilename = 'some_regfile'
         regfile_abspath = path.join(self.mount_a.mountpoint, regfilename)
-        self.mount_a.client_remote.write_file(regfile_abspath,
-                                              'somedata', sudo=True)
+        self.mount_a.client_remote.write_file(regfile_abspath, 'somedata')
         hlinkname = 'some_hardlink'
         hlink_abspath = path.join(self.mount_a.mountpoint, hlinkname)
         self.mount_a.run_shell_payload(f"ln {regfile_abspath} {hlink_abspath}")
@@ -568,8 +565,7 @@ class TestDU(TestCephFSShell):
     def test_du_works_for_softlinks_to_files(self):
         regfilename = 'some_regfile'
         regfile_abspath = path.join(self.mount_a.mountpoint, regfilename)
-        self.mount_a.client_remote.write_file(regfile_abspath,
-                                              'somedata', sudo=True)
+        self.mount_a.client_remote.write_file(regfile_abspath, 'somedata')
         slinkname = 'some_softlink'
         slink_abspath = path.join(self.mount_a.mountpoint, slinkname)
         self.mount_a.run_shell_payload(f"ln -s {regfile_abspath} {slink_abspath}")
@@ -624,10 +620,8 @@ class TestDU(TestCephFSShell):
         self.mount_a.run_shell_payload(f"mkdir -p {dir21_abspath}")
         self.mount_a.run_shell_payload(f"touch {regfile121_abspath}")
 
-        self.mount_a.client_remote.write_file(regfile_abspath,
-                                              'somedata', sudo=True)
-        self.mount_a.client_remote.write_file(regfile121_abspath,
-                                              'somemoredata', sudo=True)
+        self.mount_a.client_remote.write_file(regfile_abspath, 'somedata')
+        self.mount_a.client_remote.write_file(regfile121_abspath, 'somemoredata')
 
         # TODO: is there a way to trigger/force update ceph.dir.rbytes?
         # wait so that attr ceph.dir.rbytes gets a chance to be updated.
@@ -815,8 +809,7 @@ class TestQuota(TestCephFSShell):
         file_abspath = path.join(dir_abspath, filename)
         try:
             # Write should fail as bytes quota is set to 6
-            self.mount_a.client_remote.write_file(file_abspath,
-                    'Disk raise Exception', sudo=True)
+            self.mount_a.client_remote.write_file(file_abspath, 'Disk raise Exception')
             raise Exception("Write should have failed")
         except CommandFailedError:
             # Test should pass only when write command fails
diff --git a/qa/tasks/cephfs/test_client_recovery.py b/qa/tasks/cephfs/test_client_recovery.py
index e02088278449c42ee702c9f315fa02360214f63a..c067e8ac933bcdec2acd225e9ddec91f4bf130cd 100644
@@ -508,9 +508,8 @@ class TestClientRecovery(CephFSTestCase):
         self.assertEqual(current_readdirs, initial_readdirs);
 
         mount_b_gid = self.mount_b.get_global_id()
-        mount_b_pid = self.mount_b.get_client_pid()
         # stop ceph-fuse process of mount_b
-        self.mount_b.client_remote.run(args=["sudo", "kill", "-STOP", mount_b_pid])
+        self.mount_b.suspend_netns()
 
         self.assert_session_state(mount_b_gid, "open")
         time.sleep(session_timeout * 1.5)  # Long enough for MDS to consider session stale
@@ -519,7 +518,7 @@ class TestClientRecovery(CephFSTestCase):
         self.assert_session_state(mount_b_gid, "stale")
 
         # resume ceph-fuse process of mount_b
-        self.mount_b.client_remote.run(args=["sudo", "kill", "-CONT", mount_b_pid])
+        self.mount_b.resume_netns()
         # Is the new file visible from mount_b? (caps become invalid after session stale)
         self.mount_b.run_shell(["ls", "testdir/file2"])
 
@@ -682,7 +681,7 @@ class TestClientRecovery(CephFSTestCase):
                 raise RuntimeError("read() failed to raise error")
             """).format(path=path)
         rproc = self.mount_a.client_remote.run(
-                    args=['sudo', 'python3', '-c', pyscript],
+                    args=['python3', '-c', pyscript],
                     wait=False, stdin=run.PIPE, stdout=run.PIPE)
 
         rproc.stdout.readline()
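
Design note on the hunks above: SIGSTOPping ceph-fuse froze the whole
client process; suspend_netns()/resume_netns() (mount helpers defined
elsewhere in qa/tasks/cephfs) instead cut only client-to-MDS traffic,
which is all a stale-session test needs, and they require no sudo or pid
plumbing in the test itself:

    self.mount_b.suspend_netns()   # session goes stale while the process lives
    # ... assertions on session state ...
    self.mount_b.resume_netns()    # client reconnects, caps revalidate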
diff --git a/qa/tasks/cephfs/test_data_scan.py b/qa/tasks/cephfs/test_data_scan.py
index 933a7f67d76056014eddf05184d25ab3e64b3fe3..2aa7398ec6f63e757adde00bd868965b6c05ef98 100644
@@ -82,8 +82,8 @@ class SimpleWorkload(Workload):
         self._initial_state = self._mount.stat("subdir/sixmegs")
 
     def validate(self):
-        self._mount.run_shell(["ls", "subdir"])
-        st = self._mount.stat("subdir/sixmegs")
+        self._mount.run_shell(["ls", "subdir"], sudo=True)
+        st = self._mount.stat("subdir/sixmegs", sudo=True)
         self.assert_equal(st['st_size'], self._initial_state['st_size'])
         return self._errors
 
@@ -104,8 +104,8 @@ class MovedFile(Workload):
         pass
 
     def validate(self):
-        self.assert_equal(self._mount.ls(), ["subdir_alpha"])
-        st = self._mount.stat("subdir_alpha/sixmegs")
+        self.assert_equal(self._mount.ls(sudo=True), ["subdir_alpha"])
+        st = self._mount.stat("subdir_alpha/sixmegs", sudo=True)
         self.assert_equal(st['st_size'], self._initial_state['st_size'])
         return self._errors
 
@@ -124,9 +124,9 @@ class BacktracelessFile(Workload):
         ino_name = "%x" % self._initial_state["st_ino"]
 
         # The inode should be linked into lost+found because we had no path for it
-        self.assert_equal(self._mount.ls(), ["lost+found"])
-        self.assert_equal(self._mount.ls("lost+found"), [ino_name])
-        st = self._mount.stat("lost+found/{ino_name}".format(ino_name=ino_name))
+        self.assert_equal(self._mount.ls(sudo=True), ["lost+found"])
+        self.assert_equal(self._mount.ls("lost+found", sudo=True), [ino_name])
+        st = self._mount.stat(f"lost+found/{ino_name}", sudo=True)
 
         # We might not have got the name or path, but we should still get the size
         self.assert_equal(st['st_size'], self._initial_state['st_size'])
@@ -200,7 +200,7 @@ class StripedStashedLayout(Workload):
         # The unflushed file should have been recovered into lost+found without
         # the correct layout: read back junk
         ino_name = "%x" % self._initial_state["unflushed_ino"]
-        self.assert_equal(self._mount.ls("lost+found"), [ino_name])
+        self.assert_equal(self._mount.ls("lost+found", sudo=True), [ino_name])
         try:
             self._mount.validate_test_pattern(os.path.join("lost+found", ino_name), 1024 * 512)
         except CommandFailedError:
@@ -259,8 +259,8 @@ class MovedDir(Workload):
         self.assert_equal(len(root_files), 1)
         self.assert_equal(root_files[0] in ["grandfather", "grandmother"], True)
         winner = root_files[0]
-        st_opf = self._mount.stat("{0}/parent/orig_pos_file".format(winner))
-        st_npf = self._mount.stat("{0}/parent/new_pos_file".format(winner))
+        st_opf = self._mount.stat(f"{winner}/parent/orig_pos_file", sudo=True)
+        st_npf = self._mount.stat(f"{winner}/parent/new_pos_file", sudo=True)
 
         self.assert_equal(st_opf['st_size'], self._initial_state[0]['st_size'])
         self.assert_equal(st_npf['st_size'], self._initial_state[1]['st_size'])
@@ -278,7 +278,8 @@ class MissingZerothObject(Workload):
         self._filesystem.rados(["rm", zeroth_id], pool=self._filesystem.get_data_pool_name())
 
     def validate(self):
-        st = self._mount.stat("lost+found/{0:x}".format(self._initial_state['st_ino']))
+        ino = self._initial_state['st_ino']
+        st = self._mount.stat(f"lost+found/{ino:x}", sudo=True)
         self.assert_equal(st['st_size'], self._initial_state['st_size'])
 
 
@@ -295,12 +296,11 @@ class NonDefaultLayout(Workload):
 
     def validate(self):
         # Check we got the layout reconstructed properly
-        object_size = int(self._mount.getfattr(
-            "./datafile", "ceph.file.layout.object_size"))
+        object_size = int(self._mount.getfattr("./datafile", "ceph.file.layout.object_size", sudo=True))
         self.assert_equal(object_size, 8388608)
 
         # Check we got the file size reconstructed properly
-        st = self._mount.stat("datafile")
+        st = self._mount.stat("datafile", sudo=True)
         self.assert_equal(st['st_size'], self._initial_state['st_size'])
 
 
@@ -490,7 +490,9 @@ class TestDataScan(CephFSTestCase):
         self.fs.set_joinable()
         self.fs.wait_for_daemons()
         self.mount_a.mount_wait()
-        out = self.mount_a.run_shell(["cat", "subdir/{0}".format(victim_dentry)]).stdout.getvalue().strip()
+        self.mount_a.run_shell(["ls", "-l", "subdir/"]) # debugging
+        # Use sudo because cephfs-data-scan will reinsert the dentry with root ownership; it can't know the real owner.
+        out = self.mount_a.run_shell_payload(f"cat subdir/{victim_dentry}", sudo=True).stdout.getvalue().strip()
         self.assertEqual(out, victim_dentry)
 
         # Finally, close the loop by checking our injected dentry survives a merge
diff --git a/qa/tasks/cephfs/test_nfs.py b/qa/tasks/cephfs/test_nfs.py
index 83c4797b2818f99e0bd966629c86e99b0bd164ac..c3feb1604c9251562825b7b694d027dcf12d554b 100644
@@ -264,9 +264,11 @@ class TestNFS(MgrTestCase):
                 return
             raise
 
+        self.ctx.cluster.run(args=['sudo', 'chmod', '1777', '/mnt'])
+
         try:
-            self.ctx.cluster.run(args=['sudo', 'touch', '/mnt/test'])
-            out_mnt = self._sys_cmd(['sudo', 'ls', '/mnt'])
+            self.ctx.cluster.run(args=['touch', '/mnt/test'])
+            out_mnt = self._sys_cmd(['ls', '/mnt'])
             self.assertEqual(out_mnt,  b'test\n')
         finally:
             self.ctx.cluster.run(args=['sudo', 'umount', '/mnt'])
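
Note: mode 1777 (world-writable plus the sticky bit, as on /tmp) is what
lets the unprivileged touch/ls above succeed on the NFS mount while still
preventing users from removing each other's files:

    self.ctx.cluster.run(args=['sudo', 'chmod', '1777', '/mnt'])  # once, as root
    self.ctx.cluster.run(args=['touch', '/mnt/test'])             # then no sudo needed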
diff --git a/qa/tasks/cephfs/test_scrub_checks.py b/qa/tasks/cephfs/test_scrub_checks.py
index f1af604802866ec962b6c66aafdeeb26da122414..bcfc2fc9a3a1f368b9dc5de299c6c7db63e6a02e 100644
@@ -303,8 +303,8 @@ class TestScrubChecks(CephFSTestCase):
         mds_rank = 0
         test_dir = "scrub_repair_path"
 
-        self.mount_a.run_shell(["sudo", "mkdir", test_dir])
-        self.mount_a.run_shell(["sudo", "touch", "{0}/file".format(test_dir)])
+        self.mount_a.run_shell(["mkdir", test_dir])
+        self.mount_a.run_shell(["touch", "{0}/file".format(test_dir)])
         dir_objname = "{:x}.00000000".format(self.mount_a.path_to_ino(test_dir))
 
         self.mount_a.umount_wait()
@@ -323,7 +323,7 @@ class TestScrubChecks(CephFSTestCase):
 
         # fragstat indicates the directory is not empty, rmdir should fail
         with self.assertRaises(CommandFailedError) as ar:
-            self.mount_a.run_shell(["sudo", "rmdir", test_dir])
+            self.mount_a.run_shell(["rmdir", test_dir])
         self.assertEqual(ar.exception.exitstatus, 1)
 
         self.tell_command(mds_rank, "scrub start /{0} repair".format(test_dir),
@@ -333,7 +333,7 @@ class TestScrubChecks(CephFSTestCase):
         time.sleep(10)
 
         # fragstat should be fixed
-        self.mount_a.run_shell(["sudo", "rmdir", test_dir])
+        self.mount_a.run_shell(["rmdir", test_dir])
 
     @staticmethod
     def json_validator(json_out, rc, element, expected_value):
diff --git a/qa/tasks/cephfs/test_volumes.py b/qa/tasks/cephfs/test_volumes.py
index 43be4c19c5f2deecd597e51d77480a77b6a074ac..8f83da9da5d3aa664329210aad9dfd3d7b054f24 100644
@@ -233,20 +233,20 @@ class TestVolumesHelper(CephFSTestCase):
         subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group)
 
         if pool is not None:
-            self.mount_a.setfattr(subvolpath, 'ceph.dir.layout.pool', pool)
+            self.mount_a.setfattr(subvolpath, 'ceph.dir.layout.pool', pool, sudo=True)
 
         if pool_namespace is not None:
-            self.mount_a.setfattr(subvolpath, 'ceph.dir.layout.pool_namespace', pool_namespace)
+            self.mount_a.setfattr(subvolpath, 'ceph.dir.layout.pool_namespace', pool_namespace, sudo=True)
 
     def _do_subvolume_attr_update(self, subvolume, uid, gid, mode, subvolume_group=None):
         subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group)
 
         # mode
-        self.mount_a.run_shell(['chmod', mode, subvolpath])
+        self.mount_a.run_shell(['chmod', mode, subvolpath], sudo=True)
 
         # ownership
-        self.mount_a.run_shell(['chown', uid, subvolpath])
-        self.mount_a.run_shell(['chgrp', gid, subvolpath])
+        self.mount_a.run_shell(['chown', uid, subvolpath], sudo=True)
+        self.mount_a.run_shell(['chgrp', gid, subvolpath], sudo=True)
 
     def _do_subvolume_io(self, subvolume, subvolume_group=None, create_dir=None,
                          number_of_files=DEFAULT_NUMBER_OF_FILES, file_size=DEFAULT_FILE_SIZE):
@@ -262,7 +262,7 @@ class TestVolumesHelper(CephFSTestCase):
         io_path = subvolpath
         if create_dir:
             io_path = os.path.join(subvolpath, create_dir)
-            self.mount_a.run_shell(["mkdir", "-p", io_path])
+            self.mount_a.run_shell_payload(f"mkdir -p {io_path}")
 
         log.debug("filling subvolume {0} with {1} files each {2}MB size under directory {3}".format(subvolume, number_of_files, file_size, io_path))
         for i in range(number_of_files):
@@ -278,11 +278,11 @@ class TestVolumesHelper(CephFSTestCase):
         # this symlink's ownership would be changed
         sym_path2 = os.path.join(dir_path, "sym.0")
 
-        self.mount_a.run_shell(["sudo", "mkdir", dir_path], omit_sudo=False)
-        self.mount_a.run_shell(["sudo", "ln", "-s", "./{}".format(reg_file), sym_path1], omit_sudo=False)
-        self.mount_a.run_shell(["sudo", "ln", "-s", "./{}".format(reg_file), sym_path2], omit_sudo=False)
+        self.mount_a.run_shell(["mkdir", dir_path])
+        self.mount_a.run_shell(["ln", "-s", "./{}".format(reg_file), sym_path1])
+        self.mount_a.run_shell(["ln", "-s", "./{}".format(reg_file), sym_path2])
         # flip ownership to nobody. assumption: nobody's id is 65534
-        self.mount_a.run_shell(["sudo", "chown", "-h", "65534:65534", sym_path2], omit_sudo=False)
+        self.mount_a.run_shell(["chown", "-h", "65534:65534", sym_path2], sudo=True, omit_sudo=False)
 
     def _wait_for_trash_empty(self, timeout=30):
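
One quirk in the chown hunk above: it passes both sudo=True and
omit_sudo=False. The latter is harmless, since run_shell() (see the
mount.py hunk earlier) pops and discards omit_sudo; only sudo=True has
any effect:

    kwargs.pop('omit_sudo', False)    # accepted but ignored (back-compat)
    sudo = kwargs.pop('sudo', False)  # the flag that actually matters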
         # XXX: construct the trash dir path (note that there is no mgr
@@ -301,7 +301,7 @@ class TestVolumesHelper(CephFSTestCase):
             group = subvol_group if subvol_group is not None else '_nogroup'
             metapath = os.path.join(".", "volumes", group, subvol_name, ".meta")
 
-        out = self.mount_a.run_shell(['cat', metapath])
+        out = self.mount_a.run_shell(['cat', metapath], sudo=True)
         lines = out.stdout.getvalue().strip().split('\n')
         sv_version = -1
         for line in lines:
@@ -316,16 +316,16 @@ class TestVolumesHelper(CephFSTestCase):
         basepath = os.path.join("volumes", group, subvol_name)
         uuid_str = str(uuid.uuid4())
         createpath = os.path.join(basepath, uuid_str)
-        self.mount_a.run_shell(['mkdir', '-p', createpath])
+        self.mount_a.run_shell(['mkdir', '-p', createpath], sudo=True)
 
         # create a v1 snapshot, to prevent auto upgrades
         if has_snapshot:
             snappath = os.path.join(createpath, ".snap", "fake")
-            self.mount_a.run_shell(['mkdir', '-p', snappath])
+            self.mount_a.run_shell(['mkdir', '-p', snappath], sudo=True)
 
         # add required xattrs to subvolume
         default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
-        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool)
+        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)
 
         # create a v1 .meta file
         meta_contents = "[GLOBAL]\nversion = 1\ntype = {0}\npath = {1}\nstate = {2}\n".format(subvol_type, "/" + createpath, state)
@@ -333,17 +333,16 @@ class TestVolumesHelper(CephFSTestCase):
             # add a fake clone source
             meta_contents = meta_contents + '[source]\nvolume = fake\nsubvolume = fake\nsnapshot = fake\n'
         meta_filepath1 = os.path.join(self.mount_a.mountpoint, basepath, ".meta")
-        self.mount_a.client_remote.write_file(meta_filepath1,
-                                              meta_contents, sudo=True)
+        self.mount_a.client_remote.write_file(meta_filepath1, meta_contents, sudo=True)
         return createpath
 
     def _update_fake_trash(self, subvol_name, subvol_group=None, trash_name='fake', create=True):
         group = subvol_group if subvol_group is not None else '_nogroup'
         trashpath = os.path.join("volumes", group, subvol_name, '.trash', trash_name)
         if create:
-            self.mount_a.run_shell(['mkdir', '-p', trashpath])
+            self.mount_a.run_shell(['mkdir', '-p', trashpath], sudo=True)
         else:
-            self.mount_a.run_shell(['rmdir', trashpath])
+            self.mount_a.run_shell(['rmdir', trashpath], sudo=True)
 
     def _configure_guest_auth(self, guest_mount, authid, key):
         """
@@ -612,7 +611,7 @@ class TestSubvolumeGroups(TestVolumesHelper):
 
         # create group
         self._fs_cmd("subvolumegroup", "create", self.volname, group1)
-        self._fs_cmd("subvolumegroup", "create", self.volname, group2, "--mode", "777")
+        self._fs_cmd("subvolumegroup", "create", self.volname, group2, f"--mode={expected_mode2}")
 
         group1_path = self._get_subvolume_group_path(self.volname, group1)
         group2_path = self._get_subvolume_group_path(self.volname, group2)
@@ -726,7 +725,7 @@ class TestSubvolumes(TestVolumesHelper):
 
         # create subvolumes
         for subvolume in subvolumes:
-            self._fs_cmd("subvolume", "create", self.volname, subvolume)
+            self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
             self._do_subvolume_io(subvolume, number_of_files=10)
 
         self.mount_a.umount_wait()
@@ -1157,7 +1156,7 @@ class TestSubvolumes(TestVolumesHelper):
             else:
                 raise RuntimeError("expected renaming subvolume incarnation out of subvolume directory to fail")
             """)
-        self.mount_a.run_python(rename_script.format(src=srcpath, dst=dstpath))
+        self.mount_a.run_python(rename_script.format(src=srcpath, dst=dstpath), sudo=True)
 
         # remove subvolume
         self._fs_cmd("subvolume", "rm", self.volname, subvolume)
@@ -1196,11 +1195,11 @@ class TestSubvolumes(TestVolumesHelper):
 
         # emulate a old-fashioned subvolume in a custom group
         createpath = os.path.join(".", "volumes", group, subvolume)
-        self.mount_a.run_shell(['mkdir', '-p', createpath])
+        self.mount_a.run_shell(['mkdir', '-p', createpath], sudo=True)
 
         # add required xattrs to subvolume
         default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
-        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool)
+        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)
 
         mount_path = os.path.join("/", "volumes", group, subvolume)
 
@@ -1253,7 +1252,7 @@ class TestSubvolumes(TestVolumesHelper):
         guest_mount.umount_wait()
 
         # create group
-        self._fs_cmd("subvolumegroup", "create", self.volname, group)
+        self._fs_cmd("subvolumegroup", "create", self.volname, group, "--mode=777")
 
         # create subvolume in group
         self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
@@ -1630,7 +1629,7 @@ class TestSubvolumes(TestVolumesHelper):
 
         # Induce partial auth update state by modifying the auth metadata file,
         # and then run authorize again.
-        guest_mount.run_shell(['sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename)])
+        guest_mount.run_shell(['sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename)], sudo=True)
 
         # Authorize 'guestclient_1' to access the subvolume.
         self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"],
@@ -1686,7 +1685,7 @@ class TestSubvolumes(TestVolumesHelper):
 
         # Induce partial auth update state by modifying the auth metadata file,
         # and then run de-authorize.
-        guest_mount.run_shell(['sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename)])
+        guest_mount.run_shell(['sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename)], sudo=True)
 
         # Deauthorize 'guestclient_1' to access the subvolume2.
         self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, guestclient_1["auth_id"],
@@ -1739,7 +1738,7 @@ class TestSubvolumes(TestVolumesHelper):
         self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))
 
         # Replace 'subvolumes' to 'volumes', old style auth-metadata file
-        guest_mount.run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)])
+        guest_mount.run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)], sudo=True)
 
         # Authorize 'guestclient_1' to access the subvolume2. This should transparently update 'volumes' to 'subvolumes'
         self._fs_cmd("subvolume", "authorize", self.volname, subvolume2, guestclient_1["auth_id"],
@@ -1817,7 +1816,7 @@ class TestSubvolumes(TestVolumesHelper):
         self.assertIn(auth_metadata_filename, guest_mount.ls("volumes"))
 
         # Replace 'subvolumes' to 'volumes', old style auth-metadata file
-        guest_mount.run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)])
+        guest_mount.run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)], sudo=True)
 
         # Deauthorize 'guestclient_1' to access the subvolume2. This should update 'volumes' to subvolumes'
         self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, auth_id, "--group_name", group)
@@ -1875,7 +1874,7 @@ class TestSubvolumes(TestVolumesHelper):
         # subvolumes. Mount the two subvolumes. Write data to the volumes.
         for i in range(2):
             # Create subvolume.
-            self._fs_cmd("subvolume", "create", self.volname, subvolumes[i], "--group_name", group)
+            self._fs_cmd("subvolume", "create", self.volname, subvolumes[i], "--group_name", group, "--mode=777")
 
             # authorize guest authID read-write access to subvolume
             key = self._fs_cmd("subvolume", "authorize", self.volname, subvolumes[i], guestclient_1["auth_id"],
@@ -2010,7 +2009,7 @@ class TestSubvolumes(TestVolumesHelper):
         osize = self.DEFAULT_FILE_SIZE*1024*1024*20
         # create subvolume
         subvolname = self._generate_random_subvolume_name()
-        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))
+        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize), "--mode=777")
 
         # make sure it exists
         subvolpath = self._get_subvolume_path(self.volname, subvolname)
@@ -2057,7 +2056,7 @@ class TestSubvolumes(TestVolumesHelper):
         osize = self.DEFAULT_FILE_SIZE*1024*1024*20
         # create subvolume
         subvolname = self._generate_random_subvolume_name()
-        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))
+        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize), "--mode=777")
 
         # make sure it exists
         subvolpath = self._get_subvolume_path(self.volname, subvolname)
@@ -2105,7 +2104,7 @@ class TestSubvolumes(TestVolumesHelper):
         osize = self.DEFAULT_FILE_SIZE*1024*1024*10
         # create subvolume of quota 10MB and make sure it exists
         subvolname = self._generate_random_subvolume_name()
-        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize))
+        self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize), "--mode=777")
         subvolpath = self._get_subvolume_path(self.volname, subvolname)
         self.assertNotEqual(subvolpath, None)
 
@@ -2181,7 +2180,7 @@ class TestSubvolumes(TestVolumesHelper):
         # create subvolume
         subvolname = self._generate_random_subvolume_name()
         self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size",
-                     str(self.DEFAULT_FILE_SIZE*1024*1024*5))
+                     str(self.DEFAULT_FILE_SIZE*1024*1024*5), "--mode=777")
 
         # make sure it exists
         subvolpath = self._get_subvolume_path(self.volname, subvolname)
@@ -2482,7 +2481,7 @@ class TestSubvolumeSnapshots(TestVolumesHelper):
         snapshot, snap_missing = self._generate_random_snapshot_name(2)
 
         # create subvolume
-        self._fs_cmd("subvolume", "create", self.volname, subvolume)
+        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
 
         # do some IO
         self._do_subvolume_io(subvolume, number_of_files=1)
@@ -2594,13 +2593,13 @@ class TestSubvolumeSnapshots(TestVolumesHelper):
         # Create snapshot at ancestral level
         ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", "ancestral_snap_1")
         ancestral_snappath2 = os.path.join(".", "volumes", group, ".snap", "ancestral_snap_2")
-        self.mount_a.run_shell(['mkdir', '-p', ancestral_snappath1, ancestral_snappath2])
+        self.mount_a.run_shell(['mkdir', '-p', ancestral_snappath1, ancestral_snappath2], sudo=True)
 
         subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume, group))
         self.assertEqual(len(subvolsnapshotls), snap_count)
 
         # remove ancestral snapshots
-        self.mount_a.run_shell(['rmdir', ancestral_snappath1, ancestral_snappath2])
+        self.mount_a.run_shell(['rmdir', ancestral_snappath1, ancestral_snappath2], sudo=True)
 
         # remove snapshot
         for snapshot in snapshots:
@@ -2634,7 +2633,7 @@ class TestSubvolumeSnapshots(TestVolumesHelper):
         # Create snapshot at ancestral level
         ancestral_snap_name = "ancestral_snap_1"
         ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", ancestral_snap_name)
-        self.mount_a.run_shell(['mkdir', '-p', ancestral_snappath1])
+        self.mount_a.run_shell(['mkdir', '-p', ancestral_snappath1], sudo=True)
 
         # Validate existence of inherited snapshot
         group_path = os.path.join(".", "volumes", group)
@@ -2652,7 +2651,7 @@ class TestSubvolumeSnapshots(TestVolumesHelper):
             self.fail("expected snapshot info of inherited snapshot to fail")
 
         # remove ancestral snapshots
-        self.mount_a.run_shell(['rmdir', ancestral_snappath1])
+        self.mount_a.run_shell(['rmdir', ancestral_snappath1], sudo=True)
 
         # remove subvolume
         self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
@@ -2682,7 +2681,7 @@ class TestSubvolumeSnapshots(TestVolumesHelper):
         # Create snapshot at ancestral level
         ancestral_snap_name = "ancestral_snap_1"
         ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", ancestral_snap_name)
-        self.mount_a.run_shell(['mkdir', '-p', ancestral_snappath1])
+        self.mount_a.run_shell(['mkdir', '-p', ancestral_snappath1], sudo=True)
 
         # Validate existence of inherited snap
         group_path = os.path.join(".", "volumes", group)
@@ -2700,7 +2699,7 @@ class TestSubvolumeSnapshots(TestVolumesHelper):
             self.fail("expected removing inheirted snapshot to fail")
 
         # remove ancestral snapshots
-        self.mount_a.run_shell(['rmdir', ancestral_snappath1])
+        self.mount_a.run_shell(['rmdir', ancestral_snappath1], sudo=True)
 
         # remove subvolume
         self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
@@ -2730,7 +2729,7 @@ class TestSubvolumeSnapshots(TestVolumesHelper):
 
         # Create subvolumegroup snapshot
         group_snapshot_path = os.path.join(".", "volumes", group, ".snap", group_snapshot)
-        self.mount_a.run_shell(['mkdir', '-p', group_snapshot_path])
+        self.mount_a.run_shell(['mkdir', '-p', group_snapshot_path], sudo=True)
 
         # Validate existence of subvolumegroup snapshot
         self.mount_a.run_shell(['ls', group_snapshot_path])
@@ -2744,7 +2743,7 @@ class TestSubvolumeSnapshots(TestVolumesHelper):
             self.fail("expected subvolume snapshot creation with same name as subvolumegroup snapshot to fail")
 
         # remove subvolumegroup snapshot
-        self.mount_a.run_shell(['rmdir', group_snapshot_path])
+        self.mount_a.run_shell(['rmdir', group_snapshot_path], sudo=True)
 
         # remove subvolume
         self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
@@ -3057,7 +3056,7 @@ class TestSubvolumeSnapshots(TestVolumesHelper):
         clone = self._generate_random_clone_name()
 
         # create subvolume
-        self._fs_cmd("subvolume", "create", self.volname, subvolume)
+        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
 
         # do some IO
         self._do_subvolume_io(subvolume, number_of_files=64)
@@ -3115,7 +3114,7 @@ class TestSubvolumeSnapshotClones(TestVolumesHelper):
         clone = self._generate_random_clone_name()
 
         # create subvolume
-        self._fs_cmd("subvolume", "create", self.volname, subvolume)
+        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
 
         # do some IO
         self._do_subvolume_io(subvolume, number_of_files=1)
@@ -3175,7 +3174,7 @@ class TestSubvolumeSnapshotClones(TestVolumesHelper):
         osize = self.DEFAULT_FILE_SIZE*1024*1024*12
 
         # create subvolume, in an isolated namespace with a specified size
-        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--namespace-isolated", "--size", str(osize))
+        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--namespace-isolated", "--size", str(osize), "--mode=777")
 
         # do some IO
         self._do_subvolume_io(subvolume, number_of_files=8)
@@ -3218,7 +3217,7 @@ class TestSubvolumeSnapshotClones(TestVolumesHelper):
         clone = self._generate_random_clone_name()
 
         # create subvolume
-        self._fs_cmd("subvolume", "create", self.volname, subvolume)
+        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
 
         # do some IO
         self._do_subvolume_io(subvolume, number_of_files=64)
@@ -3264,7 +3263,7 @@ class TestSubvolumeSnapshotClones(TestVolumesHelper):
         clone = self._generate_random_clone_name()
 
         # create subvolume
-        self._fs_cmd("subvolume", "create", self.volname, subvolume)
+        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
 
         # do some IO
         self._do_subvolume_io(subvolume, number_of_files=64)
@@ -3309,7 +3308,7 @@ class TestSubvolumeSnapshotClones(TestVolumesHelper):
         clone = self._generate_random_clone_name()
 
         # create subvolume
-        self._fs_cmd("subvolume", "create", self.volname, subvolume)
+        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
 
         # do some IO
         self._do_subvolume_io(subvolume, number_of_files=64)
@@ -3357,7 +3356,7 @@ class TestSubvolumeSnapshotClones(TestVolumesHelper):
         clone = self._generate_random_clone_name()
 
         # create subvolume
-        self._fs_cmd("subvolume", "create", self.volname, subvolume)
+        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
 
         # store path for clone verification
         subvol1_path = self._get_subvolume_path(self.volname, subvolume)
@@ -3431,7 +3430,7 @@ class TestSubvolumeSnapshotClones(TestVolumesHelper):
         clone = self._generate_random_clone_name()
 
         # create subvolume
-        self._fs_cmd("subvolume", "create", self.volname, subvolume)
+        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
 
         # store path for clone verification
         subvol_path = self._get_subvolume_path(self.volname, subvolume)
@@ -3476,7 +3475,7 @@ class TestSubvolumeSnapshotClones(TestVolumesHelper):
         clone = self._generate_random_clone_name(1)
 
         # create subvolume
-        self._fs_cmd("subvolume", "create", self.volname, subvolume)
+        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
 
         # do some IO
         self._do_subvolume_io(subvolume, number_of_files=16)
@@ -3488,7 +3487,7 @@ class TestSubvolumeSnapshotClones(TestVolumesHelper):
         self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
 
         # recreate subvolume
-        self._fs_cmd("subvolume", "create", self.volname, subvolume)
+        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
 
         # get and store path for clone verification
         subvol2_path = self._get_subvolume_path(self.volname, subvolume)
@@ -3533,7 +3532,7 @@ class TestSubvolumeSnapshotClones(TestVolumesHelper):
         snapshot = self._generate_random_snapshot_name()
 
         # create subvolume
-        self._fs_cmd("subvolume", "create", self.volname, subvolume)
+        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
 
         # store path for clone verification
         subvol_path = self._get_subvolume_path(self.volname, subvolume)
@@ -3632,7 +3631,7 @@ class TestSubvolumeSnapshotClones(TestVolumesHelper):
         clone = self._generate_random_clone_name()
 
         # create subvolume
-        self._fs_cmd("subvolume", "create", self.volname, subvolume)
+        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
 
         # do some IO
         self._do_subvolume_io_mixed(subvolume)
@@ -3665,7 +3664,7 @@ class TestSubvolumeSnapshotClones(TestVolumesHelper):
         clone = self._generate_random_clone_name()
 
         # create subvolume
-        self._fs_cmd("subvolume", "create", self.volname, subvolume)
+        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
 
         # do some IO
         self._do_subvolume_io(subvolume, number_of_files=64)
@@ -3698,7 +3697,7 @@ class TestSubvolumeSnapshotClones(TestVolumesHelper):
         clone = self._generate_random_clone_name()
 
         # create subvolume
-        self._fs_cmd("subvolume", "create", self.volname, subvolume)
+        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
 
         # Create a file with suid, guid bits set along with executable bit.
         args = ["subvolume", "getpath", self.volname, subvolume]
@@ -3740,7 +3739,7 @@ class TestSubvolumeSnapshotClones(TestVolumesHelper):
         clone1, clone2 = self._generate_random_clone_name(2)
 
         # create subvolume
-        self._fs_cmd("subvolume", "create", self.volname, subvolume)
+        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
 
         # do some IO
         self._do_subvolume_io(subvolume, number_of_files=32)
@@ -3793,7 +3792,7 @@ class TestSubvolumeSnapshotClones(TestVolumesHelper):
         clone = self._generate_random_clone_name()
 
         # create subvolume
-        self._fs_cmd("subvolume", "create", self.volname, subvolume)
+        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
 
         # do some IO
         self._do_subvolume_io(subvolume, number_of_files=128)
@@ -3841,7 +3840,7 @@ class TestSubvolumeSnapshotClones(TestVolumesHelper):
         clones = self._generate_random_clone_name(NR_CLONES)
 
         # create subvolume
-        self._fs_cmd("subvolume", "create", self.volname, subvolume)
+        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
 
         # do some IO
         self._do_subvolume_io(subvolume, number_of_files=4, file_size=FILE_SIZE_MB)
@@ -3898,7 +3897,7 @@ class TestSubvolumeSnapshotClones(TestVolumesHelper):
         self._fs_cmd("subvolumegroup", "create", self.volname, c_group)
 
         # create subvolume
-        self._fs_cmd("subvolume", "create", self.volname, subvolume, s_group)
+        self._fs_cmd("subvolume", "create", self.volname, subvolume, s_group, "--mode=777")
 
         # do some IO
         self._do_subvolume_io(subvolume, subvolume_group=s_group, number_of_files=32)
@@ -3940,7 +3939,7 @@ class TestSubvolumeSnapshotClones(TestVolumesHelper):
         nr_files = int((pool_capacity * 0.99) / (TestVolumes.DEFAULT_FILE_SIZE * 1024 * 1024))
 
         # create subvolume
-        self._fs_cmd("subvolume", "create", self.volname, subvolume)
+        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
 
         # do some IO
         self._do_subvolume_io(subvolume, number_of_files=nr_files)
@@ -3999,8 +3998,8 @@ class TestSubvolumeSnapshotClones(TestVolumesHelper):
         clone = self._generate_random_clone_name()
 
         # create subvolumes
-        self._fs_cmd("subvolume", "create", self.volname, subvolume1)
-        self._fs_cmd("subvolume", "create", self.volname, subvolume2)
+        self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--mode=777")
+        self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--mode=777")
 
         # do some IO
         self._do_subvolume_io(subvolume1, number_of_files=32)
@@ -4055,7 +4054,7 @@ class TestSubvolumeSnapshotClones(TestVolumesHelper):
         newid = self.fs.add_data_pool(new_pool)
 
         # create subvolume
-        self._fs_cmd("subvolume", "create", self.volname, subvolume)
+        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
 
         # do some IO
         self._do_subvolume_io(subvolume, number_of_files=32)
@@ -4096,7 +4095,7 @@ class TestSubvolumeSnapshotClones(TestVolumesHelper):
         group = self._generate_random_group_name()
 
         # create subvolume
-        self._fs_cmd("subvolume", "create", self.volname, subvolume)
+        self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode=777")
 
         # do some IO
         self._do_subvolume_io(subvolume, number_of_files=32)
@@ -4185,11 +4184,11 @@ class TestSubvolumeSnapshotClones(TestVolumesHelper):
 
         # emulate a old-fashioned subvolume
         createpath = os.path.join(".", "volumes", "_nogroup", subvolume)
-        self.mount_a.run_shell(['mkdir', '-p', createpath])
+        self.mount_a.run_shell_payload(f"mkdir -p -m 777 {createpath}", sudo=True)
 
         # add required xattrs to subvolume
         default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
-        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool)
+        self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool, sudo=True)
 
         # do some IO
         self._do_subvolume_io(subvolume, number_of_files=64)
@@ -4259,7 +4258,7 @@ class TestSubvolumeSnapshotClones(TestVolumesHelper):
         self._fs_cmd("subvolumegroup", "create", self.volname, group)
 
         # create subvolume
-        self._fs_cmd("subvolume", "create", self.volname, subvolume, group)
+        self._fs_cmd("subvolume", "create", self.volname, subvolume, group, "--mode=777")
 
         # do some IO
         self._do_subvolume_io(subvolume, subvolume_group=group, number_of_files=32)
@@ -4426,11 +4425,11 @@ class TestMisc(TestVolumesHelper):
         # emulate a old-fashioned subvolume -- one in the default group and
         # the other in a custom group
         createpath1 = os.path.join(".", "volumes", "_nogroup", subvolume1)
-        self.mount_a.run_shell(['mkdir', '-p', createpath1])
+        self.mount_a.run_shell(['mkdir', '-p', createpath1], sudo=True)
 
         # create group
         createpath2 = os.path.join(".", "volumes", group, subvolume2)
-        self.mount_a.run_shell(['mkdir', '-p', createpath2])
+        self.mount_a.run_shell(['mkdir', '-p', createpath2], sudo=True)
 
         # this would auto-upgrade on access without anyone noticing
         subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume1)