qa/cephfs: add multi-FS auth tests
author    Rishabh Dave <ridave@redhat.com>
          Mon, 27 Jan 2020 06:01:40 +0000 (11:31 +0530)
committer Rishabh Dave <ridave@redhat.com>
          Fri, 11 Sep 2020 12:32:48 +0000 (18:02 +0530)
Add a test suite for testing authorization on a Ceph cluster with multiple
file systems, and enable it to run under the Teuthology framework.

Also add the helper methods required to set up the test environment for
multi-FS tests.

Signed-off-by: Rishabh Dave <ridave@redhat.com>
23 files changed:
qa/suites/fs/multifs/tasks/multifs-auth.yaml [new file with mode: 0644]
qa/suites/kcephfs/multifs/% [new file with mode: 0644]
qa/suites/kcephfs/multifs/.qa [new symlink]
qa/suites/kcephfs/multifs/begin.yaml [new symlink]
qa/suites/kcephfs/multifs/clusters/2-mds-2-client.yaml [new file with mode: 0644]
qa/suites/kcephfs/multifs/conf [new symlink]
qa/suites/kcephfs/multifs/kclient [new symlink]
qa/suites/kcephfs/multifs/objectstore-ec [new symlink]
qa/suites/kcephfs/multifs/overrides/+ [new file with mode: 0644]
qa/suites/kcephfs/multifs/overrides/.qa [new symlink]
qa/suites/kcephfs/multifs/overrides/frag_enable.yaml [new symlink]
qa/suites/kcephfs/multifs/overrides/log-config.yaml [new symlink]
qa/suites/kcephfs/multifs/overrides/osd-asserts.yaml [new symlink]
qa/suites/kcephfs/multifs/overrides/whitelist_health.yaml [new symlink]
qa/suites/kcephfs/multifs/overrides/whitelist_wrongly_marked_down.yaml [new symlink]
qa/suites/kcephfs/multifs/tasks/multifs-auth.yaml [new file with mode: 0644]
qa/tasks/cephfs/caps_helper.py [new file with mode: 0644]
qa/tasks/cephfs/cephfs_test_case.py
qa/tasks/cephfs/filesystem.py
qa/tasks/cephfs/fuse_mount.py
qa/tasks/cephfs/kernel_mount.py
qa/tasks/cephfs/test_multifs_auth.py [new file with mode: 0644]
qa/tasks/vstart_runner.py

diff --git a/qa/suites/fs/multifs/tasks/multifs-auth.yaml b/qa/suites/fs/multifs/tasks/multifs-auth.yaml
new file mode 100644 (file)
index 0000000..ed1bdb4
--- /dev/null
@@ -0,0 +1,5 @@
+tasks:
+  - cephfs_test_runner:
+      fail_on_skip: false
+      modules:
+        - tasks.cephfs.test_multifs_auth
diff --git a/qa/suites/kcephfs/multifs/% b/qa/suites/kcephfs/multifs/%
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/qa/suites/kcephfs/multifs/.qa b/qa/suites/kcephfs/multifs/.qa
new file mode 120000 (symlink)
index 0000000..fea2489
--- /dev/null
@@ -0,0 +1 @@
+../.qa
\ No newline at end of file
diff --git a/qa/suites/kcephfs/multifs/begin.yaml b/qa/suites/kcephfs/multifs/begin.yaml
new file mode 120000 (symlink)
index 0000000..311d404
--- /dev/null
@@ -0,0 +1 @@
+.qa/cephfs/begin.yaml
\ No newline at end of file
diff --git a/qa/suites/kcephfs/multifs/clusters/2-mds-2-client.yaml b/qa/suites/kcephfs/multifs/clusters/2-mds-2-client.yaml
new file mode 100644 (file)
index 0000000..89c0d05
--- /dev/null
@@ -0,0 +1,14 @@
+roles:
+- [mon.a, mgr.y, mds.a, mds.c, osd.0, osd.1, osd.2, osd.3]
+- [mon.b, mon.c, mgr.x, mds.b, osd.4, osd.5, osd.6, osd.7]
+- [client.0]
+- [client.1]
+openstack:
+- volumes: # attached to each instance
+    count: 4
+    size: 20 # GB
+- machine:
+    disk: 200 # GB
+log-rotate:
+  ceph-mds: 10G
+  ceph-osd: 10G
diff --git a/qa/suites/kcephfs/multifs/conf b/qa/suites/kcephfs/multifs/conf
new file mode 120000 (symlink)
index 0000000..6d47129
--- /dev/null
@@ -0,0 +1 @@
+.qa/cephfs/conf/
\ No newline at end of file
diff --git a/qa/suites/kcephfs/multifs/kclient b/qa/suites/kcephfs/multifs/kclient
new file mode 120000 (symlink)
index 0000000..893d2d3
--- /dev/null
@@ -0,0 +1 @@
+.qa/cephfs/mount/kclient
\ No newline at end of file
diff --git a/qa/suites/kcephfs/multifs/objectstore-ec b/qa/suites/kcephfs/multifs/objectstore-ec
new file mode 120000 (symlink)
index 0000000..efca613
--- /dev/null
@@ -0,0 +1 @@
+.qa/cephfs/objectstore-ec/
\ No newline at end of file
diff --git a/qa/suites/kcephfs/multifs/overrides/+ b/qa/suites/kcephfs/multifs/overrides/+
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/qa/suites/kcephfs/multifs/overrides/.qa b/qa/suites/kcephfs/multifs/overrides/.qa
new file mode 120000 (symlink)
index 0000000..a602a03
--- /dev/null
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/kcephfs/multifs/overrides/frag_enable.yaml b/qa/suites/kcephfs/multifs/overrides/frag_enable.yaml
new file mode 120000 (symlink)
index 0000000..34a39a3
--- /dev/null
@@ -0,0 +1 @@
+.qa/cephfs/overrides/frag_enable.yaml
\ No newline at end of file
diff --git a/qa/suites/kcephfs/multifs/overrides/log-config.yaml b/qa/suites/kcephfs/multifs/overrides/log-config.yaml
new file mode 120000 (symlink)
index 0000000..d955aa5
--- /dev/null
@@ -0,0 +1 @@
+.qa/cephfs/overrides/log-config.yaml
\ No newline at end of file
diff --git a/qa/suites/kcephfs/multifs/overrides/osd-asserts.yaml b/qa/suites/kcephfs/multifs/overrides/osd-asserts.yaml
new file mode 120000 (symlink)
index 0000000..f290c74
--- /dev/null
@@ -0,0 +1 @@
+.qa/cephfs/overrides/osd-asserts.yaml
\ No newline at end of file
diff --git a/qa/suites/kcephfs/multifs/overrides/whitelist_health.yaml b/qa/suites/kcephfs/multifs/overrides/whitelist_health.yaml
new file mode 120000 (symlink)
index 0000000..74f39a4
--- /dev/null
@@ -0,0 +1 @@
+.qa/cephfs/overrides/whitelist_health.yaml
\ No newline at end of file
diff --git a/qa/suites/kcephfs/multifs/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/kcephfs/multifs/overrides/whitelist_wrongly_marked_down.yaml
new file mode 120000 (symlink)
index 0000000..b4528c0
--- /dev/null
@@ -0,0 +1 @@
+.qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml
\ No newline at end of file
diff --git a/qa/suites/kcephfs/multifs/tasks/multifs-auth.yaml b/qa/suites/kcephfs/multifs/tasks/multifs-auth.yaml
new file mode 100644 (file)
index 0000000..2825c7a
--- /dev/null
@@ -0,0 +1,4 @@
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_multifs_auth
diff --git a/qa/tasks/cephfs/caps_helper.py b/qa/tasks/cephfs/caps_helper.py
new file mode 100644 (file)
index 0000000..a663335
--- /dev/null
@@ -0,0 +1,79 @@
+"""
+Helper methods to test that MON and MDS caps are enforced properly.
+"""
+from tasks.cephfs.cephfs_test_case import CephFSTestCase
+
+from teuthology.orchestra.run import Raw
+
+class CapsHelper(CephFSTestCase):
+
+    def run_mon_cap_tests(self, moncap, keyring):
+        keyring_path = self.create_keyring_file(self.fs.admin_remote, keyring)
+
+        fsls = self.run_cluster_cmd(f'fs ls --id {self.client_id} -k '
+                                    f'{keyring_path}')
+
+        # we need to check only for the default FS when the fsname clause is
+        # absent from the MON/MDS caps
+        if 'fsname' not in moncap:
+            self.assertIn(self.fs.name, fsls)
+            return
+
+        fss = (self.fs1.name, self.fs2.name) if hasattr(self, 'fs1') else \
+            (self.fs.name,)
+        for fsname in fss:
+            if fsname in moncap:
+                self.assertIn('name: ' + fsname, fsls)
+            else:
+                self.assertNotIn('name: ' + fsname, fsls)
+
+    def run_mds_cap_tests(self, filepaths, filedata, mounts, perm):
+        self.conduct_pos_test_for_read_caps(filepaths, filedata, mounts)
+
+        if perm == 'rw':
+            self.conduct_pos_test_for_write_caps(filepaths, mounts)
+        elif perm == 'r':
+            self.conduct_neg_test_for_write_caps(filepaths, mounts)
+        else:
+            raise RuntimeError(f'perm = {perm}\nIt should be "r" or "rw".')
+
+    def conduct_pos_test_for_read_caps(self, filepaths, filedata, mounts):
+        for mount in mounts:
+            for path, data in zip(filepaths, filedata):
+                # XXX: conduct tests only if the path belongs to the current
+                # mount; in teuthology tests, clients are on the same machine.
+                if path.find(mount.hostfs_mntpt) != -1:
+                    contents = mount.read_file(path)
+                    self.assertEqual(data, contents)
+
+    def conduct_pos_test_for_write_caps(self, filepaths, mounts):
+        filedata = ('some new data on first fs', 'some new data on second fs')
+
+        for mount in mounts:
+            for path, data in zip(filepaths, filedata):
+                if path.find(mount.hostfs_mntpt) != -1:
+                    # test that write was successful
+                    mount.write_file(path=path, data=data)
+                    # verify that the contents written are the same as
+                    # what was intended
+                    contents1 = mount.read_file(path=path)
+                    self.assertEqual(data, contents1)
+
+    def conduct_neg_test_for_write_caps(self, filepaths, mounts):
+        cmdargs = ['echo', 'some random data', Raw('|'), 'sudo', 'tee']
+
+        for mount in mounts:
+            for path in filepaths:
+                if path.find(mount.hostfs_mntpt) != -1:
+                    # pass path per call so it does not accumulate in cmdargs
+                    mount.negtestcmd(args=cmdargs + [path], retval=1,
+                                     errmsg='permission denied')
+
+    def get_mon_cap_from_keyring(self, client_name):
+        keyring = self.run_cluster_cmd(cmd=f'auth get {client_name}')
+        for line in keyring.split('\n'):
+            if 'caps mon' in line:
+                return line[line.find(' = "') + 4 : -1]
+
+        raise RuntimeError('get_mon_cap_from_keyring: mon cap not found in '
+                           'keyring. keyring -\n' + keyring)
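For orientation, a hypothetical test built on top of CapsHelper could look
like the sketch below; the client id and the MON cap string are illustrative
values, not part of this commit:

    # hypothetical usage sketch of CapsHelper; values are illustrative
    from tasks.cephfs.caps_helper import CapsHelper

    class TestReadCaps(CapsHelper):
        client_id = 'testuser'

        def test_read_only_mon_cap(self):
            moncap = 'allow r'
            # create_client() and run_mon_cap_tests() are the helpers
            # added in this commit
            keyring = self.create_client(self.client_id, moncap)
            self.run_mon_cap_tests(moncap, keyring)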
diff --git a/qa/tasks/cephfs/cephfs_test_case.py b/qa/tasks/cephfs/cephfs_test_case.py
index e0648f9475636f6c912f9b4b70d30bf08f34becc..e6718af01ebdcf2c87e4aca78a7ca1c891fe4d90 100644 (file)
@@ -1,20 +1,24 @@
 import json
 import logging
-from tasks.ceph_test_case import CephTestCase
 import os
 import re
 
+from shlex import split as shlex_split
+from io import StringIO
+
+from tasks.ceph_test_case import CephTestCase
 from tasks.cephfs.fuse_mount import FuseMount
 
 from teuthology import contextutil
+from teuthology.misc import sudo_write_file
 from teuthology.orchestra import run
 from teuthology.orchestra.run import CommandFailedError
+
 from teuthology.contextutil import safe_while
 
 
 log = logging.getLogger(__name__)
 
-
 def for_teuthology(f):
     """
     Decorator that adds an "is_for_teuthology" attribute to the wrapped function
@@ -134,11 +138,16 @@ class CephFSTestCase(CephTestCase):
 
             # In case some test messed with auth caps, reset them
             for client_id in client_mount_ids:
-                self.mds_cluster.mon_manager.raw_cluster_cmd_result(
-                    'auth', 'caps', "client.{0}".format(client_id),
-                    'mds', 'allow',
-                    'mon', 'allow r',
-                    'osd', 'allow rw pool={0}'.format(self.fs.get_data_pool_name()))
+                cmd = ['auth', 'caps', f'client.{client_id}', 'mon', 'allow r',
+                       'osd', f'allow rw pool={self.fs.get_data_pool_name()}',
+                       'mds', 'allow']
+
+                if self.run_cluster_cmd_result(cmd) == 0:
+                    continue
+
+                cmd[1] = 'add'
+                if self.run_cluster_cmd_result(cmd) != 0:
+                    raise RuntimeError(f'Failed to create new client {cmd[2]}')
 
             # wait for ranks to become active
             self.fs.wait_for_daemons()
@@ -369,3 +378,43 @@ class CephFSTestCase(CephTestCase):
                         return subtrees
         except contextutil.MaxWhileTries as e:
             raise RuntimeError("rank {0} failed to reach desired subtree state".format(rank)) from e
+
+    def run_cluster_cmd(self, cmd):
+        if isinstance(cmd, str):
+            cmd = shlex_split(cmd)
+        return self.fs.mon_manager.raw_cluster_cmd(*cmd)
+
+    def run_cluster_cmd_result(self, cmd):
+        if isinstance(cmd, str):
+            cmd = shlex_split(cmd)
+        return self.fs.mon_manager.raw_cluster_cmd_result(*cmd)
+
+    def create_client(self, client_id, moncap=None, osdcap=None, mdscap=None):
+        if not (moncap or osdcap or mdscap):
+            if self.fs:
+                return self.fs.authorize(client_id, ('/', 'rw'))
+            else:
+                raise RuntimeError('no caps were passed and the default FS '
+                                   'has not been created yet, so the client '
+                                   'cannot be authorized for it.')
+
+        cmd = ['auth', 'add', f'client.{client_id}']
+        if moncap:
+            cmd += ['mon', moncap]
+        if osdcap:
+            cmd += ['osd', osdcap]
+        if mdscap:
+            cmd += ['mds', mdscap]
+
+        self.run_cluster_cmd(cmd)
+        return self.run_cluster_cmd(f'auth get client.{client_id}')
+
+    def create_keyring_file(self, remote, keyring):
+        keyring_path = remote.run(args=['mktemp'], stdout=StringIO()).\
+            stdout.getvalue().strip()
+        sudo_write_file(remote, keyring_path, keyring)
+
+        # required when triggered using vstart_runner.py.
+        remote.run(args=['chmod', '644', keyring_path])
+
+        return keyring_path
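A rough sketch of how these new helpers compose inside a test; 'alice' and
the cap strings below are made-up values used only for illustration:

    # inside a CephFSTestCase subclass; values below are illustrative
    keyring = self.create_client('alice',
                                 moncap='allow r',
                                 osdcap='allow rw tag cephfs data=cephfs',
                                 mdscap='allow rw')
    # put the keyring on the client node so commands and mounts can use it
    keyring_path = self.create_keyring_file(self.mount_a.client_remote,
                                            keyring)
    # run a MON command as the new client, mirroring run_mon_cap_tests()
    fsls = self.run_cluster_cmd(f'fs ls --id alice -k {keyring_path}')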
diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py
index 084866f22ae0ab76ad7689bfab70c6b31ae78049..b5afbbbc18416bcb44b159e8678b129f5324d700 100644 (file)
@@ -1442,3 +1442,20 @@ class Filesystem(MDSCluster):
 
     def is_full(self):
         return self.is_pool_full(self.get_data_pool_name())
+
+    def enable_multifs(self):
+        self.mon_manager.raw_cluster_cmd('fs', 'flag', 'set',
+            'enable_multiple', 'true', '--yes-i-really-mean-it')
+
+    def authorize(self, client_id, caps=('/', 'rw')):
+        """
+        Run "ceph fs authorize" and run "ceph auth get" to get and returnt the
+        keyring.
+
+        client_id: client id that will be authorized
+        caps: tuple containing the path and permission (can be r or rw)
+              respectively.
+        """
+        client_name = 'client.' + client_id
+        return self.mon_manager.raw_cluster_cmd('fs', 'authorize', self.name,
+                                                client_name, *caps)
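For example, authorizing a client on a subdirectory might look like this
sketch; the client id 'bob' and the path are illustrative:

    # illustrative: read-only access to /dir1 of this FS for client.bob,
    # equivalent to the CLI "ceph fs authorize <fs_name> client.bob /dir1 r"
    keyring = self.fs.authorize('bob', ('/dir1', 'r'))

    # allow multiple FSs on the cluster, then create a second one
    # (as the multi-FS tests below do)
    self.fs.enable_multifs()
    fs2 = self.mds_cluster.newfs(name='cephfs2', create=True)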
diff --git a/qa/tasks/cephfs/fuse_mount.py b/qa/tasks/cephfs/fuse_mount.py
index 9a57e87195e34deb8c4f8e619662f6bfa6e74ede..e53ba680b94419704e463934ae8361bc08c35f4b 100644 (file)
@@ -274,10 +274,10 @@ class FuseMount(CephFSMount):
                                        stderr=stderr, omit_sudo=False)
                 break
             except run.CommandFailedError:
-                stderr = stderr.getvalue()
-                if "Read-only file system".lower() in stderr.lower():
+                stderr = stderr.getvalue().lower()
+                if "read-only file system" in stderr:
                     break
-                elif "Permission denied".lower() in stderr.lower():
+                elif "permission denied" in stderr:
                     time.sleep(5)
                 else:
                     raise
diff --git a/qa/tasks/cephfs/kernel_mount.py b/qa/tasks/cephfs/kernel_mount.py
index bdca338494bae852d20fb2dbe5a339b90bb3e2d5..33c557c2d346384949eeadb5682119cc2c079248 100644 (file)
@@ -51,8 +51,17 @@ class KernelMount(CephFSMount):
         if retval:
             return retval
 
-        self.client_remote.run(
-            args=['sudo', 'chmod', '1777', self.hostfs_mntpt], timeout=(5*60))
+        stderr = StringIO()
+        try:
+            self.client_remote.run(
+                args=['sudo', 'chmod', '1777', self.hostfs_mntpt],
+                stderr=stderr, timeout=(5*60))
+        except CommandFailedError:
+            # the client may lack write permissions in the caps it holds for
+            # the Ceph FS that was just mounted; tolerate only that failure.
+            if 'permission denied' not in stderr.getvalue().lower():
+                raise
+
 
         self.mounted = True
 
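The guarded chmod above follows a general pattern: capture stderr, tolerate
only the failure you expect, and re-raise everything else. A standalone
sketch of that pattern (not part of the commit; the helper name is made up):

    from io import StringIO

    from teuthology.orchestra.run import CommandFailedError

    def run_tolerating(remote, args, expected='permission denied'):
        # run a remote command, ignoring only the expected failure mode
        stderr = StringIO()
        try:
            remote.run(args=args, stderr=stderr)
        except CommandFailedError:
            if expected not in stderr.getvalue().lower():
                raise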
diff --git a/qa/tasks/cephfs/test_multifs_auth.py b/qa/tasks/cephfs/test_multifs_auth.py
new file mode 100644 (file)
index 0000000..36fc5c5
--- /dev/null
@@ -0,0 +1,311 @@
+"""
+Tests for Ceph clusters with multiple file systems.
+"""
+import logging
+
+from os.path import join as os_path_join
+
+# CapsHelper is subclassed from CephFSTestCase
+from tasks.cephfs.caps_helper import CapsHelper
+
+from teuthology.orchestra.run import CommandFailedError
+
+
+log = logging.getLogger(__name__)
+
+
+class TestMultiFS(CapsHelper):
+    client_id = 'testuser'
+    client_name = 'client.' + client_id
+    # one dedicated for each FS
+    MDSS_REQUIRED = 2
+    CLIENTS_REQUIRED = 2
+
+    def setUp(self):
+        super(TestMultiFS, self).setUp()
+
+        # the client might already exist if the same cluster was used for an
+        # earlier vstart_runner.py run.
+        self.run_cluster_cmd(f'auth rm {self.client_name}')
+
+        self.fs1 = self.fs
+        self.fs1.enable_multifs()
+        self.fs2 = self.mds_cluster.newfs(name='cephfs2', create=True)
+
+        # we'll reassign caps to client.1 so that it can operate with cephfs2
+        self.run_cluster_cmd(f'auth caps client.{self.mount_b.client_id} mon '
+                             f'"allow r" osd "allow rw '
+                             f'pool={self.fs2.get_data_pool_name()}" mds allow')
+        self.mount_b.remount(cephfs_name=self.fs2.name)
+
+
+class TestMONCaps(TestMultiFS):
+
+    def test_moncap_with_one_fs_name(self):
+        moncap = f'allow r fsname={self.fs1.name}'
+        keyring = self.setup_test_env(moncap)
+
+        self.run_mon_cap_tests(moncap, keyring)
+
+    def test_moncap_with_multiple_fs_names(self):
+        moncap = (f'allow r fsname={self.fs1.name}, '
+                  f'allow r fsname={self.fs2.name}')
+        keyring = self.setup_test_env(moncap)
+
+        self.run_mon_cap_tests(moncap, keyring)
+
+    def test_moncap_with_blanket_allow(self):
+        moncap = 'allow r'
+        keyring = self.setup_test_env(moncap)
+
+        self.run_mon_cap_tests(moncap, keyring)
+
+    def setup_test_env(self, moncap):
+        return self.create_client(self.client_id, moncap)
+
+
+#TODO: add tests for capsecs 'p' and 's'.
+class TestMDSCaps(TestMultiFS):
+    """
+    0. Have 2 FSs on Ceph cluster.
+    1. Create new files on both FSs.
+    2. Create a new client that has authorization for both FSs.
+    3. Remount the current mounts with this new client.
+    4. Test read and write on both FSs.
+    """
+    def test_rw_with_fsname_and_no_path_in_cap(self):
+        perm = 'rw'
+        filepaths, filedata, mounts = self.setup_test_env(perm, True)
+
+        self.run_mds_cap_tests(filepaths, filedata, mounts, perm)
+
+    def test_r_with_fsname_and_no_path_in_cap(self):
+        perm = 'r'
+        filepaths, filedata, mounts = self.setup_test_env(perm, True)
+
+        self.run_mds_cap_tests(filepaths, filedata, mounts, perm)
+
+    def test_rw_with_fsname_and_path_in_cap(self):
+        perm = 'rw'
+        filepaths, filedata, mounts = self.setup_test_env(perm, True, 'dir1')
+
+        self.run_mds_cap_tests(filepaths, filedata, mounts, perm)
+
+    def test_r_with_fsname_and_path_in_cap(self):
+        perm = 'r'
+        filepaths, filedata, mounts = self.setup_test_env(perm, True, 'dir1')
+
+        self.run_mds_cap_tests(filepaths, filedata, mounts, perm)
+
+    # XXX: this tests the backward compatibility; "allow rw path=<dir1>" is
+    # treated as "allow rw fsname=* path=<dir1>"
+    def test_rw_with_no_fsname_and_path_in_cap(self):
+        perm = 'rw'
+        filepaths, filedata, mounts = self.setup_test_env(perm, False, 'dir1')
+
+        self.run_mds_cap_tests(filepaths, filedata, mounts, perm)
+
+    # XXX: this tests the backward compatibility; "allow r path=<dir1>" is
+    # treated as "allow r fsname=* path=<dir1>"
+    def test_r_with_no_fsname_and_path_in_cap(self):
+        perm = 'r'
+        filepaths, filedata, mounts = self.setup_test_env(perm, False, 'dir1')
+
+        self.run_mds_cap_tests(filepaths, filedata, mounts, perm)
+
+    def test_rw_with_no_fsname_and_no_path(self):
+        perm = 'rw'
+        filepaths, filedata, mounts = self.setup_test_env(perm)
+
+        self.run_mds_cap_tests(filepaths, filedata, mounts, perm)
+
+    def test_r_with_no_fsname_and_no_path(self):
+        perm = 'r'
+        filepaths, filedata, mounts = self.setup_test_env(perm)
+
+        self.run_mds_cap_tests(filepaths, filedata, mounts, perm)
+
+    def tearDown(self):
+        self.mount_a.umount_wait()
+        self.mount_b.umount_wait()
+
+        super(TestMDSCaps, self).tearDown()
+
+    def setup_test_env(self, perm, fsname=False, cephfs_mntpt='/'):
+        """
+        Creates the cap string, files on both the FSs and then creates the
+        new client with the cap and remounts both the FSs with newly created
+        client.
+        """
+        filenames = ('file_on_fs1', 'file_on_fs2')
+        filedata = ('some data on first fs', 'some data on second fs')
+        mounts = (self.mount_a, self.mount_b)
+        self.setup_fs_contents(cephfs_mntpt, filenames, filedata)
+
+        keyring_paths = self.create_client_and_keyring_file(perm, fsname,
+                                                            cephfs_mntpt)
+        filepaths = self.remount_with_new_client(cephfs_mntpt, filenames,
+                                                 keyring_paths)
+
+        return filepaths, filedata, mounts
+
+    def generate_caps(self, perm, fsname, cephfs_mntpt):
+        moncap = 'allow r'
+        osdcap = (f'allow {perm} tag cephfs data={self.fs1.name}, '
+                  f'allow {perm} tag cephfs data={self.fs2.name}')
+
+        if fsname:
+            if cephfs_mntpt == '/':
+                mdscap = (f'allow {perm} fsname={self.fs1.name}, '
+                          f'allow {perm} fsname={self.fs2.name}')
+            else:
+                mdscap = (f'allow {perm} fsname={self.fs1.name} '
+                          f'path=/{cephfs_mntpt}, '
+                          f'allow {perm} fsname={self.fs2.name} '
+                          f'path=/{cephfs_mntpt}')
+        else:
+            if cephfs_mntpt == '/':
+                mdscap = f'allow {perm}'
+            else:
+                mdscap = f'allow {perm} path=/{cephfs_mntpt}'
+
+        return moncap, osdcap, mdscap
+
+    def create_client_and_keyring_file(self, perm, fsname, cephfs_mntpt):
+        moncap, osdcap, mdscap = self.generate_caps(perm, fsname,
+                                                    cephfs_mntpt)
+
+        keyring = self.create_client(self.client_id, moncap, osdcap, mdscap)
+        keyring_paths = []
+        for mount_x in (self.mount_a, self.mount_b):
+            keyring_paths.append(self.create_keyring_file(
+                mount_x.client_remote, keyring))
+
+        return keyring_paths
+
+    def setup_fs_contents(self, cephfs_mntpt, filenames, filedata):
+        filepaths = []
+        iter_on = zip((self.mount_a, self.mount_b), filenames, filedata)
+
+        for mount_x, filename, data in iter_on:
+            if cephfs_mntpt != '/':
+                mount_x.run_shell(args=['mkdir', cephfs_mntpt])
+                filepaths.append(os_path_join(mount_x.hostfs_mntpt,
+                                              cephfs_mntpt, filename))
+            else:
+                filepaths.append(os_path_join(mount_x.hostfs_mntpt, filename))
+
+            mount_x.write_file(filepaths[-1], data)
+
+    def remount_with_new_client(self, cephfs_mntpt, filenames,
+                                keyring_paths):
+        if isinstance(cephfs_mntpt, str) and cephfs_mntpt != '/':
+            cephfs_mntpt = '/' + cephfs_mntpt
+
+        self.mount_a.remount(client_id=self.client_id,
+                             client_keyring_path=keyring_paths[0],
+                             client_remote=self.mount_a.client_remote,
+                             cephfs_name=self.fs1.name,
+                             cephfs_mntpt=cephfs_mntpt,
+                             hostfs_mntpt=self.mount_a.hostfs_mntpt,
+                             wait=True)
+        self.mount_b.remount(client_id=self.client_id,
+                             client_keyring_path=keyring_paths[1],
+                             client_remote=self.mount_b.client_remote,
+                             cephfs_name=self.fs2.name,
+                             cephfs_mntpt=cephfs_mntpt,
+                             hostfs_mntpt=self.mount_b.hostfs_mntpt,
+                             wait=True)
+
+        return (os_path_join(self.mount_a.hostfs_mntpt, filenames[0]),
+                os_path_join(self.mount_b.hostfs_mntpt, filenames[1]))
+
+
+class TestClientsWithoutAuth(TestMultiFS):
+
+    def setUp(self):
+        super(TestClientsWithoutAuth, self).setUp()
+
+        # TODO: When MON and OSD caps for a Ceph FS are assigned to a
+        # client but MDS caps are not, mount.ceph prints "permission
+        # denied". But when MON caps are not assigned and MDS and OSD
+        # caps are, mount.ceph prints "no mds server or cluster laggy"
+        # instead of "permission denied".
+        #
+        # Once the latter case is fixed to print "permission denied", the
+        # extra messages in the tuple below can be dropped and only
+        # "permission denied" checked for.
+        self.kernel_errmsgs = ('permission denied', 'no mds server is up or '
+                               'the cluster is laggy', 'no such file or '
+                               'directory')
+
+        # TODO: When MON and OSD caps are assigned for a Ceph FS to a
+        # client but MDS caps are not, ceph-fuse prints "operation not
+        # permitted". But when MON caps are not assigned and MDS and OSD
+        # caps are, ceph-fuse prints "no such file or directory" instead
+        # of "operation not permitted".
+        #
+        # Before uncommenting the following line a fix would be required
+        # for the latter case to change "no such file or directory" to
+        # "operation not permitted".
+        #self.assertIn('operation not permitted', retval[2].lower())
+        self.fuse_errmsgs = ('operation not permitted', 'no such file or '
+                             'directory')
+
+        if 'kernel' in str(type(self.mount_a)).lower():
+            self.errmsgs = self.kernel_errmsgs
+        elif 'fuse' in str(type(self.mount_a)).lower():
+            self.errmsgs = self.fuse_errmsgs
+        else:
+            raise RuntimeError('strange, the client is neither kernel-based '
+                               'nor FUSE-based.')
+
+    def check_that_mount_failed_for_right_reason(self, stderr):
+        stderr = stderr.lower()
+        for errmsg in self.errmsgs:
+            if errmsg in stderr:
+                break
+        else:
+            raise AssertionError('can\'t find expected set of words in the '
+                                 f'stderr\nself.errmsgs - {self.errmsgs}\n'
+                                 f'stderr - {stderr}')
+
+    def test_mount_all_caps_absent(self):
+        # setup part...
+        keyring = self.fs1.authorize(self.client_id, ('/', 'rw'))
+        keyring_path = self.create_keyring_file(self.mount_a.client_remote,
+                                                keyring)
+
+        # mount the FS for which the client has no auth...
+        retval = self.mount_a.remount(client_id=self.client_id,
+                                      client_keyring_path=keyring_path,
+                                      cephfs_name=self.fs2.name,
+                                      check_status=False)
+
+        # tests...
+        self.assertIsInstance(retval, tuple)
+        self.assertEqual(len(retval), 3)
+        self.assertIsInstance(retval[0], CommandFailedError)
+        self.check_that_mount_failed_for_right_reason(retval[2])
+
+    def test_mount_mon_and_osd_caps_present_mds_caps_absent(self):
+        # setup part...
+        moncap = f'allow rw fsname={self.fs1.name}, allow rw fsname={self.fs2.name}'
+        mdscap = f'allow rw fsname={self.fs1.name}'
+        osdcap = (f'allow rw tag cephfs data={self.fs1.name}, allow rw tag '
+                  f'cephfs data={self.fs2.name}')
+        keyring = self.create_client(self.client_id, moncap, osdcap, mdscap)
+        keyring_path = self.create_keyring_file(self.mount_a.client_remote,
+                                                keyring)
+
+        # mount the FS for which the client lacks MDS caps...
+        retval = self.mount_a.remount(client_id=self.client_id,
+                                      client_keyring_path=keyring_path,
+                                      cephfs_name=self.fs2.name,
+                                      check_status=False)
+
+        # tests...
+        self.assertIsInstance(retval, tuple)
+        self.assertEqual(len(retval), 3)
+        self.assertIsInstance(retval[0], CommandFailedError)
+        self.check_that_mount_failed_for_right_reason(retval[2])
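To make the generated caps concrete: assuming the default FS is named
"cephfs" and the second one "cephfs2" (as in setUp above), a call like
self.generate_caps('rw', fsname=True, cephfs_mntpt='dir1') would yield cap
strings along these lines (a worked example, not new behavior):

    moncap = 'allow r'
    osdcap = ('allow rw tag cephfs data=cephfs, '
              'allow rw tag cephfs data=cephfs2')
    mdscap = ('allow rw fsname=cephfs path=/dir1, '
              'allow rw fsname=cephfs2 path=/dir1')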
diff --git a/qa/tasks/vstart_runner.py b/qa/tasks/vstart_runner.py
index 0d278356d4e1636c14604ae8edad52499e74390b..334ed172f36318051527e2370139c1eb3105a648 100644 (file)
@@ -671,8 +671,17 @@ class LocalKernelMount(KernelMount):
                 return (e, mountcmd_stdout.getvalue(),
                         mountcmd_stderr.getvalue())
 
-        self.client_remote.run(args=['sudo', 'chmod', '1777',
-                                     self.hostfs_mntpt], timeout=(5*60))
+        stderr = StringIO()
+        try:
+            self.client_remote.run(args=['sudo', 'chmod', '1777',
+                                   self.hostfs_mntpt], stderr=stderr,
+                                   timeout=(5*60))
+        except CommandFailedError:
+            # the client may lack write permissions in the caps it holds for
+            # the Ceph FS that was just mounted; tolerate only that failure.
+            if 'permission denied' not in stderr.getvalue().lower():
+                raise
+
         self.mounted = True
 
     def cleanup_netns(self):