From 352a41d1b31b0e6dd2414d3b00ab4f38e456ef92 Mon Sep 17 00:00:00 2001
From: Rishabh Dave
Date: Mon, 27 Jan 2020 11:31:40 +0530
Subject: [PATCH] qa/cephfs: add tests for multi-FS auth

Add a test suite for testing authorization on a Ceph cluster with
multiple file systems, and enable it to be run with the Teuthology
framework. Also add the helper methods required to set up the test
environment for the multi-FS tests.

Signed-off-by: Rishabh Dave
---
 qa/suites/fs/multifs/tasks/multifs-auth.yaml  |   5 +
 qa/suites/kcephfs/multifs/%                   |   0
 qa/suites/kcephfs/multifs/.qa                 |   1 +
 qa/suites/kcephfs/multifs/begin.yaml          |   1 +
 .../multifs/clusters/2-mds-2-client.yaml      |  14 +
 qa/suites/kcephfs/multifs/conf                |   1 +
 qa/suites/kcephfs/multifs/kclient             |   1 +
 qa/suites/kcephfs/multifs/objectstore-ec      |   1 +
 qa/suites/kcephfs/multifs/overrides/+         |   0
 qa/suites/kcephfs/multifs/overrides/.qa       |   1 +
 .../multifs/overrides/frag_enable.yaml        |   1 +
 .../kcephfs/multifs/overrides/log-config.yaml |   1 +
 .../multifs/overrides/osd-asserts.yaml        |   1 +
 .../multifs/overrides/whitelist_health.yaml   |   1 +
 .../whitelist_wrongly_marked_down.yaml        |   1 +
 .../kcephfs/multifs/tasks/multifs-auth.yaml   |   4 +
 qa/tasks/cephfs/caps_helper.py                |  79 +++++
 qa/tasks/cephfs/cephfs_test_case.py           |  63 +++-
 qa/tasks/cephfs/filesystem.py                 |  17 +
 qa/tasks/cephfs/fuse_mount.py                 |   6 +-
 qa/tasks/cephfs/kernel_mount.py               |  13 +-
 qa/tasks/cephfs/test_multifs_auth.py          | 311 ++++++++++++++++++
 qa/tasks/vstart_runner.py                     |  13 +-
 23 files changed, 522 insertions(+), 14 deletions(-)
 create mode 100644 qa/suites/fs/multifs/tasks/multifs-auth.yaml
 create mode 100644 qa/suites/kcephfs/multifs/%
 create mode 120000 qa/suites/kcephfs/multifs/.qa
 create mode 120000 qa/suites/kcephfs/multifs/begin.yaml
 create mode 100644 qa/suites/kcephfs/multifs/clusters/2-mds-2-client.yaml
 create mode 120000 qa/suites/kcephfs/multifs/conf
 create mode 120000 qa/suites/kcephfs/multifs/kclient
 create mode 120000 qa/suites/kcephfs/multifs/objectstore-ec
 create mode 100644 qa/suites/kcephfs/multifs/overrides/+
 create mode 120000 qa/suites/kcephfs/multifs/overrides/.qa
 create mode 120000 qa/suites/kcephfs/multifs/overrides/frag_enable.yaml
 create mode 120000 qa/suites/kcephfs/multifs/overrides/log-config.yaml
 create mode 120000 qa/suites/kcephfs/multifs/overrides/osd-asserts.yaml
 create mode 120000 qa/suites/kcephfs/multifs/overrides/whitelist_health.yaml
 create mode 120000 qa/suites/kcephfs/multifs/overrides/whitelist_wrongly_marked_down.yaml
 create mode 100644 qa/suites/kcephfs/multifs/tasks/multifs-auth.yaml
 create mode 100644 qa/tasks/cephfs/caps_helper.py
 create mode 100644 qa/tasks/cephfs/test_multifs_auth.py
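For orientation, the capability syntax exercised by this suite looks like the sketch below. The sketch is not part of the patch; the client id and FS name are illustrative, and subprocess stands in for the test helpers the patch adds:

    # Sketch only: create a client confined to one file system by name;
    # assumes a running cluster with an FS named "cephfs".
    import subprocess

    moncap = 'allow r fsname=cephfs'
    osdcap = 'allow rw tag cephfs data=cephfs'
    mdscap = 'allow rw fsname=cephfs'

    subprocess.check_call(['ceph', 'auth', 'add', 'client.testuser',
                           'mon', moncap, 'osd', osdcap, 'mds', mdscap])
    # "ceph fs ls" run with this identity should now list "cephfs" only.
    print(subprocess.check_output(['ceph', 'auth', 'get', 'client.testuser']))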
diff --git a/qa/suites/fs/multifs/tasks/multifs-auth.yaml b/qa/suites/fs/multifs/tasks/multifs-auth.yaml
new file mode 100644
index 00000000000..ed1bdb47515
--- /dev/null
+++ b/qa/suites/fs/multifs/tasks/multifs-auth.yaml
@@ -0,0 +1,5 @@
+tasks:
+  - cephfs_test_runner:
+      fail_on_skip: false
+      modules:
+        - tasks.cephfs.test_multifs_auth
diff --git a/qa/suites/kcephfs/multifs/% b/qa/suites/kcephfs/multifs/%
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/qa/suites/kcephfs/multifs/.qa b/qa/suites/kcephfs/multifs/.qa
new file mode 120000
index 00000000000..fea2489fdf6
--- /dev/null
+++ b/qa/suites/kcephfs/multifs/.qa
@@ -0,0 +1 @@
+../.qa
\ No newline at end of file
diff --git a/qa/suites/kcephfs/multifs/begin.yaml b/qa/suites/kcephfs/multifs/begin.yaml
new file mode 120000
index 00000000000..311d404f7c2
--- /dev/null
+++ b/qa/suites/kcephfs/multifs/begin.yaml
@@ -0,0 +1 @@
+.qa/cephfs/begin.yaml
\ No newline at end of file
diff --git a/qa/suites/kcephfs/multifs/clusters/2-mds-2-client.yaml b/qa/suites/kcephfs/multifs/clusters/2-mds-2-client.yaml
new file mode 100644
index 00000000000..89c0d05e85c
--- /dev/null
+++ b/qa/suites/kcephfs/multifs/clusters/2-mds-2-client.yaml
@@ -0,0 +1,14 @@
+roles:
+- [mon.a, mgr.y, mds.a, mds.c, osd.0, osd.1, osd.2, osd.3]
+- [mon.b, mon.c, mgr.x, mds.b, osd.4, osd.5, osd.6, osd.7]
+- [client.0]
+- [client.1]
+openstack:
+- volumes: # attached to each instance
+    count: 4
+    size: 20 # GB
+- machine:
+    disk: 200 # GB
+log-rotate:
+  ceph-mds: 10G
+  ceph-osd: 10G
diff --git a/qa/suites/kcephfs/multifs/conf b/qa/suites/kcephfs/multifs/conf
new file mode 120000
index 00000000000..6d47129847f
--- /dev/null
+++ b/qa/suites/kcephfs/multifs/conf
@@ -0,0 +1 @@
+.qa/cephfs/conf/
\ No newline at end of file
diff --git a/qa/suites/kcephfs/multifs/kclient b/qa/suites/kcephfs/multifs/kclient
new file mode 120000
index 00000000000..893d2d364eb
--- /dev/null
+++ b/qa/suites/kcephfs/multifs/kclient
@@ -0,0 +1 @@
+.qa/cephfs/mount/kclient
\ No newline at end of file
diff --git a/qa/suites/kcephfs/multifs/objectstore-ec b/qa/suites/kcephfs/multifs/objectstore-ec
new file mode 120000
index 00000000000..efca6130e8f
--- /dev/null
+++ b/qa/suites/kcephfs/multifs/objectstore-ec
@@ -0,0 +1 @@
+.qa/cephfs/objectstore-ec/
\ No newline at end of file
diff --git a/qa/suites/kcephfs/multifs/overrides/+ b/qa/suites/kcephfs/multifs/overrides/+
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/qa/suites/kcephfs/multifs/overrides/.qa b/qa/suites/kcephfs/multifs/overrides/.qa
new file mode 120000
index 00000000000..a602a0353e7
--- /dev/null
+++ b/qa/suites/kcephfs/multifs/overrides/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/kcephfs/multifs/overrides/frag_enable.yaml b/qa/suites/kcephfs/multifs/overrides/frag_enable.yaml
new file mode 120000
index 00000000000..34a39a368cf
--- /dev/null
+++ b/qa/suites/kcephfs/multifs/overrides/frag_enable.yaml
@@ -0,0 +1 @@
+.qa/cephfs/overrides/frag_enable.yaml
\ No newline at end of file
diff --git a/qa/suites/kcephfs/multifs/overrides/log-config.yaml b/qa/suites/kcephfs/multifs/overrides/log-config.yaml
new file mode 120000
index 00000000000..d955aa5ba86
--- /dev/null
+++ b/qa/suites/kcephfs/multifs/overrides/log-config.yaml
@@ -0,0 +1 @@
+.qa/cephfs/overrides/log-config.yaml
\ No newline at end of file
diff --git a/qa/suites/kcephfs/multifs/overrides/osd-asserts.yaml b/qa/suites/kcephfs/multifs/overrides/osd-asserts.yaml
new file mode 120000
index 00000000000..f290c749bdc
--- /dev/null
+++ b/qa/suites/kcephfs/multifs/overrides/osd-asserts.yaml
@@ -0,0 +1 @@
+.qa/cephfs/overrides/osd-asserts.yaml
\ No newline at end of file
diff --git a/qa/suites/kcephfs/multifs/overrides/whitelist_health.yaml b/qa/suites/kcephfs/multifs/overrides/whitelist_health.yaml
new file mode 120000
index 00000000000..74f39a49b27
--- /dev/null
+++ b/qa/suites/kcephfs/multifs/overrides/whitelist_health.yaml
@@ -0,0 +1 @@
+.qa/cephfs/overrides/whitelist_health.yaml
\ No newline at end of file
diff --git a/qa/suites/kcephfs/multifs/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/kcephfs/multifs/overrides/whitelist_wrongly_marked_down.yaml
new file mode 120000
index 00000000000..b4528c0f8c0
--- /dev/null
+++ b/qa/suites/kcephfs/multifs/overrides/whitelist_wrongly_marked_down.yaml
@@ -0,0 +1 @@
+.qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml
\ No newline at end of file
diff --git a/qa/suites/kcephfs/multifs/tasks/multifs-auth.yaml b/qa/suites/kcephfs/multifs/tasks/multifs-auth.yaml
new file mode 100644
index 00000000000..2825c7a6ff7
--- /dev/null
+++ b/qa/suites/kcephfs/multifs/tasks/multifs-auth.yaml
@@ -0,0 +1,4 @@
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_multifs_auth
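The suite yamls above hand the test module to cephfs_test_runner; for quick local checks the same module can also be driven with vstart_runner.py. A sketch of the assumed invocation, run from the build directory of a vstart cluster (the relative path depends on your checkout):

    python3 ../qa/tasks/vstart_runner.py tasks.cephfs.test_multifs_auth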
diff --git a/qa/tasks/cephfs/caps_helper.py b/qa/tasks/cephfs/caps_helper.py
new file mode 100644
index 00000000000..a6633354623
--- /dev/null
+++ b/qa/tasks/cephfs/caps_helper.py
@@ -0,0 +1,79 @@
+"""
+Helper methods to test that MON and MDS caps are enforced properly.
+"""
+from tasks.cephfs.cephfs_test_case import CephFSTestCase
+
+from teuthology.orchestra.run import Raw
+
+
+class CapsHelper(CephFSTestCase):
+
+    def run_mon_cap_tests(self, moncap, keyring):
+        keyring_path = self.create_keyring_file(self.fs.admin_remote,
+                                                keyring)
+
+        fsls = self.run_cluster_cmd(f'fs ls --id {self.client_id} -k '
+                                    f'{keyring_path}')
+
+        # when the fsname clause is absent in the MON/MDS caps, we only need
+        # to check for the default FS
+        if 'fsname' not in moncap:
+            self.assertIn(self.fs.name, fsls)
+            return
+
+        fss = (self.fs1.name, self.fs2.name) if hasattr(self, 'fs1') else \
+            (self.fs.name,)
+        for fsname in fss:
+            if fsname in moncap:
+                self.assertIn('name: ' + fsname, fsls)
+            else:
+                self.assertNotIn('name: ' + fsname, fsls)
+
+    def run_mds_cap_tests(self, filepaths, filedata, mounts, perm):
+        self.conduct_pos_test_for_read_caps(filepaths, filedata, mounts)
+
+        if perm == 'rw':
+            self.conduct_pos_test_for_write_caps(filepaths, mounts)
+        elif perm == 'r':
+            self.conduct_neg_test_for_write_caps(filepaths, mounts)
+        else:
+            raise RuntimeError(f'perm = {perm}\nIt should be "r" or "rw".')
+
+    def conduct_pos_test_for_read_caps(self, filepaths, filedata, mounts):
+        for mount in mounts:
+            for path, data in zip(filepaths, filedata):
+                # XXX: conduct the test only if the path belongs to the
+                # current mount; in teuthology tests, the clients are
+                # located on the same machine.
+                if path.find(mount.hostfs_mntpt) != -1:
+                    contents = mount.read_file(path)
+                    self.assertEqual(data, contents)
+
+    def conduct_pos_test_for_write_caps(self, filepaths, mounts):
+        filedata = ('some new data on first fs', 'some new data on second fs')
+
+        for mount in mounts:
+            for path, data in zip(filepaths, filedata):
+                if path.find(mount.hostfs_mntpt) != -1:
+                    # test that the write succeeds
+                    mount.write_file(path=path, data=data)
+                    # verify that the contents written are the same as the
+                    # ones that were intended
+                    contents1 = mount.read_file(path=path)
+                    self.assertEqual(data, contents1)
+
+    def conduct_neg_test_for_write_caps(self, filepaths, mounts):
+        cmdargs = ['echo', 'some random data', Raw('|'), 'sudo', 'tee']
+
+        for mount in mounts:
+            for path in filepaths:
+                if path.find(mount.hostfs_mntpt) != -1:
+                    cmdargs.append(path)
+                    mount.negtestcmd(args=cmdargs, retval=1,
+                                     errmsg='permission denied')
+                    # don't let paths accumulate across iterations
+                    cmdargs.pop(-1)
+
+    def get_mon_cap_from_keyring(self, client_name):
+        keyring = self.run_cluster_cmd(cmd=f'auth get {client_name}')
+        for line in keyring.split('\n'):
+            if 'caps mon' in line:
+                return line[line.find(' = "') + 4 : -1]
+
+        raise RuntimeError('get_mon_cap_from_keyring: mon cap not found in '
+                           'keyring. keyring -\n' + keyring)
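As a rough sketch of how CapsHelper is meant to be driven, mirroring what test_multifs_auth.py (added later in this patch) does; the subclass name, client id, and cap string are illustrative:

    # Illustrative subclass; create_client() is added to CephFSTestCase
    # in the next file of this patch.
    class TestMonReadCap(CapsHelper):
        client_id = 'testuser'
        client_name = 'client.' + client_id

        def test_fs_ls_is_filtered_by_fsname(self):
            moncap = f'allow r fsname={self.fs.name}'
            keyring = self.create_client(self.client_id, moncap)
            self.run_mon_cap_tests(moncap, keyring)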
diff --git a/qa/tasks/cephfs/cephfs_test_case.py b/qa/tasks/cephfs/cephfs_test_case.py
index e0648f94756..e6718af01eb 100644
--- a/qa/tasks/cephfs/cephfs_test_case.py
+++ b/qa/tasks/cephfs/cephfs_test_case.py
@@ -1,20 +1,24 @@
 import json
 import logging
-from tasks.ceph_test_case import CephTestCase
 import os
 import re
+from shlex import split as shlex_split
+from io import StringIO
+
+from tasks.ceph_test_case import CephTestCase
 from tasks.cephfs.fuse_mount import FuseMount
 
 from teuthology import contextutil
+from teuthology.misc import sudo_write_file
 from teuthology.orchestra import run
 from teuthology.orchestra.run import CommandFailedError
+
 from teuthology.contextutil import safe_while
 
 log = logging.getLogger(__name__)
 
-
 def for_teuthology(f):
     """
     Decorator that adds an "is_for_teuthology" attribute to the wrapped function
@@ -134,11 +138,16 @@
 
         # In case some test messed with auth caps, reset them
         for client_id in client_mount_ids:
-            self.mds_cluster.mon_manager.raw_cluster_cmd_result(
-                'auth', 'caps', "client.{0}".format(client_id),
-                'mds', 'allow',
-                'mon', 'allow r',
-                'osd', 'allow rw pool={0}'.format(self.fs.get_data_pool_name()))
+            cmd = ['auth', 'caps', f'client.{client_id}', 'mon', 'allow r',
+                   'osd', f'allow rw pool={self.fs.get_data_pool_name()}',
+                   'mds', 'allow']
+
+            # if the client already exists, its caps are now reset; move on
+            # to the next client
+            if self.run_cluster_cmd_result(cmd) == 0:
+                continue
+
+            # otherwise, create the client afresh
+            cmd[1] = 'add'
+            if self.run_cluster_cmd_result(cmd) != 0:
+                raise RuntimeError(f'Failed to create new client {cmd[2]}')
 
         # wait for ranks to become active
         self.fs.wait_for_daemons()
@@ -369,3 +378,43 @@
             return subtrees
         except contextutil.MaxWhileTries as e:
             raise RuntimeError("rank {0} failed to reach desired subtree state".format(rank)) from e
+
+    def run_cluster_cmd(self, cmd):
+        if isinstance(cmd, str):
+            cmd = shlex_split(cmd)
+        return self.fs.mon_manager.raw_cluster_cmd(*cmd)
+
+    def run_cluster_cmd_result(self, cmd):
+        if isinstance(cmd, str):
+            cmd = shlex_split(cmd)
+        return self.fs.mon_manager.raw_cluster_cmd_result(*cmd)
+
+    def create_client(self, client_id, moncap=None, osdcap=None, mdscap=None):
+        if not (moncap or osdcap or mdscap):
+            if self.fs:
+                return self.fs.authorize(client_id, ('/', 'rw'))
+            else:
+                raise RuntimeError('no caps were passed and the default FS '
+                                   'has not been created yet, so the client '
+                                   'cannot be authorized for it.')
+
+        cmd = ['auth', 'add', f'client.{client_id}']
+        if moncap:
+            cmd += ['mon', moncap]
+        if osdcap:
+            cmd += ['osd', osdcap]
+        if mdscap:
+            cmd += ['mds', mdscap]
+
+        self.run_cluster_cmd(cmd)
+        return self.run_cluster_cmd(f'auth get client.{client_id}')
+
+    def create_keyring_file(self, remote, keyring):
+        keyring_path = remote.run(args=['mktemp'], stdout=StringIO()).\
+            stdout.getvalue().strip()
+        sudo_write_file(remote, keyring_path, keyring)
+
+        # required when the tests are triggered using vstart_runner.py: the
+        # root-owned keyring file must be readable by the test process.
+        remote.run(args=['chmod', '644', keyring_path])
+
+        return keyring_path
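In CLI terms, create_client() amounts to a "ceph auth add" followed by a "ceph auth get"; a sketch of its intended use (the client id and cap strings are examples only):

    keyring = self.create_client('testuser',
                                 moncap='allow r fsname=cephfs',
                                 osdcap='allow rw tag cephfs data=cephfs',
                                 mdscap='allow rw fsname=cephfs')
    # i.e. ceph auth add client.testuser mon '...' osd '...' mds '...'
    #      ceph auth get client.testuser
    keyring_path = self.create_keyring_file(self.mount_a.client_remote,
                                            keyring)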
diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py
index 084866f22ae..b5afbbbc184 100644
--- a/qa/tasks/cephfs/filesystem.py
+++ b/qa/tasks/cephfs/filesystem.py
@@ -1442,3 +1442,20 @@
 
     def is_full(self):
         return self.is_pool_full(self.get_data_pool_name())
+
+    def enable_multifs(self):
+        self.mon_manager.raw_cluster_cmd('fs', 'flag', 'set',
+                                         'enable_multiple', 'true',
+                                         '--yes-i-really-mean-it')
+
+    def authorize(self, client_id, caps=('/', 'rw')):
+        """
+        Run "ceph fs authorize" and return the keyring it produces.
+
+        client_id: client id that will be authorized
+        caps: tuple containing the path and permission (can be r or rw)
+              respectively.
+        """
+        client_name = 'client.' + client_id
+        return self.mon_manager.raw_cluster_cmd('fs', 'authorize', self.name,
+                                                client_name, *caps)
diff --git a/qa/tasks/cephfs/fuse_mount.py b/qa/tasks/cephfs/fuse_mount.py
index 9a57e87195e..e53ba680b94 100644
--- a/qa/tasks/cephfs/fuse_mount.py
+++ b/qa/tasks/cephfs/fuse_mount.py
@@ -274,10 +274,10 @@
                     stderr=stderr, omit_sudo=False)
                 break
             except run.CommandFailedError:
-                stderr = stderr.getvalue()
-                if "Read-only file system".lower() in stderr.lower():
+                stderr = stderr.getvalue().lower()
+                if "read-only file system" in stderr:
                     break
-                elif "Permission denied".lower() in stderr.lower():
+                elif "permission denied" in stderr:
                     time.sleep(5)
                 else:
                     raise
diff --git a/qa/tasks/cephfs/kernel_mount.py b/qa/tasks/cephfs/kernel_mount.py
index bdca338494b..33c557c2d34 100644
--- a/qa/tasks/cephfs/kernel_mount.py
+++ b/qa/tasks/cephfs/kernel_mount.py
@@ -51,8 +51,17 @@
         if retval:
             return retval
 
-        self.client_remote.run(
-            args=['sudo', 'chmod', '1777', self.hostfs_mntpt], timeout=(5*60))
+        stderr = StringIO()
+        try:
+            self.client_remote.run(
+                args=['sudo', 'chmod', '1777', self.hostfs_mntpt],
+                stderr=stderr, timeout=(5*60))
+        except CommandFailedError:
+            # the chmod is expected to fail when the client does not have
+            # write permissions in the caps it holds for the Ceph FS that
+            # was just mounted; re-raise any other failure.
+            if 'permission denied' not in stderr.getvalue().lower():
+                raise
+
         self.mounted = True
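Taken together, the Filesystem helpers above let a test stand up a second file system and authorize a client for it; a minimal sketch of that flow, mirroring TestMultiFS.setUp() in the next file (the FS and client names are illustrative):

    self.fs1 = self.fs
    self.fs1.enable_multifs()       # fs flag set enable_multiple true
    self.fs2 = self.mds_cluster.newfs(name='cephfs2', create=True)
    keyring = self.fs2.authorize('testuser', ('/', 'rw'))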
diff --git a/qa/tasks/cephfs/test_multifs_auth.py b/qa/tasks/cephfs/test_multifs_auth.py
new file mode 100644
index 00000000000..36fc5c51ecf
--- /dev/null
+++ b/qa/tasks/cephfs/test_multifs_auth.py
@@ -0,0 +1,311 @@
+"""
+Tests for Ceph clusters with multiple FSs.
+"""
+import logging
+
+from os.path import join as os_path_join
+
+# CapsHelper is subclassed from CephFSTestCase
+from tasks.cephfs.caps_helper import CapsHelper
+
+from teuthology.orchestra.run import CommandFailedError
+
+
+log = logging.getLogger(__name__)
+
+
+class TestMultiFS(CapsHelper):
+    client_id = 'testuser'
+    client_name = 'client.' + client_id
+    # one MDS dedicated to each FS
+    MDSS_REQUIRED = 2
+    CLIENTS_REQUIRED = 2
+
+    def setUp(self):
+        super(TestMultiFS, self).setUp()
+
+        # the client might already exist if the same cluster was used for an
+        # earlier vstart_runner.py run.
+        self.run_cluster_cmd(f'auth rm {self.client_name}')
+
+        self.fs1 = self.fs
+        self.fs1.enable_multifs()
+        self.fs2 = self.mds_cluster.newfs(name='cephfs2', create=True)
+
+        # we'll reassign caps to client.1 so that it can operate with cephfs2
+        self.run_cluster_cmd(f'auth caps client.{self.mount_b.client_id} mon '
+                             f'"allow r" osd "allow rw '
+                             f'pool={self.fs2.get_data_pool_name()}" mds allow')
+        self.mount_b.remount(cephfs_name=self.fs2.name)
+
+
+class TestMONCaps(TestMultiFS):
+
+    def test_moncap_with_one_fs_names(self):
+        moncap = f'allow r fsname={self.fs1.name}'
+        keyring = self.setup_test_env(moncap)
+
+        self.run_mon_cap_tests(moncap, keyring)
+
+    def test_moncap_with_multiple_fs_names(self):
+        moncap = (f'allow r fsname={self.fs1.name}, '
+                  f'allow r fsname={self.fs2.name}')
+        keyring = self.setup_test_env(moncap)
+
+        self.run_mon_cap_tests(moncap, keyring)
+
+    def test_moncap_with_blanket_allow(self):
+        moncap = 'allow r'
+        keyring = self.setup_test_env(moncap)
+
+        self.run_mon_cap_tests(moncap, keyring)
+
+    def setup_test_env(self, moncap):
+        return self.create_client(self.client_id, moncap)
+ """ + filenames = ('file_on_fs1', 'file_on_fs2') + filedata = ('some data on first fs', 'some data on second fs') + mounts = (self.mount_a, self.mount_b) + self.setup_fs_contents(cephfs_mntpt, filenames, filedata) + + keyring_paths = self.create_client_and_keyring_file(perm, fsname, + cephfs_mntpt) + filepaths = self.remount_with_new_client(cephfs_mntpt, filenames, + keyring_paths) + + return filepaths, filedata, mounts + + def generate_caps(self, perm, fsname, cephfs_mntpt): + moncap = 'allow r' + osdcap = (f'allow {perm} tag cephfs data={self.fs1.name}, ' + f'allow {perm} tag cephfs data={self.fs2.name}') + + if fsname: + if cephfs_mntpt == '/': + mdscap = (f'allow {perm} fsname={self.fs1.name}, ' + f'allow {perm} fsname={self.fs2.name}') + else: + mdscap = (f'allow {perm} fsname={self.fs1.name} ' + f'path=/{cephfs_mntpt}, ' + f'allow {perm} fsname={self.fs2.name} ' + f'path=/{cephfs_mntpt}') + else: + if cephfs_mntpt == '/': + mdscap = f'allow {perm}' + else: + mdscap = f'allow {perm} path=/{cephfs_mntpt}' + + return moncap, osdcap, mdscap + + def create_client_and_keyring_file(self, perm, fsname, cephfs_mntpt): + moncap, osdcap, mdscap = self.generate_caps(perm, fsname, + cephfs_mntpt) + + keyring = self.create_client(self.client_id, moncap, osdcap, mdscap) + keyring_paths = [] + for mount_x in (self.mount_a, self.mount_b): + keyring_paths.append(self.create_keyring_file( + mount_x.client_remote, keyring)) + + return keyring_paths + + def setup_fs_contents(self, cephfs_mntpt, filenames, filedata): + filepaths = [] + iter_on = zip((self.mount_a, self.mount_b), filenames, filedata) + + for mount_x, filename, data in iter_on: + if cephfs_mntpt != '/' : + mount_x.run_shell(args=['mkdir', cephfs_mntpt]) + filepaths.append(os_path_join(mount_x.hostfs_mntpt, + cephfs_mntpt, filename)) + else: + filepaths.append(os_path_join(mount_x.hostfs_mntpt, filename)) + + mount_x.write_file(filepaths[-1], data) + + def remount_with_new_client(self, cephfs_mntpt, filenames, + keyring_paths): + if isinstance(cephfs_mntpt, str) and cephfs_mntpt != '/' : + cephfs_mntpt = '/' + cephfs_mntpt + + self.mount_a.remount(client_id=self.client_id, + client_keyring_path=keyring_paths[0], + client_remote=self.mount_a.client_remote, + cephfs_name=self.fs1.name, + cephfs_mntpt=cephfs_mntpt, + hostfs_mntpt=self.mount_a.hostfs_mntpt, + wait=True) + self.mount_b.remount(client_id=self.client_id, + client_keyring_path=keyring_paths[1], + client_remote=self.mount_b.client_remote, + cephfs_name=self.fs2.name, + cephfs_mntpt=cephfs_mntpt, + hostfs_mntpt=self.mount_b.hostfs_mntpt, + wait=True) + + return (os_path_join(self.mount_a.hostfs_mntpt, filenames[0]), + os_path_join(self.mount_b.hostfs_mntpt, filenames[1])) + + +class TestClientsWithoutAuth(TestMultiFS): + + def setUp(self): + super(TestClientsWithoutAuth, self).setUp() + + # TODO: When MON and OSD caps for a Ceph FS are assigned to a + # client but MDS caps are not, mount.ceph prints "permission + # denied". But when MON caps are not assigned and MDS and OSD + # caps are, mount.ceph prints "no mds server or cluster laggy" + # instead of "permission denied". + # + # Before uncommenting the following line a fix would be required + # for latter case to change "no mds server is up or the cluster is + # laggy" to "permission denied". 
+        self.kernel_errmsgs = ('permission denied', 'no mds server is up or '
+                               'the cluster is laggy', 'no such file or '
+                               'directory')
+
+        # TODO: When MON and OSD caps are assigned for a Ceph FS to a
+        # client but MDS caps are not, ceph-fuse prints "operation not
+        # permitted". But when MON caps are not assigned and MDS and OSD
+        # caps are, ceph-fuse prints "no such file or directory" instead
+        # of "operation not permitted".
+        #
+        # Before uncommenting the following line, a fix would be required
+        # for the latter case to change "no such file or directory" to
+        # "operation not permitted".
+        #self.assertIn('operation not permitted', retval[2].lower())
+        self.fuse_errmsgs = ('operation not permitted', 'no such file or '
+                             'directory')
+
+        if 'kernel' in str(type(self.mount_a)).lower():
+            self.errmsgs = self.kernel_errmsgs
+        elif 'fuse' in str(type(self.mount_a)).lower():
+            self.errmsgs = self.fuse_errmsgs
+        else:
+            raise RuntimeError('strange, the client is based neither on '
+                               'the kernel driver nor on FUSE.')
+
+    def check_that_mount_failed_for_right_reason(self, stderr):
+        stderr = stderr.lower()
+        for errmsg in self.errmsgs:
+            if errmsg in stderr:
+                break
+        else:
+            raise AssertionError('can\'t find the expected set of words in '
+                                 f'the stderr\nself.errmsgs - {self.errmsgs}'
+                                 f'\nstderr - {stderr}')
+
+    def test_mount_all_caps_absent(self):
+        # setup part...
+        keyring = self.fs1.authorize(self.client_id, ('/', 'rw'))
+        keyring_path = self.create_keyring_file(self.mount_a.client_remote,
+                                                keyring)
+
+        # mount the FS for which the client has no auth...
+        retval = self.mount_a.remount(client_id=self.client_id,
+                                      client_keyring_path=keyring_path,
+                                      cephfs_name=self.fs2.name,
+                                      check_status=False)
+
+        # tests...
+        self.assertIsInstance(retval, tuple)
+        self.assertEqual(len(retval), 3)
+        self.assertIsInstance(retval[0], CommandFailedError)
+        self.check_that_mount_failed_for_right_reason(retval[2])
+
+    def test_mount_mon_and_osd_caps_present_mds_caps_absent(self):
+        # setup part...
+        moncap = (f'allow rw fsname={self.fs1.name}, '
+                  f'allow rw fsname={self.fs2.name}')
+        mdscap = f'allow rw fsname={self.fs1.name}'
+        osdcap = (f'allow rw tag cephfs data={self.fs1.name}, allow rw tag '
+                  f'cephfs data={self.fs2.name}')
+        keyring = self.create_client(self.client_id, moncap, osdcap, mdscap)
+        keyring_path = self.create_keyring_file(self.mount_a.client_remote,
+                                                keyring)
+
+        # mount the FS for which the client has no MDS caps...
+        retval = self.mount_a.remount(client_id=self.client_id,
+                                      client_keyring_path=keyring_path,
+                                      cephfs_name=self.fs2.name,
+                                      check_status=False)
+
+        # tests...
+        self.assertIsInstance(retval, tuple)
+        self.assertEqual(len(retval), 3)
+        self.assertIsInstance(retval[0], CommandFailedError)
+        self.check_that_mount_failed_for_right_reason(retval[2])
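To make the cap generation concrete, this is what TestMDSCaps.generate_caps() above produces for perm='r', fsname=True, cephfs_mntpt='dir1', given the default FS names used in setUp():

    moncap = 'allow r'
    osdcap = ('allow r tag cephfs data=cephfs, '
              'allow r tag cephfs data=cephfs2')
    mdscap = ('allow r fsname=cephfs path=/dir1, '
              'allow r fsname=cephfs2 path=/dir1')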
diff --git a/qa/tasks/vstart_runner.py b/qa/tasks/vstart_runner.py
index 0d278356d4e..334ed172f36 100644
--- a/qa/tasks/vstart_runner.py
+++ b/qa/tasks/vstart_runner.py
@@ -671,8 +671,17 @@
             return (e, mountcmd_stdout.getvalue(),
                     mountcmd_stderr.getvalue())
 
-        self.client_remote.run(args=['sudo', 'chmod', '1777',
-                                     self.hostfs_mntpt], timeout=(5*60))
+        stderr = StringIO()
+        try:
+            self.client_remote.run(args=['sudo', 'chmod', '1777',
+                                         self.hostfs_mntpt], stderr=stderr,
+                                   timeout=(5*60))
+        except CommandFailedError:
+            # the chmod is expected to fail when the client does not have
+            # write permissions in the caps it holds for the Ceph FS that
+            # was just mounted; re-raise any other failure.
+            if 'permission denied' not in stderr.getvalue().lower():
+                raise
+
         self.mounted = True
 
     def cleanup_netns(self):
-- 
2.39.5