From: Rishabh Dave Date: Mon, 9 Oct 2023 19:58:36 +0000 (+0530) Subject: cephfs,mon: allow FS rename only if FS is offline X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=c3f632044c9e68aad6f7e1cd4284a8862afdf572;p=ceph.git cephfs,mon: allow FS rename only if FS is offline Reject the attempt to rename the CephFS if the CephFS is not offline. Add new tests for this and update current tests (test_admin.py and test_volumes.py) accordingly. Fixes: https://tracker.ceph.com/issues/63154 Signed-off-by: Rishabh Dave (cherry picked from commit fe3a4b9683d7b6f72f57f5ed8bc324bdbb24351f) Conflicts: src/mon/FSCommands.cc - What is "fsp" in main branch is "fs" in Reef branch due to a missing backport. --- diff --git a/qa/tasks/cephfs/test_admin.py b/qa/tasks/cephfs/test_admin.py index fcca8b335bbd..22321911f410 100644 --- a/qa/tasks/cephfs/test_admin.py +++ b/qa/tasks/cephfs/test_admin.py @@ -775,7 +775,11 @@ class TestRenameCommand(TestAdminCommands): new_fs_name = 'new_cephfs' client_id = 'test_new_cephfs' + self.run_ceph_cmd(f'fs fail {self.fs.name}') + sleep(5) self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it') + self.run_ceph_cmd(f'fs set {new_fs_name} joinable true') + sleep(5) # authorize a cephx ID access to the renamed file system. # use the ID to write to the file system. 
@@ -808,8 +812,12 @@ class TestRenameCommand(TestAdminCommands): orig_fs_name = self.fs.name new_fs_name = 'new_cephfs' + self.run_ceph_cmd(f'fs fail {self.fs.name}') + sleep(5) self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it') self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it') + self.run_ceph_cmd(f'fs set {new_fs_name} joinable true') + sleep(5) # original file system name does not appear in `fs ls` command self.assertFalse(self.fs.exists()) @@ -828,7 +836,11 @@ class TestRenameCommand(TestAdminCommands): new_fs_name = 'new_cephfs' data_pool = self.fs.get_data_pool_name() metadata_pool = self.fs.get_metadata_pool_name() + self.run_ceph_cmd(f'fs fail {self.fs.name}') + sleep(5) self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it') + self.run_ceph_cmd(f'fs set {new_fs_name} joinable true') + sleep(5) try: self.run_ceph_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool}") @@ -865,6 +877,8 @@ class TestRenameCommand(TestAdminCommands): """ That renaming a file system without '--yes-i-really-mean-it' flag fails. """ + self.run_ceph_cmd(f'fs fail {self.fs.name}') + sleep(5) try: self.run_ceph_cmd(f"fs rename {self.fs.name} new_fs") except CommandFailedError as ce: @@ -874,11 +888,14 @@ class TestRenameCommand(TestAdminCommands): else: self.fail("expected renaming of file system without the " "'--yes-i-really-mean-it' flag to fail ") + self.run_ceph_cmd(f'fs set {self.fs.name} joinable true') def test_fs_rename_fails_for_non_existent_fs(self): """ That renaming a non-existent file system fails. 
""" + self.run_ceph_cmd(f'fs fail {self.fs.name}') + sleep(5) try: self.run_ceph_cmd("fs rename non_existent_fs new_fs --yes-i-really-mean-it") except CommandFailedError as ce: @@ -892,6 +909,8 @@ class TestRenameCommand(TestAdminCommands): """ self.fs2 = self.mds_cluster.newfs(name='cephfs2', create=True) + self.run_ceph_cmd(f'fs fail {self.fs.name}') + sleep(5) try: self.run_ceph_cmd(f"fs rename {self.fs.name} {self.fs2.name} --yes-i-really-mean-it") except CommandFailedError as ce: @@ -899,6 +918,7 @@ class TestRenameCommand(TestAdminCommands): "invalid error code on renaming to a fs name that is already in use") else: self.fail("expected renaming to a new file system name that is already in use to fail.") + self.run_ceph_cmd(f'fs set {self.fs.name} joinable true') def test_fs_rename_fails_with_mirroring_enabled(self): """ @@ -908,6 +928,8 @@ class TestRenameCommand(TestAdminCommands): new_fs_name = 'new_cephfs' self.run_ceph_cmd(f'fs mirror enable {orig_fs_name}') + self.run_ceph_cmd(f'fs fail {self.fs.name}') + sleep(5) try: self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it') except CommandFailedError as ce: @@ -915,6 +937,36 @@ class TestRenameCommand(TestAdminCommands): else: self.fail("expected renaming of a mirrored file system to fail") self.run_ceph_cmd(f'fs mirror disable {orig_fs_name}') + self.run_ceph_cmd(f'fs set {self.fs.name} joinable true') + + def test_rename_when_fs_is_online(self): + ''' + Test that the command "ceph fs swap" command fails when first of the + two of FSs isn't failed/down. + ''' + client_id = 'test_new_cephfs' + new_fs_name = 'new_cephfs' + self.negtest_ceph_cmd( + args=(f'fs rename {self.fs.name} {new_fs_name} ' + '--yes-i-really-mean-it'), + errmsgs=(f"CephFS '{self.fs.name}' is not offline. Before " + "renaming a CephFS, it must be marked as down. 
See " + "`ceph fs fail`."), + retval=errno.EPERM) + + self.fs.getinfo() + keyring = self.fs.authorize(client_id, ('/', 'rw')) + keyring_path = self.mount_a.client_remote.mktemp(data=keyring) + self.mount_a.remount(client_id=client_id, + client_keyring_path=keyring_path, + cephfs_mntpt='/', + cephfs_name=self.fs.name) + + self.check_pool_application_metadata_key_value( + self.fs.get_data_pool_name(), 'cephfs', 'data', self.fs.name) + self.check_pool_application_metadata_key_value( + self.fs.get_metadata_pool_name(), 'cephfs', 'metadata', + self.fs.name) class TestDump(CephFSTestCase): diff --git a/qa/tasks/cephfs/test_volumes.py b/qa/tasks/cephfs/test_volumes.py index 612a4ef41d4b..d9f7802a1154 100644 --- a/qa/tasks/cephfs/test_volumes.py +++ b/qa/tasks/cephfs/test_volumes.py @@ -648,8 +648,12 @@ class TestRenameCmd(TestVolumesHelper): oldvolname = self.volname newvolname = self._gen_vol_name() new_data_pool, new_metadata_pool = f"cephfs.{newvolname}.data", f"cephfs.{newvolname}.meta" + + self.run_ceph_cmd(f'fs fail {oldvolname}') self._fs_cmd("volume", "rename", oldvolname, newvolname, "--yes-i-really-mean-it") + self.run_ceph_cmd(f'fs set {newvolname} joinable true') + volumels = json.loads(self._fs_cmd('volume', 'ls')) volnames = [volume['name'] for volume in volumels] # volume name changed @@ -669,10 +673,14 @@ class TestRenameCmd(TestVolumesHelper): oldvolname = self.volname newvolname = self._gen_vol_name() new_data_pool, new_metadata_pool = f"cephfs.{newvolname}.data", f"cephfs.{newvolname}.meta" + + self.run_ceph_cmd(f'fs fail {oldvolname}') self._fs_cmd("volume", "rename", oldvolname, newvolname, "--yes-i-really-mean-it") self._fs_cmd("volume", "rename", oldvolname, newvolname, "--yes-i-really-mean-it") + self.run_ceph_cmd(f'fs set {newvolname} joinable true') + volumels = json.loads(self._fs_cmd('volume', 'ls')) volnames = [volume['name'] for volume in volumels] self.assertIn(newvolname, volnames) @@ -687,6 +695,7 @@ class TestRenameCmd(TestVolumesHelper): 
""" newvolname = self._gen_vol_name() + self.run_ceph_cmd(f'fs fail {self.volname}') try: self._fs_cmd("volume", "rename", self.volname, newvolname) except CommandFailedError as ce: @@ -696,6 +705,7 @@ class TestRenameCmd(TestVolumesHelper): else: self.fail("expected renaming of FS volume to fail without the " "'--yes-i-really-mean-it' flag") + self.run_ceph_cmd(f'fs set {self.volname} joinable true') def test_volume_rename_for_more_than_one_data_pool(self): """ @@ -710,8 +720,12 @@ class TestRenameCmd(TestVolumesHelper): self.fs.get_pool_names(refresh=True) orig_data_pool_names = list(self.fs.data_pools.values()) new_metadata_pool = f"cephfs.{newvolname}.meta" + + self.run_ceph_cmd(f'fs fail {oldvolname}') self._fs_cmd("volume", "rename", self.volname, newvolname, "--yes-i-really-mean-it") + self.run_ceph_cmd(f'fs set {newvolname} joinable true') + volumels = json.loads(self._fs_cmd('volume', 'ls')) volnames = [volume['name'] for volume in volumels] # volume name changed @@ -723,6 +737,19 @@ class TestRenameCmd(TestVolumesHelper): # data pool names unchanged self.assertCountEqual(orig_data_pool_names, list(self.fs.data_pools.values())) + def test_rename_when_fs_is_online(self): + for m in self.mounts: + m.umount_wait() + newvolname = self._gen_vol_name() + + self.negtest_ceph_cmd( + args=(f'fs volume rename {self.volname} {newvolname} ' + '--yes-i-really-mean-it'), + errmsgs=(f"CephFS '{self.volname}' is not offline. Before " + "renaming a CephFS, it must be marked as down. 
See " + "`ceph fs fail`."), + retval=errno.EPERM) + def test_volume_info(self): """ Tests the 'fs volume info' command diff --git a/src/mon/FSCommands.cc b/src/mon/FSCommands.cc index c564226b04f5..51cf20cacd15 100644 --- a/src/mon/FSCommands.cc +++ b/src/mon/FSCommands.cc @@ -1273,6 +1273,14 @@ class RenameFilesystemHandler : public FileSystemCommandHandler mon->osdmon()->wait_for_writeable(op, new PaxosService::C_RetryMessage(mon->mdsmon(), op)); return -EAGAIN; } + + // Check that no MDS daemons is up for this CephFS. + if (fs->get_mds_map().get_num_up_mds() > 0) { + ss << "CephFS '" << fs_name << "' is not offline. Before renaming " + << "a CephFS, it must be marked as down. See `ceph fs fail`."; + return -EPERM; + } + for (const auto p : fs->mds_map.get_data_pools()) { mon->osdmon()->do_application_enable(p, APP_NAME_CEPHFS, "data", new_fs_name, true);