From: Rishabh Dave Date: Mon, 9 Oct 2023 19:58:36 +0000 (+0530) Subject: cephfs,mon: allow FS rename only if FS is offline X-Git-Tag: v19.0.0~133^2~2 X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=fe3a4b9683d7b6f72f57f5ed8bc324bdbb24351f;p=ceph.git cephfs,mon: allow FS rename only if FS is offline Reject the attempt to rename the CephFS if the CephFS is not offline. Add new tests for this and update current tests (test_admin.py and test_volumes.py) accordingly. Fixes: https://tracker.ceph.com/issues/63154 Signed-off-by: Rishabh Dave --- diff --git a/qa/tasks/cephfs/test_admin.py b/qa/tasks/cephfs/test_admin.py index b2e10e4d3cf4..fe224b6831dd 100644 --- a/qa/tasks/cephfs/test_admin.py +++ b/qa/tasks/cephfs/test_admin.py @@ -627,7 +627,11 @@ class TestRenameCommand(TestAdminCommands): new_fs_name = 'new_cephfs' client_id = 'test_new_cephfs' + self.run_ceph_cmd(f'fs fail {self.fs.name}') + sleep(5) self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it') + self.run_ceph_cmd(f'fs set {new_fs_name} joinable true') + sleep(5) # authorize a cephx ID access to the renamed file system. # use the ID to write to the file system. 
@@ -660,8 +664,12 @@ class TestRenameCommand(TestAdminCommands): orig_fs_name = self.fs.name new_fs_name = 'new_cephfs' + self.run_ceph_cmd(f'fs fail {self.fs.name}') + sleep(5) self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it') self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it') + self.run_ceph_cmd(f'fs set {new_fs_name} joinable true') + sleep(5) # original file system name does not appear in `fs ls` command self.assertFalse(self.fs.exists()) @@ -680,7 +688,11 @@ class TestRenameCommand(TestAdminCommands): new_fs_name = 'new_cephfs' data_pool = self.fs.get_data_pool_name() metadata_pool = self.fs.get_metadata_pool_name() + self.run_ceph_cmd(f'fs fail {self.fs.name}') + sleep(5) self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it') + self.run_ceph_cmd(f'fs set {new_fs_name} joinable true') + sleep(5) try: self.run_ceph_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool}") @@ -717,6 +729,8 @@ class TestRenameCommand(TestAdminCommands): """ That renaming a file system without '--yes-i-really-mean-it' flag fails. """ + self.run_ceph_cmd(f'fs fail {self.fs.name}') + sleep(5) try: self.run_ceph_cmd(f"fs rename {self.fs.name} new_fs") except CommandFailedError as ce: @@ -726,11 +740,14 @@ class TestRenameCommand(TestAdminCommands): else: self.fail("expected renaming of file system without the " "'--yes-i-really-mean-it' flag to fail ") + self.run_ceph_cmd(f'fs set {self.fs.name} joinable true') def test_fs_rename_fails_for_non_existent_fs(self): """ That renaming a non-existent file system fails. 
""" + self.run_ceph_cmd(f'fs fail {self.fs.name}') + sleep(5) try: self.run_ceph_cmd("fs rename non_existent_fs new_fs --yes-i-really-mean-it") except CommandFailedError as ce: @@ -744,6 +761,8 @@ class TestRenameCommand(TestAdminCommands): """ self.fs2 = self.mds_cluster.newfs(name='cephfs2', create=True) + self.run_ceph_cmd(f'fs fail {self.fs.name}') + sleep(5) try: self.run_ceph_cmd(f"fs rename {self.fs.name} {self.fs2.name} --yes-i-really-mean-it") except CommandFailedError as ce: @@ -751,6 +770,7 @@ class TestRenameCommand(TestAdminCommands): "invalid error code on renaming to a fs name that is already in use") else: self.fail("expected renaming to a new file system name that is already in use to fail.") + self.run_ceph_cmd(f'fs set {self.fs.name} joinable true') def test_fs_rename_fails_with_mirroring_enabled(self): """ @@ -760,6 +780,8 @@ class TestRenameCommand(TestAdminCommands): new_fs_name = 'new_cephfs' self.run_ceph_cmd(f'fs mirror enable {orig_fs_name}') + self.run_ceph_cmd(f'fs fail {self.fs.name}') + sleep(5) try: self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it') except CommandFailedError as ce: @@ -767,6 +789,36 @@ class TestRenameCommand(TestAdminCommands): else: self.fail("expected renaming of a mirrored file system to fail") self.run_ceph_cmd(f'fs mirror disable {orig_fs_name}') + self.run_ceph_cmd(f'fs set {self.fs.name} joinable true') + + def test_rename_when_fs_is_online(self): + ''' + Test that the command "ceph fs swap" command fails when first of the + two of FSs isn't failed/down. + ''' + client_id = 'test_new_cephfs' + new_fs_name = 'new_cephfs' + self.negtest_ceph_cmd( + args=(f'fs rename {self.fs.name} {new_fs_name} ' + '--yes-i-really-mean-it'), + errmsgs=(f"CephFS '{self.fs.name}' is not offline. Before " + "renaming a CephFS, it must be marked as down. 
See " + "`ceph fs fail`."), + retval=errno.EPERM) + + self.fs.getinfo() + keyring = self.fs.authorize(client_id, ('/', 'rw')) + keyring_path = self.mount_a.client_remote.mktemp(data=keyring) + self.mount_a.remount(client_id=client_id, + client_keyring_path=keyring_path, + cephfs_mntpt='/', + cephfs_name=self.fs.name) + + self.check_pool_application_metadata_key_value( + self.fs.get_data_pool_name(), 'cephfs', 'data', self.fs.name) + self.check_pool_application_metadata_key_value( + self.fs.get_metadata_pool_name(), 'cephfs', 'metadata', + self.fs.name) class TestDump(CephFSTestCase): diff --git a/qa/tasks/cephfs/test_volumes.py b/qa/tasks/cephfs/test_volumes.py index 44e28937bcbd..b11125d7247a 100644 --- a/qa/tasks/cephfs/test_volumes.py +++ b/qa/tasks/cephfs/test_volumes.py @@ -571,8 +571,12 @@ class TestVolumes(TestVolumesHelper): oldvolname = self.volname newvolname = self._generate_random_volume_name() new_data_pool, new_metadata_pool = f"cephfs.{newvolname}.data", f"cephfs.{newvolname}.meta" + + self.run_ceph_cmd(f'fs fail {oldvolname}') self._fs_cmd("volume", "rename", oldvolname, newvolname, "--yes-i-really-mean-it") + self.run_ceph_cmd(f'fs set {newvolname} joinable true') + volumels = json.loads(self._fs_cmd('volume', 'ls')) volnames = [volume['name'] for volume in volumels] # volume name changed @@ -592,10 +596,14 @@ class TestVolumes(TestVolumesHelper): oldvolname = self.volname newvolname = self._generate_random_volume_name() new_data_pool, new_metadata_pool = f"cephfs.{newvolname}.data", f"cephfs.{newvolname}.meta" + + self.run_ceph_cmd(f'fs fail {oldvolname}') self._fs_cmd("volume", "rename", oldvolname, newvolname, "--yes-i-really-mean-it") self._fs_cmd("volume", "rename", oldvolname, newvolname, "--yes-i-really-mean-it") + self.run_ceph_cmd(f'fs set {newvolname} joinable true') + volumels = json.loads(self._fs_cmd('volume', 'ls')) volnames = [volume['name'] for volume in volumels] self.assertIn(newvolname, volnames) @@ -609,6 +617,8 @@ class 
TestVolumes(TestVolumesHelper): That renaming volume fails without --yes-i-really-mean-it flag. """ newvolname = self._generate_random_volume_name() + + self.run_ceph_cmd(f'fs fail {self.volname}') try: self._fs_cmd("volume", "rename", self.volname, newvolname) except CommandFailedError as ce: @@ -618,6 +628,7 @@ class TestVolumes(TestVolumesHelper): else: self.fail("expected renaming of FS volume to fail without the " "'--yes-i-really-mean-it' flag") + self.run_ceph_cmd(f'fs set {self.volname} joinable true') def test_volume_rename_for_more_than_one_data_pool(self): """ @@ -632,8 +643,12 @@ class TestVolumes(TestVolumesHelper): self.fs.get_pool_names(refresh=True) orig_data_pool_names = list(self.fs.data_pools.values()) new_metadata_pool = f"cephfs.{newvolname}.meta" + + self.run_ceph_cmd(f'fs fail {oldvolname}') self._fs_cmd("volume", "rename", self.volname, newvolname, "--yes-i-really-mean-it") + self.run_ceph_cmd(f'fs set {newvolname} joinable true') + volumels = json.loads(self._fs_cmd('volume', 'ls')) volnames = [volume['name'] for volume in volumels] # volume name changed @@ -645,6 +660,19 @@ class TestVolumes(TestVolumesHelper): # data pool names unchanged self.assertCountEqual(orig_data_pool_names, list(self.fs.data_pools.values())) + def test_rename_when_fs_is_online(self): + for m in self.mounts: + m.umount_wait() + newvolname = self._generate_random_volume_name() + + self.negtest_ceph_cmd( + args=(f'fs volume rename {self.volname} {newvolname} ' + '--yes-i-really-mean-it'), + errmsgs=(f"CephFS '{self.volname}' is not offline. Before " + "renaming a CephFS, it must be marked as down. 
See " + "`ceph fs fail`."), + retval=errno.EPERM) + def test_volume_info(self): """ Tests the 'fs volume info' command diff --git a/src/mon/FSCommands.cc b/src/mon/FSCommands.cc index de8695fb783c..906505224ea6 100644 --- a/src/mon/FSCommands.cc +++ b/src/mon/FSCommands.cc @@ -1246,6 +1246,14 @@ class RenameFilesystemHandler : public FileSystemCommandHandler mon->osdmon()->wait_for_writeable(op, new PaxosService::C_RetryMessage(mon->mdsmon(), op)); return -EAGAIN; } + + // Check that no MDS daemons is up for this CephFS. + if (fsp->get_mds_map().get_num_up_mds() > 0) { + ss << "CephFS '" << fs_name << "' is not offline. Before renaming " + << "a CephFS, it must be marked as down. See `ceph fs fail`."; + return -EPERM; + } + for (const auto p : fsp->get_mds_map().get_data_pools()) { mon->osdmon()->do_application_enable(p, pg_pool_t::APPLICATION_NAME_CEPHFS,