From: Ramana Raja
Date: Thu, 29 Jul 2021 00:06:57 +0000 (-0400)
Subject: mgr/volumes: Add `fs volume rename` command
X-Git-Tag: v18.0.0~1418^2~2
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=70697629bf91b325112e319027bb2c89ce10dca0;p=ceph.git

mgr/volumes: Add `fs volume rename` command

The `fs volume rename` command renames the volume, i.e., the
orchestrator MDS service, the file system, and the data and metadata
pools of the file system.

Fixes: https://tracker.ceph.com/issues/51162
Signed-off-by: Ramana Raja
---

diff --git a/PendingReleaseNotes b/PendingReleaseNotes
index da2de5ed8d17..f92360eda643 100644
--- a/PendingReleaseNotes
+++ b/PendingReleaseNotes
@@ -41,6 +41,13 @@
   using these re-authorized IDs may be disrupted, this command requires the
   "--yes-i-really-mean-it" flag. Also, mirroring is expected to be disabled
   on the file system.
+
+* fs: A FS volume can be renamed using the `fs volume rename` command. Any cephx
+  credentials authorized for the old volume name will need to be reauthorized to
+  the new volume name. Since the operations of the clients using these re-authorized
+  IDs may be disrupted, this command requires the "--yes-i-really-mean-it" flag. Also,
+  mirroring is expected to be disabled on the file system.
+
 * MDS upgrades no longer require stopping all standby MDS daemons before
   upgrading the sole active MDS for a file system.
diff --git a/doc/cephfs/fs-volumes.rst b/doc/cephfs/fs-volumes.rst
index 3cd8fde6a7c3..6ef08a2e9c39 100644
--- a/doc/cephfs/fs-volumes.rst
+++ b/doc/cephfs/fs-volumes.rst
@@ -79,6 +79,24 @@ List volumes using::
 
     $ ceph fs volume ls
 
+Rename a volume using::
+
+    $ ceph fs volume rename <vol_name> <new_vol_name> [--yes-i-really-mean-it]
+
+Renaming a volume can be an expensive operation. It does the following:
+
+- renames the orchestrator-managed MDS service to match the <new_vol_name>.
+  This involves launching an MDS service with <new_vol_name> and bringing
+  down the MDS service with <vol_name>.
+- renames the file system matching <vol_name> to <new_vol_name>
+- changes the application tags on the data and metadata pools of the file
+  system to <new_vol_name>
+- renames the metadata and data pools of the file system.
+
+The CephX IDs authorized to <vol_name> need to be reauthorized to
+<new_vol_name>. Any on-going operations of the clients using these IDs may
+be disrupted. Mirroring is expected to be disabled on the volume.
+
 FS Subvolume groups
 -------------------
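Note: a complete rename-and-reauthorize session per the documentation above
might look like the following. The volume names and the client.app ID are
made up for illustration, and the exact caps depend on how the client was
originally authorized:

    $ ceph fs volume rename oldvol newvol --yes-i-really-mean-it
    $ ceph auth caps client.app \
          mds 'allow rw fsname=newvol' \
          mon 'allow r fsname=newvol' \
          osd 'allow rw tag cephfs data=newvol'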
+ """ + for m in self.mounts: + m.umount_wait() + oldvolname = self.volname + newvolname = self._generate_random_volume_name() + new_data_pool, new_metadata_pool = f"cephfs.{newvolname}.data", f"cephfs.{newvolname}.meta" + self._fs_cmd("volume", "rename", oldvolname, newvolname, + "--yes-i-really-mean-it") + volumels = json.loads(self._fs_cmd('volume', 'ls')) + volnames = [volume['name'] for volume in volumels] + # volume name changed + self.assertIn(newvolname, volnames) + self.assertNotIn(oldvolname, volnames) + # pool names changed + self.fs.get_pool_names(refresh=True) + self.assertEqual(new_metadata_pool, self.fs.get_metadata_pool_name()) + self.assertEqual(new_data_pool, self.fs.get_data_pool_name()) + + def test_volume_rename_idempotency(self): + """ + That volume rename is idempotent. + """ + for m in self.mounts: + m.umount_wait() + oldvolname = self.volname + newvolname = self._generate_random_volume_name() + new_data_pool, new_metadata_pool = f"cephfs.{newvolname}.data", f"cephfs.{newvolname}.meta" + self._fs_cmd("volume", "rename", oldvolname, newvolname, + "--yes-i-really-mean-it") + self._fs_cmd("volume", "rename", oldvolname, newvolname, + "--yes-i-really-mean-it") + volumels = json.loads(self._fs_cmd('volume', 'ls')) + volnames = [volume['name'] for volume in volumels] + self.assertIn(newvolname, volnames) + self.assertNotIn(oldvolname, volnames) + self.fs.get_pool_names(refresh=True) + self.assertEqual(new_metadata_pool, self.fs.get_metadata_pool_name()) + self.assertEqual(new_data_pool, self.fs.get_data_pool_name()) + + def test_volume_rename_fails_without_confirmation_flag(self): + """ + That renaming volume fails without --yes-i-really-mean-it flag. + """ + newvolname = self._generate_random_volume_name() + try: + self._fs_cmd("volume", "rename", self.volname, newvolname) + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.EPERM, + "invalid error code on renaming a FS volume without the " + "'--yes-i-really-mean-it' flag") + else: + self.fail("expected renaming of FS volume to fail without the " + "'--yes-i-really-mean-it' flag") + + def test_volume_rename_for_more_than_one_data_pool(self): + """ + That renaming a volume with more than one data pool does not change + the name of the data pools. 
+ """ + for m in self.mounts: + m.umount_wait() + self.fs.add_data_pool('another-data-pool') + oldvolname = self.volname + newvolname = self._generate_random_volume_name() + self.fs.get_pool_names(refresh=True) + orig_data_pool_names = list(self.fs.data_pools.values()) + new_metadata_pool = f"cephfs.{newvolname}.meta" + self._fs_cmd("volume", "rename", self.volname, newvolname, + "--yes-i-really-mean-it") + volumels = json.loads(self._fs_cmd('volume', 'ls')) + volnames = [volume['name'] for volume in volumels] + # volume name changed + self.assertIn(newvolname, volnames) + self.assertNotIn(oldvolname, volnames) + self.fs.get_pool_names(refresh=True) + # metadata pool name changed + self.assertEqual(new_metadata_pool, self.fs.get_metadata_pool_name()) + # data pool names unchanged + self.assertCountEqual(orig_data_pool_names, list(self.fs.data_pools.values())) + class TestSubvolumeGroups(TestVolumesHelper): """Tests for FS subvolume group operations.""" diff --git a/src/pybind/mgr/volumes/fs/fs_util.py b/src/pybind/mgr/volumes/fs/fs_util.py index 3d098ddb0588..d4a67d78aac4 100644 --- a/src/pybind/mgr/volumes/fs/fs_util.py +++ b/src/pybind/mgr/volumes/fs/fs_util.py @@ -21,6 +21,11 @@ def remove_pool(mgr, pool_name): 'yes_i_really_really_mean_it': True} return mgr.mon_command(command) +def rename_pool(mgr, pool_name, new_pool_name): + command = {'prefix': 'osd pool rename', 'srcpool': pool_name, + 'destpool': new_pool_name} + return mgr.mon_command(command) + def create_filesystem(mgr, fs_name, metadata_pool, data_pool): command = {'prefix': 'fs new', 'fs_name': fs_name, 'metadata': metadata_pool, 'data': data_pool} @@ -35,6 +40,11 @@ def remove_filesystem(mgr, fs_name): command = {'prefix': 'fs rm', 'fs_name': fs_name, 'yes_i_really_mean_it': True} return mgr.mon_command(command) +def rename_filesystem(mgr, fs_name, new_fs_name): + command = {'prefix': 'fs rename', 'fs_name': fs_name, 'new_fs_name': new_fs_name, + 'yes_i_really_mean_it': True} + return mgr.mon_command(command) + def create_mds(mgr, fs_name, placement): spec = ServiceSpec(service_type='mds', service_id=fs_name, diff --git a/src/pybind/mgr/volumes/fs/operations/volume.py b/src/pybind/mgr/volumes/fs/operations/volume.py index e809f264d791..9ef06fd25e99 100644 --- a/src/pybind/mgr/volumes/fs/operations/volume.py +++ b/src/pybind/mgr/volumes/fs/operations/volume.py @@ -2,7 +2,7 @@ import errno import logging import sys -from typing import List +from typing import List, Tuple from contextlib import contextmanager @@ -10,8 +10,8 @@ import orchestrator from .lock import GlobalLock from ..exception import VolumeException -from ..fs_util import create_pool, remove_pool, create_filesystem, \ - remove_filesystem, create_mds, volume_exists +from ..fs_util import create_pool, remove_pool, rename_pool, create_filesystem, \ + remove_filesystem, rename_filesystem, create_mds, volume_exists from mgr_util import open_filesystem, CephfsConnectionException log = logging.getLogger(__name__) @@ -117,6 +117,102 @@ def delete_volume(mgr, volname, metadata_pool, data_pools): result_str = "metadata pool: {0} data pool: {1} removed".format(metadata_pool, str(data_pools)) return r, result_str, "" +def rename_volume(mgr, volname: str, newvolname: str) -> Tuple[int, str, str]: + """ + rename volume (orch MDS service, file system, pools) + """ + # To allow volume rename to be idempotent, check whether orch managed MDS + # service is already renamed. If so, skip renaming MDS service. 
diff --git a/src/pybind/mgr/volumes/fs/operations/volume.py b/src/pybind/mgr/volumes/fs/operations/volume.py
index e809f264d791..9ef06fd25e99 100644
--- a/src/pybind/mgr/volumes/fs/operations/volume.py
+++ b/src/pybind/mgr/volumes/fs/operations/volume.py
@@ -2,7 +2,7 @@ import errno
 import logging
 import sys
 
-from typing import List
+from typing import List, Tuple
 
 from contextlib import contextmanager
 
@@ -10,8 +10,8 @@ import orchestrator
 
 from .lock import GlobalLock
 from ..exception import VolumeException
-from ..fs_util import create_pool, remove_pool, create_filesystem, \
-    remove_filesystem, create_mds, volume_exists
+from ..fs_util import create_pool, remove_pool, rename_pool, create_filesystem, \
+    remove_filesystem, rename_filesystem, create_mds, volume_exists
 
 from mgr_util import open_filesystem, CephfsConnectionException
 
 log = logging.getLogger(__name__)
@@ -117,6 +117,102 @@ def delete_volume(mgr, volname, metadata_pool, data_pools):
         result_str = "metadata pool: {0} data pool: {1} removed".format(metadata_pool, str(data_pools))
     return r, result_str, ""
 
+def rename_volume(mgr, volname: str, newvolname: str) -> Tuple[int, str, str]:
+    """
+    rename volume (orch MDS service, file system, pools)
+    """
+    # To allow volume rename to be idempotent, check whether the orch managed
+    # MDS service is already renamed. If so, skip renaming the MDS service.
+    completion = None
+    rename_mds_service = True
+    try:
+        completion = mgr.describe_service(
+            service_type='mds', service_name=f"mds.{newvolname}", refresh=True)
+        orchestrator.raise_if_exception(completion)
+    except (ImportError, orchestrator.OrchestratorError):
+        log.warning("Failed to fetch orch service mds.%s", newvolname)
+    except Exception as e:
+        # Don't let detailed orchestrator exceptions (python backtraces)
+        # bubble out to the user
+        log.exception("Failed to fetch orch service mds.%s", newvolname)
+        return -errno.EINVAL, "", str(e)
+    if completion and completion.result:
+        rename_mds_service = False
+
+    # Launch new MDS service matching newvolname
+    completion = None
+    remove_mds_service = False
+    if rename_mds_service:
+        try:
+            completion = mgr.describe_service(
+                service_type='mds', service_name=f"mds.{volname}", refresh=True)
+            orchestrator.raise_if_exception(completion)
+        except (ImportError, orchestrator.OrchestratorError):
+            log.warning("Failed to fetch orch service mds.%s", volname)
+        except Exception as e:
+            # Don't let detailed orchestrator exceptions (python backtraces)
+            # bubble out to the user
+            log.exception("Failed to fetch orch service mds.%s", volname)
+            return -errno.EINVAL, "", str(e)
+        if completion and completion.result:
+            svc = completion.result[0]
+            placement = svc.spec.placement.pretty_str()
+            create_mds(mgr, newvolname, placement)
+            remove_mds_service = True
+
+    # rename_filesystem is idempotent
+    r, outb, outs = rename_filesystem(mgr, volname, newvolname)
+    if r != 0:
+        errmsg = f"Failed to rename file system '{volname}' to '{newvolname}'"
+        log.error("Failed to rename file system '%s' to '%s'", volname, newvolname)
+        outs = f'{errmsg}; {outs}'
+        return r, outb, outs
+
+    # Rename the file system's metadata and data pools
+    metadata_pool, data_pools = get_pool_names(mgr, newvolname)
+
+    new_metadata_pool, new_data_pool = gen_pool_names(newvolname)
+    if metadata_pool != new_metadata_pool:
+        r, outb, outs = rename_pool(mgr, metadata_pool, new_metadata_pool)
+        if r != 0:
+            errmsg = f"Failed to rename metadata pool '{metadata_pool}' to '{new_metadata_pool}'"
+            log.error("Failed to rename metadata pool '%s' to '%s'", metadata_pool, new_metadata_pool)
+            outs = f'{errmsg}; {outs}'
+            return r, outb, outs
+
+    data_pool_rename_failed = False
+    # If the file system has more than one data pool, then skip renaming
+    # the data pools, and proceed to remove the old MDS service.
+    if len(data_pools) > 1:
+        data_pool_rename_failed = True
+    else:
+        data_pool = data_pools[0]
+        if data_pool != new_data_pool:
+            r, outb, outs = rename_pool(mgr, data_pool, new_data_pool)
+            if r != 0:
+                errmsg = f"Failed to rename data pool '{data_pool}' to '{new_data_pool}'"
+                log.error("Failed to rename data pool '%s' to '%s'", data_pool, new_data_pool)
+                outs = f'{errmsg}; {outs}'
+                return r, outb, outs
+
+    # Tear down the old MDS service
+    if remove_mds_service:
+        try:
+            completion = mgr.remove_service('mds.' + volname)
+            orchestrator.raise_if_exception(completion)
+        except (ImportError, orchestrator.OrchestratorError):
+            log.warning("Failed to tear down orch service mds.%s", volname)
+        except Exception as e:
+            # Don't let detailed orchestrator exceptions (python backtraces)
+            # bubble out to the user
+            log.exception("Failed to tear down orch service mds.%s", volname)
+            return -errno.EINVAL, "", str(e)
+
+    outb = f"FS volume '{volname}' renamed to '{newvolname}'"
+    if data_pool_rename_failed:
+        outb += ". But the data pools were not renamed, as more than one data pool was found."
+
+    return r, outb, ""
 
 def list_volumes(mgr):
     """
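Note: because rename_volume() skips each step that has already been carried
out, repeating the command is safe; with made-up volume names, both
invocations below succeed and report the same result (the message is the
outb string set above):

    $ ceph fs volume rename vol1 vol2 --yes-i-really-mean-it
    FS volume 'vol1' renamed to 'vol2'
    $ ceph fs volume rename vol1 vol2 --yes-i-really-mean-it
    FS volume 'vol1' renamed to 'vol2'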
diff --git a/src/pybind/mgr/volumes/fs/volume.py b/src/pybind/mgr/volumes/fs/volume.py
index a3496d94229f..45710986b07a 100644
--- a/src/pybind/mgr/volumes/fs/volume.py
+++ b/src/pybind/mgr/volumes/fs/volume.py
@@ -10,7 +10,7 @@ from mgr_util import CephfsClient
 
 from .fs_util import listdir
 
 from .operations.volume import create_volume, \
-    delete_volume, list_volumes, open_volume, get_pool_names
+    delete_volume, rename_volume, list_volumes, open_volume, get_pool_names
 from .operations.group import open_group, create_group, remove_group, open_group_unique
 from .operations.subvolume import open_subvol, create_subvol, remove_subvol, \
     create_clone
@@ -131,6 +131,21 @@ class VolumeClient(CephfsClient["Module"]):
         volumes = list_volumes(self.mgr)
         return 0, json.dumps(volumes, indent=4, sort_keys=True), ""
 
+    def rename_fs_volume(self, volname, newvolname, sure):
+        if self.is_stopping():
+            return -errno.ESHUTDOWN, "", "shutdown in progress"
+
+        if not sure:
+            return (
+                -errno.EPERM, "",
+                "WARNING: This will rename the filesystem and possibly its "
+                "pools. It is a potentially disruptive operation; clients' "
+                "cephx credentials need to be reauthorized to access the file "
+                "system and its pools with the new name. Add "
+                "--yes-i-really-mean-it if you are sure you wish to continue.")
+
+        return rename_volume(self.mgr, volname, newvolname)
+
     ### subvolume operations
 
     def _create_subvolume(self, fs_handle, volname, group, subvolname, **kwargs):
diff --git a/src/pybind/mgr/volumes/module.py b/src/pybind/mgr/volumes/module.py
index a0a039acca36..bcab12e36fff 100644
--- a/src/pybind/mgr/volumes/module.py
+++ b/src/pybind/mgr/volumes/module.py
@@ -60,6 +60,14 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
             'desc': "Delete a FS volume by passing --yes-i-really-mean-it flag",
             'perm': 'rw'
         },
+        {
+            'cmd': 'fs volume rename '
+                   f'name=vol_name,type=CephString,goodchars={goodchars} '
+                   f'name=new_vol_name,type=CephString,goodchars={goodchars} '
+                   'name=yes_i_really_mean_it,type=CephBool,req=false ',
+            'desc': "Rename a CephFS volume by passing --yes-i-really-mean-it flag",
+            'perm': 'rw'
+        },
         {
             'cmd': 'fs subvolumegroup ls '
                    'name=vol_name,type=CephString ',
@@ -416,6 +424,12 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
     def _cmd_fs_volume_ls(self, inbuf, cmd):
         return self.vc.list_fs_volumes()
 
+    @mgr_cmd_wrap
+    def _cmd_fs_volume_rename(self, inbuf, cmd):
+        return self.vc.rename_fs_volume(cmd['vol_name'],
+                                        cmd['new_vol_name'],
+                                        cmd.get('yes_i_really_mean_it', False))
+
     @mgr_cmd_wrap
     def _cmd_fs_subvolumegroup_create(self, inbuf, cmd):
        """
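Note: without the confirmation flag the command refuses to run, returning
EPERM with the warning registered in rename_fs_volume(); for example
(volume names made up):

    $ ceph fs volume rename vol1 vol2
    Error EPERM: WARNING: This will rename the filesystem and possibly its
    pools. It is a potentially disruptive operation; clients' cephx
    credentials need to be reauthorized to access the file system and its
    pools with the new name. Add --yes-i-really-mean-it if you are sure you
    wish to continue.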