mgr/volumes: Add `fs volume rename` command
author     Ramana Raja <rraja@redhat.com>
Thu, 29 Jul 2021 00:06:57 +0000 (20:06 -0400)
committer  Ramana Raja <rraja@redhat.com>
Thu, 13 Jan 2022 15:36:46 +0000 (10:36 -0500)
The `fs volume rename` command renames the volume, i.e., the
orchestrator MDS service, the file system, and the file system's
data and metadata pools.

Fixes: https://tracker.ceph.com/issues/51162
Signed-off-by: Ramana Raja <rraja@redhat.com>
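
A minimal usage sketch of the new command (assuming a running cluster, the ``ceph`` CLI on PATH, and hypothetical volume names ``vol1``/``vol2``; ``fs volume ls`` returns a JSON list of ``{"name": ...}`` entries)::

    import json
    import subprocess

    # Rename the volume; the confirmation flag is required because the
    # operation can disrupt clients (see the release note and docs below).
    subprocess.run(
        ["ceph", "fs", "volume", "rename", "vol1", "vol2",
         "--yes-i-really-mean-it"],
        check=True)

    # Verify the rename by listing volumes.
    volumes = json.loads(subprocess.check_output(["ceph", "fs", "volume", "ls"]))
    assert "vol2" in [v["name"] for v in volumes]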
PendingReleaseNotes
doc/cephfs/fs-volumes.rst
qa/tasks/cephfs/test_volumes.py
src/pybind/mgr/volumes/fs/fs_util.py
src/pybind/mgr/volumes/fs/operations/volume.py
src/pybind/mgr/volumes/fs/volume.py
src/pybind/mgr/volumes/module.py

index da2de5ed8d1754fc7a1948271a9ca56d626badcd..f92360eda643c9c599fb0cbb07d28613d8ea4dae 100644 (file)
   using these re-authorized IDs may be disrupted, this command requires the
   "--yes-i-really-mean-it" flag. Also, mirroring is expected to be disabled
   on the file system.
+
+* fs: A FS volume can be renamed using the `fs volume rename` command. Any cephx
+  credentials authorized for the old volume name will need to be reauthorized to
+  the new volume name. Since the operations of the clients using these re-authorized
+  IDs may be disrupted, this command requires the "--yes-i-really-mean-it" flag. Also,
+  mirroring is expected to be disabled on the file system.
+
 * MDS upgrades no longer require stopping all standby MDS daemons before
   upgrading the sole active MDS for a file system.
 
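The release note above requires reauthorizing cephx IDs against the new volume name. One possible approach, sketched here with a hypothetical ``client.foo`` (``ceph fs authorize`` grants caps for a named file system; depending on the ID's existing caps, adjusting them with ``ceph auth caps`` may be needed instead)::

    import subprocess

    def reauthorize(client_id: str, new_vol_name: str,
                    path: str = "/", perms: str = "rw") -> None:
        # Hypothetical helper: grant the existing cephx ID caps against the
        # renamed file system so its mounts keep working.
        subprocess.run(
            ["ceph", "fs", "authorize", new_vol_name,
             f"client.{client_id}", path, perms],
            check=True)

    reauthorize("foo", "vol2")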
index 3cd8fde6a7c3d449a5cf1601dd47c65bfb1d613f..6ef08a2e9c395b2304196dd2f66e4735bf40bd56 100644 (file)
@@ -79,6 +79,24 @@ List volumes using::
 
     $ ceph fs volume ls
 
+Rename a volume using::
+
+    $ ceph fs volume rename <vol_name> <new_vol_name> [--yes-i-really-mean-it]
+
+Renaming a volume can be an expensive operation. It does the following:
+
+- renames the orchestrator-managed MDS service to match <new_vol_name>.
+  This involves launching an MDS service with <new_vol_name> and bringing down
+  the MDS service with <vol_name>.
+- renames the file system matching <vol_name> to <new_vol_name>
+- changes the application tags on the data and metadata pools of the file system
+  to <new_vol_name>
+- renames the metadata and data pools of the file system.
+
+The CephX IDs authorized for <vol_name> need to be reauthorized for <new_vol_name>. Any
+ongoing operations of the clients using these IDs may be disrupted. Mirroring is
+expected to be disabled on the volume.
+
 FS Subvolume groups
 -------------------
 
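The pool renames described above follow the volumes plugin's naming convention for volume-managed pools, ``cephfs.<vol_name>.meta`` and ``cephfs.<vol_name>.data`` (the tests below assume these names; the plugin derives them via ``gen_pool_names``). A minimal sketch of computing the expected post-rename names::

    from typing import Tuple

    def expected_pool_names(volname: str) -> Tuple[str, str]:
        # Naming convention used by mgr/volumes for the pools it creates;
        # mirrors gen_pool_names in the plugin code.
        return f"cephfs.{volname}.meta", f"cephfs.{volname}.data"

    # After `ceph fs volume rename vol1 vol2 --yes-i-really-mean-it`, a
    # single-data-pool volume is expected to end up with these pools:
    print(expected_pool_names("vol2"))  # ('cephfs.vol2.meta', 'cephfs.vol2.data')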
index 9052b578906c6b7c89309c758506688d414fae56..5571873a1b0bdba1b38bf7457182a3233ed5f3f7 100644 (file)
@@ -513,6 +513,89 @@ class TestVolumes(TestVolumesHelper):
             self.assertNotIn(pool["name"], pools,
                              "pool {0} exists after volume removal".format(pool["name"]))
 
+    def test_volume_rename(self):
+        """
+        That volume, its file system and pools, can be renamed.
+        """
+        for m in self.mounts:
+            m.umount_wait()
+        oldvolname = self.volname
+        newvolname = self._generate_random_volume_name()
+        new_data_pool, new_metadata_pool = f"cephfs.{newvolname}.data", f"cephfs.{newvolname}.meta"
+        self._fs_cmd("volume", "rename", oldvolname, newvolname,
+                     "--yes-i-really-mean-it")
+        volumels = json.loads(self._fs_cmd('volume', 'ls'))
+        volnames = [volume['name'] for volume in volumels]
+        # volume name changed
+        self.assertIn(newvolname, volnames)
+        self.assertNotIn(oldvolname, volnames)
+        # pool names changed
+        self.fs.get_pool_names(refresh=True)
+        self.assertEqual(new_metadata_pool, self.fs.get_metadata_pool_name())
+        self.assertEqual(new_data_pool, self.fs.get_data_pool_name())
+
+    def test_volume_rename_idempotency(self):
+        """
+        That volume rename is idempotent.
+        """
+        for m in self.mounts:
+            m.umount_wait()
+        oldvolname = self.volname
+        newvolname = self._generate_random_volume_name()
+        new_data_pool, new_metadata_pool = f"cephfs.{newvolname}.data", f"cephfs.{newvolname}.meta"
+        self._fs_cmd("volume", "rename", oldvolname, newvolname,
+                     "--yes-i-really-mean-it")
+        self._fs_cmd("volume", "rename", oldvolname, newvolname,
+                     "--yes-i-really-mean-it")
+        volumels = json.loads(self._fs_cmd('volume', 'ls'))
+        volnames = [volume['name'] for volume in volumels]
+        self.assertIn(newvolname, volnames)
+        self.assertNotIn(oldvolname, volnames)
+        self.fs.get_pool_names(refresh=True)
+        self.assertEqual(new_metadata_pool, self.fs.get_metadata_pool_name())
+        self.assertEqual(new_data_pool, self.fs.get_data_pool_name())
+
+    def test_volume_rename_fails_without_confirmation_flag(self):
+        """
+        That renaming volume fails without --yes-i-really-mean-it flag.
+        """
+        newvolname = self._generate_random_volume_name()
+        try:
+            self._fs_cmd("volume", "rename", self.volname, newvolname)
+        except CommandFailedError as ce:
+            self.assertEqual(ce.exitstatus, errno.EPERM,
+                "invalid error code on renaming a FS volume without the "
+                "'--yes-i-really-mean-it' flag")
+        else:
+            self.fail("expected renaming of FS volume to fail without the "
+                      "'--yes-i-really-mean-it' flag")
+
+    def test_volume_rename_for_more_than_one_data_pool(self):
+        """
+        That renaming a volume with more than one data pool does not change
+        the name of the data pools.
+        """
+        for m in self.mounts:
+            m.umount_wait()
+        self.fs.add_data_pool('another-data-pool')
+        oldvolname = self.volname
+        newvolname = self._generate_random_volume_name()
+        self.fs.get_pool_names(refresh=True)
+        orig_data_pool_names = list(self.fs.data_pools.values())
+        new_metadata_pool = f"cephfs.{newvolname}.meta"
+        self._fs_cmd("volume", "rename", self.volname, newvolname,
+                     "--yes-i-really-mean-it")
+        volumels = json.loads(self._fs_cmd('volume', 'ls'))
+        volnames = [volume['name'] for volume in volumels]
+        # volume name changed
+        self.assertIn(newvolname, volnames)
+        self.assertNotIn(oldvolname, volnames)
+        self.fs.get_pool_names(refresh=True)
+        # metadata pool name changed
+        self.assertEqual(new_metadata_pool, self.fs.get_metadata_pool_name())
+        # data pool names unchanged
+        self.assertCountEqual(orig_data_pool_names, list(self.fs.data_pools.values()))
+
 
 class TestSubvolumeGroups(TestVolumesHelper):
     """Tests for FS subvolume group operations."""
index 3d098ddb0588e8b998b2e36b84dd02a3f0a9f231..d4a67d78aac40c261441fb9c0d8d7c9d4b27011f 100644 (file)
@@ -21,6 +21,11 @@ def remove_pool(mgr, pool_name):
                'yes_i_really_really_mean_it': True}
     return mgr.mon_command(command)
 
+def rename_pool(mgr, pool_name, new_pool_name):
+    command = {'prefix': 'osd pool rename', 'srcpool': pool_name,
+               'destpool': new_pool_name}
+    return mgr.mon_command(command)
+
 def create_filesystem(mgr, fs_name, metadata_pool, data_pool):
     command = {'prefix': 'fs new', 'fs_name': fs_name, 'metadata': metadata_pool,
                'data': data_pool}
@@ -35,6 +40,11 @@ def remove_filesystem(mgr, fs_name):
     command = {'prefix': 'fs rm', 'fs_name': fs_name, 'yes_i_really_mean_it': True}
     return mgr.mon_command(command)
 
+def rename_filesystem(mgr, fs_name, new_fs_name):
+    command = {'prefix': 'fs rename', 'fs_name': fs_name, 'new_fs_name': new_fs_name,
+               'yes_i_really_mean_it': True}
+    return mgr.mon_command(command)
+
 def create_mds(mgr, fs_name, placement):
     spec = ServiceSpec(service_type='mds',
                                     service_id=fs_name,
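The new ``rename_pool`` and ``rename_filesystem`` helpers are thin wrappers around ``mgr.mon_command`` and return its ``(retcode, outb, outs)`` tuple. A sketch of how a caller might chain them, mirroring the error handling ``rename_volume`` uses in the next file (assumes an active mgr module handle ``mgr`` and the conventional pool names)::

    def rename_fs_then_metadata_pool(mgr, volname: str, newvolname: str):
        # Rename the file system first; on failure, surface the monitor's
        # error output alongside a readable message.
        r, outb, outs = rename_filesystem(mgr, volname, newvolname)
        if r != 0:
            return r, outb, f"fs rename failed: {outs}"

        # Then rename the metadata pool to its conventional new name.
        r, outb, outs = rename_pool(mgr, f"cephfs.{volname}.meta",
                                    f"cephfs.{newvolname}.meta")
        if r != 0:
            return r, outb, f"metadata pool rename failed: {outs}"

        return 0, f"renamed '{volname}' to '{newvolname}'", ""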
index e809f264d791930cae44e996fe8eb2304882b8b1..9ef06fd25e992fcd673a1317a574c23e56360f56 100644 (file)
@@ -2,7 +2,7 @@ import errno
 import logging
 import sys
 
-from typing import List
+from typing import List, Tuple
 
 from contextlib import contextmanager
 
@@ -10,8 +10,8 @@ import orchestrator
 
 from .lock import GlobalLock
 from ..exception import VolumeException
-from ..fs_util import create_pool, remove_pool, create_filesystem, \
-    remove_filesystem, create_mds, volume_exists
+from ..fs_util import create_pool, remove_pool, rename_pool, create_filesystem, \
+    remove_filesystem, rename_filesystem, create_mds, volume_exists
 from mgr_util import open_filesystem, CephfsConnectionException
 
 log = logging.getLogger(__name__)
@@ -117,6 +117,102 @@ def delete_volume(mgr, volname, metadata_pool, data_pools):
     result_str = "metadata pool: {0} data pool: {1} removed".format(metadata_pool, str(data_pools))
     return r, result_str, ""
 
+def rename_volume(mgr, volname: str, newvolname: str) -> Tuple[int, str, str]:
+    """
+    rename volume (orch MDS service, file system, pools)
+    """
+    # To allow volume rename to be idempotent, check whether orch managed MDS
+    # service is already renamed. If so, skip renaming MDS service.
+    completion = None
+    rename_mds_service = True
+    try:
+        completion = mgr.describe_service(
+            service_type='mds', service_name=f"mds.{newvolname}", refresh=True)
+        orchestrator.raise_if_exception(completion)
+    except (ImportError, orchestrator.OrchestratorError):
+        log.warning("Failed to fetch orch service mds.%s", newvolname)
+    except Exception as e:
+        # Don't let detailed orchestrator exceptions (python backtraces)
+        # bubble out to the user
+        log.exception("Failed to fetch orch service mds.%s", newvolname)
+        return -errno.EINVAL, "", str(e)
+    if completion and completion.result:
+        rename_mds_service = False
+
+    # Launch new MDS service matching newvolname
+    completion = None
+    remove_mds_service = False
+    if rename_mds_service:
+        try:
+            completion = mgr.describe_service(
+                service_type='mds', service_name=f"mds.{volname}", refresh=True)
+            orchestrator.raise_if_exception(completion)
+        except (ImportError, orchestrator.OrchestratorError):
+            log.warning("Failed to fetch orch service mds.%s", volname)
+        except Exception as e:
+            # Don't let detailed orchestrator exceptions (python backtraces)
+            # bubble out to the user
+            log.exception("Failed to fetch orch service mds.%s", volname)
+            return -errno.EINVAL, "", str(e)
+        if completion and completion.result:
+            svc = completion.result[0]
+            placement = svc.spec.placement.pretty_str()
+            create_mds(mgr, newvolname, placement)
+            remove_mds_service = True
+
+    # rename_filesystem is idempotent
+    r, outb, outs = rename_filesystem(mgr, volname, newvolname)
+    if r != 0:
+        errmsg = f"Failed to rename file system '{volname}' to '{newvolname}'"
+        log.error("Failed to rename file system '%s' to '%s'", volname, newvolname)
+        outs = f'{errmsg}; {outs}'
+        return r, outb, outs
+
+    # Rename file system's metadata and data pools
+    metadata_pool, data_pools = get_pool_names(mgr, newvolname)
+
+    new_metadata_pool, new_data_pool = gen_pool_names(newvolname)
+    if metadata_pool != new_metadata_pool:
+        r, outb, outs = rename_pool(mgr, metadata_pool, new_metadata_pool)
+        if r != 0:
+            errmsg = f"Failed to rename metadata pool '{metadata_pool}' to '{new_metadata_pool}'"
+            log.error("Failed to rename metadata pool '%s' to '%s'", metadata_pool, new_metadata_pool)
+            outs = f'{errmsg}; {outs}'
+            return r, outb, outs
+
+    data_pool_rename_failed = False
+    # If file system has more than one data pool, then skip renaming
+    # the data pools, and proceed to remove the old MDS service.
+    if len(data_pools) > 1:
+        data_pool_rename_failed = True
+    else:
+        data_pool = data_pools[0]
+        if data_pool != new_data_pool:
+            r, outb, outs = rename_pool(mgr, data_pool, new_data_pool)
+            if r != 0:
+                errmsg = f"Failed to rename data pool '{data_pool}' to '{new_data_pool}'"
+                log.error("Failed to rename data pool '%s' to '%s'", data_pool, new_data_pool)
+                outs = f'{errmsg}; {outs}'
+                return r, outb, outs
+
+    # Tear down old MDS service
+    if remove_mds_service:
+        try:
+            completion = mgr.remove_service('mds.' + volname)
+            orchestrator.raise_if_exception(completion)
+        except (ImportError, orchestrator.OrchestratorError):
+            log.warning("Failed to tear down orch service mds.%s", volname)
+        except Exception as e:
+            # Don't let detailed orchestrator exceptions (python backtraces)
+            # bubble out to the user
+            log.exception("Failed to tear down orch service mds.%s", volname)
+            return -errno.EINVAL, "", str(e)
+
+    outb = f"FS volume '{volname}' renamed to '{newvolname}'"
+    if data_pool_rename_failed:
+        outb += ". But failed to rename data pools as more than one data pool was found."
+
+    return r, outb, ""
 
 def list_volumes(mgr):
     """
index a3496d94229f72ce172741f010635de7b1fe9802..45710986b07a3034418c59a503e0af7a2ffe7e7b 100644 (file)
@@ -10,7 +10,7 @@ from mgr_util import CephfsClient
 from .fs_util import listdir
 
 from .operations.volume import create_volume, \
-    delete_volume, list_volumes, open_volume, get_pool_names
+    delete_volume, rename_volume, list_volumes, open_volume, get_pool_names
 from .operations.group import open_group, create_group, remove_group, open_group_unique
 from .operations.subvolume import open_subvol, create_subvol, remove_subvol, \
     create_clone
@@ -131,6 +131,21 @@ class VolumeClient(CephfsClient["Module"]):
         volumes = list_volumes(self.mgr)
         return 0, json.dumps(volumes, indent=4, sort_keys=True), ""
 
+    def rename_fs_volume(self, volname, newvolname, sure):
+        if self.is_stopping():
+            return -errno.ESHUTDOWN, "", "shutdown in progress"
+
+        if not sure:
+            return (
+                -errno.EPERM, "",
+                "WARNING: This will rename the filesystem and possibly its "
+                "pools. It is a potentially disruptive operation; clients' "
+                "cephx credentials need to be reauthorized to access the file "
+                "system and its pools with the new name. Add --yes-i-really-mean-it "
+                "if you are sure you wish to continue.")
+
+        return rename_volume(self.mgr, volname, newvolname)
+
     ### subvolume operations
 
     def _create_subvolume(self, fs_handle, volname, group, subvolname, **kwargs):
index a0a039acca368ac52484b1077d07cef298ec4c03..bcab12e36fffe219533d703f90aafe825d9febb2 100644 (file)
@@ -60,6 +60,14 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
             'desc': "Delete a FS volume by passing --yes-i-really-mean-it flag",
             'perm': 'rw'
         },
+        {
+            'cmd': 'fs volume rename '
+                   f'name=vol_name,type=CephString,goodchars={goodchars} '
+                   f'name=new_vol_name,type=CephString,goodchars={goodchars} '
+                   'name=yes_i_really_mean_it,type=CephBool,req=false ',
+            'desc': "Rename a CephFS volume by passing --yes-i-really-mean-it flag",
+            'perm': 'rw'
+        },
         {
             'cmd': 'fs subvolumegroup ls '
             'name=vol_name,type=CephString ',
@@ -416,6 +424,12 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
     def _cmd_fs_volume_ls(self, inbuf, cmd):
         return self.vc.list_fs_volumes()
 
+    @mgr_cmd_wrap
+    def _cmd_fs_volume_rename(self, inbuf, cmd):
+        return self.vc.rename_fs_volume(cmd['vol_name'],
+                                        cmd['new_vol_name'],
+                                        cmd.get('yes_i_really_mean_it', False))
+
     @mgr_cmd_wrap
     def _cmd_fs_subvolumegroup_create(self, inbuf, cmd):
         """