]> git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
cephfs,mon: allow FS rename only if FS is offline
authorRishabh Dave <ridave@redhat.com>
Mon, 9 Oct 2023 19:58:36 +0000 (01:28 +0530)
committerRishabh Dave <ridave@redhat.com>
Thu, 16 Jan 2025 11:21:45 +0000 (16:51 +0530)
Reject the attempt to rename the CephFS if the CephFS is not offline.
Add new tests for this and update current tests (test_admin.py and
test_volumes.py) accordingly.

Fixes: https://tracker.ceph.com/issues/63154
Signed-off-by: Rishabh Dave <ridave@redhat.com>
(cherry picked from commit fe3a4b9683d7b6f72f57f5ed8bc324bdbb24351f)

Conflicts:

src/mon/FSCommands.cc
- The variable named "fsp" in the main branch is named "fs" in the Reef
  branch due to a missing backport.

qa/tasks/cephfs/test_admin.py
qa/tasks/cephfs/test_volumes.py
src/mon/FSCommands.cc

index fcca8b335bbd325a15058db69425c3f1ca7bc622..22321911f410a587c2a11a717767af8b4bfff8f1 100644 (file)
@@ -775,7 +775,11 @@ class TestRenameCommand(TestAdminCommands):
         new_fs_name = 'new_cephfs'
         client_id = 'test_new_cephfs'
 
+        self.run_ceph_cmd(f'fs fail {self.fs.name}')
+        sleep(5)
         self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
+        self.run_ceph_cmd(f'fs set {new_fs_name} joinable true')
+        sleep(5)
 
         # authorize a cephx ID access to the renamed file system.
         # use the ID to write to the file system.
@@ -808,8 +812,12 @@ class TestRenameCommand(TestAdminCommands):
         orig_fs_name = self.fs.name
         new_fs_name = 'new_cephfs'
 
+        self.run_ceph_cmd(f'fs fail {self.fs.name}')
+        sleep(5)
         self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
         self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
+        self.run_ceph_cmd(f'fs set {new_fs_name} joinable true')
+        sleep(5)
 
         # original file system name does not appear in `fs ls` command
         self.assertFalse(self.fs.exists())
@@ -828,7 +836,11 @@ class TestRenameCommand(TestAdminCommands):
         new_fs_name = 'new_cephfs'
         data_pool = self.fs.get_data_pool_name()
         metadata_pool = self.fs.get_metadata_pool_name()
+        self.run_ceph_cmd(f'fs fail {self.fs.name}')
+        sleep(5)
         self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
+        self.run_ceph_cmd(f'fs set {new_fs_name} joinable true')
+        sleep(5)
 
         try:
             self.run_ceph_cmd(f"fs new {orig_fs_name} {metadata_pool} {data_pool}")
@@ -865,6 +877,8 @@ class TestRenameCommand(TestAdminCommands):
         """
         That renaming a file system without '--yes-i-really-mean-it' flag fails.
         """
+        self.run_ceph_cmd(f'fs fail {self.fs.name}')
+        sleep(5)
         try:
             self.run_ceph_cmd(f"fs rename {self.fs.name} new_fs")
         except CommandFailedError as ce:
@@ -874,11 +888,14 @@ class TestRenameCommand(TestAdminCommands):
         else:
             self.fail("expected renaming of file system without the "
                       "'--yes-i-really-mean-it' flag to fail ")
+        self.run_ceph_cmd(f'fs set {self.fs.name} joinable true')
 
     def test_fs_rename_fails_for_non_existent_fs(self):
         """
         That renaming a non-existent file system fails.
         """
+        self.run_ceph_cmd(f'fs fail {self.fs.name}')
+        sleep(5)
         try:
             self.run_ceph_cmd("fs rename non_existent_fs new_fs --yes-i-really-mean-it")
         except CommandFailedError as ce:
@@ -892,6 +909,8 @@ class TestRenameCommand(TestAdminCommands):
         """
         self.fs2 = self.mds_cluster.newfs(name='cephfs2', create=True)
 
+        self.run_ceph_cmd(f'fs fail {self.fs.name}')
+        sleep(5)
         try:
             self.run_ceph_cmd(f"fs rename {self.fs.name} {self.fs2.name} --yes-i-really-mean-it")
         except CommandFailedError as ce:
@@ -899,6 +918,7 @@ class TestRenameCommand(TestAdminCommands):
                              "invalid error code on renaming to a fs name that is already in use")
         else:
             self.fail("expected renaming to a new file system name that is already in use to fail.")
+        self.run_ceph_cmd(f'fs set {self.fs.name} joinable true')
 
     def test_fs_rename_fails_with_mirroring_enabled(self):
         """
@@ -908,6 +928,8 @@ class TestRenameCommand(TestAdminCommands):
         new_fs_name = 'new_cephfs'
 
         self.run_ceph_cmd(f'fs mirror enable {orig_fs_name}')
+        self.run_ceph_cmd(f'fs fail {self.fs.name}')
+        sleep(5)
         try:
             self.run_ceph_cmd(f'fs rename {orig_fs_name} {new_fs_name} --yes-i-really-mean-it')
         except CommandFailedError as ce:
@@ -915,6 +937,36 @@ class TestRenameCommand(TestAdminCommands):
         else:
             self.fail("expected renaming of a mirrored file system to fail")
         self.run_ceph_cmd(f'fs mirror disable {orig_fs_name}')
+        self.run_ceph_cmd(f'fs set {self.fs.name} joinable true')
+
+    def test_rename_when_fs_is_online(self):
+        '''
+        Test that the command "ceph fs rename" fails when the FS isn't
+        failed/down.
+        '''
+        client_id = 'test_new_cephfs'
+        new_fs_name = 'new_cephfs'
+        self.negtest_ceph_cmd(
+            args=(f'fs rename {self.fs.name} {new_fs_name} '
+                   '--yes-i-really-mean-it'),
+            errmsgs=(f"CephFS '{self.fs.name}' is not offline. Before "
+                      "renaming a CephFS, it must be marked as down. See "
+                      "`ceph fs fail`."),
+            retval=errno.EPERM)
+
+        self.fs.getinfo()
+        keyring = self.fs.authorize(client_id, ('/', 'rw'))
+        keyring_path = self.mount_a.client_remote.mktemp(data=keyring)
+        self.mount_a.remount(client_id=client_id,
+                             client_keyring_path=keyring_path,
+                             cephfs_mntpt='/',
+                             cephfs_name=self.fs.name)
+
+        self.check_pool_application_metadata_key_value(
+            self.fs.get_data_pool_name(), 'cephfs', 'data', self.fs.name)
+        self.check_pool_application_metadata_key_value(
+            self.fs.get_metadata_pool_name(), 'cephfs', 'metadata',
+            self.fs.name)
 
 
 class TestDump(CephFSTestCase):
index 612a4ef41d4bb66736eb75d7169b78fc23d77bd0..d9f7802a1154bfacf94b6ccc2e0480d51995a5e4 100644 (file)
@@ -648,8 +648,12 @@ class TestRenameCmd(TestVolumesHelper):
         oldvolname = self.volname
         newvolname = self._gen_vol_name()
         new_data_pool, new_metadata_pool = f"cephfs.{newvolname}.data", f"cephfs.{newvolname}.meta"
+
+        self.run_ceph_cmd(f'fs fail {oldvolname}')
         self._fs_cmd("volume", "rename", oldvolname, newvolname,
                      "--yes-i-really-mean-it")
+        self.run_ceph_cmd(f'fs set {newvolname} joinable true')
+
         volumels = json.loads(self._fs_cmd('volume', 'ls'))
         volnames = [volume['name'] for volume in volumels]
         # volume name changed
@@ -669,10 +673,14 @@ class TestRenameCmd(TestVolumesHelper):
         oldvolname = self.volname
         newvolname = self._gen_vol_name()
         new_data_pool, new_metadata_pool = f"cephfs.{newvolname}.data", f"cephfs.{newvolname}.meta"
+
+        self.run_ceph_cmd(f'fs fail {oldvolname}')
         self._fs_cmd("volume", "rename", oldvolname, newvolname,
                      "--yes-i-really-mean-it")
         self._fs_cmd("volume", "rename", oldvolname, newvolname,
                      "--yes-i-really-mean-it")
+        self.run_ceph_cmd(f'fs set {newvolname} joinable true')
+
         volumels = json.loads(self._fs_cmd('volume', 'ls'))
         volnames = [volume['name'] for volume in volumels]
         self.assertIn(newvolname, volnames)
@@ -687,6 +695,7 @@ class TestRenameCmd(TestVolumesHelper):
         """
         newvolname = self._gen_vol_name()
 
+        self.run_ceph_cmd(f'fs fail {self.volname}')
         try:
             self._fs_cmd("volume", "rename", self.volname, newvolname)
         except CommandFailedError as ce:
@@ -696,6 +705,7 @@ class TestRenameCmd(TestVolumesHelper):
         else:
             self.fail("expected renaming of FS volume to fail without the "
                       "'--yes-i-really-mean-it' flag")
+        self.run_ceph_cmd(f'fs set {self.volname} joinable true')
 
     def test_volume_rename_for_more_than_one_data_pool(self):
         """
@@ -710,8 +720,12 @@ class TestRenameCmd(TestVolumesHelper):
         self.fs.get_pool_names(refresh=True)
         orig_data_pool_names = list(self.fs.data_pools.values())
         new_metadata_pool = f"cephfs.{newvolname}.meta"
+
+        self.run_ceph_cmd(f'fs fail {oldvolname}')
         self._fs_cmd("volume", "rename", self.volname, newvolname,
                      "--yes-i-really-mean-it")
+        self.run_ceph_cmd(f'fs set {newvolname} joinable true')
+
         volumels = json.loads(self._fs_cmd('volume', 'ls'))
         volnames = [volume['name'] for volume in volumels]
         # volume name changed
@@ -723,6 +737,19 @@ class TestRenameCmd(TestVolumesHelper):
         # data pool names unchanged
         self.assertCountEqual(orig_data_pool_names, list(self.fs.data_pools.values()))
 
+    def test_rename_when_fs_is_online(self):
+        for m in self.mounts:
+            m.umount_wait()
+        newvolname = self._gen_vol_name()
+
+        self.negtest_ceph_cmd(
+            args=(f'fs volume rename {self.volname} {newvolname} '
+                   '--yes-i-really-mean-it'),
+            errmsgs=(f"CephFS '{self.volname}' is not offline. Before "
+                      "renaming a CephFS, it must be marked as down. See "
+                      "`ceph fs fail`."),
+            retval=errno.EPERM)
+
     def test_volume_info(self):
         """
         Tests the 'fs volume info' command
index c564226b04f5904f2bf03c8de327986219f00586..51cf20cacd152f2cfd801ea0c62f1c63054a4ac3 100644 (file)
@@ -1273,6 +1273,14 @@ class RenameFilesystemHandler : public FileSystemCommandHandler
       mon->osdmon()->wait_for_writeable(op, new PaxosService::C_RetryMessage(mon->mdsmon(), op));
       return -EAGAIN;
     }
+
+    // Check that no MDS daemon is up for this CephFS.
+    if (fs->get_mds_map().get_num_up_mds() > 0) {
+      ss << "CephFS '" << fs_name << "' is not offline. Before renaming "
+        << "a CephFS, it must be marked as down. See `ceph fs fail`.";
+      return -EPERM;
+    }
+
     for (const auto p : fs->mds_map.get_data_pools()) {
       mon->osdmon()->do_application_enable(p, APP_NAME_CEPHFS, "data",
                                           new_fs_name, true);