git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
mgr/volumes: allow --force to delete snapshots even when UUID dir is missing
authorRishabh Dave <ridave@redhat.com>
Tue, 9 Sep 2025 11:04:18 +0000 (16:34 +0530)
committerRishabh Dave <ridave@redhat.com>
Mon, 13 Oct 2025 07:12:25 +0000 (12:42 +0530)
Also, add tests for the same.

Fixes: https://tracker.ceph.com/issues/72956
Signed-off-by: Rishabh Dave <ridave@redhat.com>
qa/tasks/cephfs/test_volumes.py
src/pybind/mgr/volumes/fs/operations/template.py
src/pybind/mgr/volumes/fs/operations/versions/subvolume_v2.py
src/pybind/mgr/volumes/fs/volume.py

index 16319a23b73a95c0ba7e4f67480330be9cb411ea..d9b7b0a398d43261fae1852b8d3c2c40d22c6387 100644 (file)
@@ -9652,3 +9652,23 @@ class TestCorruptedSubvolumes(TestVolumesHelper):
                               retval=errno.ENOENT,
                               errmsgs='mount path missing for subvolume')
         self.run_ceph_cmd(f'fs subvolume rm {self.volname} {sv1} --force')
+
+    def test_rm_subvol_that_has_snap_and_missing__UUID_dir(self):
+        sv1 = 'sv1'
+        ss1 = 'ss1'
+
+        self.run_ceph_cmd(f'fs subvolume create {self.volname} {sv1}')
+        self.run_ceph_cmd(f'fs subvolume snapshot create {self.volname} {sv1} {ss1}')
+
+        sv_path = self.get_ceph_cmd_stdout('fs subvolume getpath '
+                                           f'{self.volname} {sv1}').strip()[1:]
+        self.mount_a.run_shell(f'sudo rmdir {sv_path}', omit_sudo=False)
+
+        self.negtest_ceph_cmd(f'fs subvolume snapshot rm {self.volname} {sv1} {ss1}',
+                              retval=errno.ENOENT,
+                              errmsgs='mount path missing for subvolume')
+        self.run_ceph_cmd(f'fs subvolume snapshot rm {self.volname} {sv1} {ss1} '
+                           '--force')
+
+        # cleanup
+        self.run_ceph_cmd(f'fs subvolume rm {self.volname} {sv1} --force')
index 7aa953045a1ad97361deee60374b93be74cc3473..fc1a70d206aab4ce2e49016e9f40a99362433e96 100644 (file)
@@ -48,6 +48,7 @@ class SubvolumeOpType(Enum):
     RESIZE                = 'resize'
     SNAP_CREATE           = 'snap-create'
     SNAP_REMOVE           = 'snap-rm'
+    SNAP_REMOVE_FORCE     = 'snap-rm-force'
     SNAP_LIST             = 'snap-ls'
     SNAP_GETPATH          = 'snap-getpath'
     SNAP_INFO             = 'snap-info'
index 7a968ce93096c75b4a285b7139c8bb50d78c68ce..de17411f1e3bba1fdb6413d515897411f094ab99 100644 (file)
@@ -281,6 +281,7 @@ class SubvolumeV2(SubvolumeV1):
                 SubvolumeOpType.LIST,
                 SubvolumeOpType.INFO,
                 SubvolumeOpType.SNAP_REMOVE,
+                SubvolumeOpType.SNAP_REMOVE_FORCE,
                 SubvolumeOpType.SNAP_LIST,
                 SubvolumeOpType.SNAP_GETPATH,
                 SubvolumeOpType.SNAP_INFO,
@@ -336,7 +337,7 @@ class SubvolumeV2(SubvolumeV1):
                 raise VolumeException(-errno.ENOENT, "subvolume '{0}' does not exist".format(self.subvolname))
             raise VolumeException(me.args[0], me.args[1])
         except cephfs.ObjectNotFound:
-            if op_type == SubvolumeOpType.REMOVE_FORCE:
+            if op_type in (SubvolumeOpType.REMOVE_FORCE, SubvolumeOpType.SNAP_REMOVE_FORCE):
                 log.debug("since --force is passed, ignoring missing subvolume '"
                           f"path '{subvol_path}' for subvolume "
                           f"{self.subvolname}'")
index 6b796f878c2351efffe7850d9179a3d7a8dc7e39..98b5aeeb6e94b82502d24707f249870d31bb4c0a 100644 (file)
@@ -761,7 +761,8 @@ class VolumeClient(CephfsClient["Module"]):
         try:
             with open_volume(self, volname) as fs_handle:
                 with open_group(fs_handle, self.volspec, groupname) as group:
-                    with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_REMOVE) as subvolume:
+                    op = SubvolumeOpType.SNAP_REMOVE_FORCE if force else SubvolumeOpType.SNAP_REMOVE
+                    with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, op) as subvolume:
                         subvolume.remove_snapshot(snapname, force)
         except VolumeException as ve:
             # ESTALE serves as an error to state that subvolume is currently stale due to internal removal and,