From c52a7383d391864b7dce3ba09197a077a998dff7 Mon Sep 17 00:00:00 2001 From: Shyamsundar Ranganathan Date: Tue, 23 Jun 2020 16:53:46 -0400 Subject: [PATCH] mgr/volumes: Deprecate protect/unprotect CLI calls for subvolume snapshots Subvolume snapshots required to be protected, prior to cloning the same. Also, protected snapshots were not allowed to be unprotected or removed, if there were in-flight clones, whose source was the snapshot being removed. The protection of snapshots explicitly is not required, as these can be prevented from being removed based only on the in-flight clones checks. This commit hence deprecates the additional protect/unprotect requirements prior to cloning a snapshot. In addition to deprecating the above, support to query a subvolume for supported features, via the info command, is added. The feature list is set to "clone" and "auto-protect", where the latter is useful to decide if protect/unprotect commands are required or not. Fixes: https://tracker.ceph.com/issues/45371 Signed-off-by: Shyamsundar Ranganathan --- doc/cephfs/fs-volumes.rst | 35 ++- qa/tasks/cephfs/test_volumes.py | 284 ++++++------------ .../mgr/volumes/fs/operations/template.py | 28 -- .../fs/operations/versions/subvolume_base.py | 12 +- .../fs/operations/versions/subvolume_v1.py | 58 +--- src/pybind/mgr/volumes/fs/volume.py | 12 +- src/pybind/mgr/volumes/module.py | 4 +- 7 files changed, 133 insertions(+), 300 deletions(-) diff --git a/doc/cephfs/fs-volumes.rst b/doc/cephfs/fs-volumes.rst index f77bd6c885f..f9994ce6edf 100644 --- a/doc/cephfs/fs-volumes.rst +++ b/doc/cephfs/fs-volumes.rst @@ -174,6 +174,13 @@ The output format is json and contains fields as follows. * path: absolute path of a subvolume * type: subvolume type indicating whether it's clone or subvolume * pool_namespace: RADOS namespace of the subvolume +* features: features supported by the subvolume + +The subvolume "features" are based on the internal version of the subvolume and is a list containing +a subset of the following features, + +* "snapshot-clone": supports cloning using a subvolumes snapshot as the source +* "snapshot-autoprotect": supports automatically protecting snapshots, that are active clone sources, from deletion List subvolumes using:: @@ -204,7 +211,6 @@ The output format is json and contains fields as follows. * created_at: time of creation of snapshot in the format "YYYY-MM-DD HH:MM:SS:ffffff" * data_pool: data pool the snapshot belongs to * has_pending_clones: "yes" if snapshot clone is in progress otherwise "no" -* protected: "yes" if snapshot is protected otherwise "no" * size: snapshot size in bytes Cloning Snapshots @@ -214,10 +220,19 @@ Subvolumes can be created by cloning subvolume snapshots. Cloning is an asynchro data from a snapshot to a subvolume. Due to this bulk copy nature, cloning is currently inefficient for very huge data sets. -Before starting a clone operation, the snapshot should be protected. Protecting a snapshot ensures that the snapshot -cannot be deleted when a clone operation is in progress. Snapshots can be protected using:: +.. note:: Removing a snapshot (source subvolume) would fail if there are pending or in progress clone operations. + +Protecting snapshots prior to cloning was a pre-requisite in the Nautilus release, and the commands to protect/unprotect +snapshots were introduced for this purpose. This pre-requisite, and hence the commands to protect/unprotect, is being +deprecated in mainline CephFS, and may be removed from a future release. 
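Whether a given subvolume still auto-protects snapshots can be read from the "features" list returned by the subvolume info command. The following is a minimal sketch of such a check, assuming the `ceph` CLI is reachable and returns the JSON documented above (the volume, subvolume and helper names are illustrative)::

    import json
    import subprocess

    def needs_explicit_protect(volume, subvolume, group=None):
        # Ask the volumes plugin for subvolume metadata; 'features' is part
        # of the JSON output documented above.
        cmd = ["ceph", "fs", "subvolume", "info", volume, subvolume]
        if group:
            cmd += ["--group_name", group]
        info = json.loads(subprocess.check_output(cmd))
        # With "snapshot-autoprotect", snapshots that are active clone sources
        # are guarded automatically, so the deprecated protect/unprotect calls
        # serve no purpose.
        return "snapshot-autoprotect" not in info.get("features", [])

    # Illustrative usage (volume/subvolume/snapshot names are made up):
    # if needs_explicit_protect("cephfs", "subvol1"):
    #     subprocess.check_call(["ceph", "fs", "subvolume", "snapshot", "protect",
    #                            "cephfs", "subvol1", "snap1"])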
+The commands being deprecated are: $ ceph fs subvolume snapshot protect [--group_name ] + $ ceph fs subvolume snapshot unprotect [--group_name ] + +.. note:: Using the above commands would not result in an error, but they serve no useful function. + +.. note:: Use subvolume info command to fetch subvolume metadata regarding supported "features" to help decide if protect/unprotect of snapshots is required, based on the "snapshot-autoprotect" feature availability. To initiate a clone operation use:: @@ -243,12 +258,11 @@ A clone can be in one of the following states: #. `pending` : Clone operation has not started #. `in-progress` : Clone operation is in progress -#. `complete` : Clone operation has sucessfully finished +#. `complete` : Clone operation has successfully finished #. `failed` : Clone operation has failed Sample output from an `in-progress` clone operation:: - $ ceph fs subvolume snapshot protect cephfs subvol1 snap1 $ ceph fs subvolume snapshot clone cephfs subvol1 snap1 clone1 $ ceph fs clone status cephfs clone1 { @@ -266,7 +280,7 @@ Sample output from an `in-progress` clone operation:: .. note:: Cloned subvolumes are accessible only after the clone operation has successfully completed. -For a successsful clone operation, `clone status` would look like so:: +For a successful clone operation, `clone status` would look like so:: $ ceph fs clone status cephfs clone1 { @@ -282,14 +296,6 @@ To delete a partial clone use:: $ ceph fs subvolume rm [--group_name ] --force -When no clone operations are in progress or scheduled, the snaphot can be unprotected. To unprotect a snapshot use:: - - $ ceph fs subvolume snapshot unprotect [--group_name ] - -Note that unprotecting a snapshot would fail if there are pending or in progress clone operations. Also note that, -only unprotected snapshots can be removed. This guarantees that a snapshot cannot be deleted when clones are pending -(or in progress). - .. note:: Cloning only synchronizes directories, regular files and symbolic links. Also, inode timestamps (access and modification times) are synchronized upto seconds granularity. @@ -299,7 +305,6 @@ An `in-progress` or a `pending` clone operation can be canceled. 
To cancel a clo On successful cancelation, the cloned subvolume is moved to `canceled` state:: - $ ceph fs subvolume snapshot protect cephfs subvol1 snap1 $ ceph fs subvolume snapshot clone cephfs subvol1 snap1 clone1 $ ceph fs clone cancel cephfs clone1 $ ceph fs clone status cephfs clone1 diff --git a/qa/tasks/cephfs/test_volumes.py b/qa/tasks/cephfs/test_volumes.py index 0f5dad54879..680ce32a575 100644 --- a/qa/tasks/cephfs/test_volumes.py +++ b/qa/tasks/cephfs/test_volumes.py @@ -881,7 +881,7 @@ class TestVolumes(CephFSTestCase): subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime", "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace", - "type", "uid"] + "type", "uid", "features"] # create subvolume subvolume = self._generate_random_subvolume_name() @@ -889,37 +889,34 @@ class TestVolumes(CephFSTestCase): # get subvolume metadata subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume)) - if len(subvol_info) == 0: - raise RuntimeError("Expected the 'fs subvolume info' command to list metadata of subvolume") + self.assertNotEqual(len(subvol_info), 0, "expected the 'fs subvolume info' command to list metadata of subvolume") for md in subvol_md: - if md not in subvol_info.keys(): - raise RuntimeError("%s not present in the metadata of subvolume" % md) + self.assertIn(md, subvol_info.keys(), "'{0}' key not present in metadata of subvolume".format(md)) - if subvol_info["bytes_pcent"] != "undefined": - raise RuntimeError("bytes_pcent should be set to undefined if quota is not set") + self.assertEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should be set to undefined if quota is not set") + self.assertEqual(subvol_info["bytes_quota"], "infinite", "bytes_quota should be set to infinite if quota is not set") + self.assertEqual(subvol_info["pool_namespace"], "", "expected pool namespace to be empty") - if subvol_info["bytes_quota"] != "infinite": - raise RuntimeError("bytes_quota should be set to infinite if quota is not set") - self.assertEqual(subvol_info["pool_namespace"], "") + self.assertEqual(len(subvol_info["features"]), 2, + msg="expected 2 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"])) + for feature in ['snapshot-clone', 'snapshot-autoprotect']: + self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature)) nsize = self.DEFAULT_FILE_SIZE*1024*1024 - try: - self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize)) - except CommandFailedError: - raise RuntimeError("expected the 'fs subvolume resize' command to succeed") + self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize)) # get subvolume metadata after quota set subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume)) - if len(subvol_info) == 0: - raise RuntimeError("Expected the 'fs subvolume info' command to list metadata of subvolume") - if subvol_info["bytes_pcent"] == "undefined": - raise RuntimeError("bytes_pcent should not be set to undefined if quota is set") + self.assertNotEqual(len(subvol_info), 0, "expected the 'fs subvolume info' command to list metadata of subvolume") - if subvol_info["bytes_quota"] == "infinite": - raise RuntimeError("bytes_quota should not be set to infinite if quota is set") + self.assertNotEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should not be set to undefined if quota is not set") + self.assertNotEqual(subvol_info["bytes_quota"], "infinite", 
"bytes_quota should not be set to infinite if quota is not set") + self.assertEqual(subvol_info["type"], "subvolume", "type should be set to subvolume") - if subvol_info["type"] != "subvolume": - raise RuntimeError("type should be set to subvolume") + self.assertEqual(len(subvol_info["features"]), 2, + msg="expected 2 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"])) + for feature in ['snapshot-clone', 'snapshot-autoprotect']: + self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature)) # remove subvolumes self._fs_cmd("subvolume", "rm", self.volname, subvolume) @@ -947,18 +944,12 @@ class TestVolumes(CephFSTestCase): # snapshot subvolume self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) - # now, protect snapshot - self._fs_cmd("subvolume", "snapshot", "protect", self.volname, subvolume, snapshot) - # schedule a clone self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) # check clone status self._wait_for_clone_to_complete(clone) - # now, unprotect snapshot - self._fs_cmd("subvolume", "snapshot", "unprotect", self.volname, subvolume, snapshot) - # remove snapshot self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) @@ -1287,7 +1278,7 @@ class TestVolumes(CephFSTestCase): tests the 'fs subvolume snapshot info' command """ - snap_metadata = ["created_at", "data_pool", "has_pending_clones", "protected", "size"] + snap_metadata = ["created_at", "data_pool", "has_pending_clones", "size"] subvolume = self._generate_random_subvolume_name() snapshot = self._generate_random_snapshot_name() @@ -1301,20 +1292,13 @@ class TestVolumes(CephFSTestCase): # snapshot subvolume self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) - # now, protect snapshot - self._fs_cmd("subvolume", "snapshot", "protect", self.volname, subvolume, snapshot) - snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot)) self.assertNotEqual(len(snap_info), 0) for md in snap_metadata: if md not in snap_info: raise RuntimeError("%s not present in the metadata of subvolume snapshot" % md) - self.assertEqual(snap_info["protected"], "yes") self.assertEqual(snap_info["has_pending_clones"], "no") - # now, unprotect snapshot - self._fs_cmd("subvolume", "snapshot", "unprotect", self.volname, subvolume, snapshot) - # remove snapshot self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) @@ -1660,21 +1644,20 @@ class TestVolumes(CephFSTestCase): # verify trash dir is clean self._wait_for_trash_empty() - def test_subvolume_snapshot_protect_unprotect(self): + def test_subvolume_snapshot_protect_unprotect_sanity(self): + """ + Snapshot protect/unprotect commands are deprecated. This test exists to ensure that + invoking the command does not cause errors, till they are removed from a subsequent release. 
+ """ subvolume = self._generate_random_subvolume_name() snapshot = self._generate_random_snapshot_name() + clone = self._generate_random_clone_name() # create subvolume self._fs_cmd("subvolume", "create", self.volname, subvolume) - # protect a nonexistent snapshot - try: - self._fs_cmd("subvolume", "snapshot", "protect", self.volname, subvolume, snapshot) - except CommandFailedError as ce: - if ce.exitstatus != errno.ENOENT: - raise RuntimeError("invalid error code when protecting a non-existing snapshot") - else: - raise RuntimeError("expected protection of non existent snapshot to fail") + # do some IO + self._do_subvolume_io(subvolume, number_of_files=64) # snapshot subvolume self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) @@ -1682,23 +1665,11 @@ class TestVolumes(CephFSTestCase): # now, protect snapshot self._fs_cmd("subvolume", "snapshot", "protect", self.volname, subvolume, snapshot) - # protecting snapshot again, should return EEXIST - try: - self._fs_cmd("subvolume", "snapshot", "protect", self.volname, subvolume, snapshot) - except CommandFailedError as ce: - if ce.exitstatus != errno.EEXIST: - raise RuntimeError("invalid error code when protecting a protected snapshot") - else: - raise RuntimeError("expected protection of already protected snapshot to fail") + # schedule a clone + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) - # remove snapshot should fail since the snapshot is protected - try: - self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) - except CommandFailedError as ce: - if ce.exitstatus != errno.EINVAL: - raise RuntimeError("invalid error code when removing a protected snapshot") - else: - raise RuntimeError("expected removal of protected snapshot to fail") + # check clone status + self._wait_for_clone_to_complete(clone) # now, unprotect snapshot self._fs_cmd("subvolume", "snapshot", "unprotect", self.volname, subvolume, snapshot) @@ -1706,37 +1677,12 @@ class TestVolumes(CephFSTestCase): # remove snapshot self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) - # remove subvolume - self._fs_cmd("subvolume", "rm", self.volname, subvolume) - - # verify trash dir is clean - self._wait_for_trash_empty() - - def test_subvolume_snapshot_clone_unprotected_snapshot(self): - subvolume = self._generate_random_subvolume_name() - snapshot = self._generate_random_snapshot_name() - clone = self._generate_random_clone_name() - - # create subvolume - self._fs_cmd("subvolume", "create", self.volname, subvolume) - - # snapshot subvolume - self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) - - # clone a non protected snapshot - try: - self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) - except CommandFailedError as ce: - if ce.exitstatus != errno.EINVAL: - raise RuntimeError("invalid error code when cloning a non protected snapshot") - else: - raise RuntimeError("expected cloning of unprotected snapshot to fail") - - # remove snapshot - self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + # verify clone + self._verify_clone(subvolume, clone) # remove subvolumes self._fs_cmd("subvolume", "rm", self.volname, subvolume) + self._fs_cmd("subvolume", "rm", self.volname, clone) # verify trash dir is clean self._wait_for_trash_empty() @@ -1755,27 +1701,12 @@ class TestVolumes(CephFSTestCase): # snapshot subvolume self._fs_cmd("subvolume", "snapshot", "create", 
self.volname, subvolume, snapshot) - # now, protect snapshot - self._fs_cmd("subvolume", "snapshot", "protect", self.volname, subvolume, snapshot) - # schedule a clone self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) - # unprotecting when a clone is in progress should fail - try: - self._fs_cmd("subvolume", "snapshot", "unprotect", self.volname, subvolume, snapshot) - except CommandFailedError as ce: - if ce.exitstatus != errno.EEXIST: - raise RuntimeError("invalid error code when unprotecting snapshot during clone") - else: - raise RuntimeError("expected unprotecting a snapshot to fail since it has pending clones") - # check clone status self._wait_for_clone_to_complete(clone) - # now, unprotect snapshot - self._fs_cmd("subvolume", "snapshot", "unprotect", self.volname, subvolume, snapshot) - # remove snapshot self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) @@ -1807,18 +1738,12 @@ class TestVolumes(CephFSTestCase): # snapshot subvolume self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) - # now, protect snapshot - self._fs_cmd("subvolume", "snapshot", "protect", self.volname, subvolume, snapshot) - # schedule a clone self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, "--pool_layout", new_pool) # check clone status self._wait_for_clone_to_complete(clone) - # now, unprotect snapshot - self._fs_cmd("subvolume", "snapshot", "unprotect", self.volname, subvolume, snapshot) - # remove snapshot self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) @@ -1854,18 +1779,12 @@ class TestVolumes(CephFSTestCase): # snapshot subvolume self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) - # now, protect snapshot - self._fs_cmd("subvolume", "snapshot", "protect", self.volname, subvolume, snapshot) - # schedule a clone self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) # check clone status self._wait_for_clone_to_complete(clone) - # now, unprotect snapshot - self._fs_cmd("subvolume", "snapshot", "unprotect", self.volname, subvolume, snapshot) - # remove snapshot self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) @@ -1893,18 +1812,12 @@ class TestVolumes(CephFSTestCase): # snapshot subvolume self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) - # now, protect snapshot - self._fs_cmd("subvolume", "snapshot", "protect", self.volname, subvolume, snapshot) - # schedule a clone self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1) # check clone status self._wait_for_clone_to_complete(clone1) - # now, unprotect snapshot - self._fs_cmd("subvolume", "snapshot", "unprotect", self.volname, subvolume, snapshot) - # remove snapshot self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) @@ -1918,18 +1831,12 @@ class TestVolumes(CephFSTestCase): # snapshot clone -- use same snap name self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone1, snapshot) - # now, protect snapshot - self._fs_cmd("subvolume", "snapshot", "protect", self.volname, clone1, snapshot) - # schedule a clone self._fs_cmd("subvolume", "snapshot", "clone", self.volname, clone1, snapshot, clone2) # check clone status self._wait_for_clone_to_complete(clone2) - # now, unprotect snapshot - self._fs_cmd("subvolume", "snapshot", "unprotect", self.volname, clone1, snapshot) - # remove snapshot 
self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone1, snapshot) @@ -1959,9 +1866,6 @@ class TestVolumes(CephFSTestCase): # snapshot subvolume self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) - # now, protect snapshot - self._fs_cmd("subvolume", "snapshot", "protect", self.volname, subvolume, snapshot) - # create group self._fs_cmd("subvolumegroup", "create", self.volname, group) @@ -1971,9 +1875,6 @@ class TestVolumes(CephFSTestCase): # check clone status self._wait_for_clone_to_complete(clone, clone_group=group) - # now, unprotect snapshot - self._fs_cmd("subvolume", "snapshot", "unprotect", self.volname, subvolume, snapshot) - # remove snapshot self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) @@ -2008,18 +1909,12 @@ class TestVolumes(CephFSTestCase): # snapshot subvolume self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group) - # now, protect snapshot - self._fs_cmd("subvolume", "snapshot", "protect", self.volname, subvolume, snapshot, group) - # schedule a clone self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, '--group_name', group) # check clone status self._wait_for_clone_to_complete(clone) - # now, unprotect snapshot - self._fs_cmd("subvolume", "snapshot", "unprotect", self.volname, subvolume, snapshot, group) - # remove snapshot self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group) @@ -2055,9 +1950,6 @@ class TestVolumes(CephFSTestCase): # snapshot subvolume self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, s_group) - # now, protect snapshot - self._fs_cmd("subvolume", "snapshot", "protect", self.volname, subvolume, snapshot, s_group) - # schedule a clone self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, '--group_name', s_group, '--target_group_name', c_group) @@ -2065,9 +1957,6 @@ class TestVolumes(CephFSTestCase): # check clone status self._wait_for_clone_to_complete(clone, clone_group=c_group) - # now, unprotect snapshot - self._fs_cmd("subvolume", "snapshot", "unprotect", self.volname, subvolume, snapshot, s_group) - # remove snapshot self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, s_group) @@ -2100,23 +1989,25 @@ class TestVolumes(CephFSTestCase): self.mount_a.run_shell(['mkdir', '-p', createpath]) # do some IO - self._do_subvolume_io(subvolume, number_of_files=32) + self._do_subvolume_io(subvolume, number_of_files=64) # snapshot subvolume self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) - # now, protect snapshot - self._fs_cmd("subvolume", "snapshot", "protect", self.volname, subvolume, snapshot) - # schedule a clone self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) + # snapshot should not be deletable now + try: + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.EAGAIN, msg="invalid error code when removing source snapshot of a clone") + else: + self.fail("expected removing source snapshot of a clone to fail") + # check clone status self._wait_for_clone_to_complete(clone) - # now, unprotect snapshot - self._fs_cmd("subvolume", "snapshot", "unprotect", self.volname, subvolume, snapshot) - # remove snapshot self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) @@ -2144,9 +2035,6 @@ class 
TestVolumes(CephFSTestCase): # snapshot subvolume self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) - # now, protect snapshot - self._fs_cmd("subvolume", "snapshot", "protect", self.volname, subvolume, snapshot) - # schedule a clone self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) @@ -2155,7 +2043,7 @@ class TestVolumes(CephFSTestCase): self._get_subvolume_path(self.volname, clone) except CommandFailedError as ce: if ce.exitstatus != errno.EAGAIN: - raise RuntimeError("invalid error code when cloning a non protected snapshot") + raise RuntimeError("invalid error code when fetching path of an pending clone") else: raise RuntimeError("expected fetching path of an pending clone to fail") @@ -2166,8 +2054,50 @@ class TestVolumes(CephFSTestCase): subvolpath = self._get_subvolume_path(self.volname, clone) self.assertNotEqual(subvolpath, None) - # now, unprotect snapshot - self._fs_cmd("subvolume", "snapshot", "unprotect", self.volname, subvolume, snapshot) + # remove snapshot + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + + # verify clone + self._verify_clone(subvolume, clone) + + # remove subvolumes + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + self._fs_cmd("subvolume", "rm", self.volname, clone) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_clone_in_progress_snapshot_rm(self): + subvolume = self._generate_random_subvolume_name() + snapshot = self._generate_random_snapshot_name() + clone = self._generate_random_clone_name() + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # do some IO + self._do_subvolume_io(subvolume, number_of_files=64) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + # schedule a clone + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) + + # snapshot should not be deletable now + try: + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.EAGAIN, msg="invalid error code when removing source snapshot of a clone") + else: + self.fail("expected removing source snapshot of a clone to fail") + + # check clone status + self._wait_for_clone_to_complete(clone) + + # clone should be accessible now + subvolpath = self._get_subvolume_path(self.volname, clone) + self.assertNotEqual(subvolpath, None) # remove snapshot self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) @@ -2196,9 +2126,6 @@ class TestVolumes(CephFSTestCase): # snapshot subvolume self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) - # now, protect snapshot - self._fs_cmd("subvolume", "snapshot", "protect", self.volname, subvolume, snapshot) - # schedule a clone self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) @@ -2217,9 +2144,6 @@ class TestVolumes(CephFSTestCase): subvolpath = self._get_subvolume_path(self.volname, clone) self.assertNotEqual(subvolpath, None) - # now, unprotect snapshot - self._fs_cmd("subvolume", "snapshot", "unprotect", self.volname, subvolume, snapshot) - # remove snapshot self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) @@ -2268,9 +2192,6 @@ class TestVolumes(CephFSTestCase): # snapshot subvolume self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume1, 
snapshot) - # now, protect snapshot - self._fs_cmd("subvolume", "snapshot", "protect", self.volname, subvolume1, snapshot) - # schedule a clone with target as subvolume2 try: self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, subvolume2) @@ -2294,9 +2215,6 @@ class TestVolumes(CephFSTestCase): # check clone status self._wait_for_clone_to_complete(clone) - # now, unprotect snapshot - self._fs_cmd("subvolume", "snapshot", "unprotect", self.volname, subvolume1, snapshot) - # remove snapshot self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume1, snapshot) @@ -2329,9 +2247,6 @@ class TestVolumes(CephFSTestCase): # snapshot subvolume self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) - # now, protect snapshot - self._fs_cmd("subvolume", "snapshot", "protect", self.volname, subvolume, snapshot) - # add data pool new_pool = "new_pool" self.fs.add_data_pool(new_pool) @@ -2357,9 +2272,6 @@ class TestVolumes(CephFSTestCase): # check clone status self._wait_for_clone_to_fail(clone2) - # now, unprotect snapshot - self._fs_cmd("subvolume", "snapshot", "unprotect", self.volname, subvolume, snapshot) - # remove snapshot self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) @@ -2394,18 +2306,12 @@ class TestVolumes(CephFSTestCase): # snapshot subvolume self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) - # now, protect snapshot - self._fs_cmd("subvolume", "snapshot", "protect", self.volname, subvolume, snapshot) - # schedule a clone self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) # check clone status self._wait_for_clone_to_complete(clone) - # now, unprotect snapshot - self._fs_cmd("subvolume", "snapshot", "unprotect", self.volname, subvolume, snapshot) - # remove snapshot self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) @@ -2433,9 +2339,6 @@ class TestVolumes(CephFSTestCase): # snapshot subvolume self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) - # now, protect snapshot - self._fs_cmd("subvolume", "snapshot", "protect", self.volname, subvolume, snapshot) - # schedule a clone self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) @@ -2445,9 +2348,6 @@ class TestVolumes(CephFSTestCase): # verify canceled state self._check_clone_canceled(clone) - # now, unprotect snapshot - self._fs_cmd("subvolume", "snapshot", "unprotect", self.volname, subvolume, snapshot) - # remove snapshot self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) @@ -2487,9 +2387,6 @@ class TestVolumes(CephFSTestCase): # snapshot subvolume self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) - # now, protect snapshot - self._fs_cmd("subvolume", "snapshot", "protect", self.volname, subvolume, snapshot) - # schedule clones for clone in clones: self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) @@ -2515,9 +2412,6 @@ class TestVolumes(CephFSTestCase): if ce.exitstatus != errno.EINVAL: raise RuntimeError("invalid error code when cancelling on-going clone") - # now, unprotect snapshot - self._fs_cmd("subvolume", "snapshot", "unprotect", self.volname, subvolume, snapshot) - # remove snapshot self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) diff --git a/src/pybind/mgr/volumes/fs/operations/template.py b/src/pybind/mgr/volumes/fs/operations/template.py index 
2b95bb80696..40d9efb5931 100644 --- a/src/pybind/mgr/volumes/fs/operations/template.py +++ b/src/pybind/mgr/volumes/fs/operations/template.py @@ -141,34 +141,6 @@ class SubvolumeTemplate(object): """ raise VolumeException(-errno.ENOTSUP, "operation not supported.") - def is_snapshot_protected(self, snapname): - """ - check if a snapshot is protected. - - :param: snapname: snapshot to protect - :return: True if the snapshot is protected, False otherwise. - """ - raise VolumeException(-errno.ENOTSUP, "operation not supported.") - - def protect_snapshot(self, snapname): - """ - protect a subvolume snapshot. only a protected snapshot can be cloned. - - :param: snapname: snapshot to protect - :return: None - """ - raise VolumeException(-errno.ENOTSUP, "operation not supported.") - - def unprotect_snapshot(self, snapname): - """ - unprotect a subvolume snapshot. fail to unprotect if there are pending - clone operations on the snapshot. - - :param: snapname: snapshot to unprotect - :return: None - """ - raise VolumeException(-errno.ENOTSUP, "operation not supported.") - def attach_snapshot(self, snapname, tgt_subvolume): """ attach a snapshot to a target cloned subvolume. the target subvolume diff --git a/src/pybind/mgr/volumes/fs/operations/versions/subvolume_base.py b/src/pybind/mgr/volumes/fs/operations/versions/subvolume_base.py index 3801a6c67e0..523f8e3d70e 100644 --- a/src/pybind/mgr/volumes/fs/operations/versions/subvolume_base.py +++ b/src/pybind/mgr/volumes/fs/operations/versions/subvolume_base.py @@ -2,6 +2,7 @@ import os import uuid import errno import logging +from enum import Enum, unique from hashlib import md5 import cephfs @@ -14,6 +15,11 @@ from ...exception import MetadataMgrException, VolumeException log = logging.getLogger(__name__) +@unique +class SubvolumeFeatures(Enum): + FEATURE_SNAPSHOT_CLONE = "snapshot-clone" + FEATURE_SNAPSHOT_AUTOPROTECT = "snapshot-autoprotect" + class SubvolumeBase(object): LEGACY_CONF_DIR = "_legacy" @@ -94,6 +100,10 @@ class SubvolumeBase(object): def legacy_mode(self, mode): self.legacy = mode + @property + def features(self): + raise NotImplementedError + def load_config(self): if self.legacy_mode: self.metadata_mgr = MetadataManager(self.fs, self.legacy_config_path, 0o640) @@ -261,4 +271,4 @@ class SubvolumeBase(object): 'mode': int(st["mode"]), 'data_pool': data_pool, 'created_at': str(st["btime"]), 'bytes_quota': "infinite" if nsize == 0 else nsize, 'bytes_used': int(usedbytes), 'bytes_pcent': "undefined" if nsize == 0 else '{0:.2f}'.format((float(usedbytes) / nsize) * 100.0), - 'pool_namespace': pool_namespace} + 'pool_namespace': pool_namespace, 'features': self.features} diff --git a/src/pybind/mgr/volumes/fs/operations/versions/subvolume_v1.py b/src/pybind/mgr/volumes/fs/operations/versions/subvolume_v1.py index e4255d25b33..73363d3dc56 100644 --- a/src/pybind/mgr/volumes/fs/operations/versions/subvolume_v1.py +++ b/src/pybind/mgr/volumes/fs/operations/versions/subvolume_v1.py @@ -8,7 +8,7 @@ from datetime import datetime import cephfs from .metadata_manager import MetadataManager -from .subvolume_base import SubvolumeBase +from .subvolume_base import SubvolumeBase, SubvolumeFeatures from ..op_sm import OpSm from ..template import SubvolumeTemplate from ..snapshot_util import mksnap, rmsnap @@ -34,6 +34,10 @@ class SubvolumeV1(SubvolumeBase, SubvolumeTemplate): except MetadataMgrException as me: raise VolumeException(-errno.EINVAL, "error fetching subvolume metadata") + @property + def features(self): + return 
[SubvolumeFeatures.FEATURE_SNAPSHOT_CLONE.value, SubvolumeFeatures.FEATURE_SNAPSHOT_AUTOPROTECT.value] + def create(self, size, isolate_nspace, pool, mode, uid, gid): subvolume_type = SubvolumeBase.SUBVOLUME_TYPE_NORMAL try: @@ -197,18 +201,6 @@ class SubvolumeV1(SubvolumeBase, SubvolumeTemplate): snappath = self.snapshot_path(snapname) mksnap(self.fs, snappath) - def is_snapshot_protected(self, snapname): - try: - self.metadata_mgr.get_option('protected snaps', snapname) - except MetadataMgrException as me: - if me.errno == -errno.ENOENT: - return False - else: - log.warning("error checking protected snap {0} ({1})".format(snapname, me)) - raise VolumeException(-errno.EINVAL, "snapshot protection check failed") - else: - return True - def has_pending_clones(self, snapname): try: return self.metadata_mgr.section_has_item('clone snaps', snapname) @@ -218,8 +210,8 @@ class SubvolumeV1(SubvolumeBase, SubvolumeTemplate): raise def remove_snapshot(self, snapname): - if self.is_snapshot_protected(snapname): - raise VolumeException(-errno.EINVAL, "snapshot '{0}' is protected".format(snapname)) + if self.has_pending_clones(snapname): + raise VolumeException(-errno.EAGAIN, "snapshot '{0}' has pending clones".format(snapname)) snappath = self.snapshot_path(snapname) rmsnap(self.fs, snappath) @@ -234,7 +226,6 @@ class SubvolumeV1(SubvolumeBase, SubvolumeTemplate): return {'size': int(snap_info['size']), 'created_at': str(datetime.fromtimestamp(float(snap_info['created_at']))), 'data_pool': snap_info['data_pool'].decode('utf-8'), - 'protected': "yes" if self.is_snapshot_protected(snapname) else "no", 'has_pending_clones': "yes" if self.has_pending_clones(snapname) else "no"} except cephfs.Error as e: if e.errno == errno.ENOENT: @@ -252,39 +243,6 @@ class SubvolumeV1(SubvolumeBase, SubvolumeTemplate): return [] raise - def _protect_snapshot(self, snapname): - try: - self.metadata_mgr.add_section("protected snaps") - self.metadata_mgr.update_section("protected snaps", snapname, "1") - self.metadata_mgr.flush() - except MetadataMgrException as me: - log.warning("error updating protected snap list ({0})".format(me)) - raise VolumeException(-errno.EINVAL, "error protecting snapshot") - - def _unprotect_snapshot(self, snapname): - try: - self.metadata_mgr.remove_option("protected snaps", snapname) - self.metadata_mgr.flush() - except MetadataMgrException as me: - log.warning("error updating protected snap list ({0})".format(me)) - raise VolumeException(-errno.EINVAL, "error unprotecting snapshot") - - def protect_snapshot(self, snapname): - if not snapname.encode('utf-8') in self.list_snapshots(): - raise VolumeException(-errno.ENOENT, "snapshot '{0}' does not exist".format(snapname)) - if self.is_snapshot_protected(snapname): - raise VolumeException(-errno.EEXIST, "snapshot '{0}' is already protected".format(snapname)) - self._protect_snapshot(snapname) - - def unprotect_snapshot(self, snapname): - if not snapname.encode('utf-8') in self.list_snapshots(): - raise VolumeException(-errno.ENOENT, "snapshot '{0}' does not exist".format(snapname)) - if not self.is_snapshot_protected(snapname): - raise VolumeException(-errno.EEXIST, "snapshot '{0}' is not protected".format(snapname)) - if self.has_pending_clones(snapname): - raise VolumeException(-errno.EEXIST, "snapshot '{0}' has pending clones".format(snapname)) - self._unprotect_snapshot(snapname) - def _add_snap_clone(self, track_id, snapname): self.metadata_mgr.add_section("clone snaps") self.metadata_mgr.update_section("clone snaps", track_id, snapname) @@ 
-297,8 +255,6 @@ class SubvolumeV1(SubvolumeBase, SubvolumeTemplate): def attach_snapshot(self, snapname, tgt_subvolume): if not snapname.encode('utf-8') in self.list_snapshots(): raise VolumeException(-errno.ENOENT, "snapshot '{0}' does not exist".format(snapname)) - if not self.is_snapshot_protected(snapname): - raise VolumeException(-errno.EINVAL, "snapshot '{0}' is not protected".format(snapname)) try: create_clone_index(self.fs, self.vol_spec) with open_clone_index(self.fs, self.vol_spec) as index: diff --git a/src/pybind/mgr/volumes/fs/volume.py b/src/pybind/mgr/volumes/fs/volume.py index f0a39cd41c3..065b1fe6463 100644 --- a/src/pybind/mgr/volumes/fs/volume.py +++ b/src/pybind/mgr/volumes/fs/volume.py @@ -359,33 +359,31 @@ class VolumeClient(CephfsClient): return ret def protect_subvolume_snapshot(self, **kwargs): - ret = 0, "", "" + ret = 0, "", "Deprecation warning: 'snapshot protect' call is deprecated and will be removed in a future release" volname = kwargs['vol_name'] subvolname = kwargs['sub_name'] - snapname = kwargs['snap_name'] groupname = kwargs['group_name'] try: with open_volume(self, volname) as fs_handle: with open_group(fs_handle, self.volspec, groupname) as group: with open_subvol(fs_handle, self.volspec, group, subvolname) as subvolume: - subvolume.protect_snapshot(snapname) + log.warning("snapshot protect call is deprecated and will be removed in a future release") except VolumeException as ve: ret = self.volume_exception_to_retval(ve) return ret def unprotect_subvolume_snapshot(self, **kwargs): - ret = 0, "", "" + ret = 0, "", "Deprecation warning: 'snapshot unprotect' call is deprecated and will be removed in a future release" volname = kwargs['vol_name'] subvolname = kwargs['sub_name'] - snapname = kwargs['snap_name'] groupname = kwargs['group_name'] try: with open_volume(self, volname) as fs_handle: with open_group(fs_handle, self.volspec, groupname) as group: with open_subvol(fs_handle, self.volspec, group, subvolname) as subvolume: - subvolume.unprotect_snapshot(snapname) + log.warning("snapshot unprotect call is deprecated and will be removed in a future release") except VolumeException as ve: ret = self.volume_exception_to_retval(ve) return ret @@ -412,8 +410,6 @@ class VolumeClient(CephfsClient): if not snapname.encode('utf-8') in subvolume.list_snapshots(): raise VolumeException(-errno.ENOENT, "snapshot '{0}' does not exist".format(snapname)) - if not subvolume.is_snapshot_protected(snapname): - raise VolumeException(-errno.EINVAL, "snapshot '{0}' is not protected".format(snapname)) # TODO: when the target group is same as source, reuse group object. 
            with open_group(fs_handle, self.volspec, target_groupname) as target_group:
diff --git a/src/pybind/mgr/volumes/module.py b/src/pybind/mgr/volumes/module.py
index 17be126ed1e..a0cdbecccf3 100644
--- a/src/pybind/mgr/volumes/module.py
+++ b/src/pybind/mgr/volumes/module.py
@@ -211,7 +211,7 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
                    'name=sub_name,type=CephString '
                    'name=snap_name,type=CephString '
                    'name=group_name,type=CephString,req=false ',
-            'desc': "Protect snapshot of a CephFS subvolume in a volume, "
+            'desc': "(deprecated) Protect snapshot of a CephFS subvolume in a volume, "
                    "and optionally, in a specific subvolume group",
            'perm': 'rw'
        },
@@ -221,7 +221,7 @@
                    'name=sub_name,type=CephString '
                    'name=snap_name,type=CephString '
                    'name=group_name,type=CephString,req=false ',
-            'desc': "Unprotect a snapshot of a CephFS subvolume in a volume, "
+            'desc': "(deprecated) Unprotect a snapshot of a CephFS subvolume in a volume, "
                    "and optionally, in a specific subvolume group",
            'perm': 'rw'
        },
-- 
2.39.5
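Taken together, the workflow this change leaves in place is: clone directly from a snapshot and rely on the pending-clone check to guard the source snapshot until all clones finish. A condensed sketch of that flow against the `ceph` CLI (a rough illustration only: the volume, subvolume, snapshot and clone names are made up, and error handling is minimal):

    import json
    import subprocess
    import time

    def ceph_fs(*args):
        # Thin wrapper over the ceph CLI; raises CalledProcessError on a non-zero exit.
        return subprocess.check_output(("ceph", "fs") + args)

    # Clone straight from the snapshot -- no 'snapshot protect' step is required.
    ceph_fs("subvolume", "snapshot", "clone", "cephfs", "subvol1", "snap1", "clone1")

    # While the clone is pending or in progress the source snapshot cannot be
    # removed; the command fails (EAGAIN), which replaces the old explicit protection.
    try:
        ceph_fs("subvolume", "snapshot", "rm", "cephfs", "subvol1", "snap1")
    except subprocess.CalledProcessError:
        pass  # expected until the clone completes

    # Poll until the clone reports 'complete'; the snapshot is then removable.
    while json.loads(ceph_fs("clone", "status", "cephfs", "clone1"))["status"]["state"] != "complete":
        time.sleep(1)
    ceph_fs("subvolume", "snapshot", "rm", "cephfs", "subvol1", "snap1")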