git.apps.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
mgr/volumes: Validate mon_allow_pool_delete before volume deletion
author     Kotresh HR <khiremat@redhat.com>
           Tue, 23 Jun 2020 18:19:22 +0000 (23:49 +0530)
committer  Kotresh HR <khiremat@redhat.com>
           Thu, 2 Jul 2020 14:49:00 +0000 (20:19 +0530)
Volume deletion wasn't validating the mon_allow_pool_delete config
before destroying volume metadata. Hence, when mon_allow_pool_delete
was set to false, the metadata was deleted but the pool deletion
failed, leaving the volume in an inconsistent state. This patch
validates the config before proceeding with the deletion.

Fixes: https://tracker.ceph.com/issues/45662
Signed-off-by: Kotresh HR <khiremat@redhat.com>
qa/tasks/cephfs/test_volumes.py
src/pybind/mgr/volumes/fs/volume.py
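
The heart of the fix, in the volume.py hunk below, is that 'config get' with json format returns a bare JSON boolean, so json.loads() on the output yields a Python bool that can gate the deletion. A minimal standalone sketch of that parsing and return convention (the helper name and messages are illustrative only, not part of the patch):

    import errno
    import json

    def pool_delete_allowed(config_get_output):
        # for a boolean option the JSON output is simply "true" or "false",
        # which json.loads turns into a Python bool
        allowed = json.loads(config_get_output)
        if not allowed:
            # same (retcode, stdout, stderr) convention the volumes plugin uses
            return -errno.EPERM, "", "pool deletion is disabled"
        return 0, "", ""

    assert pool_delete_allowed("false")[0] == -errno.EPERM
    assert pool_delete_allowed("true")[0] == 0

Returning -errno.EPERM rather than raising matches how the volumes plugin reports errors back to the CLI, which is why the new test asserts that 'fs volume rm' fails with errno.EPERM.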

index 4a4c8a57472477a37b142619486ced90555708fa..0f5dad54879fd93491c5bb0e326da1e244998fa7 100644 (file)
@@ -227,6 +227,7 @@ class TestVolumes(CephFSTestCase):
         self.vol_created = False
         self._enable_multi_fs()
         self._create_or_reuse_test_volume()
+        self.config_set('mon', 'mon_allow_pool_delete', True)
 
     def tearDown(self):
         if self.vol_created:
@@ -333,6 +334,33 @@ class TestVolumes(CephFSTestCase):
         for pool in vol_status["pools"]:
             self.assertNotIn(pool["name"], pools)
 
+    def test_volume_rm_when_mon_delete_pool_false(self):
+        """
+        That the volume can only be removed when mon_allow_pool_delete is set
+        to true and verify that the pools are removed after volume deletion.
+        """
+        self.config_set('mon', 'mon_allow_pool_delete', False)
+        try:
+            self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")
+        except CommandFailedError as ce:
+            self.assertEqual(ce.exitstatus, errno.EPERM,
+                             "expected the 'fs volume rm' command to fail with EPERM, "
+                             "but it failed with {0}".format(ce.exitstatus))
+        vol_status = json.loads(self._fs_cmd("status", self.volname, "--format=json-pretty"))
+        self.config_set('mon', 'mon_allow_pool_delete', True)
+        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")
+
+        # check if the fs is gone
+        volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
+        volnames = [volume['name'] for volume in volumes]
+        self.assertNotIn(self.volname, volnames,
+                         "volume {0} exists after removal".format(self.volname))
+        # check if the pools are gone
+        pools = json.loads(self._raw_cmd("osd", "pool", "ls", "--format=json-pretty"))
+        for pool in vol_status["pools"]:
+            self.assertNotIn(pool["name"], pools,
+                             "pool {0} exists after volume removal".format(pool["name"]))
+
     ### basic subvolume operations
 
     def test_subvolume_create_and_rm(self):
index bd437929556edc0d27c52ee391a2c8d5970e1e7f..f0a39cd41c32ac6c4754148661abce5ab764e5c8 100644 (file)
@@ -98,6 +98,18 @@ class VolumeClient(CephfsClient):
                 "that is what you want, re-issue the command followed by " \
                 "--yes-i-really-mean-it.".format(volname)
 
+        ret, out, err = self.mgr.check_mon_command({
+            'prefix': 'config get',
+            'key': 'mon_allow_pool_delete',
+            'who': 'mon',
+            'format': 'json',
+        })
+        mon_allow_pool_delete = json.loads(out)
+        if not mon_allow_pool_delete:
+            return -errno.EPERM, "", "pool deletion is disabled; you must first " \
+                "set the mon_allow_pool_delete config option to true before volumes " \
+                "can be deleted"
+
         metadata_pool, data_pools = get_pool_names(self.mgr, volname)
         if not metadata_pool:
             return -errno.ENOENT, "", "volume {0} doesn't exist".format(volname)
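
For completeness, the operator-facing flow this gate enforces can also be exercised outside the test suite. A rough sketch, assuming a reachable test cluster, the ceph CLI on PATH, and a hypothetical volume named 'testvol' (none of which come from this patch):

    import subprocess

    def ceph(*args):
        # thin wrapper around the ceph CLI
        return subprocess.run(["ceph", *args], capture_output=True, text=True)

    # with pool deletion disabled, 'fs volume rm' should now be refused
    ceph("config", "set", "mon", "mon_allow_pool_delete", "false")
    rm = ceph("fs", "volume", "rm", "testvol", "--yes-i-really-mean-it")
    assert rm.returncode != 0, rm.stderr

    # enable pool deletion and retry; the volume and its pools are removed
    ceph("config", "set", "mon", "mon_allow_pool_delete", "true")
    rm = ceph("fs", "volume", "rm", "testvol", "--yes-i-really-mean-it")
    assert rm.returncode == 0, rm.stderr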