mgr/volumes: Fix pool removal on volume deletion
author Kotresh HR <khiremat@redhat.com>
Fri, 5 Jun 2020 17:58:36 +0000 (23:28 +0530)
committer Kotresh HR <khiremat@redhat.com>
Tue, 23 Jun 2020 17:34:28 +0000 (23:04 +0530)
During volume deletion, the associated pools are not always
removed. The pools are removed only if the volume was created
through the mgr plugin, and not if it was created with custom
osd pools. This is because the mgr plugin generates pool names
with a specific pattern, and both volume create and delete rely
on that pattern. This patch fixes the issue by identifying the
volume's pools without relying on the naming pattern.

Fixes: https://tracker.ceph.com/issues/45910
Signed-off-by: Kotresh HR <khiremat@redhat.com>
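
In essence, instead of regenerating pool names from the volume name, the fix
resolves the volume's pools from the cluster maps and removes those. A minimal
sketch of that lookup (the helper name lookup_volume_pools is illustrative
only; the mgr handle and the fs_map/osd_map layouts are the ones used by the
new get_pool_names() helper in the diff below):

    def lookup_volume_pools(mgr, volname):
        # Find the filesystem entry for this volume in the fs map.
        fs_map = mgr.get("fs_map")
        for fs in fs_map['filesystems']:
            if fs['mdsmap']['fs_name'] == volname:
                break
        else:
            return None, None  # volume does not exist
        # Resolve pool ids to pool names via the osd map.
        osd_map = mgr.get("osd_map")
        id_to_name = {p['pool']: p['pool_name'] for p in osd_map['pools']}
        metadata_pool = id_to_name[fs['mdsmap']['metadata_pool']]
        data_pools = [id_to_name[i] for i in fs['mdsmap']['data_pools']]
        return metadata_pool, data_pools

Those names are then passed to delete_volume(), so the pools that actually
back the filesystem are removed regardless of how they were named.
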
qa/tasks/cephfs/test_volumes.py
src/pybind/mgr/volumes/fs/operations/volume.py
src/pybind/mgr/volumes/fs/volume.py

index 5c0f1e9815d3df27913348c5643c7f144f533c7b..00189a9cb660cae77636d53036830a56096acbc5 100644 (file)
@@ -29,6 +29,9 @@ class TestVolumes(CephFSTestCase):
     def _fs_cmd(self, *args):
         return self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", *args)
 
+    def _raw_cmd(self, *args):
+        return self.mgr_cluster.mon_manager.raw_cluster_cmd(*args)
+
     def __check_clone_state(self, state, clone, clone_group=None, timo=120):
         check = 0
         args = ["clone", "status", self.volname, clone]
@@ -302,6 +305,27 @@ class TestVolumes(CephFSTestCase):
         else:
             raise RuntimeError("expected the 'fs volume rm' command to fail.")
 
+    def test_volume_rm_arbitrary_pool_removal(self):
+        """
+        That the arbitrary pool added to the volume out of band is removed
+        successfully on volume removal.
+        """
+        new_pool = "new_pool"
+        # add arbitrary data pool
+        self.fs.add_data_pool(new_pool)
+        vol_status = json.loads(self._fs_cmd("status", self.volname, "--format=json-pretty"))
+        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")
+
+        # check if fs is gone
+        volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
+        volnames = [volume['name'] for volume in volumes]
+        self.assertNotIn(self.volname, volnames)
+
+        # check if osd pools are gone
+        pools = json.loads(self._raw_cmd("osd", "pool", "ls", "--format=json-pretty"))
+        for pool in vol_status["pools"]:
+            self.assertNotIn(pool["name"], pools)
+
     ### basic subvolume operations
 
     def test_subvolume_create_and_rm(self):
index 110f206aba9780e80a80ba8376d20afe69c68f81..946d51f6979661b09684e4ccf19063ad671adc15 100644 (file)
@@ -21,6 +21,24 @@ def gen_pool_names(volname):
     """
     return "cephfs.{}.meta".format(volname), "cephfs.{}.data".format(volname)
 
+def get_pool_names(mgr, volname):
+    """
+    return the volume's metadata pool name and list of data pool names as a tuple
+    """
+    fs_map = mgr.get("fs_map")
+    for f in fs_map['filesystems']:
+        if volname == f['mdsmap']['fs_name']:
+            metadata_pool_id = f['mdsmap']['metadata_pool']
+            data_pool_ids = f['mdsmap']['data_pools']
+            break
+    else:
+        return None, None
+    osdmap = mgr.get("osd_map")
+    pools = dict([(p['pool'], p['pool_name']) for p in osdmap['pools']])
+    metadata_pool = pools[metadata_pool_id]
+    data_pools = [pools[id] for id in data_pool_ids]
+    return metadata_pool, data_pools
+
 def create_volume(mgr, volname, placement):
     """
     create volume  (pool, filesystem and mds)
@@ -47,9 +65,9 @@ def create_volume(mgr, volname, placement):
     return create_mds(mgr, volname, placement)
 
 
-def delete_volume(mgr, volname):
+def delete_volume(mgr, volname, metadata_pool, data_pools):
     """
-    delete the given module (tear down mds, remove filesystem)
+    delete the given volume (tear down mds, remove filesystem, remove pools)
     """
     # Tear down MDS daemons
     try:
@@ -74,11 +92,16 @@ def delete_volume(mgr, volname):
         err = "Filesystem not found for volume '{0}'".format(volname)
         log.warning(err)
         return -errno.ENOENT, "", err
-    metadata_pool, data_pool = gen_pool_names(volname)
     r, outb, outs = remove_pool(mgr, metadata_pool)
     if r != 0:
         return r, outb, outs
-    return remove_pool(mgr, data_pool)
+
+    for data_pool in data_pools:
+        r, outb, outs = remove_pool(mgr, data_pool)
+        if r != 0:
+            return r, outb, outs
+    result_str = "metadata pool: {0} data pools: {1} removed".format(metadata_pool, str(data_pools))
+    return r, result_str, ""
 
 
 def list_volumes(mgr):
index 88ab6582b843b12b0d4d8e0e3b71840a50590c98..42fcfa56c0d475fc3376db042f0949bd842b79d0 100644 (file)
@@ -9,7 +9,7 @@ from mgr_util import CephfsClient
 from .fs_util import listdir
 
 from .operations.volume import create_volume, \
-    delete_volume, list_volumes, open_volume
+    delete_volume, list_volumes, open_volume, get_pool_names
 from .operations.group import open_group, create_group, remove_group
 from .operations.subvolume import open_subvol, create_subvol, remove_subvol, \
     create_clone
@@ -98,9 +98,12 @@ class VolumeClient(CephfsClient):
                 "that is what you want, re-issue the command followed by " \
                 "--yes-i-really-mean-it.".format(volname)
 
+        metadata_pool, data_pools = get_pool_names(self.mgr, volname)
+        if not metadata_pool:
+            return -errno.ENOENT, "", "volume {0} doesn't exist".format(volname)
         self.purge_queue.cancel_jobs(volname)
         self.connection_pool.del_fs_handle(volname, wait=True)
-        return delete_volume(self.mgr, volname)
+        return delete_volume(self.mgr, volname, metadata_pool, data_pools)
 
     def list_fs_volumes(self):
         if self.stopping.is_set():