mgr/volumes: Fix pool removal on volume deletion
author     Kotresh HR <khiremat@redhat.com>
           Fri, 5 Jun 2020 17:58:36 +0000 (23:28 +0530)
committer  Ramana Raja <rraja@redhat.com>
           Mon, 27 Jul 2020 12:35:47 +0000 (18:05 +0530)
During volume deletion, the associated pools are not always
removed. The pools are removed only if the volume was created
through the mgr plugin, and not if it was created with custom
osd pools. This is because the mgr plugin generates pool names
following a specific pattern, and both volume create and delete
rely on that pattern. This patch fixes the issue by identifying
the volume's pools without relying on the pattern.
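
For illustration, a minimal sketch (with a hypothetical volume name) of
the pattern-based naming that create and delete both relied on;
gen_pool_names() is the helper visible in the diff below:

    # Sketch only: the mgr plugin derives pool names from the volume name,
    # so deletion could only find pools that follow this pattern.
    def gen_pool_names(volname):
        return "cephfs.{}.meta".format(volname), "cephfs.{}.data".format(volname)

    print(gen_pool_names("vol1"))
    # -> ('cephfs.vol1.meta', 'cephfs.vol1.data')
    # A volume backed by custom pools such as "my_meta"/"my_data" never
    # matches this pattern, so those pools were left behind on deletion.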

Fixes: https://tracker.ceph.com/issues/45910
Signed-off-by: Kotresh HR <khiremat@redhat.com>
(cherry picked from commit d07ea8db471b1a1d9082756e1cb775d5543b1307)

Conflicts:
src/pybind/mgr/volumes/fs/operations/volume.py:
- In nautilus, fs volume create doesn't have placement arg
src/pybind/mgr/volumes/fs/volume.py:
- In nautilus, VolumeClient code not moved to mgr_util.py

qa/tasks/cephfs/test_volumes.py
src/pybind/mgr/volumes/fs/operations/volume.py
src/pybind/mgr/volumes/fs/volume.py

diff --git a/qa/tasks/cephfs/test_volumes.py b/qa/tasks/cephfs/test_volumes.py
index 0e4961376f944abfdb9e758447dbde1468b975ad..dce0e4f618c5224d16f6edd09d6b7d059c383803 100644
@@ -29,6 +29,9 @@ class TestVolumes(CephFSTestCase):
     def _fs_cmd(self, *args):
         return self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", *args)
 
+    def _raw_cmd(self, *args):
+        return self.mgr_cluster.mon_manager.raw_cluster_cmd(*args)
+
     def __check_clone_state(self, state, clone, clone_group=None, timo=120):
         check = 0
         args = ["clone", "status", self.volname, clone]
@@ -310,6 +313,27 @@ class TestVolumes(CephFSTestCase):
         else:
             raise RuntimeError("expected the 'fs volume rm' command to fail.")
 
+    def test_volume_rm_arbitrary_pool_removal(self):
+        """
+        That the arbitrary pool added to the volume out of band is removed
+        successfully on volume removal.
+        """
+        new_pool = "new_pool"
+        # add arbitrary data pool
+        self.fs.add_data_pool(new_pool)
+        vol_status = json.loads(self._fs_cmd("status", self.volname, "--format=json-pretty"))
+        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")
+
+        # check if fs is gone
+        volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
+        volnames = [volume['name'] for volume in volumes]
+        self.assertNotIn(self.volname, volnames)
+
+        # check if osd pools are gone
+        pools = json.loads(self._raw_cmd("osd", "pool", "ls", "--format=json-pretty"))
+        for pool in vol_status["pools"]:
+            self.assertNotIn(pool["name"], pools)
+
     ### basic subvolume operations
 
     def test_subvolume_create_and_rm(self):
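
The test depends on the JSON emitted by `fs status` and `osd pool ls`.
An assumed, trimmed sketch of the fields it actually reads (pool ids and
names are illustrative):

    # Assumed shape (trimmed) of `fs status --format=json-pretty`; only
    # the "pools" entries the test reads are shown.
    vol_status = {
        "pools": [
            {"id": 1, "name": "cephfs.vol1.meta", "type": "metadata"},
            {"id": 2, "name": "cephfs.vol1.data", "type": "data"},
            {"id": 3, "name": "new_pool", "type": "data"},  # added out of band
        ]
    }
    # `osd pool ls --format=json-pretty` returns a flat JSON list of pool
    # names; the test asserts none of the names above survive `fs volume rm`.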
diff --git a/src/pybind/mgr/volumes/fs/operations/volume.py b/src/pybind/mgr/volumes/fs/operations/volume.py
index a97e9b8c73a2a63cc3b76465304f549f21dcedf0..fd7fc4f62a4492d85b1202f04f2d6168b6456732 100644
@@ -196,6 +196,24 @@ def gen_pool_names(volname):
     """
     return "cephfs.{}.meta".format(volname), "cephfs.{}.data".format(volname)
 
+def get_pool_names(mgr, volname):
+    """
+    return the volume's metadata pool name and list of data pool names as a tuple
+    """
+    fs_map = mgr.get("fs_map")
+    for f in fs_map['filesystems']:
+        if volname == f['mdsmap']['fs_name']:
+            metadata_pool_id = f['mdsmap']['metadata_pool']
+            data_pool_ids = f['mdsmap']['data_pools']
+            break
+    else:   # loop fell through: no filesystem named volname
+        return None, None
+    osdmap = mgr.get("osd_map")
+    pools = dict([(p['pool'], p['pool_name']) for p in osdmap['pools']])
+    metadata_pool = pools[metadata_pool_id]
+    data_pools = [pools[id] for id in data_pool_ids]
+    return metadata_pool, data_pools
+
 def create_volume(mgr, volname):
     """
     create volume  (pool, filesystem and mds)
@@ -221,9 +239,9 @@ def create_volume(mgr, volname):
     # create mds
     return create_mds(mgr, volname)
 
-def delete_volume(mgr, volname):
+def delete_volume(mgr, volname, metadata_pool, data_pools):
     """
-    delete the given module (tear down mds, remove filesystem)
+    delete the given volume (tear down mds, remove filesystem, remove pools)
     """
     # Tear down MDS daemons
     try:
@@ -248,11 +266,16 @@ def delete_volume(mgr, volname):
         err = "Filesystem not found for volume '{0}'".format(volname)
         log.warning(err)
         return -errno.ENOENT, "", err
-    metadata_pool, data_pool = gen_pool_names(volname)
     r, outb, outs = remove_pool(mgr, metadata_pool)
     if r != 0:
         return r, outb, outs
-    return remove_pool(mgr, data_pool)
+
+    for data_pool in data_pools:
+        r, outb, outs = remove_pool(mgr, data_pool)
+        if r != 0:
+            return r, outb, outs
+    result_str = "metadata pool: {0} data pools: {1} removed".format(metadata_pool, str(data_pools))
+    return r, result_str, ""
 
 def list_volumes(mgr):
     """
diff --git a/src/pybind/mgr/volumes/fs/volume.py b/src/pybind/mgr/volumes/fs/volume.py
index f0224d2119fcbee21a50fbd36cab93778acb8160..f12c8afbeeaddc2b917a7d15383158f53c762faf 100644
@@ -8,7 +8,7 @@ import cephfs
 from .fs_util import listdir
 
 from .operations.volume import ConnectionPool, open_volume, create_volume, \
-    delete_volume, list_volumes
+    delete_volume, list_volumes, get_pool_names
 from .operations.group import open_group, create_group, remove_group
 from .operations.subvolume import open_subvol, create_subvol, remove_subvol, \
     create_clone
@@ -98,9 +98,12 @@ class VolumeClient(object):
                 "that is what you want, re-issue the command followed by " \
                 "--yes-i-really-mean-it.".format(volname)
 
+        metadata_pool, data_pools = get_pool_names(self.mgr, volname)
+        if not metadata_pool:
+            return -errno.ENOENT, "", "volume {0} doesn't exist".format(volname)
         self.purge_queue.cancel_jobs(volname)
         self.connection_pool.del_fs_handle(volname, wait=True)
-        return delete_volume(self.mgr, volname)
+        return delete_volume(self.mgr, volname, metadata_pool, data_pools)
 
     def list_fs_volumes(self):
         if self.stopping.is_set():
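
Note the ordering in delete_fs_volume(): pool names must be resolved via
get_pool_names() before delete_volume() tears the filesystem down,
because once the filesystem is removed its entry disappears from fs_map
and the pool ids can no longer be looked up. A condensed sketch of the
resulting flow (abridged from the hunk above):

    # Condensed sketch of the new deletion flow in delete_fs_volume():
    metadata_pool, data_pools = get_pool_names(self.mgr, volname)  # resolve first
    if not metadata_pool:
        return -errno.ENOENT, "", "volume {0} doesn't exist".format(volname)
    self.purge_queue.cancel_jobs(volname)                    # stop async purges
    self.connection_pool.del_fs_handle(volname, wait=True)   # drop cached handles
    # delete_volume() now receives the resolved names and removes the
    # metadata pool plus every data pool, custom ones included.
    return delete_volume(self.mgr, volname, metadata_pool, data_pools)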