mgr/volumes: Fix pool removal on volume deletion
author      Kotresh HR <khiremat@redhat.com>
            Fri, 5 Jun 2020 17:58:36 +0000 (23:28 +0530)
committer   Kotresh HR <khiremat@redhat.com>
            Tue, 28 Jul 2020 09:48:05 +0000 (15:18 +0530)
During volume deletion, the associated pools are not always
removed. The pools are removed only if the volume was created
via the mgr plugin, and not if it was created with custom osd
pools. This is because the mgr plugin generates pool names
following a specific pattern, and both volume create and delete
rely on that pattern. This patch fixes the issue by identifying
the volume's pools without relying on the pattern.

Fixes: https://tracker.ceph.com/issues/45910
Signed-off-by: Kotresh HR <khiremat@redhat.com>
(cherry picked from commit d07ea8db471b1a1d9082756e1cb775d5543b1307)

Conflicts:
    src/pybind/mgr/volumes/fs/operations/volume.py: An extra empty line
    was creating a conflict; removed it.
    src/pybind/mgr/volumes/fs/volume.py: Imports ConnectionPool, which
    isn't present in master, so that import was retained.

qa/tasks/cephfs/test_volumes.py
src/pybind/mgr/volumes/fs/operations/volume.py
src/pybind/mgr/volumes/fs/volume.py
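
For context, here is a minimal illustration (not part of the patch; the volume
and pool names below are made up) of why deriving pool names from the volume
name misses custom pools:

# Before this patch, delete_volume() removed the pools returned by
# gen_pool_names(), i.e. the names the mgr plugin itself would have created:
def gen_pool_names(volname):
    return "cephfs.{}.meta".format(volname), "cephfs.{}.data".format(volname)

print(gen_pool_names("myvol"))  # ('cephfs.myvol.meta', 'cephfs.myvol.data')

# But a volume layered on pre-existing pools, e.g.
#   ceph osd pool create custom_meta; ceph osd pool create custom_data
#   ceph fs new myvol custom_meta custom_data
# is backed by "custom_meta"/"custom_data", so removing the generated
# names leaves the real pools behind.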

diff --git a/qa/tasks/cephfs/test_volumes.py b/qa/tasks/cephfs/test_volumes.py
index 423193c42c41f6a88afc3b7a5446ce603551642f..ec91f214e5f5e9bb2502bda6bc6804a91460b464 100644
--- a/qa/tasks/cephfs/test_volumes.py
+++ b/qa/tasks/cephfs/test_volumes.py
@@ -30,6 +30,9 @@ class TestVolumes(CephFSTestCase):
     def _fs_cmd(self, *args):
         return self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", *args)
 
+    def _raw_cmd(self, *args):
+        return self.mgr_cluster.mon_manager.raw_cluster_cmd(*args)
+
     def __check_clone_state(self, state, clone, clone_group=None, timo=120):
         check = 0
         args = ["clone", "status", self.volname, clone]
@@ -311,6 +314,27 @@ class TestVolumes(CephFSTestCase):
         else:
             raise RuntimeError("expected the 'fs volume rm' command to fail.")
 
+    def test_volume_rm_arbitrary_pool_removal(self):
+        """
+        That the arbitrary pool added to the volume out of band is removed
+        successfully on volume removal.
+        """
+        new_pool = "new_pool"
+        # add arbitrary data pool
+        self.fs.add_data_pool(new_pool)
+        vol_status = json.loads(self._fs_cmd("status", self.volname, "--format=json-pretty"))
+        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")
+
+        #check if fs is gone
+        volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
+        volnames = [volume['name'] for volume in volumes]
+        self.assertNotIn(self.volname, volnames)
+
+        #check if osd pools are gone
+        pools = json.loads(self._raw_cmd("osd", "pool", "ls", "--format=json-pretty"))
+        for pool in vol_status["pools"]:
+            self.assertNotIn(pool["name"], pools)
+
     ### basic subvolume operations
 
     def test_subvolume_create_and_rm(self):
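
The new test leans on two JSON outputs: `fs status --format=json-pretty`, whose
"pools" array carries a "name" per pool, and `osd pool ls --format=json-pretty`,
which returns a flat list of pool names. A rough, hand-written illustration of
the shapes the assertions rely on (the values are invented and the structures
are trimmed to the fields the test actually reads):

# Invented example data mirroring what the test consumes.
vol_status = {
    "pools": [
        {"name": "cephfs.fs_test.meta"},
        {"name": "cephfs.fs_test.data"},
        {"name": "new_pool"},
    ]
}
pools = ["some_unrelated_pool"]  # `osd pool ls` output after the volume is removed

for pool in vol_status["pools"]:
    assert pool["name"] not in pools  # mirrors assertNotIn(pool["name"], pools)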
diff --git a/src/pybind/mgr/volumes/fs/operations/volume.py b/src/pybind/mgr/volumes/fs/operations/volume.py
index c1030d39fed18fb8cbe3d74a71c5f93462a658fd..05a16ccda43e3ebac6b13e295ecd973aaab917cd 100644
--- a/src/pybind/mgr/volumes/fs/operations/volume.py
+++ b/src/pybind/mgr/volumes/fs/operations/volume.py
@@ -201,6 +201,25 @@ def gen_pool_names(volname):
     """
     return "cephfs.{}.meta".format(volname), "cephfs.{}.data".format(volname)
 
+def get_pool_names(mgr, volname):
+    """
+    return the volume's metadata pool name and list of data pool names as a tuple
+    """
+    fs_map = mgr.get("fs_map")
+    for f in fs_map['filesystems']:
+        if volname == f['mdsmap']['fs_name']:
+            metadata_pool_id = f['mdsmap']['metadata_pool']
+            data_pool_ids = f['mdsmap']['data_pools']
+            break
+    else:
+        return None, None
+
+    osdmap = mgr.get("osd_map")
+    pools = dict([(p['pool'], p['pool_name']) for p in osdmap['pools']])
+    metadata_pool = pools[metadata_pool_id]
+    data_pools = [pools[id] for id in data_pool_ids]
+    return metadata_pool, data_pools
+
 def create_volume(mgr, volname, placement):
     """
     create volume  (pool, filesystem and mds)
@@ -226,9 +245,9 @@ def create_volume(mgr, volname, placement):
     # create mds
     return create_mds(mgr, volname, placement)
 
-def delete_volume(mgr, volname):
+def delete_volume(mgr, volname, metadata_pool, data_pools):
     """
-    delete the given module (tear down mds, remove filesystem)
+    delete the given volume (tear down mds, remove filesystem, remove pools)
     """
     # Tear down MDS daemons
     try:
@@ -253,11 +272,16 @@ def delete_volume(mgr, volname):
         err = "Filesystem not found for volume '{0}'".format(volname)
         log.warning(err)
         return -errno.ENOENT, "", err
-    metadata_pool, data_pool = gen_pool_names(volname)
     r, outb, outs = remove_pool(mgr, metadata_pool)
     if r != 0:
         return r, outb, outs
-    return remove_pool(mgr, data_pool)
+
+    for data_pool in data_pools:
+        r, outb, outs = remove_pool(mgr, data_pool)
+        if r != 0:
+            return r, outb, outs
+    result_str = "metadata pool: {0} data pool: {1} removed".format(metadata_pool, str(data_pools))
+    return r, result_str, ""
 
 def list_volumes(mgr):
     """
diff --git a/src/pybind/mgr/volumes/fs/volume.py b/src/pybind/mgr/volumes/fs/volume.py
index 0a8347a146df454726b8fad2b3adb1e3ae8c89a5..4b4965fc833ca6fc8e23f82615d65b8458e09f67 100644
--- a/src/pybind/mgr/volumes/fs/volume.py
+++ b/src/pybind/mgr/volumes/fs/volume.py
@@ -8,7 +8,7 @@ import cephfs
 from .fs_util import listdir
 
 from .operations.volume import ConnectionPool, open_volume, create_volume, \
-    delete_volume, list_volumes
+    delete_volume, list_volumes, get_pool_names
 from .operations.group import open_group, create_group, remove_group
 from .operations.subvolume import open_subvol, create_subvol, remove_subvol, \
     create_clone
@@ -98,9 +98,12 @@ class VolumeClient(object):
                 "that is what you want, re-issue the command followed by " \
                 "--yes-i-really-mean-it.".format(volname)
 
+        metadata_pool, data_pools = get_pool_names(self.mgr, volname)
+        if not metadata_pool:
+            return -errno.ENOENT, "", "volume {0} doesn't exist".format(volname)
         self.purge_queue.cancel_jobs(volname)
         self.connection_pool.del_fs_handle(volname, wait=True)
-        return delete_volume(self.mgr, volname)
+        return delete_volume(self.mgr, volname, metadata_pool, data_pools)
 
     def list_fs_volumes(self):
         if self.stopping.is_set():
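
Putting the caller-side change together: the pool names are resolved up front,
so an unknown volume fails with ENOENT before any teardown starts. A condensed,
self-contained sketch of that flow (the helpers here are stand-ins for the
get_pool_names()/delete_volume() shown above, not the real module):

import errno

def lookup_pools(volname):
    # stand-in for get_pool_names(); pretend no such filesystem exists
    return None, None

def remove_volume(volname, metadata_pool, data_pools):
    # stand-in for delete_volume(); would tear down mds, fs and all pools
    return 0, "metadata pool: {0} data pool: {1} removed".format(metadata_pool, data_pools), ""

def delete_fs_volume(volname):
    metadata_pool, data_pools = lookup_pools(volname)
    if not metadata_pool:
        return -errno.ENOENT, "", "volume {0} doesn't exist".format(volname)
    # the real code also cancels purge jobs and drops the cached fs handle here
    return remove_volume(volname, metadata_pool, data_pools)

print(delete_fs_volume("no_such_volume"))
# -> (-2, '', "volume no_such_volume doesn't exist")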