From d07ea8db471b1a1d9082756e1cb775d5543b1307 Mon Sep 17 00:00:00 2001
From: Kotresh HR
Date: Fri, 5 Jun 2020 23:28:36 +0530
Subject: [PATCH] mgr/volumes: Fix pool removal on volume deletion

During volume deletion, the associated pools are not always removed.
The pools are removed only if the volume was created by the mgr plugin,
not if it was created with custom osd pools. This is because the mgr
plugin generates pool names following a specific pattern, and both
volume creation and deletion rely on that pattern. This patch fixes the
issue by identifying the volume's pools from the FSMap and OSDMap
instead of relying on the naming pattern.

Fixes: https://tracker.ceph.com/issues/45910
Signed-off-by: Kotresh HR
---
 qa/tasks/cephfs/test_volumes.py                 | 24 ++++++++++++++
 .../mgr/volumes/fs/operations/volume.py         | 34 ++++++++++++++++-----
 src/pybind/mgr/volumes/fs/volume.py             |  7 +++--
 3 files changed, 59 insertions(+), 6 deletions(-)

diff --git a/qa/tasks/cephfs/test_volumes.py b/qa/tasks/cephfs/test_volumes.py
index 5c0f1e9815d3d..00189a9cb660c 100644
--- a/qa/tasks/cephfs/test_volumes.py
+++ b/qa/tasks/cephfs/test_volumes.py
@@ -29,6 +29,9 @@ class TestVolumes(CephFSTestCase):
     def _fs_cmd(self, *args):
         return self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", *args)
 
+    def _raw_cmd(self, *args):
+        return self.mgr_cluster.mon_manager.raw_cluster_cmd(*args)
+
     def __check_clone_state(self, state, clone, clone_group=None, timo=120):
         check = 0
         args = ["clone", "status", self.volname, clone]
@@ -302,6 +305,27 @@
         else:
             raise RuntimeError("expected the 'fs volume rm' command to fail.")
 
+    def test_volume_rm_arbitrary_pool_removal(self):
+        """
+        That an arbitrary data pool added to the volume out of band is
+        removed successfully on volume removal.
+        """
+        new_pool = "new_pool"
+        # add the arbitrary data pool
+        self.fs.add_data_pool(new_pool)
+        vol_status = json.loads(self._fs_cmd("status", self.volname, "--format=json-pretty"))
+        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")
+
+        # check that the fs is gone
+        volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
+        volnames = [volume['name'] for volume in volumes]
+        self.assertNotIn(self.volname, volnames)
+
+        # check that the osd pools are gone
+        pools = json.loads(self._raw_cmd("osd", "pool", "ls", "--format=json-pretty"))
+        for pool in vol_status["pools"]:
+            self.assertNotIn(pool["name"], pools)
+
     ### basic subvolume operations
 
     def test_subvolume_create_and_rm(self):
diff --git a/src/pybind/mgr/volumes/fs/operations/volume.py b/src/pybind/mgr/volumes/fs/operations/volume.py
index 110f206aba978..946d51f697966 100644
--- a/src/pybind/mgr/volumes/fs/operations/volume.py
+++ b/src/pybind/mgr/volumes/fs/operations/volume.py
@@ -21,6 +21,27 @@ def gen_pool_names(volname):
     """
     return "cephfs.{}.meta".format(volname), "cephfs.{}.data".format(volname)
 
+def get_pool_names(mgr, volname):
+    """
+    return the volume's metadata pool name and list of data pool names as a tuple
+    """
+    fs_map = mgr.get("fs_map")
+    metadata_pool_id = None
+    data_pool_ids = []
+    for f in fs_map['filesystems']:
+        if volname == f['mdsmap']['fs_name']:
+            metadata_pool_id = f['mdsmap']['metadata_pool']
+            data_pool_ids = f['mdsmap']['data_pools']
+            break
+    if metadata_pool_id is None:
+        return None, None
+
+    osdmap = mgr.get("osd_map")
+    pools = dict([(p['pool'], p['pool_name']) for p in osdmap['pools']])
+    metadata_pool = pools[metadata_pool_id]
+    data_pools = [pools[id] for id in data_pool_ids]
+    return metadata_pool, data_pools
+
 def create_volume(mgr, volname, placement):
     """
     create volume (pool, filesystem and mds)
@@ -47,9 +68,9 @@
     # create mds
     return create_mds(mgr, volname, placement)
 
-def delete_volume(mgr, volname):
+def delete_volume(mgr, volname, metadata_pool, data_pools):
     """
-    delete the given module (tear down mds, remove filesystem)
+    delete the given volume (tear down mds, remove filesystem, remove pools)
     """
     # Tear down MDS daemons
     try:
@@ -74,11 +95,16 @@
         err = "Filesystem not found for volume '{0}'".format(volname)
         log.warning(err)
         return -errno.ENOENT, "", err
-    metadata_pool, data_pool = gen_pool_names(volname)
     r, outb, outs = remove_pool(mgr, metadata_pool)
     if r != 0:
         return r, outb, outs
-    return remove_pool(mgr, data_pool)
+
+    for data_pool in data_pools:
+        r, outb, outs = remove_pool(mgr, data_pool)
+        if r != 0:
+            return r, outb, outs
+    result_str = "metadata pool: {0}, data pools: {1} removed".format(metadata_pool, str(data_pools))
+    return r, result_str, ""
 
 
 def list_volumes(mgr):
diff --git a/src/pybind/mgr/volumes/fs/volume.py b/src/pybind/mgr/volumes/fs/volume.py
index 88ab6582b843b..42fcfa56c0d47 100644
--- a/src/pybind/mgr/volumes/fs/volume.py
+++ b/src/pybind/mgr/volumes/fs/volume.py
@@ -9,7 +9,7 @@ from mgr_util import CephfsClient
 from .fs_util import listdir
 
 from .operations.volume import create_volume, \
-    delete_volume, list_volumes, open_volume
+    delete_volume, list_volumes, open_volume, get_pool_names
 from .operations.group import open_group, create_group, remove_group
 from .operations.subvolume import open_subvol, create_subvol, remove_subvol, \
     create_clone
@@ -98,9 +98,12 @@
                 "that is what you want, re-issue the command followed by " \
                 "--yes-i-really-mean-it.".format(volname)
 
+        metadata_pool, data_pools = get_pool_names(self.mgr, volname)
+        if not metadata_pool:
+            return -errno.ENOENT, "", "volume {0} doesn't exist".format(volname)
        self.purge_queue.cancel_jobs(volname)
         self.connection_pool.del_fs_handle(volname, wait=True)
-        return delete_volume(self.mgr, volname)
+        return delete_volume(self.mgr, volname, metadata_pool, data_pools)
 
     def list_fs_volumes(self):
         if self.stopping.is_set():
-- 
2.39.5
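
Reviewer note: the heart of the fix is that delete_volume() now receives pool
names resolved from the cluster's FSMap and OSDMap instead of regenerating
them from the volume name. Below is a minimal, runnable sketch of that lookup
against hand-built stand-ins for mgr.get("fs_map") and mgr.get("osd_map");
the dict shapes mirror the fields the patch reads, while the volume name,
pool IDs, and pool names (vol1, new_pool) are illustrative only:

    # Stand-ins for the mgr's FSMap/OSDMap dumps (illustrative values).
    FS_MAP = {
        'filesystems': [
            {'mdsmap': {'fs_name': 'vol1',
                        'metadata_pool': 1,
                        'data_pools': [2, 7]}},  # 7: pool added out of band
        ]
    }
    OSD_MAP = {
        'pools': [
            {'pool': 1, 'pool_name': 'cephfs.vol1.meta'},
            {'pool': 2, 'pool_name': 'cephfs.vol1.data'},
            {'pool': 7, 'pool_name': 'new_pool'},  # no cephfs.<vol>.* pattern
        ]
    }

    def get_pool_names(volname):
        # find the volume's pool IDs in the FSMap
        metadata_pool_id = None
        data_pool_ids = []
        for f in FS_MAP['filesystems']:
            if volname == f['mdsmap']['fs_name']:
                metadata_pool_id = f['mdsmap']['metadata_pool']
                data_pool_ids = f['mdsmap']['data_pools']
                break
        if metadata_pool_id is None:
            return None, None
        # translate pool IDs to pool names via the OSDMap
        pools = {p['pool']: p['pool_name'] for p in OSD_MAP['pools']}
        return pools[metadata_pool_id], [pools[i] for i in data_pool_ids]

    print(get_pool_names('vol1'))
    # -> ('cephfs.vol1.meta', ['cephfs.vol1.data', 'new_pool'])

Because the data pool list comes from the MDSMap rather than from
gen_pool_names(), the out-of-band pool new_pool is included and is removed
along with the generated pools.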