    def _fs_cmd(self, *args):
        return self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", *args)
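+    # helper to run a bare "ceph" command (no implicit "fs" prefix); used below to list OSD pools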
+    def _raw_cmd(self, *args):
+        return self.mgr_cluster.mon_manager.raw_cluster_cmd(*args)
+
    def __check_clone_state(self, state, clone, clone_group=None, timo=120):
        check = 0
        args = ["clone", "status", self.volname, clone]
        else:
            raise RuntimeError("expected the 'fs volume rm' command to fail.")
+    def test_volume_rm_arbitrary_pool_removal(self):
+        """
+        That the arbitrary pool added to the volume out of band is removed
+        successfully on volume removal.
+        """
+        new_pool = "new_pool"
+        # add arbitrary data pool
+        self.fs.add_data_pool(new_pool)
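+        # record the volume's pool list (including the new pool) before removal,
+        # so the checks below can verify that every pool is gone afterwards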
+        vol_status = json.loads(self._fs_cmd("status", self.volname, "--format=json-pretty"))
+        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")
+
+        # check if fs is gone
+        volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty"))
+        volnames = [volume['name'] for volume in volumes]
+        self.assertNotIn(self.volname, volnames)
+
+        # check if osd pools are gone
+        pools = json.loads(self._raw_cmd("osd", "pool", "ls", "--format=json-pretty"))
+        for pool in vol_status["pools"]:
+            self.assertNotIn(pool["name"], pools)
+
    ### basic subvolume operations
    def test_subvolume_create_and_rm(self):
        """
    return "cephfs.{}.meta".format(volname), "cephfs.{}.data".format(volname)
+def get_pool_names(mgr, volname):
+    """
+    return the metadata pool name and the list of data pool names of the volume as a tuple
+    """
+    fs_map = mgr.get("fs_map")
+    for f in fs_map['filesystems']:
+        if volname == f['mdsmap']['fs_name']:
+            metadata_pool_id = f['mdsmap']['metadata_pool']
+            data_pool_ids = f['mdsmap']['data_pools']
+            break
+    else:
+        # for-else: no filesystem with the given name was found
+        return None, None
+
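+    # the fs map only carries pool ids; translate them to names via the osd map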
+    osdmap = mgr.get("osd_map")
+    pools = dict([(p['pool'], p['pool_name']) for p in osdmap['pools']])
+    metadata_pool = pools[metadata_pool_id]
+    data_pools = [pools[id] for id in data_pool_ids]
+    return metadata_pool, data_pools
+
def create_volume(mgr, volname, placement):
    """
    create volume (pool, filesystem and mds)
    return create_mds(mgr, volname, placement)
-def delete_volume(mgr, volname):
+def delete_volume(mgr, volname, metadata_pool, data_pools):
    """
-    delete the given module (tear down mds, remove filesystem)
+    delete the given volume (tear down mds, remove filesystem, remove pools)
    """
    # Tear down MDS daemons
    try:
        err = "Filesystem not found for volume '{0}'".format(volname)
        log.warning(err)
        return -errno.ENOENT, "", err
-    metadata_pool, data_pool = gen_pool_names(volname)
    r, outb, outs = remove_pool(mgr, metadata_pool)
    if r != 0:
        return r, outb, outs
-    return remove_pool(mgr, data_pool)
+
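+    # remove each data pool in turn, bailing out on the first failure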
+    for data_pool in data_pools:
+        r, outb, outs = remove_pool(mgr, data_pool)
+        if r != 0:
+            return r, outb, outs
+    result_str = "metadata pool: {0} data pool: {1} removed".format(metadata_pool, str(data_pools))
+    return r, result_str, ""
def list_volumes(mgr):
from .fs_util import listdir
from .operations.volume import create_volume, \
-    delete_volume, list_volumes, open_volume
+    delete_volume, list_volumes, open_volume, get_pool_names
from .operations.group import open_group, create_group, remove_group
from .operations.subvolume import open_subvol, create_subvol, remove_subvol, \
    create_clone
"that is what you want, re-issue the command followed by " \
"--yes-i-really-mean-it.".format(volname)
+        metadata_pool, data_pools = get_pool_names(self.mgr, volname)
+        if not metadata_pool:
+            return -errno.ENOENT, "", "volume {0} doesn't exist".format(volname)
        self.purge_queue.cancel_jobs(volname)
        self.connection_pool.del_fs_handle(volname, wait=True)
-        return delete_volume(self.mgr, volname)
+        return delete_volume(self.mgr, volname, metadata_pool, data_pools)
    def list_fs_volumes(self):
        if self.stopping.is_set():