From 4aa54755b8d216c9dfac8ba621c5433a0a2d3ff7 Mon Sep 17 00:00:00 2001
From: Douglas Fuller
Date: Wed, 12 Jul 2017 10:43:39 -0500
Subject: [PATCH] qa/cephfs: support CephFS recovery pools

Add support for testing recovery of CephFS metadata into an alternate
RADOS pool, useful as a disaster recovery mechanism that avoids
modifying the metadata in-place.

Signed-off-by: Douglas Fuller
(cherry picked from commit 8f9a25202093339afb5308051d354d3ae79c6b2d)
---
 qa/tasks/cephfs/cephfs_test_case.py | 18 ++++++++++++++++++
 qa/tasks/cephfs/filesystem.py       | 31 ++++++++++++++++++++++-------
 2 files changed, 42 insertions(+), 7 deletions(-)

diff --git a/qa/tasks/cephfs/cephfs_test_case.py b/qa/tasks/cephfs/cephfs_test_case.py
index 7f5c6e9d834cd..801d0d3114c4a 100644
--- a/qa/tasks/cephfs/cephfs_test_case.py
+++ b/qa/tasks/cephfs/cephfs_test_case.py
@@ -43,6 +43,7 @@ class CephFSTestCase(CephTestCase):
     # FIXME weird explicit naming
     mount_a = None
     mount_b = None
+    recovery_mount = None
 
     # Declarative test requirements: subclasses should override these to indicate
     # their special needs. If not met, tests will be skipped.
@@ -55,6 +56,9 @@ class CephFSTestCase(CephTestCase):
     # Whether to create the default filesystem during setUp
     REQUIRE_FILESYSTEM = True
 
+    # requires REQUIRE_FILESYSTEM = True
+    REQUIRE_RECOVERY_FILESYSTEM = False
+
     LOAD_SETTINGS = []
 
     def setUp(self):
@@ -158,6 +162,20 @@ class CephFSTestCase(CephTestCase):
                 self.mounts[i].mount()
                 self.mounts[i].wait_until_mounted()
 
+        if self.REQUIRE_RECOVERY_FILESYSTEM:
+            if not self.REQUIRE_FILESYSTEM:
+                raise case.SkipTest("Recovery filesystem requires a primary filesystem as well")
+            self.fs.mon_manager.raw_cluster_cmd('fs', 'flag', 'set',
+                                                'enable_multiple', 'true',
+                                                '--yes-i-really-mean-it')
+            self.recovery_fs = self.mds_cluster.newfs(name="recovery_fs", create=False)
+            self.recovery_fs.set_metadata_overlay(True)
+            self.recovery_fs.set_data_pool_name(self.fs.get_data_pool_name())
+            self.recovery_fs.create()
+            self.recovery_fs.getinfo(refresh=True)
+            self.recovery_fs.mds_restart()
+            self.recovery_fs.wait_for_daemons()
+
         # Load an config settings of interest
         for setting in self.LOAD_SETTINGS:
             setattr(self, setting, float(self.fs.mds_asok(
diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py
index f96b6c0afffd0..ad15e909c4504 100644
--- a/qa/tasks/cephfs/filesystem.py
+++ b/qa/tasks/cephfs/filesystem.py
@@ -259,6 +259,9 @@ class MDSCluster(CephCluster):
     def newfs(self, name='cephfs', create=True):
         return Filesystem(self._ctx, name=name, create=create)
 
+    def newfs_raw(self, name):
+        return Filesystem(self._ctx, create=None, name=name)
+
     def status(self):
         return FSStatus(self.mon_manager)
 
@@ -367,8 +370,9 @@ class Filesystem(MDSCluster):
 
         self.name = name
         self.id = None
-        self.name = None
         self.metadata_pool_name = None
+        self.metadata_overlay = False
+        self.data_pool_name = None
         self.data_pools = None
 
         client_list = list(misc.all_roles_of_type(self._ctx.cluster, 'client'))
@@ -411,7 +415,7 @@ class Filesystem(MDSCluster):
         return status
 
     def set_metadata_overlay(self, overlay):
-        if fscid is not None:
+        if self.id is not None:
             raise RuntimeError("cannot specify fscid when configuring overlay")
         self.metadata_overlay = overlay
 
@@ -444,7 +448,10 @@ class Filesystem(MDSCluster):
             self.name = "cephfs"
         if self.metadata_pool_name is None:
             self.metadata_pool_name = "{0}_metadata".format(self.name)
-        data_pool_name = "{0}_data".format(self.name)
+        if self.data_pool_name is None:
+            data_pool_name = "{0}_data".format(self.name)
+        else:
+            data_pool_name = self.data_pool_name
 
         log.info("Creating filesystem '{0}'".format(self.name))
 
@@ -452,10 +459,15 @@ class Filesystem(MDSCluster):
 
         self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                          self.metadata_pool_name, pgs_per_fs_pool.__str__())
-        self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
-                                         data_pool_name, pgs_per_fs_pool.__str__())
-        self.mon_manager.raw_cluster_cmd('fs', 'new',
-                                         self.name, self.metadata_pool_name, data_pool_name)
+        if self.metadata_overlay:
+            self.mon_manager.raw_cluster_cmd('fs', 'new',
+                                             self.name, self.metadata_pool_name, data_pool_name,
+                                             '--allow-dangerous-metadata-overlay')
+        else:
+            self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
+                                             data_pool_name, pgs_per_fs_pool.__str__())
+            self.mon_manager.raw_cluster_cmd('fs', 'new',
+                                             self.name, self.metadata_pool_name, data_pool_name)
         self.check_pool_application(self.metadata_pool_name)
         self.check_pool_application(data_pool_name)
         # Turn off spurious standby count warnings from modifying max_mds in tests.
@@ -568,6 +580,11 @@ class Filesystem(MDSCluster):
     def get_metadata_pool_name(self):
         return self.metadata_pool_name
 
+    def set_data_pool_name(self, name):
+        if self.id is not None:
+            raise RuntimeError("can't set filesystem name if its fscid is set")
+        self.data_pool_name = name
+
     def get_namespace_id(self):
         return self.id
 
-- 
2.39.5
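
Usage sketch (not part of the patch): the hooks above are consumed by declaring
REQUIRE_RECOVERY_FILESYSTEM on a CephFSTestCase subclass; setUp() then enables
multiple filesystems, creates "recovery_fs" as a metadata overlay sharing the
primary filesystem's data pool, and waits for its MDS. The class below is a
hypothetical illustration of that flow under those assumptions -- the class
name, test name, and assertions are invented for illustration and are not an
actual test from the tree.

# Hypothetical example (not part of this patch): a test case opting into the
# recovery filesystem wired up by CephFSTestCase.setUp().
from tasks.cephfs.cephfs_test_case import CephFSTestCase


class TestRecoveryPoolSketch(CephFSTestCase):
    MDSS_REQUIRED = 2       # one MDS for the primary fs, one for recovery_fs
    CLIENTS_REQUIRED = 1
    # Implies REQUIRE_FILESYSTEM = True; setUp() builds self.recovery_fs
    # as a metadata overlay on the primary data pool.
    REQUIRE_RECOVERY_FILESYSTEM = True

    def test_recovery_fs_has_separate_metadata_pool(self):
        # Write some data through the primary filesystem, then unmount.
        self.mount_a.write_n_mb("datafile", 8)
        self.mount_a.umount_wait()

        # The recovery filesystem reuses the primary data pool but has its
        # own metadata pool, so a recovery tool could rebuild metadata there
        # without modifying the original metadata pool in-place.
        self.assertEqual(self.recovery_fs.get_data_pool_name(),
                         self.fs.get_data_pool_name())
        self.assertNotEqual(self.recovery_fs.get_metadata_pool_name(),
                            self.fs.get_metadata_pool_name())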