From fc88e6c6c55402120a432ea47f05f321ba4c9bb1 Mon Sep 17 00:00:00 2001
From: Patrick Donnelly
Date: Wed, 9 Oct 2019 10:41:35 -0700
Subject: [PATCH] qa: use small default pg count for CephFS pools

The pg count needs to be a power-of-two since
dff5697464edb9931d5dfa08cd4a30f85c1f237e.

Also, mon_pg_warn_min_per_osd is disabled by default now (or set to a
low value in vstart/testing) so there's no need to base the pg count on
this value.

Ideally someday we can remove this so that the default cluster value is
used but we need to keep this for deployments of older versions of
Ceph.

Fixes: https://tracker.ceph.com/issues/42228
Signed-off-by: Patrick Donnelly
---
 qa/tasks/cephfs/filesystem.py     | 23 +++++++----------------
 qa/tasks/cephfs/test_data_scan.py |  2 +-
 qa/tasks/cephfs/test_misc.py      |  4 ++--
 qa/tasks/vstart_runner.py         |  4 ----
 4 files changed, 10 insertions(+), 23 deletions(-)

diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py
index 8d72ba380d7..899fa38b3b9 100644
--- a/qa/tasks/cephfs/filesystem.py
+++ b/qa/tasks/cephfs/filesystem.py
@@ -515,16 +515,9 @@ class Filesystem(MDSCluster):
     def set_allow_new_snaps(self, yes):
         self.set_var("allow_new_snaps", str(yes).lower(), '--yes-i-really-mean-it')
 
-    def get_pgs_per_fs_pool(self):
-        """
-        Calculate how many PGs to use when creating a pool, in order to avoid raising any
-        health warnings about mon_pg_warn_min_per_osd
-
-        :return: an integer number of PGs
-        """
-        pg_warn_min_per_osd = int(self.get_config('mon_pg_warn_min_per_osd'))
-        osd_count = len(list(misc.all_roles_of_type(self._ctx.cluster, 'osd')))
-        return pg_warn_min_per_osd * osd_count
+    # In Octopus+, the PG count can be omitted to use the default. We keep the
+    # hard-coded value for deployments of Mimic/Nautilus.
+    pgs_per_fs_pool = 8
 
     def create(self):
         if self.name is None:
@@ -538,10 +531,8 @@ class Filesystem(MDSCluster):
 
         log.info("Creating filesystem '{0}'".format(self.name))
 
-        pgs_per_fs_pool = self.get_pgs_per_fs_pool()
-
         self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
-                                         self.metadata_pool_name, pgs_per_fs_pool.__str__())
+                                         self.metadata_pool_name, self.pgs_per_fs_pool.__str__())
         if self.metadata_overlay:
             self.mon_manager.raw_cluster_cmd('fs', 'new',
                                              self.name, self.metadata_pool_name, data_pool_name,
@@ -554,7 +545,7 @@ class Filesystem(MDSCluster):
                 self.mon_manager.raw_cluster_cmd(*cmd)
                 self.mon_manager.raw_cluster_cmd(
                     'osd', 'pool', 'create',
-                    data_pool_name, pgs_per_fs_pool.__str__(), 'erasure',
+                    data_pool_name, self.pgs_per_fs_pool.__str__(), 'erasure',
                     data_pool_name)
                 self.mon_manager.raw_cluster_cmd(
                     'osd', 'pool', 'set',
@@ -562,7 +553,7 @@ class Filesystem(MDSCluster):
             else:
                 self.mon_manager.raw_cluster_cmd(
                     'osd', 'pool', 'create',
-                    data_pool_name, pgs_per_fs_pool.__str__())
+                    data_pool_name, self.pgs_per_fs_pool.__str__())
             self.mon_manager.raw_cluster_cmd('fs', 'new',
                                              self.name, self.metadata_pool_name, data_pool_name)
         self.check_pool_application(self.metadata_pool_name)
@@ -634,7 +625,7 @@ class Filesystem(MDSCluster):
         return self.get_mds_map(status=status)[var]
 
     def add_data_pool(self, name):
-        self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', name, self.get_pgs_per_fs_pool().__str__())
+        self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', name, self.pgs_per_fs_pool.__str__())
         self.mon_manager.raw_cluster_cmd('fs', 'add_data_pool', self.name, name)
         self.get_pool_names(refresh = True)
         for poolid, fs_name in self.data_pools.items():
diff --git a/qa/tasks/cephfs/test_data_scan.py b/qa/tasks/cephfs/test_data_scan.py
index 6e1e23063bc..fe099838e38 100644
--- a/qa/tasks/cephfs/test_data_scan.py
+++ b/qa/tasks/cephfs/test_data_scan.py
@@ -534,7 +534,7 @@ class TestDataScan(CephFSTestCase):
                 pgs_to_files[pgid].append(file_path)
                 log.info("{0}: {1}".format(file_path, pgid))
 
-        pg_count = self.fs.get_pgs_per_fs_pool()
+        pg_count = self.fs.pgs_per_fs_pool
         for pg_n in range(0, pg_count):
             pg_str = "{0}.{1}".format(self.fs.get_data_pool_id(), pg_n)
             out = self.fs.data_scan(["pg_files", "mydir", pg_str])
diff --git a/qa/tasks/cephfs/test_misc.py b/qa/tasks/cephfs/test_misc.py
index ce6dab253fa..cca9bb617d4 100644
--- a/qa/tasks/cephfs/test_misc.py
+++ b/qa/tasks/cephfs/test_misc.py
@@ -68,7 +68,7 @@ class TestMisc(CephFSTestCase):
                                             '--yes-i-really-really-mean-it')
         self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                             self.fs.metadata_pool_name,
-                                            self.fs.get_pgs_per_fs_pool().__str__())
+                                            self.fs.pgs_per_fs_pool.__str__())
 
         dummyfile = '/etc/fstab'
 
@@ -105,7 +105,7 @@ class TestMisc(CephFSTestCase):
                                             '--yes-i-really-really-mean-it')
         self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                             self.fs.metadata_pool_name,
-                                            self.fs.get_pgs_per_fs_pool().__str__())
+                                            self.fs.pgs_per_fs_pool.__str__())
         self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                             self.fs.metadata_pool_name,
                                             data_pool_name)
diff --git a/qa/tasks/vstart_runner.py b/qa/tasks/vstart_runner.py
index 874b86d9a46..b5b07f8cefa 100644
--- a/qa/tasks/vstart_runner.py
+++ b/qa/tasks/vstart_runner.py
@@ -1078,10 +1078,6 @@ class LocalFilesystem(Filesystem, LocalMDSCluster):
     def set_clients_block(self, blocked, mds_id=None):
        raise NotImplementedError()
 
-    def get_pgs_per_fs_pool(self):
-        # FIXME: assuming there are 3 OSDs
-        return 3 * int(self.get_config('mon_pg_warn_min_per_osd'))
-
 
 class InteractiveFailureResult(unittest.TextTestResult):
     """
-- 
2.39.5
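
For context, a minimal sketch (not part of the patch) of the pool-creation commands the QA code drives, using the fixed power-of-two PG count rather than the old mon_pg_warn_min_per_osd * OSD-count formula. The run_ceph() helper and pool names are illustrative assumptions; the real tests issue these commands through mon_manager.raw_cluster_cmd() as shown in the diff.

    # Illustrative only; mirrors the commands exercised by the patched QA code.
    import subprocess

    PGS_PER_FS_POOL = 8  # small power of two, per the commit message above

    def run_ceph(*args):
        # Stand-in for the QA framework's raw_cluster_cmd(); assumes a
        # reachable test cluster with the `ceph` CLI on PATH.
        subprocess.check_call(["ceph"] + list(args))

    def create_fs_pools(fs_name):
        # The removed helper returned mon_pg_warn_min_per_osd * osd_count,
        # which is generally not a power of two; a small constant is used now.
        run_ceph("osd", "pool", "create", "{0}_metadata".format(fs_name), str(PGS_PER_FS_POOL))
        run_ceph("osd", "pool", "create", "{0}_data".format(fs_name), str(PGS_PER_FS_POOL))
        run_ceph("fs", "new", fs_name, "{0}_metadata".format(fs_name), "{0}_data".format(fs_name))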