From 04aefcb442c77c1f62b049b812a9e436189878b6 Mon Sep 17 00:00:00 2001
From: Patrick Donnelly
Date: Wed, 9 Oct 2019 10:41:35 -0700
Subject: [PATCH] qa: use small default pg count for CephFS pools

The pg count needs to be a power-of-two since
dff5697464edb9931d5dfa08cd4a30f85c1f237e.

Also, mon_pg_warn_min_per_osd is disabled by default now (or set to a
low value in vstart/testing) so there's no need to base the pg count on
this value.

Ideally someday we can remove this so that the default cluster value is
used, but we need to keep this for deployments of older versions of
Ceph.

Fixes: https://tracker.ceph.com/issues/42228
Signed-off-by: Patrick Donnelly
(cherry picked from commit fc88e6c6c55402120a432ea47f05f321ba4c9bb1)
---
 qa/tasks/cephfs/filesystem.py     | 23 +++++++----------------
 qa/tasks/cephfs/test_data_scan.py |  2 +-
 qa/tasks/cephfs/test_misc.py      |  4 ++--
 qa/tasks/vstart_runner.py         |  4 ----
 4 files changed, 10 insertions(+), 23 deletions(-)

diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py
index 3cdcee054b64e..7a8c243b0cd33 100644
--- a/qa/tasks/cephfs/filesystem.py
+++ b/qa/tasks/cephfs/filesystem.py
@@ -514,16 +514,9 @@ class Filesystem(MDSCluster):
     def set_allow_new_snaps(self, yes):
         self.set_var("allow_new_snaps", str(yes).lower(), '--yes-i-really-mean-it')
 
-    def get_pgs_per_fs_pool(self):
-        """
-        Calculate how many PGs to use when creating a pool, in order to avoid raising any
-        health warnings about mon_pg_warn_min_per_osd
-
-        :return: an integer number of PGs
-        """
-        pg_warn_min_per_osd = int(self.get_config('mon_pg_warn_min_per_osd'))
-        osd_count = len(list(misc.all_roles_of_type(self._ctx.cluster, 'osd')))
-        return pg_warn_min_per_osd * osd_count
+    # In Octopus+, the PG count can be omitted to use the default. We keep the
+    # hard-coded value for deployments of Mimic/Nautilus.
+    pgs_per_fs_pool = 8
 
     def create(self):
         if self.name is None:
@@ -537,10 +530,8 @@ class Filesystem(MDSCluster):
 
         log.info("Creating filesystem '{0}'".format(self.name))
 
-        pgs_per_fs_pool = self.get_pgs_per_fs_pool()
-
         self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
-                                         self.metadata_pool_name, pgs_per_fs_pool.__str__())
+                                         self.metadata_pool_name, self.pgs_per_fs_pool.__str__())
         if self.metadata_overlay:
             self.mon_manager.raw_cluster_cmd('fs', 'new',
                                              self.name, self.metadata_pool_name, data_pool_name,
@@ -553,7 +544,7 @@ class Filesystem(MDSCluster):
                 self.mon_manager.raw_cluster_cmd(*cmd)
                 self.mon_manager.raw_cluster_cmd(
                     'osd', 'pool', 'create',
-                    data_pool_name, pgs_per_fs_pool.__str__(), 'erasure',
+                    data_pool_name, self.pgs_per_fs_pool.__str__(), 'erasure',
                     data_pool_name)
                 self.mon_manager.raw_cluster_cmd(
                     'osd', 'pool', 'set',
@@ -561,7 +552,7 @@ class Filesystem(MDSCluster):
             else:
                 self.mon_manager.raw_cluster_cmd(
                     'osd', 'pool', 'create',
-                    data_pool_name, pgs_per_fs_pool.__str__())
+                    data_pool_name, self.pgs_per_fs_pool.__str__())
                 self.mon_manager.raw_cluster_cmd('fs', 'new',
                                                  self.name,
                                                  self.metadata_pool_name,
@@ -641,7 +632,7 @@ class Filesystem(MDSCluster):
 
     def add_data_pool(self, name, create=True):
         if create:
-            self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', name, self.get_pgs_per_fs_pool().__str__())
+            self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', name, self.pgs_per_fs_pool.__str__())
         self.mon_manager.raw_cluster_cmd('fs', 'add_data_pool', self.name, name)
         self.get_pool_names(refresh = True)
         for poolid, fs_name in self.data_pools.items():
diff --git a/qa/tasks/cephfs/test_data_scan.py b/qa/tasks/cephfs/test_data_scan.py
index 6e1e23063bc56..fe099838e38e1 100644
--- a/qa/tasks/cephfs/test_data_scan.py
+++ b/qa/tasks/cephfs/test_data_scan.py
@@ -534,7 +534,7 @@ class TestDataScan(CephFSTestCase):
             pgs_to_files[pgid].append(file_path)
             log.info("{0}: {1}".format(file_path, pgid))
 
-        pg_count = self.fs.get_pgs_per_fs_pool()
+        pg_count = self.fs.pgs_per_fs_pool
         for pg_n in range(0, pg_count):
             pg_str = "{0}.{1}".format(self.fs.get_data_pool_id(), pg_n)
             out = self.fs.data_scan(["pg_files", "mydir", pg_str])
diff --git a/qa/tasks/cephfs/test_misc.py b/qa/tasks/cephfs/test_misc.py
index 2fc9ed53d14f1..6c4eab82157bf 100644
--- a/qa/tasks/cephfs/test_misc.py
+++ b/qa/tasks/cephfs/test_misc.py
@@ -69,7 +69,7 @@ class TestMisc(CephFSTestCase):
                                             '--yes-i-really-really-mean-it')
         self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                             self.fs.metadata_pool_name,
-                                            self.fs.get_pgs_per_fs_pool().__str__())
+                                            self.fs.pgs_per_fs_pool.__str__())
 
         dummyfile = '/etc/fstab'
 
@@ -106,7 +106,7 @@ class TestMisc(CephFSTestCase):
                                             '--yes-i-really-really-mean-it')
         self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                             self.fs.metadata_pool_name,
-                                            self.fs.get_pgs_per_fs_pool().__str__())
+                                            self.fs.pgs_per_fs_pool.__str__())
         self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                             self.fs.metadata_pool_name,
                                             data_pool_name)
diff --git a/qa/tasks/vstart_runner.py b/qa/tasks/vstart_runner.py
index 808b7e5cc322f..419ab53322400 100644
--- a/qa/tasks/vstart_runner.py
+++ b/qa/tasks/vstart_runner.py
@@ -777,10 +777,6 @@ class LocalFilesystem(Filesystem, LocalMDSCluster):
     def set_clients_block(self, blocked, mds_id=None):
         raise NotImplementedError()
 
-    def get_pgs_per_fs_pool(self):
-        # FIXME: assuming there are 3 OSDs
-        return 3 * int(self.get_config('mon_pg_warn_min_per_osd'))
-
 
 class InteractiveFailureResult(unittest.TextTestResult):
     """
-- 
2.39.5
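
Reviewer note, not part of the patch above: the power-of-two requirement cited
in the commit message is the constraint the new hard-coded default of 8
satisfies, whereas the old mon_pg_warn_min_per_osd * osd_count product
generally did not. A minimal Python sketch of that check follows; the
is_power_of_two helper is hypothetical and does not exist in the Ceph tree.

# Sketch only (hypothetical helper), assuming the power-of-two rule stated in
# the commit message above.
def is_power_of_two(pg_num):
    """Return True if pg_num is a positive power of two."""
    return pg_num > 0 and (pg_num & (pg_num - 1)) == 0

assert is_power_of_two(8)           # the new Filesystem.pgs_per_fs_pool default
assert not is_power_of_two(3 * 10)  # e.g. an osd_count-based product usually is not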