qa: use small default pg count for CephFS pools
author	Patrick Donnelly <pdonnell@redhat.com>
Wed, 9 Oct 2019 17:41:35 +0000 (10:41 -0700)
committer	Patrick Donnelly <pdonnell@redhat.com>
Mon, 14 Oct 2019 17:45:05 +0000 (10:45 -0700)
The PG count needs to be a power of two since
dff5697464edb9931d5dfa08cd4a30f85c1f237e.
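
As an aside (our illustration, not part of the commit), that constraint
amounts to a single-bit check; a minimal sketch in Python:

    def is_power_of_two(n):
        # A positive integer is a power of two iff exactly one bit is set.
        return n > 0 and (n & (n - 1)) == 0

    assert is_power_of_two(8)       # the new hard-coded default passes
    assert not is_power_of_two(90)  # a per-OSD product like 3 * 30 would not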

Also, mon_pg_warn_min_per_osd is now disabled by default (or set to a
low value in vstart/testing), so there's no need to base the PG count on
this value.

Ideally, someday we can remove this so that the cluster's default value is
used, but we need to keep it for deployments of older versions of Ceph.
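
In practice this means the QA code passes a fixed count straight through to
the monitor when creating a pool; a minimal, self-contained sketch (the pool
name and helper are assumptions for illustration, not from this commit):

    import subprocess

    PGS_PER_FS_POOL = 8  # hard-coded power of two, kept for Mimic/Nautilus

    def create_fs_pool(pool_name):
        # Equivalent in spirit to Filesystem.create()'s raw_cluster_cmd call.
        subprocess.check_call(['ceph', 'osd', 'pool', 'create',
                               pool_name, str(PGS_PER_FS_POOL)])

    # Example invocation (requires a running cluster):
    # create_fs_pool('cephfs_metadata')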

Fixes: https://tracker.ceph.com/issues/42228
Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
qa/tasks/cephfs/filesystem.py
qa/tasks/cephfs/test_data_scan.py
qa/tasks/cephfs/test_misc.py
qa/tasks/vstart_runner.py

index 8d72ba380d71e751f9e6fb38aecf681440ea966e..899fa38b3b96c3a9042f77b4e2c72dcd2f716838 100644 (file)
@@ -515,16 +515,9 @@ class Filesystem(MDSCluster):
     def set_allow_new_snaps(self, yes):
         self.set_var("allow_new_snaps", str(yes).lower(), '--yes-i-really-mean-it')
 
-    def get_pgs_per_fs_pool(self):
-        """
-        Calculate how many PGs to use when creating a pool, in order to avoid raising any
-        health warnings about mon_pg_warn_min_per_osd
-
-        :return: an integer number of PGs
-        """
-        pg_warn_min_per_osd = int(self.get_config('mon_pg_warn_min_per_osd'))
-        osd_count = len(list(misc.all_roles_of_type(self._ctx.cluster, 'osd')))
-        return pg_warn_min_per_osd * osd_count
+    # In Octopus+, the PG count can be omitted to use the default. We keep the
+    # hard-coded value for deployments of Mimic/Nautilus.
+    pgs_per_fs_pool = 8
 
     def create(self):
         if self.name is None:
@@ -538,10 +531,8 @@ class Filesystem(MDSCluster):
 
         log.info("Creating filesystem '{0}'".format(self.name))
 
-        pgs_per_fs_pool = self.get_pgs_per_fs_pool()
-
         self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
-                                         self.metadata_pool_name, pgs_per_fs_pool.__str__())
+                                         self.metadata_pool_name, self.pgs_per_fs_pool.__str__())
         if self.metadata_overlay:
             self.mon_manager.raw_cluster_cmd('fs', 'new',
                                              self.name, self.metadata_pool_name, data_pool_name,
@@ -554,7 +545,7 @@ class Filesystem(MDSCluster):
                 self.mon_manager.raw_cluster_cmd(*cmd)
                 self.mon_manager.raw_cluster_cmd(
                     'osd', 'pool', 'create',
-                    data_pool_name, pgs_per_fs_pool.__str__(), 'erasure',
+                    data_pool_name, self.pgs_per_fs_pool.__str__(), 'erasure',
                     data_pool_name)
                 self.mon_manager.raw_cluster_cmd(
                     'osd', 'pool', 'set',
@@ -562,7 +553,7 @@ class Filesystem(MDSCluster):
             else:
                 self.mon_manager.raw_cluster_cmd(
                     'osd', 'pool', 'create',
-                    data_pool_name, pgs_per_fs_pool.__str__())
+                    data_pool_name, self.pgs_per_fs_pool.__str__())
             self.mon_manager.raw_cluster_cmd('fs', 'new',
                                              self.name, self.metadata_pool_name, data_pool_name)
         self.check_pool_application(self.metadata_pool_name)
@@ -634,7 +625,7 @@ class Filesystem(MDSCluster):
         return self.get_mds_map(status=status)[var]
 
     def add_data_pool(self, name):
-        self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', name, self.get_pgs_per_fs_pool().__str__())
+        self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', name, self.pgs_per_fs_pool.__str__())
         self.mon_manager.raw_cluster_cmd('fs', 'add_data_pool', self.name, name)
         self.get_pool_names(refresh = True)
         for poolid, fs_name in self.data_pools.items():
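
Since pgs_per_fs_pool is now a plain class attribute rather than a method, a
subclass that needed a different count could still shadow it through normal
Python attribute lookup; a hypothetical sketch (no such subclass exists in
this commit):

    class Filesystem:
        pgs_per_fs_pool = 8  # default, as set above

    class LegacyFilesystem(Filesystem):
        pgs_per_fs_pool = 16  # still a power of two; shadows the parent value

    assert Filesystem.pgs_per_fs_pool == 8
    assert LegacyFilesystem.pgs_per_fs_pool == 16
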
index 6e1e23063bc56d81cc42e92f2d9f9b37001fab7c..fe099838e38e15f0b1445716c673be13f4d61db2 100644 (file)
@@ -534,7 +534,7 @@ class TestDataScan(CephFSTestCase):
             pgs_to_files[pgid].append(file_path)
             log.info("{0}: {1}".format(file_path, pgid))
 
-        pg_count = self.fs.get_pgs_per_fs_pool()
+        pg_count = self.fs.pgs_per_fs_pool
         for pg_n in range(0, pg_count):
             pg_str = "{0}.{1}".format(self.fs.get_data_pool_id(), pg_n)
             out = self.fs.data_scan(["pg_files", "mydir", pg_str])
index ce6dab253faebf4bd42e3b3c6bec8ba4997192a9..cca9bb617d42186c1cd535dc97a3991bc23b2b2c 100644 (file)
@@ -68,7 +68,7 @@ class TestMisc(CephFSTestCase):
                                             '--yes-i-really-really-mean-it')
         self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                             self.fs.metadata_pool_name,
-                                            self.fs.get_pgs_per_fs_pool().__str__())
+                                            self.fs.pgs_per_fs_pool.__str__())
 
         dummyfile = '/etc/fstab'
 
@@ -105,7 +105,7 @@ class TestMisc(CephFSTestCase):
                                             '--yes-i-really-really-mean-it')
         self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                             self.fs.metadata_pool_name,
-                                            self.fs.get_pgs_per_fs_pool().__str__())
+                                            self.fs.pgs_per_fs_pool.__str__())
         self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name,
                                             self.fs.metadata_pool_name,
                                             data_pool_name)
index 874b86d9a463d711043a97851056c9d15ce62fca..b5b07f8cefa656470d30f1a1ac5204233b863ea6 100644 (file)
@@ -1078,10 +1078,6 @@ class LocalFilesystem(Filesystem, LocalMDSCluster):
     def set_clients_block(self, blocked, mds_id=None):
         raise NotImplementedError()
 
-    def get_pgs_per_fs_pool(self):
-        # FIXME: assuming there are 3 OSDs
-        return 3 * int(self.get_config('mon_pg_warn_min_per_osd'))
-
 
 class InteractiveFailureResult(unittest.TextTestResult):
     """