qa: do not check pg count for new data_isolated volume
author    Patrick Donnelly <pdonnell@redhat.com>
          Wed, 23 Oct 2019 18:11:49 +0000 (11:11 -0700)
committer Patrick Donnelly <pdonnell@redhat.com>
          Wed, 23 Oct 2019 18:14:28 +0000 (11:14 -0700)
We no longer need to specify the number of PGs for a new data pool,
since b1b821f60812dd36e61be115c5ad8223ab51befe and other related
changes, and the related health warnings are deprecated/gone. So the
test no longer needs to set a PG budget or check the created pool's
PG count.

Fixes: b1b821f60812dd36e61be115c5ad8223ab51befe
Fixes: https://tracker.ceph.com/issues/42436
Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
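
For context on the claim above: on recent clusters the pg_num argument to pool
creation is optional and the cluster (with the pg_autoscaler) picks and adjusts
the value itself. A minimal sketch, not part of this commit, assuming a running
cluster with the `ceph` CLI on PATH; the pool name "demo_isolated_data" is
hypothetical:

# Sketch only: create a pool without specifying pg_num and read back the
# value the cluster chose.
import json
import subprocess

POOL = "demo_isolated_data"  # hypothetical pool name

# No pg_num argument: the cluster picks one (and the autoscaler may adjust it).
subprocess.check_call(["ceph", "osd", "pool", "create", POOL])

# Read back what was chosen for pg_num.
out = subprocess.check_output(
    ["ceph", "osd", "pool", "get", POOL, "pg_num", "--format=json"])
print("pg_num:", json.loads(out)["pg_num"])
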
diff --git a/qa/tasks/cephfs/test_volume_client.py b/qa/tasks/cephfs/test_volume_client.py
index 817e50c58442ecbed2d91d99ba982cfbd45a0e04..d33223b2bc962806e2272dcae17412fbf120761f 100644
--- a/qa/tasks/cephfs/test_volume_client.py
+++ b/qa/tasks/cephfs/test_volume_client.py
@@ -361,41 +361,16 @@ vc.disconnect()
         :return:
         """
 
-        # Because the teuthology config template sets mon_max_pg_per_osd to
-        # 10000 (i.e. it just tries to ignore health warnings), reset it to something
-        # sane before using volume_client, to avoid creating pools with absurdly large
-        # numbers of PGs.
-        self.set_conf("global", "mon max pg per osd", "300")
-        for mon_daemon_state in self.ctx.daemons.iter_daemons_of_role('mon'):
-            mon_daemon_state.restart()
-
         self.mount_b.umount_wait()
         self._configure_vc_auth(self.mount_b, "manila")
 
-        # Calculate how many PGs we'll expect the new volume pool to have
-        osd_map = json.loads(self.fs.mon_manager.raw_cluster_cmd('osd', 'dump', '--format=json-pretty'))
-        max_per_osd = int(self.fs.get_config('mon_max_pg_per_osd'))
-        osd_count = len(osd_map['osds'])
-        max_overall = osd_count * max_per_osd
-
-        existing_pg_count = 0
-        for p in osd_map['pools']:
-            existing_pg_count += p['pg_num']
-
-        expected_pg_num = (max_overall - existing_pg_count) / 10
-        log.info("max_per_osd {0}".format(max_per_osd))
-        log.info("osd_count {0}".format(osd_count))
-        log.info("max_overall {0}".format(max_overall))
-        log.info("existing_pg_count {0}".format(existing_pg_count))
-        log.info("expected_pg_num {0}".format(expected_pg_num))
-
         pools_a = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']
 
         group_id = "grpid"
         volume_id = "volid"
         self._volume_client_python(self.mount_b, dedent("""
             vp = VolumePath("{group_id}", "{volume_id}")
-            vc.create_volume(vp, 10, data_isolated=True)
+            vc.create_volume(vp, data_isolated=True)
         """.format(
             group_id=group_id,
             volume_id=volume_id,
@@ -407,12 +382,6 @@ vc.disconnect()
         new_pools = set(p['pool_name'] for p in pools_b) - set([p['pool_name'] for p in pools_a])
         self.assertEqual(len(new_pools), 1)
 
-        # It should have followed the heuristic for PG count
-        # (this is an overly strict test condition, so we may want to remove
-        #  it at some point as/when the logic gets fancier)
-        created_pg_num = self.fs.mon_manager.get_pool_int_property(list(new_pools)[0], "pg_num")
-        self.assertEqual(expected_pg_num, created_pg_num)
-
     def test_15303(self):
         """
         Reproducer for #15303 "Client holds incorrect complete flag on dir
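
If a lighter sanity check on the new pool were still wanted after this change,
something along these lines could sit where the removed assertion was, without
pinning an exact PG count. A sketch only, reusing helpers visible in the diff
above (new_pools, get_pool_int_property); it is not part of this commit:

# Sketch only (not part of this commit): inside the same test, after the
# new_pools assertion, confirm the isolated pool got a positive pg_num
# without asserting the old heuristic value.
created_pg_num = self.fs.mon_manager.get_pool_int_property(
    list(new_pools)[0], "pg_num")
self.assertGreater(created_pg_num, 0)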