From: Sage Weil
Date: Thu, 14 Sep 2017 20:00:31 +0000 (-0400)
Subject: mon: rename mon_pg_warn_max_per_osd -> mon_max_pg_per_osd
X-Git-Tag: v12.2.2~175^2~1
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=7320ee7293a34e4ea0ab5254373f51b93249b91d;p=ceph.git

mon: rename mon_pg_warn_max_per_osd -> mon_max_pg_per_osd

Signed-off-by: Sage Weil
(cherry picked from commit 986b86fbebf9e06f9f841da8ded0bedb310fc69b)
---

diff --git a/PendingReleaseNotes b/PendingReleaseNotes
index 4ab5301cc22f..b46d1dce1009 100644
--- a/PendingReleaseNotes
+++ b/PendingReleaseNotes
@@ -31,10 +31,10 @@
 * The maximum number of PGs per OSD before the monitor issues a
   warning has been reduced from 300 to 200 PGs. 200 is still twice
   the generally recommended target of 100 PGs per OSD. This limit can
-  be adjusted via the ``mon_pg_warn_max_per_osd`` option on the
-  monitors.
+  be adjusted via the ``mon_max_pg_per_osd`` option on the
+  monitors. The older ``mon_pg_warn_max_per_osd`` option has been removed.
 
 * Creating pools or adjusting pg_num will now fail if the change would
   make the number of PGs per OSD exceed the configured
-  ``mon_pg_warn_max_per_osd`` limit. The option can be adjusted if it
+  ``mon_max_pg_per_osd`` limit. The option can be adjusted if it
   is really necessary to create a pool with more PGs.
diff --git a/qa/tasks/cephfs/test_volume_client.py b/qa/tasks/cephfs/test_volume_client.py
index 65dc9a9eb856..43bd5c6a4cd1 100644
--- a/qa/tasks/cephfs/test_volume_client.py
+++ b/qa/tasks/cephfs/test_volume_client.py
@@ -355,11 +355,11 @@ vc.disconnect()
         :return:
         """
 
-        # Because the teuthology config template sets mon_pg_warn_max_per_osd to
+        # Because the teuthology config template sets mon_max_pg_per_osd to
         # 10000 (i.e. it just tries to ignore health warnings), reset it to something
         # sane before using volume_client, to avoid creating pools with absurdly large
         # numbers of PGs.
-        self.set_conf("global", "mon pg warn max per osd", "300")
+        self.set_conf("global", "mon max pg per osd", "300")
         for mon_daemon_state in self.ctx.daemons.iter_daemons_of_role('mon'):
             mon_daemon_state.restart()
 
@@ -368,7 +368,7 @@ vc.disconnect()
 
         # Calculate how many PGs we'll expect the new volume pool to have
         osd_map = json.loads(self.fs.mon_manager.raw_cluster_cmd('osd', 'dump', '--format=json-pretty'))
-        max_per_osd = int(self.fs.get_config('mon_pg_warn_max_per_osd'))
+        max_per_osd = int(self.fs.get_config('mon_max_pg_per_osd'))
         osd_count = len(osd_map['osds'])
         max_overall = osd_count * max_per_osd
diff --git a/src/common/legacy_config_opts.h b/src/common/legacy_config_opts.h
index cb6b406bb12e..f11a648d4727 100644
--- a/src/common/legacy_config_opts.h
+++ b/src/common/legacy_config_opts.h
@@ -240,7 +240,6 @@ OPTION(mon_timecheck_skew_interval, OPT_FLOAT) // on leader, timecheck (clock dr
 OPTION(mon_pg_stuck_threshold, OPT_INT) // number of seconds after which pgs can be considered stuck inactive, unclean, etc (see doc/control.rst under dump_stuck for more info)
 OPTION(mon_pg_min_inactive, OPT_U64) // the number of PGs which have to be inactive longer than 'mon_pg_stuck_threshold' before health goes into ERR. 0 means disabled, never go into ERR.
 OPTION(mon_pg_warn_min_per_osd, OPT_INT) // min # pgs per (in) osd before we warn the admin
-OPTION(mon_pg_warn_max_per_osd, OPT_INT) // max # pgs per (in) osd before we warn the admin
 OPTION(mon_pg_warn_max_object_skew, OPT_FLOAT) // max skew few average in objects per pg
 OPTION(mon_pg_warn_min_objects, OPT_INT) // do not warn below this object #
 OPTION(mon_pg_warn_min_pool_objects, OPT_INT) // do not warn on pools below this object #
diff --git a/src/common/options.cc b/src/common/options.cc
index c126b82c8816..a3592c0de161 100644
--- a/src/common/options.cc
+++ b/src/common/options.cc
@@ -1015,9 +1015,9 @@ std::vector
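
Usage sketch (not part of the patch): after this rename, the limit is configured under the new name. The snippet below only illustrates the ceph.conf form of the option; the value 300 is borrowed from the test change above, not a recommendation, and mon_max_pg_per_osd is the only identifier taken from this commit.

    [global]
        # replaces the removed mon_pg_warn_max_per_osd setting
        mon max pg per osd = 300

On a running cluster the same value can presumably be applied with an injectargs call such as ceph tell mon.* injectargs '--mon_max_pg_per_osd 300', though whether the new limit takes effect without a monitor restart is not established by this commit.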