From 55d302ac8d0e5b9f55a55557e071f7e26ee88e7e Mon Sep 17 00:00:00 2001
From: Sage Weil
Date: Tue, 12 Nov 2019 22:13:22 +0000
Subject: [PATCH] qa/tasks/ceph2: use seed ceph.conf

Based on ceph.conf.template, but edited down a bit.

Signed-off-by: Sage Weil
---
 qa/tasks/ceph2.conf | 72 +++++++++++++++++++++++++++++++++++++++++++++
 qa/tasks/ceph2.py   |  7 +++--
 2 files changed, 76 insertions(+), 3 deletions(-)
 create mode 100644 qa/tasks/ceph2.conf

diff --git a/qa/tasks/ceph2.conf b/qa/tasks/ceph2.conf
new file mode 100644
index 00000000000..b840a0f4455
--- /dev/null
+++ b/qa/tasks/ceph2.conf
@@ -0,0 +1,72 @@
+[global]
+mon clock drift allowed = 1.000
+
+# replicate across OSDs, not hosts
+osd crush chooseleaf type = 0
+
+# enable some debugging
+auth debug = true
+ms die on old message = true
+ms die on bug = true
+debug asserts on shutdown = true
+
+# adjust warnings
+mon max pg per osd = 10000        # >= luminous
+mon pg warn max object skew = 0
+mon osd allow primary affinity = true
+mon osd allow pg remap = true
+mon warn on legacy crush tunables = false
+mon warn on crush straw calc version zero = false
+mon warn on no sortbitwise = false
+mon warn on osd down out interval zero = false
+mon warn on too few osds = false
+mon_warn_on_pool_pg_num_not_power_of_two = false
+
+# disable pg_autoscaler by default for new pools
+osd_pool_default_pg_autoscale_mode = off
+
+#osd pool default size = 2
+
+osd pool default erasure code profile = "plugin=jerasure technique=reed_sol_van k=2 m=1 ruleset-failure-domain=osd crush-failure-domain=osd"
+
+# tests delete pools
+mon allow pool delete = true
+
+mon cluster log file level = debug
+
+[osd]
+osd scrub load threshold = 5.0
+osd scrub max interval = 600
+
+osd recover clone overlap = true
+osd recovery max chunk = 1048576
+
+osd deep scrub update digest min age = 30
+
+osd map max advance = 10
+
+# debugging
+osd debug shutdown = true
+osd debug op order = true
+osd debug verify stray on activate = true
+osd debug pg log writeout = true
+bdev debug aio = true
+osd debug misdirected ops = true
+
+[mgr]
+mon reweight min pgs per osd = 4
+mon reweight min bytes per osd = 10
+mgr/telemetry/nag = false
+
+[mon]
+mon data avail warn = 5
+mon mgr mkfs grace = 240
+mon reweight min pgs per osd = 4
+mon osd reporter subtree level = osd
+mon osd prime pg temp = true
+mon reweight min bytes per osd = 10
+
+[client.rgw]
+rgw cache enabled = true
+rgw enable ops log = true
+rgw enable usage log = true
diff --git a/qa/tasks/ceph2.py b/qa/tasks/ceph2.py
index 0603827b097..a36de3c17f7 100644
--- a/qa/tasks/ceph2.py
+++ b/qa/tasks/ceph2.py
@@ -55,8 +55,8 @@ def shell(ctx, cluster_name, remote, args, **kwargs):
 def build_initial_config(ctx, config):
     cluster_name = config['cluster']
 
-    #path = os.path.join(os.path.dirname(__file__), 'ceph.conf.template')
-    conf = configobj.ConfigObj() #path, file_error=True)
+    path = os.path.join(os.path.dirname(__file__), 'ceph2.conf')
+    conf = configobj.ConfigObj(path, file_error=True)
     conf.setdefault('global', {})
     conf['global']['fsid'] = ctx.ceph[cluster_name].fsid
 
@@ -64,7 +64,7 @@ def build_initial_config(ctx, config):
     # overrides
     for section, keys in config['conf'].items():
         for key, value in keys.items():
-            log.info("[%s] %s = %s" % (section, key, value))
+            log.info(" override: [%s] %s = %s" % (section, key, value))
             if section not in conf:
                 conf[section] = {}
             conf[section][key] = value
@@ -249,6 +249,7 @@ def ceph_bootstrap(ctx, config):
             remote=bootstrap_remote,
             path='{}/seed.{}.conf'.format(testdir, cluster_name),
             data=conf_fp.getvalue())
+        log.debug('Final config:\n' + conf_fp.getvalue())
 
         # bootstrap
         log.info('Bootstrapping...')
-- 
2.39.5
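
A minimal sketch (not part of the patch) of the merge order this change enables in
build_initial_config(): the seed file is read with configobj, the fsid is set, and
per-job overrides from the teuthology config are layered on top. The inline seed
lines, fsid, and override values below are illustrative assumptions, not taken from
the ceph2 task itself.

    import configobj

    def merge_seed_and_overrides(seed_lines, fsid, overrides):
        # ConfigObj accepts a path or a list of lines; the real task passes the
        # path to ceph2.conf with file_error=True so a missing seed fails loudly.
        conf = configobj.ConfigObj(seed_lines)
        conf.setdefault('global', {})
        conf['global']['fsid'] = fsid
        # Job-level overrides always win over the seed defaults.
        for section, keys in overrides.items():
            conf.setdefault(section, {})
            for key, value in keys.items():
                conf[section][key] = value
        return conf

    seed = ['[global]', 'mon clock drift allowed = 1.000']
    conf = merge_seed_and_overrides(
        seed,
        fsid='00000000-0000-0000-0000-000000000000',
        overrides={'global': {'osd pool default size': 2}})
    # write() without a file argument returns the rendered config lines
    print('\n'.join(conf.write()))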