From 4a6d19dae296969954e5101e9bd53443fddde03d Mon Sep 17 00:00:00 2001 From: Dimitri Savineau Date: Wed, 4 Dec 2019 12:12:05 -0500 Subject: [PATCH] tests: reduce max_mds from 3 to 2 Having the max_mds value equal to the number of mds nodes generates a warning in the ceph cluster status: cluster: id: 6d3e49a4-ab4d-4e03-a7d6-58913b8ec00a health: HEALTH_WARN insufficient standby MDS daemons available (...) services: mds: cephfs:3 {0=mds1=up:active,1=mds0=up:active,2=mds2=up:active} Let's use 2 active and 1 standby mds. Signed-off-by: Dimitri Savineau --- tests/functional/all_daemons/container/group_vars/all | 2 +- tests/functional/all_daemons/group_vars/all | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/functional/all_daemons/container/group_vars/all b/tests/functional/all_daemons/container/group_vars/all index 82d16c488..5b03aa16a 100644 --- a/tests/functional/all_daemons/container/group_vars/all +++ b/tests/functional/all_daemons/container/group_vars/all @@ -41,4 +41,4 @@ openstack_pools: docker_pull_timeout: 600s handler_health_mon_check_delay: 10 handler_health_osd_check_delay: 10 -mds_max_mds: 3 \ No newline at end of file +mds_max_mds: 2 \ No newline at end of file diff --git a/tests/functional/all_daemons/group_vars/all b/tests/functional/all_daemons/group_vars/all index aa3d941ff..a6779998d 100644 --- a/tests/functional/all_daemons/group_vars/all +++ b/tests/functional/all_daemons/group_vars/all @@ -34,4 +34,4 @@ openstack_pools: - "{{ openstack_cinder_pool }}" handler_health_mon_check_delay: 10 handler_health_osd_check_delay: 10 -mds_max_mds: 3 +mds_max_mds: 2 -- 2.39.5