From: Kamoltat
Date: Wed, 8 Dec 2021 15:15:50 +0000 (+0000)
Subject: qa: Added workunit test for noautoscale flag
X-Git-Tag: v16.2.8~228^2~1
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=021b0a4052e39d6e0280d1ec0cde524d4dcdd325;p=ceph.git

qa: Added workunit test for noautoscale flag

Set and unset the noautoscale flag and verify that the results are as
expected. Also verify that the flag is applied correctly when new
pools are created.

Signed-off-by: Kamoltat

(cherry picked from commit bb42c71e7e059be2cc4d1d4408e475b15b1c6340)

Conflicts:
	test-noautoscale-flag.yaml - modified pre-mgr-command to not
	create device health monitor
---

diff --git a/qa/suites/rados/singleton/all/test-noautoscale-flag.yaml b/qa/suites/rados/singleton/all/test-noautoscale-flag.yaml
new file mode 100644
index 0000000000000..23caa745d92bf
--- /dev/null
+++ b/qa/suites/rados/singleton/all/test-noautoscale-flag.yaml
@@ -0,0 +1,39 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - osd.3
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 4
+      size: 10 # GB
+overrides:
+  ceph:
+    create_rbd_pool: false
+    pre-mgr-commands:
+      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+    conf:
+      mon:
+        osd pool default pg autoscale mode: on
+    log-ignorelist:
+      - overall HEALTH_
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_
+      - \(PG_
+      - \(POOL_
+      - \(CACHE_POOL_
+      - \(OBJECT_
+      - \(SLOW_OPS\)
+      - \(REQUEST_SLOW\)
+      - \(TOO_FEW_PGS\)
+      - slow request
+tasks:
+- install:
+- ceph:
+- workunit:
+    clients:
+      all:
+        - mon/test_noautoscale_flag.sh
diff --git a/qa/workunits/mon/test_noautoscale_flag.sh b/qa/workunits/mon/test_noautoscale_flag.sh
new file mode 100755
index 0000000000000..ca86cdf22d387
--- /dev/null
+++ b/qa/workunits/mon/test_noautoscale_flag.sh
@@ -0,0 +1,83 @@
+#!/bin/bash -ex
+
+unset CEPH_CLI_TEST_DUP_COMMAND
+
+NUM_POOLS=$(ceph osd pool ls | wc -l)
+
+if [ "$NUM_POOLS" -gt 0 ]; then
+    echo "test requires no preexisting pools"
+    exit 1
+fi
+
+ceph osd pool set noautoscale
+
+ceph osd pool create pool_a
+
+echo 'pool_a autoscale_mode:' $(ceph osd pool autoscale-status | grep pool_a | grep -o -m 1 'on\|off')
+
+NUM_POOLS=$((NUM_POOLS+1))
+
+sleep 2
+
+# Count the number of pools with AUTOSCALE `off`
+
+RESULT1=$(ceph osd pool autoscale-status | grep -oe 'off' | wc -l)
+
+# The number of pools with AUTOSCALE `off` should equal $NUM_POOLS (1)
+
+test "$RESULT1" -eq "$NUM_POOLS"
+
+ceph osd pool unset noautoscale
+
+ceph osd pool get noautoscale
+
+
+ceph osd pool create pool_b
+
+echo 'pool_a autoscale_mode:' $(ceph osd pool autoscale-status | grep pool_a | grep -o -m 1 'on\|off')
+
+echo 'pool_b autoscale_mode:' $(ceph osd pool autoscale-status | grep pool_b | grep -o -m 1 'on\|off')
+
+
+NUM_POOLS=$((NUM_POOLS+1))
+
+sleep 2
+
+# Count the number of pools with AUTOSCALE `on`
+
+RESULT2=$(ceph osd pool autoscale-status | grep -oe 'on' | wc -l)
+
+# The number of pools with AUTOSCALE `on` should equal $NUM_POOLS (2)
+
+test "$RESULT2" -eq "$NUM_POOLS"
+
+ceph osd pool set noautoscale
+
+ceph osd pool create pool_c
+
+echo 'pool_a autoscale_mode:' $(ceph osd pool autoscale-status | grep pool_a | grep -o -m 1 'on\|off')
+
+echo 'pool_b autoscale_mode:' $(ceph osd pool autoscale-status | grep pool_b | grep -o -m 1 'on\|off')
+
+echo 'pool_c autoscale_mode:' $(ceph osd pool autoscale-status | grep pool_c | grep -o -m 1 'on\|off')
+
+
+NUM_POOLS=$((NUM_POOLS+1))
+
+sleep 2
+
+# Count the number of pools with AUTOSCALE `off`; the flag overrides the
+# per-pool mode, so all pools should now report `off`
+
+RESULT3=$(ceph osd pool autoscale-status | grep -oe 'off' | wc -l)
+
+# The number of pools with AUTOSCALE `off` should equal $NUM_POOLS (3)
+
+test "$RESULT3" -eq "$NUM_POOLS"
+
+ceph osd pool rm pool_a pool_a --yes-i-really-really-mean-it
+
+ceph osd pool rm pool_b pool_b --yes-i-really-really-mean-it
+
+ceph osd pool rm pool_c pool_c --yes-i-really-really-mean-it
+
+echo OK
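For manual verification outside teuthology, here is a minimal sketch of the
behavior the workunit asserts. It assumes a running cluster reachable via the
ceph CLI with the autoscaler's default mode set to on (as the mon config in
the suite yaml above arranges); the pool names demo_a and demo_b are
illustrative only:

    # With the flag set, a newly created pool should report autoscale off
    ceph osd pool set noautoscale
    ceph osd pool get noautoscale                 # prints current flag state
    ceph osd pool create demo_a
    ceph osd pool autoscale-status | grep demo_a  # AUTOSCALE column: off

    # With the flag unset, new pools follow the default mode again
    ceph osd pool unset noautoscale
    ceph osd pool create demo_b
    ceph osd pool autoscale-status | grep demo_b  # AUTOSCALE column: on

    # Clean up
    ceph osd pool rm demo_a demo_a --yes-i-really-really-mean-it
    ceph osd pool rm demo_b demo_b --yes-i-really-really-mean-it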