From: Sage Weil
Date: Fri, 7 Dec 2018 23:46:59 +0000 (-0600)
Subject: qa/suites/rados: add simple pg-autoscaler test
X-Git-Tag: v14.1.0~582^2~5
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=2cd1ca6625301315d91173827d8d44c30a3c40e2;p=ceph-ci.git

qa/suites/rados: add simple pg-autoscaler test

Signed-off-by: Sage Weil
---

diff --git a/qa/suites/rados/singleton/all/pg-autoscaler.yaml b/qa/suites/rados/singleton/all/pg-autoscaler.yaml
new file mode 100644
index 00000000000..ca4d5317ed5
--- /dev/null
+++ b/qa/suites/rados/singleton/all/pg-autoscaler.yaml
@@ -0,0 +1,26 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - osd.3
+  - client.0
+- - mon.b
+  - mon.c
+  - osd.4
+  - osd.5
+  - osd.6
+  - osd.7
+openstack:
+  - volumes: # attached to each instance
+      count: 4
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    create_rbd_pool: false
+- workunit:
+    clients:
+      all:
+        - mon/pg_autoscaler.sh
diff --git a/qa/workunits/mon/pg_autoscaler.sh b/qa/workunits/mon/pg_autoscaler.sh
new file mode 100755
index 00000000000..29387c67dd8
--- /dev/null
+++ b/qa/workunits/mon/pg_autoscaler.sh
@@ -0,0 +1,77 @@
+#!/bin/bash -ex
+
+NUM_OSDS=$(ceph osd ls | wc -l)
+if [ $NUM_OSDS -lt 6 ]; then
+    echo "test requires at least 6 OSDs"
+    exit 1
+fi
+
+NUM_POOLS=$(ceph osd pool ls | wc -l)
+if [ $NUM_POOLS -gt 0 ]; then
+    echo "test requires no preexisting pools"
+    exit 1
+fi
+
+function wait_for() {
+    local sec=$1
+    local cmd=$2
+
+    while true ; do
+        if bash -c "$cmd" ; then
+            break
+        fi
+        sec=$(( $sec - 1 ))
+        if [ $sec -eq 0 ]; then
+            echo failed
+            return 1
+        fi
+        sleep 1
+    done
+    return 0
+}

+
+# enable
+ceph config set mgr mgr/pg_autoscaler/sleep_interval 5
+ceph mgr module enable pg_autoscaler
+
+# pg_num_min
+ceph osd pool create a 16 --pg-num-min 4
+ceph osd pool create b 16 --pg-num-min 2
+ceph osd pool set a pg_autoscale_mode on
+ceph osd pool set b pg_autoscale_mode on
+
+wait_for 120 "ceph osd pool get a pg_num | grep 4"
+wait_for 120 "ceph osd pool get b pg_num | grep 2"
+
+# target ratio
+ceph osd pool set a target_size_ratio .5
+ceph osd pool set b target_size_ratio .1
+sleep 30
+APGS=$(ceph osd dump -f json-pretty | jq '.pools[0].pg_num')
+BPGS=$(ceph osd dump -f json-pretty | jq '.pools[1].pg_num')
+test $APGS -gt 100
+test $BPGS -gt 10
+
+# small ratio change does not change pg_num
+ceph osd pool set a target_size_ratio .7
+ceph osd pool set b target_size_ratio .2
+sleep 10
+ceph osd pool get a pg_num | grep $APGS
+ceph osd pool get b pg_num | grep $BPGS
+
+# too much ratio
+ceph osd pool set a target_size_ratio .9
+ceph osd pool set b target_size_ratio .9
+wait_for 60 "ceph health detail | grep POOL_TARGET_SIZE_RATIO_OVERCOMMITTED"
+wait_for 60 "ceph health detail | grep 1.8"
+ceph osd pool set a target_size_ratio 0
+ceph osd pool set b target_size_ratio 0
+
+# target_size
+ceph osd pool set a target_size_bytes 1000000000000000
+ceph osd pool set b target_size_bytes 1000000000000000
+wait_for 60 "ceph health detail | grep POOL_TARGET_SIZE_BYTES_OVERCOMMITTED"
+ceph osd pool set a target_size_bytes 0
+ceph osd pool set b target_size_bytes 0
+
+echo OK
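
Not part of the patch above: a minimal sketch of running this workunit by
hand against a vstart.sh dev cluster, in case that is useful for review.
Teuthology normally drives the script through the suite yaml; the MON/MGR/OSD
counts, the build-tree paths, and the expectation that vstart creates no
pools by default are assumptions of this sketch, not part of the commit.

    # hypothetical manual run from a built Ceph source tree (assumption:
    # vstart creates no pools by default, so the preflight pool check passes)
    cd build
    MON=3 MGR=1 OSD=8 ../src/vstart.sh -n -x   # the test needs at least 6 OSDs
    export CEPH_CONF=$PWD/ceph.conf            # point the ceph CLI at the dev cluster
    ../qa/workunits/mon/pg_autoscaler.sh
    ../src/stop.sh                             # tear the dev cluster down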