From: Sridhar Seshasayee
Date: Wed, 7 Jul 2021 19:09:26 +0000 (+0530)
Subject: qa/standalone: Add missing teardowns to a subset of osd-scrub-repair tests
X-Git-Tag: v16.2.6~146^2
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=refs%2Fpull%2F42258%2Fhead;p=ceph.git

qa/standalone: Add missing teardowns to a subset of osd-scrub-repair tests

Tests identified with missing teardown within osd-scrub-repair.sh:

1. TEST_periodic_scrub_replicated()
2. TEST_scrub_warning()
3. TEST_request_scrub_priority()

Centralize setup and teardown within the run() function for all the tests.

Fixes: https://tracker.ceph.com/issues/51580
Signed-off-by: Sridhar Seshasayee
(cherry picked from commit 84cab65e3aafcf656b860abd72a09799458f6bc5)

Conflicts:
	qa/standalone/scrub/osd-scrub-repair.sh
	- Removed non-existent test TEST_auto_repair_bluestore_tag()
	  and its associated helper functions initiate_and_fetch_state()
	  and wait_end_of_scrub()
---

diff --git a/qa/standalone/scrub/osd-scrub-repair.sh b/qa/standalone/scrub/osd-scrub-repair.sh
index 1461d0d6c61..8491ca93a37 100755
--- a/qa/standalone/scrub/osd-scrub-repair.sh
+++ b/qa/standalone/scrub/osd-scrub-repair.sh
@@ -60,7 +60,9 @@ function run() {
     export -n CEPH_CLI_TEST_DUP_COMMAND

     local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
     for func in $funcs ; do
+        setup $dir || return 1
         $func $dir || return 1
+        teardown $dir || return 1
     done
 }
@@ -91,7 +93,6 @@ function TEST_corrupt_and_repair_replicated() {
     local dir=$1
     local poolname=rbd

-    setup $dir || return 1
     run_mon $dir a --osd_pool_default_size=2 || return 1
     run_mgr $dir x || return 1
     run_osd $dir 0 || return 1
@@ -103,8 +104,6 @@ function TEST_corrupt_and_repair_replicated() {
     corrupt_and_repair_one $dir $poolname $(get_not_primary $poolname SOMETHING) || return 1
     # Reproduces http://tracker.ceph.com/issues/8914
     corrupt_and_repair_one $dir $poolname $(get_primary $poolname SOMETHING) || return 1
-
-    teardown $dir || return 1
 }

 #
@@ -114,7 +113,6 @@ function TEST_allow_repair_during_recovery() {
     local dir=$1
     local poolname=rbd

-    setup $dir || return 1
     run_mon $dir a --osd_pool_default_size=2 || return 1
     run_mgr $dir x || return 1
     run_osd $dir 0 --osd_scrub_during_recovery=false \
@@ -128,8 +126,6 @@ function TEST_allow_repair_during_recovery() {

     add_something $dir $poolname || return 1
     corrupt_and_repair_one $dir $poolname $(get_not_primary $poolname SOMETHING) || return 1
-
-    teardown $dir || return 1
 }

 #
@@ -139,7 +135,6 @@ function TEST_skip_non_repair_during_recovery() {
     local dir=$1
     local poolname=rbd

-    setup $dir || return 1
     run_mon $dir a --osd_pool_default_size=2 || return 1
     run_mgr $dir x || return 1
     run_osd $dir 0 --osd_scrub_during_recovery=false \
@@ -153,8 +148,6 @@ function TEST_skip_non_repair_during_recovery() {

     add_something $dir $poolname || return 1
     scrub_and_not_schedule $dir $poolname $(get_not_primary $poolname SOMETHING) || return 1
-
-    teardown $dir || return 1
 }

 function scrub_and_not_schedule() {
@@ -276,7 +269,6 @@ function auto_repair_erasure_coded() {
     local poolname=ecpool

     # Launch a cluster with 5 seconds scrub interval
-    setup $dir || return 1
     run_mon $dir a || return 1
     run_mgr $dir x || return 1
     local ceph_osd_args="--osd-scrub-auto-repair=true \
@@ -314,9 +306,6 @@ function auto_repair_erasure_coded() {
     objectstore_tool $dir $(get_not_primary $poolname SOMETHING) SOMETHING list-attrs || return 1
     rados --pool $poolname get SOMETHING $dir/COPY || return 1
     diff $dir/ORIGINAL $dir/COPY || return 1
-
-    # Tear down
-    teardown $dir || return 1
 }

 function TEST_auto_repair_erasure_coded_appends() {
@@ -334,7 +323,6 @@ function TEST_auto_repair_bluestore_basic() {
     local poolname=testpool

     # Launch a cluster with 5 seconds scrub interval
-    setup $dir || return 1
     run_mon $dir a || return 1
     run_mgr $dir x || return 1
     local ceph_osd_args="--osd-scrub-auto-repair=true \
@@ -373,9 +361,6 @@ function TEST_auto_repair_bluestore_basic() {
     objectstore_tool $dir $(get_not_primary $poolname SOMETHING) SOMETHING get-bytes $dir/COPY || return 1
     diff $dir/ORIGINAL $dir/COPY || return 1
     grep scrub_finish $dir/osd.${primary}.log
-
-    # Tear down
-    teardown $dir || return 1
 }

 function TEST_auto_repair_bluestore_scrub() {
@@ -383,7 +368,6 @@ function TEST_auto_repair_bluestore_scrub() {
     local dir=$1
     local poolname=testpool

     # Launch a cluster with 5 seconds scrub interval
-    setup $dir || return 1
     run_mon $dir a || return 1
     run_mgr $dir x || return 1
     local ceph_osd_args="--osd-scrub-auto-repair=true \
@@ -428,9 +412,6 @@ function TEST_auto_repair_bluestore_scrub() {
     # This should have caused 1 object to be repaired
     COUNT=$(ceph pg $pgid query | jq '.info.stats.stat_sum.num_objects_repaired')
     test "$COUNT" = "1" || return 1
-
-    # Tear down
-    teardown $dir || return 1
 }

 function TEST_auto_repair_bluestore_failed() {
@@ -438,7 +419,6 @@ function TEST_auto_repair_bluestore_failed() {
     local dir=$1
     local poolname=testpool

     # Launch a cluster with 5 seconds scrub interval
-    setup $dir || return 1
     run_mon $dir a || return 1
     run_mgr $dir x || return 1
     local ceph_osd_args="--osd-scrub-auto-repair=true \
@@ -498,9 +478,6 @@ function TEST_auto_repair_bluestore_failed() {
     ceph pg dump pgs
     ceph pg dump pgs | grep -q -e "^${pgid}.* active+clean " -e "^${pgid}.* active+clean+wait " || return 1
     grep scrub_finish $dir/osd.${primary}.log
-
-    # Tear down
-    teardown $dir || return 1
 }

 function TEST_auto_repair_bluestore_failed_norecov() {
@@ -508,7 +485,6 @@ function TEST_auto_repair_bluestore_failed_norecov() {
     local dir=$1
     local poolname=testpool

     # Launch a cluster with 5 seconds scrub interval
-    setup $dir || return 1
     run_mon $dir a || return 1
     run_mgr $dir x || return 1
     local ceph_osd_args="--osd-scrub-auto-repair=true \
@@ -552,9 +528,6 @@ function TEST_auto_repair_bluestore_failed_norecov() {
     grep -q "scrub_finish.*present with no repair possible" $dir/osd.${primary}.log || return 1
     ceph pg dump pgs
     ceph pg dump pgs | grep -q "^${pgid}.*+failed_repair" || return 1
-
-    # Tear down
-    teardown $dir || return 1
 }

 function TEST_repair_stats() {
@@ -566,7 +539,6 @@ function TEST_repair_stats() {
     local dir=$1
     local poolname=testpool2
     local OSDS=2
     local OBJS=30
     local REPAIRS=20

     # Launch a cluster with 5 seconds scrub interval
-    setup $dir || return 1
     run_mon $dir a || return 1
     run_mgr $dir x || return 1
     local ceph_osd_args="--osd_deep_scrub_randomize_ratio=0 \
@@ -626,9 +598,6 @@ function TEST_repair_stats() {
     ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats_sum"
     COUNT=$(ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats_sum.num_shards_repaired")
     test "$COUNT" = "$REPAIRS" || return 1
-
-    # Tear down
-    teardown $dir || return 1
 }

 function TEST_repair_stats_ec() {
@@ -641,7 +610,6 @@ function TEST_repair_stats_ec() {
     local dir=$1
     local poolname=ecpool
     local OSDS=3
     local OBJS=30
     local REPAIRS=26
     local allow_overwrites=false

     # Launch a cluster with 5 seconds scrub interval
-    setup $dir || return 1
     run_mon $dir a || return 1
     run_mgr $dir x || return 1
     local ceph_osd_args="--osd_deep_scrub_randomize_ratio=0 \
@@ -704,9 +672,6 @@ function TEST_repair_stats_ec() {
     ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats_sum"
     COUNT=$(ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats_sum.num_shards_repaired")
     test "$COUNT" = "$REPAIRS" || return 1
-
-    # Tear down
-    teardown $dir || return 1
 }

 function corrupt_and_repair_jerasure() {
@@ -714,7 +679,6 @@ function corrupt_and_repair_jerasure() {
     local allow_overwrites=$2
     local poolname=ecpool

-    setup $dir || return 1
     run_mon $dir a || return 1
     run_mgr $dir x || return 1
     for id in $(seq 0 3) ; do
@@ -729,8 +693,6 @@ function corrupt_and_repair_jerasure() {

     create_ec_pool $poolname $allow_overwrites k=2 m=2 || return 1
     corrupt_and_repair_erasure_coded $dir $poolname || return 1
-
-    teardown $dir || return 1
 }

 function TEST_corrupt_and_repair_jerasure_appends() {
@@ -748,7 +710,6 @@ function corrupt_and_repair_lrc() {
     local allow_overwrites=$2
     local poolname=ecpool

-    setup $dir || return 1
     run_mon $dir a || return 1
     run_mgr $dir x || return 1
     for id in $(seq 0 9) ; do
@@ -763,8 +724,6 @@ function corrupt_and_repair_lrc() {

     create_ec_pool $poolname $allow_overwrites k=4 m=2 l=3 plugin=lrc || return 1
     corrupt_and_repair_erasure_coded $dir $poolname || return 1
-
-    teardown $dir || return 1
 }

 function TEST_corrupt_and_repair_lrc_appends() {
@@ -783,7 +742,6 @@ function unfound_erasure_coded() {
     local poolname=ecpool
     local payload=ABCDEF

-    setup $dir || return 1
     run_mon $dir a || return 1
     run_mgr $dir x || return 1
     for id in $(seq 0 3) ; do
@@ -831,8 +789,6 @@ function unfound_erasure_coded() {
     ceph -s|grep "4 up" || return 1
     ceph -s|grep "4 in" || return 1
     ceph -s|grep "1/1 objects unfound" || return 1
-
-    teardown $dir || return 1
 }

 function TEST_unfound_erasure_coded_appends() {
@@ -853,7 +809,6 @@ function list_missing_erasure_coded() {
     local allow_overwrites=$2
     local poolname=ecpool

-    setup $dir || return 1
     run_mon $dir a || return 1
     run_mgr $dir x || return 1
     for id in $(seq 0 2) ; do
@@ -913,8 +868,6 @@ function list_missing_erasure_coded() {
         matches=$(ceph pg $pg list_unfound | egrep "MOBJ0|MOBJ1" | wc -l)
         [ $matches -eq 2 ] && break
     done
-
-    teardown $dir || return 1
 }

 function TEST_list_missing_erasure_coded_appends() {
@@ -935,7 +888,6 @@ function TEST_corrupt_scrub_replicated() {
     local poolname=csr_pool
     local total_objs=19

-    setup $dir || return 1
     run_mon $dir a --osd_pool_default_size=2 || return 1
     run_mgr $dir x || return 1
     run_osd $dir 0 || return 1
@@ -3530,7 +3482,6 @@ EOF
     fi

     ceph osd pool rm $poolname $poolname --yes-i-really-really-mean-it
-    teardown $dir || return 1
 }

@@ -3543,7 +3494,6 @@ function corrupt_scrub_erasure() {
     local poolname=ecpool
     local total_objs=7

-    setup $dir || return 1
     run_mon $dir a || return 1
     run_mgr $dir x || return 1
     for id in $(seq 0 2) ; do
@@ -5690,7 +5640,6 @@ EOF
     fi

     ceph osd pool rm $poolname $poolname --yes-i-really-really-mean-it
-    teardown $dir || return 1
 }

 function TEST_corrupt_scrub_erasure_appends() {
@@ -5711,7 +5660,6 @@ function TEST_periodic_scrub_replicated() {
     local poolname=psr_pool
     local objname=POBJ

-    setup $dir || return 1
     run_mon $dir a --osd_pool_default_size=2 || return 1
     run_mgr $dir x || return 1
     local ceph_osd_args="--osd-scrub-interval-randomize-ratio=0 --osd-deep-scrub-randomize-ratio=0 "
@@ -5803,7 +5751,6 @@ function TEST_scrub_warning() {
     local conf_overdue_seconds=$(calc $i7_days + $i1_day + \( $i7_days \* $overdue \) )
     local pool_overdue_seconds=$(calc $i14_days + $i1_day + \( $i14_days \* $overdue \) )

-    setup $dir || return 1
     run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
     run_mgr $dir x --mon_warn_pg_not_scrubbed_ratio=${overdue} --mon_warn_pg_not_deep_scrubbed_ratio=${overdue} || return 1
     run_osd $dir 0 $ceph_osd_args --osd_scrub_backoff_ratio=0 || return 1
@@ -5870,7 +5817,6 @@ function TEST_scrub_warning() {
         ceph health detail | grep "not deep-scrubbed since"
         return 1
     fi
-    return 0
 }

 #
@@ -5881,7 +5827,6 @@ function TEST_corrupt_snapset_scrub_rep() {
     local poolname=csr_pool
     local total_objs=2

-    setup $dir || return 1
     run_mon $dir a --osd_pool_default_size=2 || return 1
     run_mgr $dir x || return 1
     run_osd $dir 0 || return 1
@@ -6141,7 +6086,6 @@ EOF
     fi

     ceph osd pool rm $poolname $poolname --yes-i-really-really-mean-it
-    teardown $dir || return 1
 }

 function TEST_request_scrub_priority() {
@@ -6151,7 +6095,6 @@ function TEST_request_scrub_priority() {
     local OBJECTS=64
     local PGS=8

-    setup $dir || return 1
     run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
     run_mgr $dir x || return 1
     local ceph_osd_args="--osd-scrub-interval-randomize-ratio=0 --osd-deep-scrub-randomize-ratio=0 "
@@ -6199,8 +6142,6 @@ function TEST_request_scrub_priority() {

     # Verify that the requested scrub ran first
     grep "log_channel.*scrub ok" $dir/osd.${primary}.log | grep -v purged_snaps | head -1 | sed 's/.*[[]DBG[]]//' | grep -q $pg || return 1
-
-    return 0
 }
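
For reference, the per-test lifecycle that the modified run() loop enforces is sketched below with editorial annotations. The loop body is taken verbatim from the first hunk above; setup and teardown are the helpers these standalone scripts source from qa/standalone/ceph-helpers.sh, and the comments are annotations added here, not part of the patch:

    for func in $funcs ; do
        setup $dir || return 1      # create a fresh test directory for this test
        $func $dir || return 1      # run one TEST_* function; bail out on failure
        teardown $dir || return 1   # kill daemons and remove the test directory
    done

Note that because each step is chained with "|| return 1", a failing test makes run() return before the teardown line is reached: the centralized teardown only runs for tests that pass, while passing tests no longer need their own setup/teardown calls.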