test: Add test for scrub parallelism (40077/head)
author David Zafman <dzafman@redhat.com>
Fri, 15 Jan 2021 04:40:04 +0000 (20:40 -0800)
committer David Zafman <dzafman@redhat.com>
Fri, 12 Mar 2021 04:42:48 +0000 (04:42 +0000)
Signed-off-by: David Zafman <dzafman@redhat.com>
(cherry picked from commit dd63577ab372d1d4b2da284c6e506603b293d85d)

qa/standalone/scrub/osd-scrub-test.sh

index 5dd029c356fe196f6b22f3223d555742bb626c4f..660035af915ff4a7aaa45988deb8ac073c6bd1ea 100755 (executable)
@@ -449,6 +449,128 @@ function TEST_scrub_permit_time() {
     teardown $dir || return 1
 }
 
+function TEST_scrub_parallelism() {
+    local dir=$1
+    local poolname=test
+    local OSDS=8
+    local PGS=64
+    local objects=$(expr $PGS \* 15)
+    local size=2
+    local osd_max_scrubs_per_osd=2
+    local maxscrubs=$(expr $OSDS / $size \* $osd_max_scrubs_per_osd)
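+    # Each scrub holds a reservation slot on all $size OSDs in the PG's acting
+    # set, so at most OSDS / size * osd_max_scrubs (8 / 2 * 2 = 8) scrubs can
+    # run at the same time.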
+
+    TESTDATA="testdata.$$"
+
+    setup $dir || return 1
+    run_mon $dir a --osd_pool_default_size=$size || return 1
+    run_mgr $dir x || return 1
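+    # Scrub slowly (5-object chunks with a 5 second sleep between chunks) so
+    # that scrubs run long enough to overlap, and disable randomized scrub
+    # scheduling to keep the test deterministic.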
+    for osd in $(seq 0 $(expr $OSDS - 1))
+    do
+      run_osd $dir $osd --osd_pool_default_pg_autoscale_mode=off \
+             --osd_deep_scrub_randomize_ratio=0.0 \
+             --osd_max_scrubs=$osd_max_scrubs_per_osd \
+             --osd_scrub_chunk_max=5 \
+             --osd_scrub_sleep=5.0 \
+             --osd_scrub_interval_randomize_ratio=0  || return 1
+    done
+
+    # Create a pool
+    create_pool $poolname $PGS $PGS
+    wait_for_clean || return 1
+    poolid=$(ceph osd dump | grep "^pool.*[']${poolname}[']" | awk '{ print $2 }')
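+    # "ceph osd dump" lists pools as "pool <id> '<name>' ...", so field 2 is the id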
+
+    ceph pg dump pgs
+
+    dd if=/dev/urandom of=$TESTDATA bs=1032 count=1
+    for i in $(seq 1 $objects)
+    do
+        rados -p $poolname put obj${i} $TESTDATA
+    done
+    rm -f $TESTDATA
+
+    ceph pg dump pgs
+
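+    # Request a scrub of every PG while noscrub is set, then clear the flag so
+    # the queued scrubs are released together.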
+    ceph osd set noscrub
+    sleep 3
+    for i in $(seq 0 $(expr $PGS - 1))
+    do
+      ceph tell $poolid.$(printf '%x' $i) scrub || return 1
+    done
+    ceph osd unset noscrub
+
+    # Wait for scrubbing to start
+    set -o pipefail
+    found="no"
+    for i in $(seq 0 200)
+    do
+      flush_pg_stats
+      if ceph pg dump pgs | grep -q "scrubbing"
+      then
+        found="yes"
+        #ceph pg dump pgs
+        break
+      fi
+    done
+    set +o pipefail
+
+    if test $found = "no"
+    then
+      echo "Scrubbing never started"
+      return 1
+    fi
+
+    local zero_count=0
+    local found_count=0
+    local threshold="$(expr $maxscrubs - 1)"
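+    # Sample once per second: count the samples at or near the concurrency
+    # limit, and stop once no scrubs have been seen for 10 consecutive samples.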
+    set -o pipefail
+    while true
+    do
+      sleep 1
+      scrubs=$(ceph pg dump pgs | grep scrubbing | wc -l)
+      # Occasionally scrubs that have already finished are still reported while
+      # new scrubs are starting, so more scrubs can appear to be running than is
+      # actually possible.  Also, accept 1 less than the maximum possible scrubs.
+      if test $scrubs -ge $threshold
+      then
+        found_count=$(expr $found_count + 1)
+        continue
+      fi
+      if test $scrubs = "0"
+      then
+        zero_count=$(expr $zero_count + 1)
+        if test $zero_count = "10"
+        then
+          break
+        fi
+      else
+        zero_count=0
+      fi
+    done
+    set +o pipefail
+
+    echo "Found at or near max scrubs $found_count time(s)"
+
+    ERRORS=0
+    if test $found_count -lt "20"
+    then
+      echo "Only detected at or near the maximum number of scrubs $found_count time(s)"
+      ERRORS=$(expr $ERRORS + 1)
+    fi
+
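+    # With 64 PGs competing for at most $maxscrubs reservation slots, some
+    # reservation requests must have been rejected; the OSDs log these as
+    # "reserve failed".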
+    grep "reserve failed" $dir/osd.*.log
+    if ! grep -q "reserve failed" $dir/osd.*.log
+    then
+      echo "Expected 'reserve failed' messages in the OSD logs but found none"
+      ERRORS=$(expr $ERRORS + 1)
+    fi
+
+    if test $ERRORS -gt 0
+    then
+      return 1
+    fi
+
+    teardown $dir || return 1
+}
+
 main osd-scrub-test "$@"
 
 # Local Variables: