diff --git a/check b/check
index 8b5e241ca90c0ed9d151e59b4808a3f77e595d45..c6dba89b5b506efc1b0cc13bbdf79c019292ff7f 100755 (executable)
--- a/check
+++ b/check
@@ -8,18 +8,16 @@ tmp=/tmp/$$
 status=0
 needwrap=true
 needsum=true
-n_try=0
-try=""
-n_bad=0
+try=()
 sum_bad=0
-bad=""
-n_notrun=0
-notrun=""
+bad=()
+notrun=()
 interrupt=true
 diff="diff -u"
 showme=false
 have_test_arg=false
 randomize=false
+exact_order=false
 export here=`pwd`
 xfile=""
 subdir_xfile=""
@@ -27,6 +25,9 @@ brief_test_summary=false
 do_report=false
 DUMP_OUTPUT=false
 iterations=1
+istop=false
+loop_on_fail=0
+exclude_tests=()
 
 # This is a global variable used to pass test failure text to reporting gunk
 _err_msg=""
@@ -34,6 +35,9 @@ _err_msg=""
 # start the initialisation work now
 iam=check
 
+# mkfs.xfs uses the presence of both of these variables to enable formerly
+# supported tiny filesystem configurations that fstests use for fuzz testing
+# in a controlled environment
 export MSGVERB="text:action"
 export QA_CHECK_FS=${QA_CHECK_FS:=true}
 
@@ -43,7 +47,7 @@ export DIFF_LENGTH=${DIFF_LENGTH:=10}
 # by default don't output timestamps
 timestamp=${TIMESTAMP:=false}
 
-rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist $tmp.report.*
+rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.report.* $tmp.arglist
 
 SRC_GROUPS="generic shared"
 export SRC_DIR="tests"
@@ -54,9 +58,11 @@ usage()
 
 check options
     -nfs               test NFS
+    -afs               test AFS
     -glusterfs         test GlusterFS
     -cifs              test CIFS
     -9p                        test 9p
+    -fuse              test fuse
     -virtiofs          test virtiofs
     -overlay           test overlay
     -pvfs2             test PVFS2
@@ -67,18 +73,22 @@ check options
     -n                 show me, do not run tests
     -T                 output timestamps
     -r                 randomize test order
+    --exact-order      run tests in the exact order specified
     -i <n>             iterate the test list <n> times
+    -I <n>             iterate the test list <n> times, but stop iterating on any test failure
     -d                 dump test output to stdout
     -b                 brief test summary
-    -R fmt[,fmt]       generate report in formats specified. Supported format: [xunit]
+    -R fmt[,fmt]       generate report in formats specified. Supported formats: xunit, xunit-quiet
     --large-fs         optimise scratch device for large filesystems
     -s section         run only specified section from config file
     -S section         exclude the specified section from the config file
+    -L <n>             loop tests <n> times following a failure, measuring aggregate pass/fail metrics
 
 testlist options
     -g group[,group...]        include tests from these groups
     -x group[,group...]        exclude tests from these groups
     -X exclude_file    exclude individual tests
+    -e testlist         exclude a comma/space separated list of tests
     -E external_file   exclude individual tests
     [testlist]         include tests matching names in testlist
 
@@ -111,7 +121,7 @@ examples:
  check -X .exclude -g auto
  check -E ~/.xfstests.exclude
 '
-           exit 0
+           exit 1
 }
 
 get_sub_group_list()
@@ -119,9 +129,9 @@ get_sub_group_list()
        local d=$1
        local grp=$2
 
-       test -s "$SRC_DIR/$d/group" || return 1
+       test -s "$SRC_DIR/$d/group.list" || return 1
 
-       local grpl=$(sed -n < $SRC_DIR/$d/group \
+       local grpl=$(sed -n < $SRC_DIR/$d/group.list \
                -e 's/#.*//' \
                -e 's/$/ /' \
                -e "s;^\($VALID_TEST_NAME\).* $grp .*;$SRC_DIR/$d/\1;p")
@@ -174,10 +184,10 @@ get_all_tests()
 # the function from that list.
 trim_test_list()
 {
-       test_list="$*"
+       local test_list="$*"
 
        rm -f $tmp.grep
-       numsed=0
+       local numsed=0
        for t in $test_list
        do
            if [ $numsed -gt 100 ]; then
@@ -194,15 +204,9 @@ trim_test_list()
        rm -f $tmp.grep
 }
 
-
-_wallclock()
-{
-    date "+%s"
-}
-
 _timestamp()
 {
-    now=`date "+%T"`
+    local now=`date "+%T"`
     echo -n " [$now]"
 }
 
@@ -242,23 +246,28 @@ _prepare_test_list()
                list=$(get_group_list $xgroup)
                if [ -z "$list" ]; then
                        echo "Group \"$xgroup\" is empty or not defined?"
-                       exit 1
+                       continue
                fi
 
                trim_test_list $list
        done
 
-       # sort the list of tests into numeric order
-       if $randomize; then
-               if type shuf >& /dev/null; then
-                       sorter="shuf"
+       # sort the list of tests into numeric order unless we're running tests
+       # in the exact order specified
+       if ! $exact_order; then
+               if $randomize; then
+                       if type shuf >& /dev/null; then
+                               sorter="shuf"
+                       else
+                               sorter="awk -v seed=$RANDOM -f randomize.awk"
+                       fi
                else
-                       sorter="awk -v seed=$RANDOM -f randomize.awk"
+                       sorter="cat"
                fi
+               list=`sort -n $tmp.list | uniq | $sorter`
        else
-               sorter="cat"
+               list=`cat $tmp.list`
        fi
-       list=`sort -n $tmp.list | uniq | $sorter`
        rm -f $tmp.list
 }
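
With --exact-order the sort | uniq pipeline is bypassed entirely, so the
list runs verbatim and duplicate entries survive; -r shuffles only after
deduplication. A quick illustration with hypothetical test names:

    $ printf 'generic/002\ngeneric/001\ngeneric/001\n' >/tmp/list
    $ sort -n /tmp/list | uniq          # default: sorted, deduplicated
    generic/001
    generic/002
    $ sort -n /tmp/list | uniq | shuf   # -r: shuffled (one possible order)
    generic/002
    generic/001
    $ cat /tmp/list                     # --exact-order: verbatim, dups kept
    generic/002
    generic/001
    generic/001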
 
@@ -267,15 +276,14 @@ while [ $# -gt 0 ]; do
        case "$1" in
        -\? | -h | --help) usage ;;
 
-       -nfs)           FSTYP=nfs ;;
-       -glusterfs)     FSTYP=glusterfs ;;
-       -cifs)          FSTYP=cifs ;;
-       -9p)            FSTYP=9p ;;
-       -virtiofs)      FSTYP=virtiofs ;;
-       -overlay)       FSTYP=overlay; export OVERLAY=true ;;
-       -pvfs2)         FSTYP=pvfs2 ;;
-       -tmpfs)         FSTYP=tmpfs ;;
-       -ubifs)         FSTYP=ubifs ;;
+       -nfs|-afs|-glusterfs|-cifs|-9p|-fuse|-virtiofs|-pvfs2|-tmpfs|-ubifs)
+               FSTYP="${1:1}"
+               ;;
+       -overlay)
+               [ "$FSTYP" == overlay ] || export OVL_BASE_FSTYP="$FSTYP"
+               FSTYP=overlay
+               export OVERLAY=true
+               ;;
 
        -g)     group=$2 ; shift ;
                GROUP_LIST="$GROUP_LIST ${group//,/ }"
@@ -287,10 +295,17 @@ while [ $# -gt 0 ]; do
 
        -X)     subdir_xfile=$2; shift ;
                ;;
+       -e)
+               xfile=$2; shift ;
+               readarray -t -O "${#exclude_tests[@]}" exclude_tests < \
+                       <(echo "$xfile" | tr ', ' '\n\n')
+               ;;
+
        -E)     xfile=$2; shift ;
                if [ -f $xfile ]; then
-                       sed "s/#.*$//" "$xfile" >> $tmp.xlist
-               fi
+                       readarray -t -O ${#exclude_tests[@]} exclude_tests < \
+                               <(sed "s/#.*$//" $xfile)
+               fi
                ;;
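
Both -e and -E now funnel excluded tests into the single exclude_tests bash
array: readarray -t strips the trailing newlines, and -O sets the index at
which to start writing, so seeding it with the current array length makes
each call append rather than overwrite. A self-contained sketch with
hypothetical test names:

    exclude_tests=()
    # -e style: comma/space separated list straight off the command line
    readarray -t -O "${#exclude_tests[@]}" exclude_tests < \
            <(echo "generic/001,generic/002 xfs/010" | tr ', ' '\n\n')
    # -E style: one test per line, trailing comments stripped first
    readarray -t -O "${#exclude_tests[@]}" exclude_tests < \
            <(sed "s/#.*$//" <<< "generic/100 # broken here")
    echo "${#exclude_tests[@]} excluded: ${exclude_tests[*]}"
    # -> 4 excluded: generic/001 generic/002 xfs/010 generic/100

Note that stripping a comment can leave trailing whitespace on the entry,
which is why _expunge_test below matches with a word-boundary regex instead
of string equality.
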
        -s)     RUN_SECTION="$RUN_SECTION $2"; shift ;;
        -S)     EXCLUDE_SECTION="$EXCLUDE_SECTION $2"; shift ;;
@@ -298,8 +313,22 @@ while [ $# -gt 0 ]; do
        -udiff) diff="$diff -u" ;;
 
        -n)     showme=true ;;
-        -r)    randomize=true ;;
+       -r)
+               if $exact_order; then
+                       echo "Cannot specify -r and --exact-order."
+                       exit 1
+               fi
+               randomize=true
+               ;;
+       --exact-order)
+               if $randomize; then
+                       echo "Cannot specify --exact-order and -r."
+                       exit 1
+               fi
+               exact_order=true
+               ;;
        -i)     iterations=$2; shift ;;
+       -I)     iterations=$2; istop=true; shift ;;
        -T)     timestamp=true ;;
        -d)     DUMP_OUTPUT=true ;;
        -b)     brief_test_summary=true;;
@@ -309,6 +338,9 @@ while [ $# -gt 0 ]; do
                ;;
        --large-fs) export LARGE_SCRATCH_DEV=yes ;;
        --extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${r#*=} ;;
+       -L)     [[ $2 =~ ^[0-9]+$ ]] || usage
+               loop_on_fail=$2; shift
+               ;;
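
For reference, the iteration and rerun knobs compose like this (hypothetical
invocations):

    ./check -L 5 generic/475    # on failure, rerun the test 5 more times and
                                # print aggregate pass/fail statistics
    ./check -I 10 -g auto       # up to 10 passes, stop after a failing one
    ./check -i 10 -g auto       # exactly 10 passes regardless of failures
    ./check -L xyz generic/475  # rejected: -L takes a decimal count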
 
        -*)     usage ;;
        *)      # not an argument, we've got tests now.
@@ -332,11 +364,23 @@ if ! . ./common/rc; then
        exit 1
 fi
 
+# If the test config specified a soak test duration, see if there are any
+# unit suffixes that need converting to an integer seconds count.
+if [ -n "$SOAK_DURATION" ]; then
+       SOAK_DURATION="$(echo "$SOAK_DURATION" | \
+               sed -e 's/^\([.0-9]*\)\([a-z]\)*/\1 \2/g' | \
+               $AWK_PROG -f $here/src/soak_duration.awk)"
+       if [ $? -ne 0 ]; then
+               status=1
+               exit 1
+       fi
+fi
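
The sed stage splits a value such as "30m" into "30 m" so the awk helper only
has to map unit suffixes onto multipliers. src/soak_duration.awk itself is not
part of this diff; a minimal stand-in, with an assumed suffix table:

    to_seconds() {
            echo "$1" | sed -e 's/^\([.0-9]*\)\([a-z]\)*/\1 \2/g' | \
                    awk '{ mult["s"] = 1; mult["m"] = 60; mult["h"] = 3600
                           mult["d"] = 86400; mult["w"] = 604800
                           if ($2 == "") print $1
                           else if ($2 in mult) printf("%d\n", $1 * mult[$2])
                           else exit 1 }'
    }
    to_seconds 30m     # -> 1800
    to_seconds 2.5h    # -> 9000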
+
 if [ -n "$subdir_xfile" ]; then
        for d in $SRC_GROUPS $FSTYP; do
                [ -f $SRC_DIR/$d/$subdir_xfile ] || continue
                for f in `sed "s/#.*$//" $SRC_DIR/$d/$subdir_xfile`; do
-                       echo $d/$f >> $tmp.xlist
+                       exclude_tests+=($d/$f)
                done
        done
 fi
@@ -352,12 +396,12 @@ if $have_test_arg; then
                *)      # Expand test pattern (e.g. xfs/???, *fs/001)
                        list=$(cd $SRC_DIR; echo $1)
                        for t in $list; do
-                               test_dir=`dirname $t`
-                               test_dir=${test_dir#$SRC_DIR/*}
-                               test_name=`basename $t`
-                               group_file=$SRC_DIR/$test_dir/group
+                               t=${t#$SRC_DIR/}
+                               test_dir=${t%%/*}
+                               test_name=${t##*/}
+                               group_file=$SRC_DIR/$test_dir/group.list
 
-                               if egrep -q "^$test_name" $group_file; then
+                               if grep -Eq "^$test_name" $group_file; then
                                        # in group file ... OK
                                        echo $SRC_DIR/$test_dir/$test_name \
                                                >>$tmp.arglist
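
The dirname/basename/sed calls are replaced with pure parameter expansion,
saving three forks per test. For a typical entry:

    SRC_DIR="tests"
    t="tests/generic/001"
    t=${t#$SRC_DIR/}       # strip shortest matching prefix -> generic/001
    test_dir=${t%%/*}      # strip longest "/*" suffix      -> generic
    test_name=${t##*/}     # strip longest "*/" prefix      -> 001
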
@@ -385,31 +429,37 @@ fi
 
 _wipe_counters()
 {
-       n_try="0"
-       n_bad="0"
-       n_notrun="0"
-       unset try notrun bad
+       try=()
+       notrun=()
+       bad=()
 }
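
Converting try/bad/notrun from whitespace-joined strings plus separate n_*
counters into arrays means the count is always ${#arr[*]} and the report text
is ${arr[*]}, so the two can no longer drift apart:

    try=("generic/001" "generic/002" "xfs/010")
    bad=("generic/001" "xfs/010")
    echo "Failures: ${bad[*]}"                    # Failures: generic/001 xfs/010
    echo "Failed ${#bad[*]} of ${#try[*]} tests"  # Failed 2 of 3 tests
    try=() bad=() notrun=()                       # _wipe_counters, in effect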
 
 _global_log() {
        echo "$1" >> $check.log
-       if $OPTIONS_HAVE_SECIONS; then
+       if $OPTIONS_HAVE_SECTIONS; then
                echo "$1" >> ${REPORT_DIR}/check.log
        fi
 }
 
+if [ -n "$REPORT_GCOV" ]; then
+       . ./common/gcov
+       _gcov_check_report_gcov
+fi
+
 _wrapup()
 {
        seq="check"
        check="$RESULT_BASE/check"
+       $interrupt && sect_stop=`_wallclock`
 
-       if $showme; then
-               if $needwrap; then
-                       if $do_report; then
-                               _make_section_report
-                       fi
-                       needwrap=false
+       if $showme && $needwrap; then
+               if $do_report; then
+                       # $showme = all selected tests are notrun (no tries)
+                       _make_section_report "$section" "${#notrun[*]}" "0" \
+                                            "${#notrun[*]}" \
+                                            "$((sect_stop - sect_start))"
                fi
+               needwrap=false
        elif $needwrap; then
                if [ -f $check.time -a -f $tmp.time ]; then
                        cat $check.time $tmp.time  \
@@ -432,48 +482,62 @@ _wrapup()
 
                echo "SECTION       -- $section" >>$tmp.summary
                echo "=========================" >>$tmp.summary
-               if [ ! -z "$n_try" -a $n_try != 0 ]; then
+               if ((${#try[*]} > 0)); then
                        if [ $brief_test_summary == "false" ]; then
-                               echo "Ran:$try"
-                               echo "Ran:$try" >>$tmp.summary
+                               echo "Ran: ${try[*]}"
+                               echo "Ran: ${try[*]}" >>$tmp.summary
                        fi
-                       _global_log "Ran:$try"
+                       _global_log "Ran: ${try[*]}"
                fi
 
                $interrupt && echo "Interrupted!" | tee -a $check.log
-               if $OPTIONS_HAVE_SECIONS; then
+               if $OPTIONS_HAVE_SECTIONS; then
                        $interrupt && echo "Interrupted!" | tee -a \
                                ${REPORT_DIR}/check.log
                fi
 
-               if [ ! -z "$notrun" ]; then
+               if ((${#notrun[*]} > 0)); then
                        if [ $brief_test_summary == "false" ]; then
-                               echo "Not run:$notrun"
-                               echo "Not run:$notrun" >>$tmp.summary
+                               echo "Not run: ${notrun[*]}"
+                               echo "Not run: ${notrun[*]}" >>$tmp.summary
                        fi
-                       _global_log "Not run:$notrun"
+                       _global_log "Not run: ${notrun[*]}"
                fi
 
-               if [ ! -z "$n_bad" -a $n_bad != 0 ]; then
-                       echo "Failures:$bad"
-                       echo "Failed $n_bad of $n_try tests"
-                       _global_log "Failures:$bad"
-                       _global_log "Failed $n_bad of $n_try tests"
-                       echo "Failures:$bad" >>$tmp.summary
-                       echo "Failed $n_bad of $n_try tests" >>$tmp.summary
+               if ((${#bad[*]} > 0)); then
+                       echo "Failures: ${bad[*]}"
+                       echo "Failed ${#bad[*]} of ${#try[*]} tests"
+                       _global_log "Failures: ${bad[*]}"
+                       _global_log "Failed ${#bad[*]} of ${#try[*]} tests"
+                       echo "Failures: ${bad[*]}" >>$tmp.summary
+                       echo "Failed ${#bad[*]} of ${#try[*]} tests" >>$tmp.summary
                else
-                       echo "Passed all $n_try tests"
-                       _global_log "Passed all $n_try tests"
-                       echo "Passed all $n_try tests" >>$tmp.summary
+                       echo "Passed all ${#try[*]} tests"
+                       _global_log "Passed all ${#try[*]} tests"
+                       echo "Passed all ${#try[*]} tests" >>$tmp.summary
                fi
                echo "" >>$tmp.summary
                if $do_report; then
-                       _make_section_report
+                       _make_section_report "$section" "${#try[*]}" \
+                                            "${#bad[*]}" "${#notrun[*]}" \
+                                            "$((sect_stop - sect_start))"
+               fi
+
+               # Generate code coverage report
+               if [ -n "$REPORT_GCOV" ]; then
+                       # don't trigger multiple times if caller hits ^C
+                       local gcov_report_dir="$REPORT_GCOV"
+                       test "$gcov_report_dir" = "1" && \
+                               gcov_report_dir="$REPORT_DIR/gcov"
+                       unset REPORT_GCOV
+
+                       _gcov_generate_report "$gcov_report_dir"
                fi
+
                needwrap=false
        fi
 
-       sum_bad=`expr $sum_bad + $n_bad`
+       sum_bad=`expr $sum_bad + ${#bad[*]}`
        _wipe_counters
        rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
        if ! $OPTIONS_HAVE_SECTIONS; then
@@ -496,44 +560,150 @@ _summary()
 
 _check_filesystems()
 {
+       local ret=0
+
        if [ -f ${RESULT_DIR}/require_test ]; then
-               _check_test_fs || err=true
+               if ! _check_test_fs ; then
+                       ret=1
+                       echo "Trying to repair broken TEST_DEV file system"
+                       _repair_test_fs
+                       _test_mount
+               fi
                rm -f ${RESULT_DIR}/require_test*
        else
                _test_unmount 2> /dev/null
        fi
        if [ -f ${RESULT_DIR}/require_scratch ]; then
-               _check_scratch_fs || err=true
+               _check_scratch_fs || ret=1
                rm -f ${RESULT_DIR}/require_scratch*
        fi
        _scratch_unmount 2> /dev/null
+       return $ret
 }
 
 _expunge_test()
 {
        local TEST_ID="$1"
-       if [ -s $tmp.xlist ]; then
-               if grep -q $TEST_ID $tmp.xlist; then
+
+       for f in "${exclude_tests[@]}"; do
+               # $f may contain trailing spaces and comments
+               local id_regex="^${TEST_ID}\b"
+               if [[ "$f" =~ ${id_regex} ]]; then
                        echo "       [expunged]"
-                       return 1
+                       return 0
                fi
+       done
+       return 1
+}
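
The \b word boundary in the expunge regex matters for both halves of the
comment above: an entry may carry trailing whitespace or a comment, and a
short test id must not match a longer one that shares its prefix. With
hypothetical entries:

    exclude_tests=("generic/001 # flaky here" "generic/0019")
    TEST_ID="generic/001"
    id_regex="^${TEST_ID}\b"
    for f in "${exclude_tests[@]}"; do
            [[ "$f" =~ $id_regex ]] && echo "expunged: $f"
    done
    # matches only "generic/001 # flaky here", never "generic/0019"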
+
+# retain files which would be overwritten in subsequent reruns of the same test
+_stash_fail_loop_files() {
+       local seq_prefix="${REPORT_DIR}/${1}"
+       local cp_suffix="$2"
+
+       for i in ".full" ".dmesg" ".out.bad" ".notrun" ".core" ".hints"; do
+               rm -f "${seq_prefix}${i}${cp_suffix}"
+               if [ -f "${seq_prefix}${i}" ]; then
+                       cp "${seq_prefix}${i}" "${seq_prefix}${i}${cp_suffix}"
+               fi
+       done
+}
+
+# Retain in @bad / @notrun the result of the just-run @test_seq. @try array
+# entries are added prior to execution.
+_stash_test_status() {
+       local test_seq="$1"
+       local test_status="$2"
+
+       if $do_report && [[ $test_status != "expunge" ]]; then
+               _make_testcase_report "$section" "$test_seq" \
+                                     "$test_status" "$((stop - start))"
        fi
-       return 0
+
+       if ((${#loop_status[*]} > 0)); then
+               # continuing or completing rerun-on-failure loop
+               _stash_fail_loop_files "$test_seq" ".rerun${#loop_status[*]}"
+               loop_status+=("$test_status")
+               if ((${#loop_status[*]} > loop_on_fail)); then
+                       printf "%s aggregate results across %d runs: " \
+                               "$test_seq" "${#loop_status[*]}"
+                       awk "BEGIN {
+                               n=split(\"${loop_status[*]}\", arr);"'
+                               for (i = 1; i <= n; i++)
+                                       stats[arr[i]]++;
+                               for (x in stats)
+                                       printf("%s=%d (%.1f%%)",
+                                              (i-- > n ? x : ", " x),
+                                              stats[x], 100 * stats[x] / n);
+                               }'
+                       echo
+                       loop_status=()
+               fi
+               return  # only stash @bad result for initial failure in loop
+       fi
+
+       case "$test_status" in
+       fail)
+               if ((loop_on_fail > 0)); then
+                       # initial failure, start rerun-on-failure loop
+                       _stash_fail_loop_files "$test_seq" ".rerun0"
+                       loop_status+=("$test_status")
+               fi
+               bad+=("$test_seq")
+               ;;
+       list|notrun)
+               notrun+=("$test_seq")
+               ;;
+       pass|expunge)
+               ;;
+       *)
+               echo "Unexpected test $test_seq status: $test_status"
+               ;;
+       esac
 }
 
+# Can we run systemd scopes?
+HAVE_SYSTEMD_SCOPES=
+systemctl reset-failed "fstests-check" &>/dev/null
+systemd-run --quiet --unit "fstests-check" --scope bash -c "exit 77" &> /dev/null
+test $? -eq 77 && HAVE_SYSTEMD_SCOPES=yes
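
systemd-run --scope executes the command in the caller's foreground, so the
sentinel exit code 77 propagates straight back through $? and proves the whole
scope machinery works; reset-failed first clears any leftover failed unit of
the same name. The same probe, standalone:

    systemctl reset-failed "probe-scope" &>/dev/null
    systemd-run --quiet --unit "probe-scope" --scope bash -c "exit 77" &>/dev/null
    if [ $? -eq 77 ]; then
            echo "transient scopes usable: a stuck test can be killed as a group"
    else
            echo "no systemd scopes: tests run as plain child processes"
    fi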
+
 # Make the check script unattractive to the OOM killer...
 OOM_SCORE_ADJ="/proc/self/oom_score_adj"
-test -w ${OOM_SCORE_ADJ} && echo -1000 > ${OOM_SCORE_ADJ}
+function _adjust_oom_score() {
+       test -w "${OOM_SCORE_ADJ}" && echo "$1" > "${OOM_SCORE_ADJ}"
+}
+_adjust_oom_score -500
 
 # ...and make the tests themselves somewhat more attractive to it, so that if
 # the system runs out of memory it'll be the test that gets killed and not the
-# test framework.
+# test framework.  The test is run in a separate process without any of our
+# functions, so we open-code adjusting the OOM score.
+#
+# If systemd is available, run the entire test script in a scope so that we can
+# kill all subprocesses of the test if it fails to clean up after itself.  This
+# is essential for ensuring that the post-test unmount succeeds.  Note that
+# systemd doesn't automatically remove transient scopes that fail to terminate
+# when systemd tells them to terminate (e.g. programs stuck in D state when
+# systemd sends SIGKILL), so we use reset-failed to tear down the scope.
 _run_seq() {
-       bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq"
+       local cmd=(bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq")
+
+       if [ -n "${HAVE_SYSTEMD_SCOPES}" ]; then
+               local unit="$(systemd-escape "fs$seq").scope"
+               systemctl reset-failed "${unit}" &> /dev/null
+               systemd-run --quiet --unit "${unit}" --scope "${cmd[@]}"
+               res=$?
+               systemctl stop "${unit}" &> /dev/null
+               return "${res}"
+       else
+               "${cmd[@]}"
+       fi
 }
 
 _detect_kmemleak
 _prepare_test_list
+fstests_start_time="$(date +"%F %T")"
 
 if $OPTIONS_HAVE_SECTIONS; then
        trap "_summary; exit \$status" 0 1 2 3 15
@@ -543,11 +713,10 @@ fi
 
 function run_section()
 {
-       local section=$1
+       local section=$1 skip
 
        OLD_FSTYP=$FSTYP
        OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS
-       get_next_config $section
 
        # Do we need to run only some sections ?
        if [ ! -z "$RUN_SECTION" ]; then
@@ -577,6 +746,9 @@ function run_section()
                fi
        fi
 
+       get_next_config $section
+       _canonicalize_devices
+
        mkdir -p $RESULT_BASE
        if [ ! -d $RESULT_BASE ]; then
                echo "failed to create results directory $RESULT_BASE"
@@ -606,6 +778,10 @@ function run_section()
                        status=1
                        exit
                fi
+               # TEST_DEV has been recreated, so the FSTYP previously derived
+               # from TEST_DEV may have changed; source common/rc again with
+               # the correct FSTYP to get FSTYP specific configs, e.g. common/xfs
+               . common/rc
                _prepare_test_list
        elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then
                _test_unmount 2> /dev/null
@@ -635,6 +811,7 @@ function run_section()
          echo "MOUNT_OPTIONS -- `_scratch_mount_options`"
        fi
        echo
+       test -n "$REPORT_GCOV" && _gcov_reset
        needwrap=true
 
        if [ ! -z "$SCRATCH_DEV" ]; then
@@ -668,25 +845,12 @@ function run_section()
        seqres="$check"
        _check_test_fs
 
-       err=false
-       first_test=true
-       prev_seq=""
-       for seq in $list ; do
-               # Run report for previous test!
-               if $err ; then
-                       bad="$bad $seqnum"
-                       n_bad=`expr $n_bad + 1`
-                       tc_status="fail"
-               fi
-               if $do_report && ! $first_test ; then
-                       if [ $tc_status != "expunge" ] ; then
-                               _make_testcase_report "$prev_seq" "$tc_status"
-                       fi
-               fi
-               first_test=false
+       loop_status=()  # track rerun-on-failure state
+       local tc_status ix
+       local -a _list=( $list )
+       for ((ix = 0; ix < ${#_list[*]}; !${#loop_status[*]} && ix++)); do
+               seq="${_list[$ix]}"
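
The third clause of this for loop, !${#loop_status[*]} && ix++, only advances
the index while loop_status is empty; as long as _stash_test_status keeps the
rerun-on-failure loop alive, the same test executes again. A stripped-down
model of the control flow:

    _list=(a b c)
    loop_status=()
    runs=0
    for ((ix = 0; ix < ${#_list[*]}; !${#loop_status[*]} && ix++)); do
            ((runs++))
            if [[ ${_list[$ix]} == b && ${#loop_status[*]} -lt 2 ]]; then
                    loop_status+=("fail")   # hold position, rerun "b"
            else
                    loop_status=()          # done, move to the next entry
            fi
    done
    echo "$runs runs"   # 5 runs: a, b, b, b, c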
 
-               err=false
-               prev_seq="$seq"
                if [ ! -f $seq ]; then
                        # Try to get full name in case the user supplied only
                        # seq id and the test has a name. A bit of hassle to
@@ -704,70 +868,72 @@ function run_section()
 
                # the filename for the test and the name output are different.
                # we don't include the tests/ directory in the name output.
-               export seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`
-
-               # Similarly, the result directory needs to replace the tests/
-               # part of the test location.
-               group=`dirname $seq`
+               export seqnum=${seq#$SRC_DIR/}
+               group=${seqnum%%/*}
                if $OPTIONS_HAVE_SECTIONS; then
-                       export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"`
                        REPORT_DIR="$RESULT_BASE/$section"
                else
-                       export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
                        REPORT_DIR="$RESULT_BASE"
                fi
+               export RESULT_DIR="$REPORT_DIR/$group"
                seqres="$REPORT_DIR/$seqnum"
 
-               mkdir -p $RESULT_DIR
-               rm -f ${RESULT_DIR}/require_scratch*
-               rm -f ${RESULT_DIR}/require_test*
+               # Generate the entire section report with whatever test results
+               # we have so far.  Leave the $sect_time parameter empty so that
+               # it's a little more obvious that this test run is incomplete.
+               if $do_report; then
+                       _make_section_report "$section" "${#try[*]}" \
+                                            "${#bad[*]}" "${#notrun[*]}" \
+                                            "" &> /dev/null
+               fi
+
                echo -n "$seqnum"
 
                if $showme; then
-                       _expunge_test $seqnum
-                       if [ $? -eq 1 ]; then
-                           tc_status="expunge"
-                           continue
+                       if _expunge_test $seqnum; then
+                               tc_status="expunge"
+                       else
+                               echo
+                               start=0
+                               stop=0
+                               tc_status="list"
                        fi
-                       echo
-                       start=0
-                       stop=0
-                       tc_status="list"
-                       n_notrun=`expr $n_notrun + 1`
+                       _stash_test_status "$seqnum" "$tc_status"
                        continue
                fi
 
                tc_status="pass"
                if [ ! -f $seq ]; then
                        echo " - no such test?"
+                       _stash_test_status "$seqnum" "$tc_status"
                        continue
                fi
 
                # really going to try and run this one
-               rm -f $seqres.out.bad
+               mkdir -p $RESULT_DIR
+               rm -f ${RESULT_DIR}/require_scratch*
+               rm -f ${RESULT_DIR}/require_test*
+               rm -f $seqres.out.bad $seqres.hints
 
                # check if we really should run it
-               _expunge_test $seqnum
-               if [ $? -eq 1 ]; then
+               if _expunge_test $seqnum; then
                        tc_status="expunge"
+                       _stash_test_status "$seqnum" "$tc_status"
                        continue
                fi
 
                # record that we really tried to run this test.
-               try="$try $seqnum"
-               n_try=`expr $n_try + 1`
-
-               # slashes now in names, sed barfs on them so use grep
-               lasttime=`grep -w ^$seqnum $check.time | awk '// {print $2}'`
-               if [ "X$lasttime" != X ]; then
-                       echo -n " ${lasttime}s ... "
-               else
-                       echo -n "       " # prettier output with timestamps.
+               if ((!${#loop_status[*]})); then
+                       try+=("$seqnum")
                fi
+
+               awk 'BEGIN {lasttime="       "} \
+                    $1 == "'$seqnum'" {lasttime=" " $2 "s ... "; exit} \
+                    END {printf "%s", lasttime}' "$check.time"
                rm -f core $seqres.notrun
 
                start=`_wallclock`
-               $timestamp && echo -n " ["`date "+%T"`"]"
+               $timestamp && _timestamp
                [ ! -x $seq ] && chmod u+x $seq # ensure we can run it
                $LOGGER_PROG "run xfstest $seqnum"
                if [ -w /dev/kmsg ]; then
@@ -775,6 +941,7 @@ function run_section()
                        echo "run fstests $seqnum at $date_time" > /dev/kmsg
                        # _check_dmesg depends on this log in dmesg
                        touch ${RESULT_DIR}/check_dmesg
+                       rm -f ${RESULT_DIR}/dmesg_filter
                fi
                _try_wipe_scratch_devs > /dev/null 2>&1
 
@@ -782,6 +949,7 @@ function run_section()
                # to be reported for each test
                (echo 1 > $DEBUGFS_MNT/clear_warn_once) > /dev/null 2>&1
 
+               test_start_time="$(date +"%F %T")"
                if [ "$DUMP_OUTPUT" = true ]; then
                        _run_seq 2>&1 | tee $tmp.out
                        # Because $? would get tee's return code
@@ -791,11 +959,19 @@ function run_section()
                        sts=$?
                fi
 
-               if [ -f core ]; then
-                       _dump_err_cont "[dumped core]"
-                       mv core $RESULT_BASE/$seqnum.core
-                       err=true
-               fi
+               # If someone sets kernel.core_pattern or kernel.core_uses_pid,
+               # coredumps generated by fstests might have a longer name than
+               # just "core".  Use globbing to find the most common patterns,
+               # assuming there are no other coredump capture packages set up.
+               local cores=0
+               for i in core core.*; do
+                       test -f "$i" || continue
+                       if ((cores++ == 0)); then
+                               _dump_err_cont "[dumped core]"
+                       fi
+                       (_adjust_oom_score 250; _save_coredump "$i")
+                       tc_status="fail"
+               done
 
                if [ -f $seqres.notrun ]; then
                        $timestamp && _timestamp
@@ -804,9 +980,12 @@ function run_section()
                        $timestamp && echo " [not run]" && \
                                      echo -n " $seqnum -- "
                        cat $seqres.notrun
-                       notrun="$notrun $seqnum"
-                       n_notrun=`expr $n_notrun + 1`
                        tc_status="notrun"
+                       _stash_test_status "$seqnum" "$tc_status"
+
+                       # Unmount the scratch fs so that we can wipe the scratch
+                       # dev state prior to the next test run.
+                       _scratch_unmount 2> /dev/null
                        continue;
                fi
 
@@ -816,12 +995,29 @@ function run_section()
                        _scratch_unmount 2> /dev/null
                        rm -f ${RESULT_DIR}/require_test*
                        rm -f ${RESULT_DIR}/require_scratch*
-                       err=true
+                       # Even though we failed, there may be something interesting in
+                       # dmesg which can help debugging.
+                       _check_dmesg
+                       (_adjust_oom_score 250; _check_filesystems)
+                       tc_status="fail"
                else
-                       # the test apparently passed, so check for corruption
-                       # and log messages that shouldn't be there.
-                       _check_filesystems
-                       _check_dmesg || err=true
+                       # The test apparently passed, so check for corruption
+                       # and log messages that shouldn't be there.  Run the
+                       # checking tools from a subshell with adjusted OOM
+                       # score so that the OOM killer will target them instead
+                       # of the check script itself.
+                       (_adjust_oom_score 250; _check_filesystems) || tc_status="fail"
+                       _check_dmesg || tc_status="fail"
+
+                       # Save any coredumps from the post-test fs checks
+                       for i in core core.*; do
+                               test -f "$i" || continue
+                               if ((cores++ == 0)); then
+                                       _dump_err_cont "[dumped core]"
+                               fi
+                               (_adjust_oom_score 250; _save_coredump "$i")
+                               tc_status="fail"
+                       done
                fi
 
                # Reload the module after each test to check for leaks or
@@ -835,7 +1031,7 @@ function run_section()
 
                # Scan for memory leaks after every test so that associating
                # a leak to a particular test will be as accurate as possible.
-               _check_kmemleak || err=true
+               _check_kmemleak || tc_status="fail"
 
                # test ends after all checks are done.
                $timestamp && _timestamp
@@ -843,7 +1039,8 @@ function run_section()
 
                if [ ! -f $seq.out ]; then
                        _dump_err "no qualified output"
-                       err=true
+                       tc_status="fail"
+                       _stash_test_status "$seqnum" "$tc_status"
                        continue;
                fi
 
@@ -852,7 +1049,7 @@ function run_section()
                # version.
                sed -i "s/\`/\'/g" $tmp.out
                if diff $seq.out $tmp.out >/dev/null 2>&1 ; then
-                       if ! $err ; then
+                       if [ "$tc_status" != "fail" ]; then
                                echo "$seqnum `expr $stop - $start`" >>$tmp.time
                                echo -n " `expr $stop - $start`s"
                        fi
@@ -869,21 +1066,18 @@ function run_section()
                                echo "(Run '$diff $here/$seq.out $seqres.out.bad'" \
                                        " to see the entire diff)"
                        fi; } | sed -e 's/^\(.\)/    \1/'
-                       err=true
+                       tc_status="fail"
                fi
-       done
-
-       # make sure we record the status of the last test we ran.
-       if $err ; then
-               bad="$bad $seqnum"
-               n_bad=`expr $n_bad + 1`
-               tc_status="fail"
-       fi
-       if $do_report && ! $first_test ; then
-               if [ $tc_status != "expunge" ] ; then
-                       _make_testcase_report "$prev_seq" "$tc_status"
+               if [ -f $seqres.hints ]; then
+                       if [ "$tc_status" == "fail" ]; then
+                               echo
+                               cat $seqres.hints
+                       else
+                               rm -f $seqres.hints
+                       fi
                fi
-       fi
+               _stash_test_status "$seqnum" "$tc_status"
+       done
 
        sect_stop=`_wallclock`
        interrupt=false
@@ -898,6 +1092,11 @@ function run_section()
 for ((iters = 0; iters < $iterations; iters++)) do
        for section in $HOST_OPTIONS_SECTIONS; do
                run_section $section
+               if [ "$sum_bad" != 0 ] && [ "$istop" = true ]; then
+                       interrupt=false
+                       status=`expr $sum_bad != 0`
+                       exit
+               fi
        done
 done