xfs: Verify correctness of upgrading an fs to support large extent counters
diff --git a/check b/check
index 8b5e241ca90c0ed9d151e59b4808a3f77e595d45..de11b37e1346806d80158521b28a1752aeb63092 100755 (executable)
--- a/check
+++ b/check
@@ -20,6 +20,7 @@ diff="diff -u"
 showme=false
 have_test_arg=false
 randomize=false
+exact_order=false
 export here=`pwd`
 xfile=""
 subdir_xfile=""
@@ -27,6 +28,7 @@ brief_test_summary=false
 do_report=false
 DUMP_OUTPUT=false
 iterations=1
+istop=false
 
 # This is a global variable used to pass test failure text to reporting gunk
 _err_msg=""
@@ -67,7 +69,9 @@ check options
     -n                 show me, do not run tests
     -T                 output timestamps
     -r                 randomize test order
+    --exact-order      run tests in the exact order specified
     -i <n>             iterate the test list <n> times
+    -I <n>             iterate the test list <n> times, but stop iterating on any test failure
     -d                 dump test output to stdout
     -b                 brief test summary
     -R fmt[,fmt]       generate report in formats specified. Supported format: [xunit]
@@ -79,6 +83,7 @@ testlist options
     -g group[,group...]        include tests from these groups
     -x group[,group...]        exclude tests from these groups
     -X exclude_file    exclude individual tests
+    -e testlist         exclude a specific list of tests
     -E external_file   exclude individual tests
     [testlist]         include tests matching names in testlist
 
@@ -111,7 +116,7 @@ examples:
  check -X .exclude -g auto
  check -E ~/.xfstests.exclude
 '
-           exit 0
+           exit 1
 }
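A rough usage sketch for the new options (the test names here are made up for
illustration, not taken from the patch):

    # run the listed tests in exactly the order given, up to three times,
    # stopping early once any test fails
    ./check -I 3 --exact-order generic/001 generic/005 generic/003

    # run the auto group while excluding a comma- or space-separated list of tests
    ./check -g auto -e generic/017,generic/204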
 
 get_sub_group_list()
@@ -119,9 +124,9 @@ get_sub_group_list()
        local d=$1
        local grp=$2
 
-       test -s "$SRC_DIR/$d/group" || return 1
+       test -s "$SRC_DIR/$d/group.list" || return 1
 
-       local grpl=$(sed -n < $SRC_DIR/$d/group \
+       local grpl=$(sed -n < $SRC_DIR/$d/group.list \
                -e 's/#.*//' \
                -e 's/$/ /' \
                -e "s;^\($VALID_TEST_NAME\).* $grp .*;$SRC_DIR/$d/\1;p")
@@ -242,23 +247,28 @@ _prepare_test_list()
                list=$(get_group_list $xgroup)
                if [ -z "$list" ]; then
                        echo "Group \"$xgroup\" is empty or not defined?"
-                       exit 1
+                       continue
                fi
 
                trim_test_list $list
        done
 
-       # sort the list of tests into numeric order
-       if $randomize; then
-               if type shuf >& /dev/null; then
-                       sorter="shuf"
+       # sort the list of tests into numeric order unless we're running tests
+       # in the exact order specified
+       if ! $exact_order; then
+               if $randomize; then
+                       if type shuf >& /dev/null; then
+                               sorter="shuf"
+                       else
+                               sorter="awk -v seed=$RANDOM -f randomize.awk"
+                       fi
                else
-                       sorter="awk -v seed=$RANDOM -f randomize.awk"
+                       sorter="cat"
                fi
+               list=`sort -n $tmp.list | uniq | $sorter`
        else
-               sorter="cat"
+               list=`cat $tmp.list`
        fi
-       list=`sort -n $tmp.list | uniq | $sorter`
        rm -f $tmp.list
 }
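A minimal sketch of the resulting ordering, assuming a hypothetical $tmp.list
containing generic/100, generic/002, generic/100 (one entry per line):

    # default path: numeric sort plus de-duplication (and an optional shuffle)
    sort -n $tmp.list | uniq    # -> generic/002 generic/100

    # --exact-order path: the list is taken verbatim, duplicates included
    cat $tmp.list               # -> generic/100 generic/002 generic/100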
 
@@ -287,6 +297,11 @@ while [ $# -gt 0 ]; do
 
        -X)     subdir_xfile=$2; shift ;
                ;;
+       -e)
+               xfile=$2; shift ;
+               echo "$xfile" | tr ', ' '\n\n' >> $tmp.xlist
+               ;;
+
        -E)     xfile=$2; shift ;
                if [ -f $xfile ]; then
                        sed "s/#.*$//" "$xfile" >> $tmp.xlist
@@ -298,8 +313,22 @@ while [ $# -gt 0 ]; do
        -udiff) diff="$diff -u" ;;
 
        -n)     showme=true ;;
-        -r)    randomize=true ;;
+       -r)
+               if $exact_order; then
+                       echo "Cannot specify -r and --exact-order."
+                       exit 1
+               fi
+               randomize=true
+               ;;
+       --exact-order)
+               if $randomize; then
+                       echo "Cannot specify --exact-order and -r."
+                       exit 1
+               fi
+               exact_order=true
+               ;;
        -i)     iterations=$2; shift ;;
+       -I)     iterations=$2; istop=true; shift ;;
        -T)     timestamp=true ;;
        -d)     DUMP_OUTPUT=true ;;
        -b)     brief_test_summary=true;;
@@ -355,7 +384,7 @@ if $have_test_arg; then
                                test_dir=`dirname $t`
                                test_dir=${test_dir#$SRC_DIR/*}
                                test_name=`basename $t`
-                               group_file=$SRC_DIR/$test_dir/group
+                               group_file=$SRC_DIR/$test_dir/group.list
 
                                if egrep -q "^$test_name" $group_file; then
                                        # in group file ... OK
@@ -393,7 +422,7 @@ _wipe_counters()
 
 _global_log() {
        echo "$1" >> $check.log
-       if $OPTIONS_HAVE_SECIONS; then
+       if $OPTIONS_HAVE_SECTIONS; then
                echo "$1" >> ${REPORT_DIR}/check.log
        fi
 }
@@ -441,7 +470,7 @@ _wrapup()
                fi
 
                $interrupt && echo "Interrupted!" | tee -a $check.log
-               if $OPTIONS_HAVE_SECIONS; then
+               if $OPTIONS_HAVE_SECTIONS; then
                        $interrupt && echo "Interrupted!" | tee -a \
                                ${REPORT_DIR}/check.log
                fi
@@ -496,17 +525,20 @@ _summary()
 
 _check_filesystems()
 {
+       local ret=0
+
        if [ -f ${RESULT_DIR}/require_test ]; then
-               _check_test_fs || err=true
+               _check_test_fs || ret=1
                rm -f ${RESULT_DIR}/require_test*
        else
                _test_unmount 2> /dev/null
        fi
        if [ -f ${RESULT_DIR}/require_scratch ]; then
-               _check_scratch_fs || err=true
+               _check_scratch_fs || ret=1
                rm -f ${RESULT_DIR}/require_scratch*
        fi
        _scratch_unmount 2> /dev/null
+       return $ret
 }
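Returning a status matters here because a later hunk runs _check_filesystems in
a subshell with an adjusted OOM score: assignments made inside a subshell never
reach the parent shell, so setting err=true there would simply be lost, while
an exit status still propagates.  A minimal bash illustration:

    err=false
    ( err=true )              # has no effect on the parent shell
    ( exit 1 ) || err=true    # the exit status does propagate
    echo $err                 # prints "true"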
 
 _expunge_test()
@@ -521,15 +553,43 @@ _expunge_test()
        return 0
 }
 
+# Can we run systemd scopes?
+HAVE_SYSTEMD_SCOPES=
+systemctl reset-failed "fstests-check" &>/dev/null
+systemd-run --quiet --unit "fstests-check" --scope bash -c "exit 77" &> /dev/null
+test $? -eq 77 && HAVE_SYSTEMD_SCOPES=yes
+
 # Make the check script unattractive to the OOM killer...
 OOM_SCORE_ADJ="/proc/self/oom_score_adj"
-test -w ${OOM_SCORE_ADJ} && echo -1000 > ${OOM_SCORE_ADJ}
+function _adjust_oom_score() {
+       test -w "${OOM_SCORE_ADJ}" && echo "$1" > "${OOM_SCORE_ADJ}"
+}
+_adjust_oom_score -500
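# Background: /proc/self/oom_score_adj accepts values from -1000 (effectively
# exempt from the OOM killer) up to 1000 (preferred victim).  The -500 above
# shields the check script itself, while the 250 written in _run_seq below
# deliberately makes the test process the more attractive target.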
 
 # ...and make the tests themselves somewhat more attractive to it, so that if
 # the system runs out of memory it'll be the test that gets killed and not the
-# test framework.
+# test framework.  The test is run in a separate process without any of our
+# functions, so we open-code adjusting the OOM score.
+#
+# If systemd is available, run the entire test script in a scope so that we can
+# kill all subprocesses of the test if it fails to clean up after itself.  This
+# is essential for ensuring that the post-test unmount succeeds.  Note that
+# systemd doesn't automatically remove transient scopes that fail to terminate
+# when systemd tells them to terminate (e.g. programs stuck in D state when
+# systemd sends SIGKILL), so we use reset-failed to tear down the scope.
 _run_seq() {
-       bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq"
+       local cmd=(bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq")
+
+       if [ -n "${HAVE_SYSTEMD_SCOPES}" ]; then
+               local unit="$(systemd-escape "fs$seq").scope"
+               systemctl reset-failed "${unit}" &> /dev/null
+               systemd-run --quiet --unit "${unit}" --scope "${cmd[@]}"
+               res=$?
+               systemctl stop "${unit}" &> /dev/null
+               return "${res}"
+       else
+               "${cmd[@]}"
+       fi
 }
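A standalone sketch of the scope lifecycle that _run_seq relies on (the unit
name below is hypothetical):

    unit="$(systemd-escape "fstests-demo").scope"
    systemctl reset-failed "$unit" &>/dev/null    # clear any stale failed state
    systemd-run --quiet --unit "$unit" --scope \
        bash -c 'sleep 300 & exit 0'              # exits but leaves a child behind
    systemctl stop "$unit" &>/dev/null            # stopping the scope kills the stray sleep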
 
 _detect_kmemleak
@@ -606,6 +666,10 @@ function run_section()
                        status=1
                        exit
                fi
+               # TEST_DEV has been recreated, previous FSTYP derived from
+               # TEST_DEV could be changed, source common/rc again with
+               # correct FSTYP to get FSTYP specific configs, e.g. common/xfs
+               . common/rc
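               # (Illustration, not from the patch: common/rc pulls in the
               # per-fs helpers keyed on FSTYP, roughly along the lines of
               #     [ -f common/$FSTYP ] && . ./common/$FSTYP
               # which is why it must be re-sourced once FSTYP may have changed.)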
                _prepare_test_list
        elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then
                _test_unmount 2> /dev/null
@@ -744,7 +808,7 @@ function run_section()
                fi
 
                # really going to try and run this one
-               rm -f $seqres.out.bad
+               rm -f $seqres.out.bad $seqres.hints
 
                # check if we really should run it
                _expunge_test $seqnum
@@ -807,6 +871,10 @@ function run_section()
                        notrun="$notrun $seqnum"
                        n_notrun=`expr $n_notrun + 1`
                        tc_status="notrun"
+
+                       # Unmount the scratch fs so that we can wipe the scratch
+                       # dev state prior to the next test run.
+                       _scratch_unmount 2> /dev/null
                        continue;
                fi
 
@@ -818,9 +886,12 @@ function run_section()
                        rm -f ${RESULT_DIR}/require_scratch*
                        err=true
                else
-                       # the test apparently passed, so check for corruption
-                       # and log messages that shouldn't be there.
-                       _check_filesystems
+                       # The test apparently passed, so check for corruption
+                       # and log messages that shouldn't be there.  Run the
+                       # checking tools from a subshell with adjusted OOM
+                       # score so that the OOM killer will target them instead
+                       # of the check script itself.
+                       (_adjust_oom_score 250; _check_filesystems) || err=true
                        _check_dmesg || err=true
                fi
 
@@ -871,6 +942,14 @@ function run_section()
                        fi; } | sed -e 's/^\(.\)/    \1/'
                        err=true
                fi
+               if [ -f $seqres.hints ]; then
+                       if $err; then
+                               echo
+                               cat $seqres.hints
+                       else
+                               rm -f $seqres.hints
+                       fi
+               fi
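               # (Illustration, not from the patch: a test or helper can leave
               # a hint file that check prints only on failure, e.g.
               #     echo "see dmesg for details" > $seqres.hints
               # on success the file is removed again just above.)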
        done
 
        # make sure we record the status of the last test we ran.
@@ -898,6 +977,11 @@ function run_section()
 for ((iters = 0; iters < $iterations; iters++)) do
        for section in $HOST_OPTIONS_SECTIONS; do
                run_section $section
+               if [ "$sum_bad" != 0 ] && [ "$istop" = true ]; then
+                       interrupt=false
+                       status=`expr $sum_bad != 0`
+                       exit
+               fi
        done
 done