#!/bin/bash
-#
-# Control script for QA
-#
+# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2000-2002,2006 Silicon Graphics, Inc. All Rights Reserved.
#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it would be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-#
+# Control script for QA
#
-
tmp=/tmp/$$
status=0
needwrap=true
+needsum=true
n_try=0
try=""
n_bad=0
+sum_bad=0
bad=""
+n_notrun=0
notrun=""
interrupt=true
diff="diff -u"
showme=false
have_test_arg=false
randomize=false
-here=`pwd`
-FSTYP=xfs
+exact_order=false
+export here=`pwd`
xfile=""
+subdir_xfile=""
+brief_test_summary=false
+do_report=false
+DUMP_OUTPUT=false
+iterations=1
+istop=false
+
+# This is a global variable used to pass test failure text to reporting gunk
+_err_msg=""
# start the initialisation work now
iam=check
# by default don't output timestamps
timestamp=${TIMESTAMP:=false}
-rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist
-
-# we need common/config
-if ! . ./common/config
-then
- echo "$iam: failed to source common/config"
- exit 1
-fi
-
-# Autodetect fs type based on what's on $TEST_DEV
-if [ "$HOSTOS" == "Linux" ]; then
- FSTYP=`blkid -c /dev/null -s TYPE -o value $TEST_DEV`
-fi
-export FSTYP
+rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist $tmp.report.*
-SUPPORTED_TESTS="[0-9][0-9][0-9] [0-9][0-9][0-9][0-9]"
SRC_GROUPS="generic shared"
export SRC_DIR="tests"
-export RESULT_BASE=${RESULT_BASE:="$here/results"}
usage()
{
echo "Usage: $0 [options] [testlist]"'
check options
- -xfs test XFS (default)
- -udf test UDF
- -nfs test NFS
+ -nfs test NFS
+ -glusterfs test GlusterFS
+ -cifs test CIFS
+ -9p test 9p
+ -virtiofs test virtiofs
+ -overlay test overlay
+ -pvfs2 test PVFS2
+ -tmpfs test TMPFS
+ -ubifs test ubifs
-l line mode diff
-udiff show unified diff (default)
-n show me, do not run tests
-T output timestamps
-r randomize test order
+ --exact-order run tests in the exact order specified
+ -i <n> iterate the test list <n> times
+ -I <n> iterate the test list <n> times, but stops iterating further in case of any test failure
+ -d dump test output to stdout
+ -b brief test summary
+ -R fmt[,fmt] generate report in formats specified. Supported format: [xunit]
--large-fs optimise scratch device for large filesystems
+ -s section run only specified section from config file
+ -S section exclude the specified section from the config file
testlist options
-g group[,group...] include tests from these groups
-x group[,group...] exclude tests from these groups
- -X file exclude individual tests
+ -X exclude_file exclude individual tests
+ -e testlist exclude a specific list of tests
+ -E external_file exclude individual tests
[testlist] include tests matching names in testlist
+
+testlist argument is a list of tests in the form of <test dir>/<test name>.
+
+<test dir> is a directory under tests that contains a group file,
+with a list of the names of the tests in that directory.
+
+<test name> may be either a specific test file name (e.g. xfs/001) or
+a test file name match pattern (e.g. xfs/*).
+
+group argument is either a name of a tests group to collect from all
+the test dirs (e.g. quick) or a name of a tests group to collect from
+a specific tests dir in the form of <test dir>/<group name> (e.g. xfs/quick).
+If you want to run all the tests in the test suite, use "-g all" to specify all
+groups.
+
+exclude_file argument refers to a name of a file inside each test directory.
+For every test dir where this file is found, the listed test names are
+excluded from the list of tests to run from that test dir.
+
+external_file argument is a path to a single file containing a list of tests
+to exclude in the form of <test dir>/<test name>.
+
+examples:
+ check xfs/001
+ check -g quick
+ check -g xfs/quick
+ check -x stress xfs/*
+ check -X .exclude -g auto
+ check -E ~/.xfstests.exclude
'
exit 0
}
+get_sub_group_list()
+{
+ local d=$1
+ local grp=$2
+
+ test -s "$SRC_DIR/$d/group" || return 1
+
+ local grpl=$(sed -n < $SRC_DIR/$d/group \
+ -e 's/#.*//' \
+ -e 's/$/ /' \
+ -e "s;^\($VALID_TEST_NAME\).* $grp .*;$SRC_DIR/$d/\1;p")
+ echo $grpl
+}
+
get_group_list()
{
- grp=$1
+ local grp=$1
+ local grpl=""
+ local sub=$(dirname $grp)
+ local fsgroup="$FSTYP"
+
+ if [ -n "$sub" -a "$sub" != "." -a -d "$SRC_DIR/$sub" ]; then
+ # group is given as <subdir>/<group> (e.g. xfs/quick)
+ grp=$(basename $grp)
+ get_sub_group_list $sub $grp
+ return
+ fi
- for d in $SRC_GROUPS $FSTYP; do
- l=$(sed -n < $SRC_DIR/$d/group \
- -e 's/#.*//' \
- -e 's/$/ /' \
- -e "s;\(^[0-9][0-9][0-9]\).* $grp .*;$SRC_DIR/$d/\1;p")
- grpl="$grpl $l"
+ if [ "$FSTYP" = ext2 -o "$FSTYP" = ext3 ]; then
+ fsgroup=ext4
+ fi
+ for d in $SRC_GROUPS $fsgroup; do
+ if ! test -d "$SRC_DIR/$d" ; then
+ continue
+ fi
+ grpl="$grpl $(get_sub_group_list $d $grp)"
done
echo $grpl
}
-# find all tests, excluding files that are test metadata such as group files.
-# This assumes that tests are defined purely by alphanumeric filenames with no
-# ".xyz" extensions in the name.
+# Find all tests, excluding files that are test metadata such as group files.
+# It matches test names against $VALID_TEST_NAME defined in common/rc
get_all_tests()
{
touch $tmp.list
for d in $SRC_GROUPS $FSTYP; do
+ if ! test -d "$SRC_DIR/$d" ; then
+ continue
+ fi
ls $SRC_DIR/$d/* | \
grep -v "\..*" | \
- grep -v group >> $tmp.list 2>/dev/null
+ grep "^$SRC_DIR/$d/$VALID_TEST_NAME"| \
+ grep -v "group\|Makefile" >> $tmp.list 2>/dev/null
done
}
done
grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
mv $tmp.tmp $tmp.list
+ rm -f $tmp.grep
}
_wallclock()
{
- date "+%H %M %S" | $AWK_PROG '{ print $1*3600 + $2*60 + $3 }'
+ date "+%s"
}
_timestamp()
echo -n " [$now]"
}
+_prepare_test_list()
+{
+ unset list
+ # Tests specified on the command line
+ if [ -s $tmp.arglist ]; then
+ cat $tmp.arglist > $tmp.list
+ else
+ touch $tmp.list
+ fi
+
+ # Specified groups to include
+ # Note that the CLI processing adds a leading space to the first group
+ # parameter, so we have to catch that here checking for "all"
+ if ! $have_test_arg && [ "$GROUP_LIST" == " all" ]; then
+ # no test numbers, do everything
+ get_all_tests
+ else
+ for group in $GROUP_LIST; do
+ list=$(get_group_list $group)
+ if [ -z "$list" ]; then
+ echo "Group \"$group\" is empty or not defined?"
+ exit 1
+ fi
+
+ for t in $list; do
+ grep -s "^$t\$" $tmp.list >/dev/null || \
+ echo "$t" >>$tmp.list
+ done
+ done
+ fi
+
+ # Specified groups to exclude
+ for xgroup in $XGROUP_LIST; do
+ list=$(get_group_list $xgroup)
+ if [ -z "$list" ]; then
+ echo "Group \"$xgroup\" is empty or not defined?"
+ continue
+ fi
+
+ trim_test_list $list
+ done
+
+ # sort the list of tests into numeric order unless we're running tests
+ # in the exact order specified
+ if ! $exact_order; then
+ if $randomize; then
+ if type shuf >& /dev/null; then
+ sorter="shuf"
+ else
+ sorter="awk -v seed=$RANDOM -f randomize.awk"
+ fi
+ else
+ sorter="cat"
+ fi
+ list=`sort -n $tmp.list | uniq | $sorter`
+ else
+ list=`cat $tmp.list`
+ fi
+ rm -f $tmp.list
+}
+
# Process command arguments first.
while [ $# -gt 0 ]; do
case "$1" in
-\? | -h | --help) usage ;;
- -udf) FSTYP=udf ;;
- -xfs) FSTYP=xfs ;;
- -nfs) FSTYP=nfs ;;
+ -nfs) FSTYP=nfs ;;
+ -glusterfs) FSTYP=glusterfs ;;
+ -cifs) FSTYP=cifs ;;
+ -9p) FSTYP=9p ;;
+ -virtiofs) FSTYP=virtiofs ;;
+ -overlay) FSTYP=overlay; export OVERLAY=true ;;
+ -pvfs2) FSTYP=pvfs2 ;;
+ -tmpfs) FSTYP=tmpfs ;;
+ -ubifs) FSTYP=ubifs ;;
-g) group=$2 ; shift ;
- group_list=$(get_group_list $group)
- if [ -z "$group_list" ]; then
- echo "Group \"$group\" is empty or not defined?"
- exit 1
- fi
-
- [ ! -s $tmp.list ] && touch $tmp.list
- for t in $group_list; do
- grep -s "^$t\$" $tmp.list >/dev/null || \
- echo "$t" >>$tmp.list
- done
-
+ GROUP_LIST="$GROUP_LIST ${group//,/ }"
;;
-x) xgroup=$2 ; shift ;
-
- # Note: behaviour is dependent on command line ordering of
- # -g and -x parameters. If there are no preceding -g commands,
- # this works on all tests, otherwise just the tests specified by
- # the early -g inclusions.
- [ ! -s $tmp.list ] && get_all_tests
-
- group_list=$(get_group_list $xgroup)
- if [ -z "$group_list" ]; then
- echo "Group \"$xgroup\" is empty or not defined?"
- exit 1
- fi
-
- trim_test_list $group_list
+ XGROUP_LIST="$XGROUP_LIST ${xgroup//,/ }"
;;
- -X) xfile=$2; shift ;
- for d in $SRC_GROUPS $FSTYP; do
- [ -f $SRC_DIR/$d/$xfile ] || continue
- for f in `cat $SRC_DIR/$d/$xfile`; do
- echo $d/$f >> $tmp.xlist
- done
- done
+ -X) subdir_xfile=$2; shift ;
+ ;;
+ -e)
+ xfile=$2; shift ;
+ echo "$xfile" | tr ', ' '\n\n' >> $tmp.xlist
;;
+ -E) xfile=$2; shift ;
+ if [ -f $xfile ]; then
+ sed "s/#.*$//" "$xfile" >> $tmp.xlist
+ fi
+ ;;
+ -s) RUN_SECTION="$RUN_SECTION $2"; shift ;;
+ -S) EXCLUDE_SECTION="$EXCLUDE_SECTION $2"; shift ;;
-l) diff="diff" ;;
-udiff) diff="$diff -u" ;;
-n) showme=true ;;
- -r) randomize=true ;;
-
+ -r)
+ if $exact_order; then
+ echo "Cannot specify -r and --exact-order."
+ exit 1
+ fi
+ randomize=true
+ ;;
+ --exact-order)
+ if $randomize; then
+			echo "Cannot specify --exact-order and -r."
+ exit 1
+ fi
+ exact_order=true
+ ;;
+ -i) iterations=$2; shift ;;
+ -I) iterations=$2; istop=true; shift ;;
-T) timestamp=true ;;
-
+ -d) DUMP_OUTPUT=true ;;
+ -b) brief_test_summary=true;;
+ -R) report_fmt=$2 ; shift ;
+ REPORT_LIST="$REPORT_LIST ${report_fmt//,/ }"
+ do_report=true
+ ;;
--large-fs) export LARGE_SCRATCH_DEV=yes ;;
--extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${r#*=} ;;
shift
done
+# we need common/rc, that also sources common/config. We need to source it
+# after processing args, overlay needs FSTYP set before sourcing common/config
+if ! . ./common/rc; then
+ echo "check: failed to source common/rc"
+ exit 1
+fi
+
+if [ -n "$subdir_xfile" ]; then
+ for d in $SRC_GROUPS $FSTYP; do
+ [ -f $SRC_DIR/$d/$subdir_xfile ] || continue
+ for f in `sed "s/#.*$//" $SRC_DIR/$d/$subdir_xfile`; do
+ echo $d/$f >> $tmp.xlist
+ done
+ done
+fi
+
# Process tests from command line now.
if $have_test_arg; then
while [ $# -gt 0 ]; do
case "$1" in
- -*) echo "Argments before tests, please!"
+ -*) echo "Arguments before tests, please!"
status=1
exit $status
;;
- *) test_dir=`dirname $1`
- test_name=`basename $1`
- group_file=$SRC_DIR/$test_dir/group
-
- if egrep "^$test_name" $group_file >/dev/null ; then
- # in group file ... OK
- echo $SRC_DIR/$1 >>$tmp.list
- else
- # oops
- echo "$1 - unknown test, ignored"
- fi
+ *) # Expand test pattern (e.g. xfs/???, *fs/001)
+ list=$(cd $SRC_DIR; echo $1)
+ for t in $list; do
+ test_dir=`dirname $t`
+ test_dir=${test_dir#$SRC_DIR/*}
+ test_name=`basename $t`
+ group_file=$SRC_DIR/$test_dir/group
+
+ if egrep -q "^$test_name" $group_file; then
+ # in group file ... OK
+ echo $SRC_DIR/$test_dir/$test_name \
+ >>$tmp.arglist
+ else
+ # oops
+ echo "$t - unknown test, ignored"
+ fi
+ done
;;
esac
shift
done
-fi
-
-if [ -s $tmp.list ]; then
- # found some valid test numbers ... this is good
- :
-elif $have_test_arg; then
- # had test numbers, but none in group file ... do nothing
- touch $tmp.list
-else
- # no test numbers, do everything from group file
- sed -n -e '/^[0-9][0-9][0-9]*/s/[ ].*//p' <group >$tmp.list
-fi
-
-# sort the list of tests into numeric order
-list=`sort -n $tmp.list`
-rm -f $tmp.list $tmp.tmp $tmp.grep
-
-if $randomize
-then
- list=`echo $list | awk -f randomize.awk`
-fi
-
-# we need common/rc
-if ! . ./common/rc
-then
- echo "check: failed to source common/rc"
- exit 1
+elif [ -z "$GROUP_LIST" ]; then
+ # default group list is the auto group. If any other group or test is
+ # specified, we use that instead.
+ GROUP_LIST="auto"
fi
if [ `id -u` -ne 0 ]
exit 1
fi
-# Ok, time to start running...
+_wipe_counters()
+{
+ n_try="0"
+ n_bad="0"
+ n_notrun="0"
+ unset try notrun bad
+}
+
+_global_log() {
+ echo "$1" >> $check.log
+ if $OPTIONS_HAVE_SECTIONS; then
+ echo "$1" >> ${REPORT_DIR}/check.log
+ fi
+}
_wrapup()
{
- seq="check"
- check="$RESULT_BASE/check"
-
- if $showme
- then
- :
- elif $needwrap
- then
- if [ -f $check.time -a -f $tmp.time ]
- then
- cat $check.time $tmp.time \
- | $AWK_PROG '
- { t[$1] = $2 }
-END { if (NR > 0) {
- for (i in t) print i " " t[i]
- }
- }' \
- | sort -n >$tmp.out
- mv $tmp.out $check.time
+ seq="check"
+ check="$RESULT_BASE/check"
+
+ if $showme; then
+ if $needwrap; then
+ if $do_report; then
+ _make_section_report
+ fi
+ needwrap=false
+ fi
+ elif $needwrap; then
+ if [ -f $check.time -a -f $tmp.time ]; then
+ cat $check.time $tmp.time \
+ | $AWK_PROG '
+ { t[$1] = $2 }
+ END {
+ if (NR > 0) {
+ for (i in t) print i " " t[i]
+ }
+ }' \
+ | sort -n >$tmp.out
+ mv $tmp.out $check.time
+ if $OPTIONS_HAVE_SECTIONS; then
+ cp $check.time ${REPORT_DIR}/check.time
+ fi
+ fi
+
+ _global_log ""
+ _global_log "$(date)"
+
+ echo "SECTION -- $section" >>$tmp.summary
+ echo "=========================" >>$tmp.summary
+ if [ ! -z "$n_try" -a $n_try != 0 ]; then
+ if [ $brief_test_summary == "false" ]; then
+ echo "Ran:$try"
+ echo "Ran:$try" >>$tmp.summary
+ fi
+ _global_log "Ran:$try"
+ fi
+
+ $interrupt && echo "Interrupted!" | tee -a $check.log
+ if $OPTIONS_HAVE_SECTIONS; then
+ $interrupt && echo "Interrupted!" | tee -a \
+ ${REPORT_DIR}/check.log
+ fi
+
+ if [ ! -z "$notrun" ]; then
+ if [ $brief_test_summary == "false" ]; then
+ echo "Not run:$notrun"
+ echo "Not run:$notrun" >>$tmp.summary
+ fi
+ _global_log "Not run:$notrun"
+ fi
+
+ if [ ! -z "$n_bad" -a $n_bad != 0 ]; then
+ echo "Failures:$bad"
+ echo "Failed $n_bad of $n_try tests"
+ _global_log "Failures:$bad"
+ _global_log "Failed $n_bad of $n_try tests"
+ echo "Failures:$bad" >>$tmp.summary
+ echo "Failed $n_bad of $n_try tests" >>$tmp.summary
+ else
+ echo "Passed all $n_try tests"
+ _global_log "Passed all $n_try tests"
+ echo "Passed all $n_try tests" >>$tmp.summary
+ fi
+ echo "" >>$tmp.summary
+ if $do_report; then
+ _make_section_report
+ fi
+ needwrap=false
fi
- echo "" >>$check.log
- date >>$check.log
- echo $list | fmt | sed -e 's/^/ /' -e "s;$SRC_DIR/;;g" >>$check.log
- $interrupt && echo "Interrupted!" >>$check.log
-
- if [ ! -z "$n_try" -a $n_try != 0 ]
- then
- echo "Ran:$try"
+ sum_bad=`expr $sum_bad + $n_bad`
+ _wipe_counters
+ rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
+ if ! $OPTIONS_HAVE_SECTIONS; then
+ rm -f $tmp.*
fi
+}
- if [ ! -z "$notrun" ]
- then
- echo "Not run:$notrun"
- echo "Not run:$notrun" >>$check.log
+_summary()
+{
+ _wrapup
+ if $showme; then
+ :
+ elif $needsum; then
+ count=`wc -L $tmp.summary | cut -f1 -d" "`
+ cat $tmp.summary
+ needsum=false
fi
+ rm -f $tmp.*
+}
- if [ ! -z "$n_bad" -a $n_bad != 0 ]
- then
- echo "Failures:$bad"
- echo "Failed $n_bad of $n_try tests"
- echo "Failures:$bad" | fmt >>$check.log
- echo "Failed $n_bad of $n_try tests" >>$check.log
+_check_filesystems()
+{
+ if [ -f ${RESULT_DIR}/require_test ]; then
+ _check_test_fs || err=true
+ rm -f ${RESULT_DIR}/require_test*
else
- echo "Passed all $n_try tests"
- echo "Passed all $n_try tests" >>$check.log
+ _test_unmount 2> /dev/null
fi
- needwrap=false
- fi
+ if [ -f ${RESULT_DIR}/require_scratch ]; then
+ _check_scratch_fs || err=true
+ rm -f ${RESULT_DIR}/require_scratch*
+ fi
+ _scratch_unmount 2> /dev/null
+}
+
+_expunge_test()
+{
+ local TEST_ID="$1"
+ if [ -s $tmp.xlist ]; then
+ if grep -q $TEST_ID $tmp.xlist; then
+ echo " [expunged]"
+ return 1
+ fi
+ fi
+ return 0
+}
+
+# Can we run systemd scopes?
+HAVE_SYSTEMD_SCOPES=
+systemctl reset-failed "fstests-check" &>/dev/null
+systemd-run --quiet --unit "fstests-check" --scope bash -c "exit 77" &> /dev/null
+test $? -eq 77 && HAVE_SYSTEMD_SCOPES=yes
+
+# Make the check script unattractive to the OOM killer...
+OOM_SCORE_ADJ="/proc/self/oom_score_adj"
+test -w ${OOM_SCORE_ADJ} && echo -1000 > ${OOM_SCORE_ADJ}
- rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
- rm -f $tmp.*
+# ...and make the tests themselves somewhat more attractive to it, so that if
+# the system runs out of memory it'll be the test that gets killed and not the
+# test framework.
+#
+# If systemd is available, run the entire test script in a scope so that we can
+# kill all subprocesses of the test if it fails to clean up after itself. This
+# is essential for ensuring that the post-test unmount succeeds. Note that
+# systemd doesn't automatically remove transient scopes that fail to terminate
+# when systemd tells them to terminate (e.g. programs stuck in D state when
+# systemd sends SIGKILL), so we use reset-failed to tear down the scope.
+_run_seq() {
+ local cmd=(bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq")
+
+ if [ -n "${HAVE_SYSTEMD_SCOPES}" ]; then
+ local unit="$(systemd-escape "fs$seq").scope"
+ systemctl reset-failed "${unit}" &> /dev/null
+ systemd-run --quiet --unit "${unit}" --scope "${cmd[@]}"
+ res=$?
+ systemctl stop "${unit}" &> /dev/null
+ return "${res}"
+ else
+ "${cmd[@]}"
+ fi
}
-trap "_wrapup; exit \$status" 0 1 2 3 15
+_detect_kmemleak
+_prepare_test_list
-mkdir -p $RESULT_BASE
-if [ ! -d $RESULT_BASE ]; then
- echo "failed to create results directory $RESULTS_BASE"
- exit 1;
+if $OPTIONS_HAVE_SECTIONS; then
+ trap "_summary; exit \$status" 0 1 2 3 15
+else
+ trap "_wrapup; exit \$status" 0 1 2 3 15
fi
-seq="check"
-check="$RESULT_BASE/check"
+function run_section()
+{
+ local section=$1
+
+ OLD_FSTYP=$FSTYP
+ OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS
+ get_next_config $section
+
+ # Do we need to run only some sections ?
+ if [ ! -z "$RUN_SECTION" ]; then
+ skip=true
+ for s in $RUN_SECTION; do
+ if [ $section == $s ]; then
+ skip=false
+ break;
+ fi
+ done
+ if $skip; then
+ return
+ fi
+ fi
-# don't leave old full output behind on a clean run
-rm -f $check.full
+ # Did this section get excluded?
+ if [ ! -z "$EXCLUDE_SECTION" ]; then
+ skip=false
+ for s in $EXCLUDE_SECTION; do
+ if [ $section == $s ]; then
+ skip=true
+ break;
+ fi
+ done
+ if $skip; then
+ return
+ fi
+ fi
-[ -f $check.time ] || touch $check.time
+ mkdir -p $RESULT_BASE
+ if [ ! -d $RESULT_BASE ]; then
+ echo "failed to create results directory $RESULT_BASE"
+ status=1
+ exit
+ fi
-# print out our test configuration
-echo "FSTYP -- `_full_fstyp_details`"
-echo "PLATFORM -- `_full_platform_details`"
-if [ ! -z "$SCRATCH_DEV" ]; then
- echo "MKFS_OPTIONS -- `_scratch_mkfs_options`"
- echo "MOUNT_OPTIONS -- `_scratch_mount_options`"
-fi
-echo
-
-
-if [ ! -z "$SCRATCH_DEV" ]; then
- umount $SCRATCH_DEV 2>/dev/null
- # call the overridden mkfs - make sure the FS is built
- # the same as we'll create it later.
-
- if ! _scratch_mkfs $flag >$tmp.err 2>&1
- then
- echo "our local _scratch_mkfs routine ..."
- cat $tmp.err
- echo "check: failed to mkfs \$SCRATCH_DEV using specified options"
- exit 1
- fi
-
- # call the overridden mount - make sure the FS mounts with
- # the same options that we'll mount with later.
- if ! _scratch_mount >$tmp.err 2>&1
- then
- echo "our local mount routine ..."
- cat $tmp.err
- echo "check: failed to mount \$SCRATCH_DEV using specified options"
- exit 1
- fi
-fi
+ if $OPTIONS_HAVE_SECTIONS; then
+ echo "SECTION -- $section"
+ fi
-seqres="$check"
-_check_test_fs
+ sect_start=`_wallclock`
+ if $RECREATE_TEST_DEV || [ "$OLD_FSTYP" != "$FSTYP" ]; then
+ echo "RECREATING -- $FSTYP on $TEST_DEV"
+ _test_unmount 2> /dev/null
+ if ! _test_mkfs >$tmp.err 2>&1
+ then
+ echo "our local _test_mkfs routine ..."
+ cat $tmp.err
+ echo "check: failed to mkfs \$TEST_DEV using specified options"
+ status=1
+ exit
+ fi
+ if ! _test_mount
+ then
+ echo "check: failed to mount $TEST_DEV on $TEST_DIR"
+ status=1
+ exit
+ fi
+ # TEST_DEV has been recreated, previous FSTYP derived from
+ # TEST_DEV could be changed, source common/rc again with
+ # correct FSTYP to get FSTYP specific configs, e.g. common/xfs
+ . common/rc
+ _prepare_test_list
+ elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then
+ _test_unmount 2> /dev/null
+ if ! _test_mount
+ then
+ echo "check: failed to mount $TEST_DEV on $TEST_DIR"
+ status=1
+ exit
+ fi
+ fi
-for seq in $list
-do
- err=false
+ init_rc
- # the filename for the test and the name output are different.
- # we don't include the tests/ directory in the name output.
- seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`
+ seq="check"
+ check="$RESULT_BASE/check"
- # Similarly, the result directory needs to replace the tests/
- # part of the test location.
- group=`dirname $seq`
- export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
- mkdir -p $RESULT_DIR
- seqres="$RESULT_BASE/$seqnum"
+ # don't leave old full output behind on a clean run
+ rm -f $check.full
- echo -n "$seqnum"
+ [ -f $check.time ] || touch $check.time
- if $showme
- then
+ # print out our test configuration
+ echo "FSTYP -- `_full_fstyp_details`"
+ echo "PLATFORM -- `_full_platform_details`"
+ if [ ! -z "$SCRATCH_DEV" ]; then
+ echo "MKFS_OPTIONS -- `_scratch_mkfs_options`"
+ echo "MOUNT_OPTIONS -- `_scratch_mount_options`"
+ fi
echo
- continue
- elif [ ! -f $seq ]
- then
- echo " - no such test?"
- else
- # really going to try and run this one
- #
- rm -f $seqres.out.bad
-
- # check if we really should run it
- if [ -s $tmp.xlist ]; then
- if grep $seqnum $tmp.xlist > /dev/null 2>&1 ; then
- echo " [expunged]"
+ needwrap=true
+
+ if [ ! -z "$SCRATCH_DEV" ]; then
+ _scratch_unmount 2> /dev/null
+ # call the overridden mkfs - make sure the FS is built
+ # the same as we'll create it later.
+
+ if ! _scratch_mkfs >$tmp.err 2>&1
+ then
+ echo "our local _scratch_mkfs routine ..."
+ cat $tmp.err
+ echo "check: failed to mkfs \$SCRATCH_DEV using specified options"
+ status=1
+ exit
+ fi
+
+ # call the overridden mount - make sure the FS mounts with
+ # the same options that we'll mount with later.
+ if ! _try_scratch_mount >$tmp.err 2>&1
+ then
+ echo "our local mount routine ..."
+ cat $tmp.err
+ echo "check: failed to mount \$SCRATCH_DEV using specified options"
+ status=1
+ exit
+ else
+ _scratch_unmount
+ fi
+ fi
+
+ seqres="$check"
+ _check_test_fs
+
+ err=false
+ first_test=true
+ prev_seq=""
+ for seq in $list ; do
+ # Run report for previous test!
+ if $err ; then
+ bad="$bad $seqnum"
+ n_bad=`expr $n_bad + 1`
+ tc_status="fail"
+ fi
+ if $do_report && ! $first_test ; then
+ if [ $tc_status != "expunge" ] ; then
+ _make_testcase_report "$prev_seq" "$tc_status"
+ fi
+ fi
+ first_test=false
+
+ err=false
+ prev_seq="$seq"
+ if [ ! -f $seq ]; then
+ # Try to get full name in case the user supplied only
+ # seq id and the test has a name. A bit of hassle to
+ # find really the test and not its sample output or
+ # helping files.
+ bname=$(basename $seq)
+ full_seq=$(find $(dirname $seq) -name $bname* -executable |
+ awk '(NR == 1 || length < length(shortest)) { shortest = $0 }\
+ END { print shortest }')
+ if [ -f $full_seq ] && \
+ [ x$(echo $bname | grep -o "^$VALID_TEST_ID") != x ]; then
+ seq=$full_seq
+ fi
+ fi
+
+ # the filename for the test and the name output are different.
+ # we don't include the tests/ directory in the name output.
+ export seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`
+
+ # Similarly, the result directory needs to replace the tests/
+ # part of the test location.
+ group=`dirname $seq`
+ if $OPTIONS_HAVE_SECTIONS; then
+ export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"`
+ REPORT_DIR="$RESULT_BASE/$section"
+ else
+ export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
+ REPORT_DIR="$RESULT_BASE"
+ fi
+ seqres="$REPORT_DIR/$seqnum"
+
+ mkdir -p $RESULT_DIR
+ rm -f ${RESULT_DIR}/require_scratch*
+ rm -f ${RESULT_DIR}/require_test*
+ echo -n "$seqnum"
+
+ if $showme; then
+ _expunge_test $seqnum
+ if [ $? -eq 1 ]; then
+ tc_status="expunge"
+ continue
+ fi
+ echo
+ start=0
+ stop=0
+ tc_status="list"
+ n_notrun=`expr $n_notrun + 1`
continue
fi
- fi
- # slashes now in names, sed barfs on them so use grep
- lasttime=`grep -w ^$seqnum $check.time | awk '// {print $2}'`
- if [ "X$lasttime" != X ]; then
- echo -n " ${lasttime}s ..."
- else
- echo -n " " # prettier output with timestamps.
- fi
- rm -f core $seqres.notrun
-
- start=`_wallclock`
- $timestamp && echo -n " ["`date "+%T"`"]"
- [ ! -x $seq ] && chmod u+x $seq # ensure we can run it
- $LOGGER_PROG "run xfstest $seqnum"
- ./$seq >$tmp.rawout 2>&1
- sts=$?
- $timestamp && _timestamp
- stop=`_wallclock`
-
- _fix_malloc <$tmp.rawout >$tmp.out
- rm -f $tmp.rawout
-
- if [ -f core ]
- then
- echo -n " [dumped core]"
- mv core $RESULT_BASE/$seqnum.core
- err=true
- fi
+ tc_status="pass"
+ if [ ! -f $seq ]; then
+ echo " - no such test?"
+ continue
+ fi
- if [ -f $seqres.notrun ]
- then
- $timestamp || echo -n " [not run] "
- $timestamp && echo " [not run]" && echo -n " $seqnum -- "
- cat $seqres.notrun
- notrun="$notrun $seqnum"
- else
- if [ $sts -ne 0 ]
- then
- echo -n " [failed, exit status $sts]"
- err=true
- fi
- if [ ! -f $seq.out ]
- then
- echo " - no qualified output"
- err=true
- else
- if diff $seq.out $tmp.out >/dev/null 2>&1
- then
- if $err
- then
- :
- else
- echo "$seqnum `expr $stop - $start`" >>$tmp.time
- echo -n " `expr $stop - $start`s"
- fi
- echo ""
+ # really going to try and run this one
+ rm -f $seqres.out.bad
+
+ # check if we really should run it
+ _expunge_test $seqnum
+ if [ $? -eq 1 ]; then
+ tc_status="expunge"
+ continue
+ fi
+
+ # record that we really tried to run this test.
+ try="$try $seqnum"
+ n_try=`expr $n_try + 1`
+
+ # slashes now in names, sed barfs on them so use grep
+ lasttime=`grep -w ^$seqnum $check.time | awk '// {print $2}'`
+ if [ "X$lasttime" != X ]; then
+ echo -n " ${lasttime}s ... "
+ else
+ echo -n " " # prettier output with timestamps.
+ fi
+ rm -f core $seqres.notrun
+
+ start=`_wallclock`
+ $timestamp && echo -n " ["`date "+%T"`"]"
+ [ ! -x $seq ] && chmod u+x $seq # ensure we can run it
+ $LOGGER_PROG "run xfstest $seqnum"
+ if [ -w /dev/kmsg ]; then
+ export date_time=`date +"%F %T"`
+ echo "run fstests $seqnum at $date_time" > /dev/kmsg
+ # _check_dmesg depends on this log in dmesg
+ touch ${RESULT_DIR}/check_dmesg
+ fi
+ _try_wipe_scratch_devs > /dev/null 2>&1
+
+ # clear the WARN_ONCE state to allow a potential problem
+ # to be reported for each test
+ (echo 1 > $DEBUGFS_MNT/clear_warn_once) > /dev/null 2>&1
+
+ if [ "$DUMP_OUTPUT" = true ]; then
+ _run_seq 2>&1 | tee $tmp.out
+ # Because $? would get tee's return code
+ sts=${PIPESTATUS[0]}
else
- echo " - output mismatch (see $seqres.out.bad)"
- mv $tmp.out $seqres.out.bad
- $diff $seq.out $seqres.out.bad | {
- if test "$DIFF_LENGTH" -le 0; then
+ _run_seq >$tmp.out 2>&1
+ sts=$?
+ fi
+
+ if [ -f core ]; then
+ _dump_err_cont "[dumped core]"
+ mv core $RESULT_BASE/$seqnum.core
+ err=true
+ fi
+
+ if [ -f $seqres.notrun ]; then
+ $timestamp && _timestamp
+ stop=`_wallclock`
+ $timestamp || echo -n "[not run] "
+ $timestamp && echo " [not run]" && \
+ echo -n " $seqnum -- "
+ cat $seqres.notrun
+ notrun="$notrun $seqnum"
+ n_notrun=`expr $n_notrun + 1`
+ tc_status="notrun"
+ continue;
+ fi
+
+ if [ $sts -ne 0 ]; then
+ _dump_err_cont "[failed, exit status $sts]"
+ _test_unmount 2> /dev/null
+ _scratch_unmount 2> /dev/null
+ rm -f ${RESULT_DIR}/require_test*
+ rm -f ${RESULT_DIR}/require_scratch*
+ err=true
+ else
+ # the test apparently passed, so check for corruption
+ # and log messages that shouldn't be there.
+ _check_filesystems
+ _check_dmesg || err=true
+ fi
+
+ # Reload the module after each test to check for leaks or
+ # other problems.
+ if [ -n "${TEST_FS_MODULE_RELOAD}" ]; then
+ _test_unmount 2> /dev/null
+ _scratch_unmount 2> /dev/null
+ modprobe -r fs-$FSTYP
+ modprobe fs-$FSTYP
+ fi
+
+ # Scan for memory leaks after every test so that associating
+ # a leak to a particular test will be as accurate as possible.
+ _check_kmemleak || err=true
+
+ # test ends after all checks are done.
+ $timestamp && _timestamp
+ stop=`_wallclock`
+
+ if [ ! -f $seq.out ]; then
+ _dump_err "no qualified output"
+ err=true
+ continue;
+ fi
+
+ # coreutils 8.16+ changed quote formats in error messages
+ # from `foo' to 'foo'. Filter old versions to match the new
+ # version.
+ sed -i "s/\`/\'/g" $tmp.out
+ if diff $seq.out $tmp.out >/dev/null 2>&1 ; then
+ if ! $err ; then
+ echo "$seqnum `expr $stop - $start`" >>$tmp.time
+ echo -n " `expr $stop - $start`s"
+ fi
+ echo ""
+ else
+ _dump_err "- output mismatch (see $seqres.out.bad)"
+ mv $tmp.out $seqres.out.bad
+ $diff $seq.out $seqres.out.bad | {
+ if test "$DIFF_LENGTH" -le 0; then
cat
else
head -n "$DIFF_LENGTH"
- fi; } | \
- sed -e 's/^\(.\)/ \1/'
- echo " ..."
- echo " (Run '$diff $seq.out $seqres.out.bad' to see the" \
- "entire diff)"
- err=true
+ echo "..."
+ echo "(Run '$diff $here/$seq.out $seqres.out.bad'" \
+ " to see the entire diff)"
+ fi; } | sed -e 's/^\(.\)/ \1/'
+ err=true
+ fi
+ done
+
+ # make sure we record the status of the last test we ran.
+ if $err ; then
+ bad="$bad $seqnum"
+ n_bad=`expr $n_bad + 1`
+ tc_status="fail"
+ fi
+ if $do_report && ! $first_test ; then
+ if [ $tc_status != "expunge" ] ; then
+ _make_testcase_report "$prev_seq" "$tc_status"
fi
- fi
fi
- fi
-
- # come here for each test, except when $showme is true
- #
- if $err
- then
- bad="$bad $seqnum"
- n_bad=`expr $n_bad + 1`
- quick=false
- fi
- if [ ! -f $seqres.notrun ]
- then
- try="$try $seqnum"
- n_try=`expr $n_try + 1`
- _check_test_fs
- fi
-
- seq="after_$seqnum"
+ sect_stop=`_wallclock`
+ interrupt=false
+ _wrapup
+ interrupt=true
+ echo
+
+ _test_unmount 2> /dev/null
+ _scratch_unmount 2> /dev/null
+}
+
+for ((iters = 0; iters < $iterations; iters++)) do
+ for section in $HOST_OPTIONS_SECTIONS; do
+ run_section $section
+ if [ "$sum_bad" != 0 ] && [ "$istop" = true ]; then
+ interrupt=false
+ status=`expr $sum_bad != 0`
+ exit
+ fi
+ done
done
interrupt=false
-status=`expr $n_bad`
+status=`expr $sum_bad != 0`
exit