#!/bin/bash
-#
-# Control script for QA
-#
+# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2000-2002,2006 Silicon Graphics, Inc. All Rights Reserved.
#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it would be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-#
+# Control script for QA
#
-
tmp=/tmp/$$
status=0
needwrap=true
n_bad=0
sum_bad=0
bad=""
+n_notrun=0
notrun=""
interrupt=true
diff="diff -u"
randomize=false
export here=`pwd`
xfile=""
+subdir_xfile=""
+brief_test_summary=false
+do_report=false
+DUMP_OUTPUT=false
+iterations=1
+
+# This is a global variable used to pass test failure text to reporting gunk
+_err_msg=""
# start the initialisation work now
iam=check
# by default don't output timestamps
timestamp=${TIMESTAMP:=false}
-rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist
-
-# we need common/config
-if ! . ./common/config
-then
- echo "$iam: failed to source common/config"
- exit 1
-fi
-
-# Autodetect fs type based on what's on $TEST_DEV unless it's been set
-# externally
-if [ -z "$FSTYP" -a "$HOSTOS" == "Linux" ]; then
- FSTYP=`blkid -c /dev/null -s TYPE -o value $TEST_DEV`
-fi
-FSTYP=${FSTYP:=xfs}
-export FSTYP
+rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist $tmp.report.*
-SUPPORTED_TESTS="[0-9][0-9][0-9] [0-9][0-9][0-9][0-9]"
SRC_GROUPS="generic shared"
export SRC_DIR="tests"
echo "Usage: $0 [options] [testlist]"'
check options
- -nfs test NFS
- -tmpfs test TMPFS
+ -nfs test NFS
+ -glusterfs test GlusterFS
+ -cifs test CIFS
+ -9p test 9p
+ -virtiofs test virtiofs
+ -overlay test overlay
+ -pvfs2 test PVFS2
+ -tmpfs test TMPFS
+ -ubifs test ubifs
-l line mode diff
-udiff show unified diff (default)
-n show me, do not run tests
-T output timestamps
-r randomize test order
+ -i <n> iterate the test list <n> times
+ -d dump test output to stdout
+ -b brief test summary
+ -R fmt[,fmt] generate report in formats specified. Supported format: [xunit]
--large-fs optimise scratch device for large filesystems
+ -s section run only specified section from config file
+ -S section exclude the specified section from the config file
testlist options
-g group[,group...] include tests from these groups
-x group[,group...] exclude tests from these groups
- -X file exclude individual tests
+ -X exclude_file exclude individual tests
+ -E external_file exclude individual tests
[testlist] include tests matching names in testlist
+
+testlist argument is a list of tests in the form of <test dir>/<test name>.
+
+<test dir> is a directory under tests that contains a group file,
+with a list of the names of the tests in that directory.
+
+<test name> may be either a specific test file name (e.g. xfs/001) or
+a test file name match pattern (e.g. xfs/*).
+
+group argument is either a name of a tests group to collect from all
+the test dirs (e.g. quick) or a name of a tests group to collect from
+a specific tests dir in the form of <test dir>/<group name> (e.g. xfs/quick).
+If you want to run all the tests in the test suite, use "-g all" to specify all
+groups.
+
+exclude_file argument refers to a name of a file inside each test directory.
+for every test dir where this file is found, the listed test names are
+excluded from the list of tests to run from that test dir.
+
+external_file argument is a path to a single file containing a list of tests
+to exclude in the form of <test dir>/<test name>.
+
+examples:
+ check xfs/001
+ check -g quick
+ check -g xfs/quick
+ check -x stress xfs/*
+ check -X .exclude -g auto
+ check -E ~/.xfstests.exclude
'
exit 0
}
+get_sub_group_list()
+{
+ local d=$1
+ local grp=$2
+
+ test -s "$SRC_DIR/$d/group" || return 1
+
+ local grpl=$(sed -n < $SRC_DIR/$d/group \
+ -e 's/#.*//' \
+ -e 's/$/ /' \
+ -e "s;^\($VALID_TEST_NAME\).* $grp .*;$SRC_DIR/$d/\1;p")
+ echo $grpl
+}
+
get_group_list()
{
- grp=$1
+ local grp=$1
+ local grpl=""
+ local sub=$(dirname $grp)
+ local fsgroup="$FSTYP"
+
+ if [ -n "$sub" -a "$sub" != "." -a -d "$SRC_DIR/$sub" ]; then
+ # group is given as <subdir>/<group> (e.g. xfs/quick)
+ grp=$(basename $grp)
+ get_sub_group_list $sub $grp
+ return
+ fi
- for d in $SRC_GROUPS $FSTYP; do
- l=$(sed -n < $SRC_DIR/$d/group \
- -e 's/#.*//' \
- -e 's/$/ /' \
- -e "s;\(^[0-9][0-9][0-9]\).* $grp .*;$SRC_DIR/$d/\1;p")
- grpl="$grpl $l"
+ if [ "$FSTYP" = ext2 -o "$FSTYP" = ext3 ]; then
+ fsgroup=ext4
+ fi
+ for d in $SRC_GROUPS $fsgroup; do
+ if ! test -d "$SRC_DIR/$d" ; then
+ continue
+ fi
+ grpl="$grpl $(get_sub_group_list $d $grp)"
done
echo $grpl
}
-# find all tests, excluding files that are test metadata such as group files.
-# This assumes that tests are defined purely by alphanumeric filenames with no
-# ".xyz" extensions in the name.
+# Find all tests, excluding files that are test metadata such as group files.
+# It matches test names against $VALID_TEST_NAME defined in common/rc
get_all_tests()
{
touch $tmp.list
for d in $SRC_GROUPS $FSTYP; do
+ if ! test -d "$SRC_DIR/$d" ; then
+ continue
+ fi
ls $SRC_DIR/$d/* | \
grep -v "\..*" | \
+ grep "^$SRC_DIR/$d/$VALID_TEST_NAME"| \
grep -v "group\|Makefile" >> $tmp.list 2>/dev/null
done
}
done
grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
mv $tmp.tmp $tmp.list
+ rm -f $tmp.grep
}
_wallclock()
{
- date "+%H %M %S" | $AWK_PROG '{ print $1*3600 + $2*60 + $3 }'
+ date "+%s"
}
_timestamp()
fi
# Specified groups to include
- for group in $GROUP_LIST; do
- list=$(get_group_list $group)
- if [ -z "$list" ]; then
- echo "Group \"$group\" is empty or not defined?"
- exit 1
- fi
+ # Note that the CLI processing adds a leading space to the first group
+ # parameter, so we have to catch that here checking for "all"
+ if ! $have_test_arg && [ "$GROUP_LIST" == " all" ]; then
+ # no test numbers, do everything
+ get_all_tests
+ else
+ for group in $GROUP_LIST; do
+ list=$(get_group_list $group)
+ if [ -z "$list" ]; then
+ echo "Group \"$group\" is empty or not defined?"
+ exit 1
+ fi
- for t in $list; do
- grep -s "^$t\$" $tmp.list >/dev/null || \
+ for t in $list; do
+ grep -s "^$t\$" $tmp.list >/dev/null || \
echo "$t" >>$tmp.list
+ done
done
- done
-
- if ! $have_test_arg && [ -z "$GROUP_LIST" ]; then
- # no test numbers, do everything
- get_all_tests
fi
# Specified groups to exclude
done
# sort the list of tests into numeric order
- list=`sort -n $tmp.list | uniq`
- rm -f $tmp.list $tmp.tmp $tmp.grep
-
- if $randomize
- then
- list=`echo $list | awk -f randomize.awk`
+ if $randomize; then
+ if type shuf >& /dev/null; then
+ sorter="shuf"
+ else
+ sorter="awk -v seed=$RANDOM -f randomize.awk"
+ fi
+ else
+ sorter="cat"
fi
+ list=`sort -n $tmp.list | uniq | $sorter`
+ rm -f $tmp.list
}
# Process command arguments first.
case "$1" in
-\? | -h | --help) usage ;;
- -nfs) FSTYP=nfs ;;
- -tmpfs) FSTYP=tmpfs ;;
+ -nfs) FSTYP=nfs ;;
+ -glusterfs) FSTYP=glusterfs ;;
+ -cifs) FSTYP=cifs ;;
+ -9p) FSTYP=9p ;;
+ -virtiofs) FSTYP=virtiofs ;;
+ -overlay) FSTYP=overlay; export OVERLAY=true ;;
+ -pvfs2) FSTYP=pvfs2 ;;
+ -tmpfs) FSTYP=tmpfs ;;
+ -ubifs) FSTYP=ubifs ;;
-g) group=$2 ; shift ;
- GROUP_LIST="$GROUP_LIST $group"
+ GROUP_LIST="$GROUP_LIST ${group//,/ }"
;;
-x) xgroup=$2 ; shift ;
- XGROUP_LIST="$XGROUP_LIST $xgroup"
+ XGROUP_LIST="$XGROUP_LIST ${xgroup//,/ }"
;;
- -X) xfile=$2; shift ;
- for d in $SRC_GROUPS $FSTYP; do
- [ -f $SRC_DIR/$d/$xfile ] || continue
- for f in `cat $SRC_DIR/$d/$xfile`; do
- echo $d/$f >> $tmp.xlist
- done
- done
+ -X) subdir_xfile=$2; shift ;
;;
-
+ -E) xfile=$2; shift ;
+ if [ -f $xfile ]; then
+ sed "s/#.*$//" "$xfile" >> $tmp.xlist
+ fi
+ ;;
+ -s) RUN_SECTION="$RUN_SECTION $2"; shift ;;
+ -S) EXCLUDE_SECTION="$EXCLUDE_SECTION $2"; shift ;;
-l) diff="diff" ;;
-udiff) diff="$diff -u" ;;
-n) showme=true ;;
-r) randomize=true ;;
-
+ -i) iterations=$2; shift ;;
-T) timestamp=true ;;
-
+ -d) DUMP_OUTPUT=true ;;
+ -b) brief_test_summary=true;;
+ -R) report_fmt=$2 ; shift ;
+ REPORT_LIST="$REPORT_LIST ${report_fmt//,/ }"
+ do_report=true
+ ;;
--large-fs) export LARGE_SCRATCH_DEV=yes ;;
--extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${r#*=} ;;
shift
done
+# we need common/rc, which also sources common/config. We need to source it
+# after processing args, because overlay needs FSTYP set before sourcing common/config
+if ! . ./common/rc; then
+ echo "check: failed to source common/rc"
+ exit 1
+fi
+
+if [ -n "$subdir_xfile" ]; then
+ for d in $SRC_GROUPS $FSTYP; do
+ [ -f $SRC_DIR/$d/$subdir_xfile ] || continue
+ for f in `sed "s/#.*$//" $SRC_DIR/$d/$subdir_xfile`; do
+ echo $d/$f >> $tmp.xlist
+ done
+ done
+fi
+
# Process tests from command line now.
if $have_test_arg; then
while [ $# -gt 0 ]; do
case "$1" in
- -*) echo "Argments before tests, please!"
+ -*) echo "Arguments before tests, please!"
status=1
exit $status
;;
- *) test_dir=`dirname $1`
- test_dir=${test_dir#$SRC_DIR/*}
- test_name=`basename $1`
- group_file=$SRC_DIR/$test_dir/group
-
- if egrep "^$test_name" $group_file >/dev/null ; then
- # in group file ... OK
- echo $SRC_DIR/$test_dir/$test_name >>$tmp.arglist
- else
- # oops
- echo "$1 - unknown test, ignored"
- fi
+ *) # Expand test pattern (e.g. xfs/???, *fs/001)
+ list=$(cd $SRC_DIR; echo $1)
+ for t in $list; do
+ test_dir=`dirname $t`
+ test_dir=${test_dir#$SRC_DIR/*}
+ test_name=`basename $t`
+ group_file=$SRC_DIR/$test_dir/group
+
+ if egrep -q "^$test_name" $group_file; then
+ # in group file ... OK
+ echo $SRC_DIR/$test_dir/$test_name \
+ >>$tmp.arglist
+ else
+ # oops
+ echo "$t - unknown test, ignored"
+ fi
+ done
;;
esac
shift
done
-fi
-
-# we need common/rc
-if ! . ./common/rc
-then
- echo "check: failed to source common/rc"
- exit 1
+elif [ -z "$GROUP_LIST" ]; then
+ # default group list is the auto group. If any other group or test is
+ # specified, we use that instead.
+ GROUP_LIST="auto"
fi
if [ `id -u` -ne 0 ]
{
n_try="0"
n_bad="0"
+ n_notrun="0"
unset try notrun bad
}
+_global_log() {
+ echo "$1" >> $check.log
+ if $OPTIONS_HAVE_SECTIONS; then
+ echo "$1" >> ${REPORT_DIR}/check.log
+ fi
+}
+
_wrapup()
{
- seq="check"
- check="$RESULT_BASE/check"
-
- if $showme
- then
- :
- elif $needwrap
- then
- if [ -f $check.time -a -f $tmp.time ]
- then
- cat $check.time $tmp.time \
- | $AWK_PROG '
- { t[$1] = $2 }
-END { if (NR > 0) {
- for (i in t) print i " " t[i]
- }
- }' \
- | sort -n >$tmp.out
- mv $tmp.out $check.time
- fi
+ seq="check"
+ check="$RESULT_BASE/check"
- echo "" >>$check.log
- date >>$check.log
- echo $list | fmt | sed -e 's/^/ /' -e "s;$SRC_DIR/;;g" >>$check.log
- $interrupt && echo "Interrupted!" >>$check.log
-
- echo "SECTION -- $section" >>$tmp.summary
- echo "=========================" >>$tmp.summary
- if [ ! -z "$n_try" -a $n_try != 0 ]
- then
- echo "Ran:$try"
- echo "Ran:$try" >>$tmp.summary
- fi
+ if $showme; then
+ if $needwrap; then
+ if $do_report; then
+ _make_section_report
+ fi
+ needwrap=false
+ fi
+ elif $needwrap; then
+ if [ -f $check.time -a -f $tmp.time ]; then
+ cat $check.time $tmp.time \
+ | $AWK_PROG '
+ { t[$1] = $2 }
+ END {
+ if (NR > 0) {
+ for (i in t) print i " " t[i]
+ }
+ }' \
+ | sort -n >$tmp.out
+ mv $tmp.out $check.time
+ if $OPTIONS_HAVE_SECTIONS; then
+ cp $check.time ${REPORT_DIR}/check.time
+ fi
+ fi
+
+ _global_log ""
+ _global_log "$(date)"
+
+ echo "SECTION -- $section" >>$tmp.summary
+ echo "=========================" >>$tmp.summary
+ if [ ! -z "$n_try" -a $n_try != 0 ]; then
+ if [ $brief_test_summary == "false" ]; then
+ echo "Ran:$try"
+ echo "Ran:$try" >>$tmp.summary
+ fi
+ _global_log "Ran:$try"
+ fi
+
+ $interrupt && echo "Interrupted!" | tee -a $check.log
+ if $OPTIONS_HAVE_SECTIONS; then
+ $interrupt && echo "Interrupted!" | tee -a \
+ ${REPORT_DIR}/check.log
+ fi
+
+ if [ ! -z "$notrun" ]; then
+ if [ $brief_test_summary == "false" ]; then
+ echo "Not run:$notrun"
+ echo "Not run:$notrun" >>$tmp.summary
+ fi
+ _global_log "Not run:$notrun"
+ fi
- if [ ! -z "$notrun" ]
- then
- echo "Not run:$notrun"
- echo "Not run:$notrun" >>$check.log
- echo "Not run:$notrun" >>$tmp.summary
+ if [ ! -z "$n_bad" -a $n_bad != 0 ]; then
+ echo "Failures:$bad"
+ echo "Failed $n_bad of $n_try tests"
+ _global_log "Failures:$bad"
+ _global_log "Failed $n_bad of $n_try tests"
+ echo "Failures:$bad" >>$tmp.summary
+ echo "Failed $n_bad of $n_try tests" >>$tmp.summary
+ else
+ echo "Passed all $n_try tests"
+ _global_log "Passed all $n_try tests"
+ echo "Passed all $n_try tests" >>$tmp.summary
+ fi
+ echo "" >>$tmp.summary
+ if $do_report; then
+ _make_section_report
+ fi
+ needwrap=false
fi
- if [ ! -z "$n_bad" -a $n_bad != 0 ]
- then
- echo "Failures:$bad"
- echo "Failed $n_bad of $n_try tests"
- echo "Failures:$bad" | fmt >>$check.log
- echo "Failed $n_bad of $n_try tests" >>$check.log
- echo "Failures:$bad" >>$tmp.summary
- echo "Failed $n_bad of $n_try tests" >>$tmp.summary
- else
- echo "Passed all $n_try tests"
- echo "Passed all $n_try tests" >>$check.log
- echo "Passed all $n_try tests" >>$tmp.summary
+ sum_bad=`expr $sum_bad + $n_bad`
+ _wipe_counters
+ rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
+ if ! $OPTIONS_HAVE_SECTIONS; then
+ rm -f $tmp.*
fi
- echo "" >>$tmp.summary
- needwrap=false
- fi
-
- sum_bad=`expr $sum_bad + $n_bad`
- _wipe_counters
- rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
- if ! $OPTIONS_HAVE_SECTIONS; then
- rm -f $tmp.*
- fi
}
_summary()
rm -f $tmp.*
}
+_check_filesystems()
+{
+ if [ -f ${RESULT_DIR}/require_test ]; then
+ _check_test_fs || err=true
+ rm -f ${RESULT_DIR}/require_test*
+ else
+ _test_unmount 2> /dev/null
+ fi
+ if [ -f ${RESULT_DIR}/require_scratch ]; then
+ _check_scratch_fs || err=true
+ rm -f ${RESULT_DIR}/require_scratch*
+ fi
+ _scratch_unmount 2> /dev/null
+}
+
+_expunge_test()
+{
+ local TEST_ID="$1"
+ if [ -s $tmp.xlist ]; then
+ if grep -q $TEST_ID $tmp.xlist; then
+ echo " [expunged]"
+ return 1
+ fi
+ fi
+ return 0
+}
+
+# Make the check script unattractive to the OOM killer...
+OOM_SCORE_ADJ="/proc/self/oom_score_adj"
+test -w ${OOM_SCORE_ADJ} && echo -1000 > ${OOM_SCORE_ADJ}
+
+# ...and make the tests themselves somewhat more attractive to it, so that if
+# the system runs out of memory it'll be the test that gets killed and not the
+# test framework.
+_run_seq() {
+ bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq"
+}
+
+_detect_kmemleak
_prepare_test_list
if $OPTIONS_HAVE_SECTIONS; then
trap "_wrapup; exit \$status" 0 1 2 3 15
fi
-for section in $HOST_OPTIONS_SECTIONS; do
+function run_section()
+{
+ local section=$1
+
OLD_FSTYP=$FSTYP
- OLD_MOUNT_OPTIONS=$MOUNT_OPTIONS
+ OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS
get_next_config $section
+	# Do we need to run only some sections?
+ if [ ! -z "$RUN_SECTION" ]; then
+ skip=true
+ for s in $RUN_SECTION; do
+ if [ $section == $s ]; then
+ skip=false
+ break;
+ fi
+ done
+ if $skip; then
+ return
+ fi
+ fi
+
+ # Did this section get excluded?
+ if [ ! -z "$EXCLUDE_SECTION" ]; then
+ skip=false
+ for s in $EXCLUDE_SECTION; do
+ if [ $section == $s ]; then
+ skip=true
+ break;
+ fi
+ done
+ if $skip; then
+ return
+ fi
+ fi
+
mkdir -p $RESULT_BASE
if [ ! -d $RESULT_BASE ]; then
echo "failed to create results directory $RESULT_BASE"
- exit 1;
+ status=1
+ exit
fi
if $OPTIONS_HAVE_SECTIONS; then
echo "SECTION -- $section"
fi
+ sect_start=`_wallclock`
if $RECREATE_TEST_DEV || [ "$OLD_FSTYP" != "$FSTYP" ]; then
echo "RECREATING -- $FSTYP on $TEST_DEV"
- umount $TEST_DEV 2> /dev/null
+ _test_unmount 2> /dev/null
if ! _test_mkfs >$tmp.err 2>&1
then
echo "our local _test_mkfs routine ..."
cat $tmp.err
echo "check: failed to mkfs \$TEST_DEV using specified options"
- exit 1
+ status=1
+ exit
fi
- out=`_mount_or_remount_rw "$MOUNT_OPTIONS" $TEST_DEV $TEST_DIR`
- if [ $? -ne 1 ]; then
- echo $out
- exit 1
+ if ! _test_mount
+ then
+ echo "check: failed to mount $TEST_DEV on $TEST_DIR"
+ status=1
+ exit
fi
_prepare_test_list
- elif [ "$OLD_MOUNT_OPTIONS" != "$MOUNT_OPTIONS" ]; then
- umount $TEST_DEV 2> /dev/null
- out=`_mount_or_remount_rw "$MOUNT_OPTIONS" $TEST_DEV $TEST_DIR`
- if [ $? -ne 1 ]; then
- echo $out
- exit 1
+ elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then
+ _test_unmount 2> /dev/null
+ if ! _test_mount
+ then
+ echo "check: failed to mount $TEST_DEV on $TEST_DIR"
+ status=1
+ exit
fi
fi
needwrap=true
if [ ! -z "$SCRATCH_DEV" ]; then
- umount $SCRATCH_DEV 2>/dev/null
+ _scratch_unmount 2> /dev/null
# call the overridden mkfs - make sure the FS is built
# the same as we'll create it later.
- if ! _scratch_mkfs $flag >$tmp.err 2>&1
+ if ! _scratch_mkfs >$tmp.err 2>&1
then
echo "our local _scratch_mkfs routine ..."
cat $tmp.err
echo "check: failed to mkfs \$SCRATCH_DEV using specified options"
- exit 1
+ status=1
+ exit
fi
# call the overridden mount - make sure the FS mounts with
# the same options that we'll mount with later.
- if ! _scratch_mount >$tmp.err 2>&1
+ if ! _try_scratch_mount >$tmp.err 2>&1
then
echo "our local mount routine ..."
cat $tmp.err
echo "check: failed to mount \$SCRATCH_DEV using specified options"
- exit 1
+ status=1
+ exit
+ else
+ _scratch_unmount
fi
fi
seqres="$check"
_check_test_fs
- for seq in $list
- do
- err=false
-
- # the filename for the test and the name output are different.
- # we don't include the tests/ directory in the name output.
- seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`
-
- # Similarly, the result directory needs to replace the tests/
- # part of the test location.
- group=`dirname $seq`
- if $OPTIONS_HAVE_SECTIONS; then
- export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"`
- seqres="$RESULT_BASE/$section/$seqnum"
- else
- export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
- seqres="$RESULT_BASE/$seqnum"
- fi
+ err=false
+ first_test=true
+ prev_seq=""
+ for seq in $list ; do
+ # Run report for previous test!
+ if $err ; then
+ bad="$bad $seqnum"
+ n_bad=`expr $n_bad + 1`
+ tc_status="fail"
+ fi
+ if $do_report && ! $first_test ; then
+ if [ $tc_status != "expunge" ] ; then
+ _make_testcase_report "$prev_seq" "$tc_status"
+ fi
+ fi
+ first_test=false
+
+ err=false
+ prev_seq="$seq"
+ if [ ! -f $seq ]; then
+ # Try to get full name in case the user supplied only
+ # seq id and the test has a name. A bit of hassle to
+ # find really the test and not its sample output or
+ # helping files.
+ bname=$(basename $seq)
+ full_seq=$(find $(dirname $seq) -name $bname* -executable |
+ awk '(NR == 1 || length < length(shortest)) { shortest = $0 }\
+ END { print shortest }')
+ if [ -f $full_seq ] && \
+ [ x$(echo $bname | grep -o "^$VALID_TEST_ID") != x ]; then
+ seq=$full_seq
+ fi
+ fi
- mkdir -p $RESULT_DIR
+ # the filename for the test and the name output are different.
+ # we don't include the tests/ directory in the name output.
+ export seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`
- echo -n "$seqnum"
+ # Similarly, the result directory needs to replace the tests/
+ # part of the test location.
+ group=`dirname $seq`
+ if $OPTIONS_HAVE_SECTIONS; then
+ export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"`
+ REPORT_DIR="$RESULT_BASE/$section"
+ else
+ export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
+ REPORT_DIR="$RESULT_BASE"
+ fi
+ seqres="$REPORT_DIR/$seqnum"
+
+ mkdir -p $RESULT_DIR
+ rm -f ${RESULT_DIR}/require_scratch*
+ rm -f ${RESULT_DIR}/require_test*
+ echo -n "$seqnum"
+
+ if $showme; then
+ _expunge_test $seqnum
+ if [ $? -eq 1 ]; then
+ tc_status="expunge"
+ continue
+ fi
+ echo
+ start=0
+ stop=0
+ tc_status="list"
+ n_notrun=`expr $n_notrun + 1`
+ continue
+ fi
+
+ tc_status="pass"
+ if [ ! -f $seq ]; then
+ echo " - no such test?"
+ continue
+ fi
- if $showme
- then
- echo
- continue
- elif [ ! -f $seq ]
- then
- echo " - no such test?"
- else
# really going to try and run this one
- #
rm -f $seqres.out.bad
# check if we really should run it
- if [ -s $tmp.xlist ]; then
- if grep $seqnum $tmp.xlist > /dev/null 2>&1 ; then
- echo " [expunged]"
- continue
- fi
+ _expunge_test $seqnum
+ if [ $? -eq 1 ]; then
+ tc_status="expunge"
+ continue
fi
+ # record that we really tried to run this test.
+ try="$try $seqnum"
+ n_try=`expr $n_try + 1`
+
# slashes now in names, sed barfs on them so use grep
lasttime=`grep -w ^$seqnum $check.time | awk '// {print $2}'`
if [ "X$lasttime" != X ]; then
- echo -n " ${lasttime}s ..."
+ echo -n " ${lasttime}s ... "
else
- echo -n " " # prettier output with timestamps.
+ echo -n " " # prettier output with timestamps.
fi
rm -f core $seqres.notrun
$timestamp && echo -n " ["`date "+%T"`"]"
[ ! -x $seq ] && chmod u+x $seq # ensure we can run it
$LOGGER_PROG "run xfstest $seqnum"
- ./$seq >$tmp.rawout 2>&1
- sts=$?
- $timestamp && _timestamp
- stop=`_wallclock`
+ if [ -w /dev/kmsg ]; then
+ export date_time=`date +"%F %T"`
+ echo "run fstests $seqnum at $date_time" > /dev/kmsg
+ # _check_dmesg depends on this log in dmesg
+ touch ${RESULT_DIR}/check_dmesg
+ fi
+ _try_wipe_scratch_devs > /dev/null 2>&1
- _fix_malloc <$tmp.rawout >$tmp.out
- rm -f $tmp.rawout
+ # clear the WARN_ONCE state to allow a potential problem
+ # to be reported for each test
+ (echo 1 > $DEBUGFS_MNT/clear_warn_once) > /dev/null 2>&1
- if [ -f core ]
- then
- echo -n " [dumped core]"
- mv core $RESULT_BASE/$seqnum.core
- err=true
+ if [ "$DUMP_OUTPUT" = true ]; then
+ _run_seq 2>&1 | tee $tmp.out
+ # Because $? would get tee's return code
+ sts=${PIPESTATUS[0]}
+ else
+ _run_seq >$tmp.out 2>&1
+ sts=$?
fi
- if [ -f $seqres.notrun ]
- then
- $timestamp || echo -n " [not run] "
- $timestamp && echo " [not run]" && echo -n " $seqnum -- "
- cat $seqres.notrun
- notrun="$notrun $seqnum"
- else
- if [ $sts -ne 0 ]
- then
- echo -n " [failed, exit status $sts]"
+ if [ -f core ]; then
+ _dump_err_cont "[dumped core]"
+ mv core $RESULT_BASE/$seqnum.core
err=true
- fi
- if [ ! -f $seq.out ]
- then
- echo " - no qualified output"
+ fi
+
+ if [ -f $seqres.notrun ]; then
+ $timestamp && _timestamp
+ stop=`_wallclock`
+ $timestamp || echo -n "[not run] "
+ $timestamp && echo " [not run]" && \
+ echo -n " $seqnum -- "
+ cat $seqres.notrun
+ notrun="$notrun $seqnum"
+ n_notrun=`expr $n_notrun + 1`
+ tc_status="notrun"
+ continue;
+ fi
+
+ if [ $sts -ne 0 ]; then
+ _dump_err_cont "[failed, exit status $sts]"
+ _test_unmount 2> /dev/null
+ _scratch_unmount 2> /dev/null
+ rm -f ${RESULT_DIR}/require_test*
+ rm -f ${RESULT_DIR}/require_scratch*
err=true
- else
-
- # coreutils 8.16+ changed quote formats in error messages from
- # `foo' to 'foo'. Filter old versions to match the new version.
- sed -i "s/\`/\'/g" $tmp.out
- if diff $seq.out $tmp.out >/dev/null 2>&1
- then
- if $err
- then
- :
- else
+ else
+ # the test apparently passed, so check for corruption
+ # and log messages that shouldn't be there.
+ _check_filesystems
+ _check_dmesg || err=true
+ fi
+
+ # Reload the module after each test to check for leaks or
+ # other problems.
+ if [ -n "${TEST_FS_MODULE_RELOAD}" ]; then
+ _test_unmount 2> /dev/null
+ _scratch_unmount 2> /dev/null
+ modprobe -r fs-$FSTYP
+ modprobe fs-$FSTYP
+ fi
+
+ # Scan for memory leaks after every test so that associating
+ # a leak to a particular test will be as accurate as possible.
+ _check_kmemleak || err=true
+
+ # test ends after all checks are done.
+ $timestamp && _timestamp
+ stop=`_wallclock`
+
+ if [ ! -f $seq.out ]; then
+ _dump_err "no qualified output"
+ err=true
+ continue;
+ fi
+
+ # coreutils 8.16+ changed quote formats in error messages
+ # from `foo' to 'foo'. Filter old versions to match the new
+ # version.
+ sed -i "s/\`/\'/g" $tmp.out
+ if diff $seq.out $tmp.out >/dev/null 2>&1 ; then
+ if ! $err ; then
echo "$seqnum `expr $stop - $start`" >>$tmp.time
echo -n " `expr $stop - $start`s"
- fi
- echo ""
- else
- echo " - output mismatch (see $seqres.out.bad)"
- mv $tmp.out $seqres.out.bad
- $diff $seq.out $seqres.out.bad | {
- if test "$DIFF_LENGTH" -le 0; then
- cat
- else
- head -n "$DIFF_LENGTH"
- echo "..."
- echo "(Run '$diff $seq.out $seqres.out.bad'" \
- " to see the entire diff)"
- fi; } | \
- sed -e 's/^\(.\)/ \1/'
- err=true
fi
- fi
+ echo ""
+ else
+ _dump_err "- output mismatch (see $seqres.out.bad)"
+ mv $tmp.out $seqres.out.bad
+ $diff $seq.out $seqres.out.bad | {
+ if test "$DIFF_LENGTH" -le 0; then
+ cat
+ else
+ head -n "$DIFF_LENGTH"
+ echo "..."
+ echo "(Run '$diff $here/$seq.out $seqres.out.bad'" \
+ " to see the entire diff)"
+ fi; } | sed -e 's/^\(.\)/ \1/'
+ err=true
fi
+ done
- fi
-
- # come here for each test, except when $showme is true
- #
- if $err
- then
+ # make sure we record the status of the last test we ran.
+ if $err ; then
bad="$bad $seqnum"
n_bad=`expr $n_bad + 1`
- quick=false
- fi
- if [ ! -f $seqres.notrun ]
- then
- try="$try $seqnum"
- n_try=`expr $n_try + 1`
- _check_test_fs
- fi
+ tc_status="fail"
+ fi
+ if $do_report && ! $first_test ; then
+ if [ $tc_status != "expunge" ] ; then
+ _make_testcase_report "$prev_seq" "$tc_status"
+ fi
+ fi
- seq="after_$seqnum"
- done
+ sect_stop=`_wallclock`
+ interrupt=false
_wrapup
+ interrupt=true
echo
- umount $TEST_DEV 2> /dev/null
- umount $SCRATCH_DEV 2> /dev/null
+ _test_unmount 2> /dev/null
+ _scratch_unmount 2> /dev/null
+}
+
+for ((iters = 0; iters < $iterations; iters++)) do
+ for section in $HOST_OPTIONS_SECTIONS; do
+ run_section $section
+ done
done
interrupt=false
-status=`expr $sum_bad`
+status=`expr $sum_bad != 0`
exit