tmp=/tmp/$$
status=0
needwrap=true
+needsum=true
n_try=0
try=""
n_bad=0
+sum_bad=0
bad=""
notrun=""
interrupt=true
showme=false
have_test_arg=false
randomize=false
-here=`pwd`
-FSTYP=xfs
+export here=`pwd`
+xfile=""
-SUPPORTED_TESTS="[0-9][0-9][0-9] [0-9][0-9][0-9][0-9]"
-SRC_DIR="tests"
-SRC_GROUPS="generic shared"
-
-# generic initialization
+# start the initialisation work now
iam=check
+export MSGVERB="text:action"
export QA_CHECK_FS=${QA_CHECK_FS:=true}
-# by default don't output timestamps
-timestamp=${TIMESTAMP:=false}
-
# number of diff lines from a failed test, 0 for whole output
export DIFF_LENGTH=${DIFF_LENGTH:=10}
# by default don't output timestamps
timestamp=${TIMESTAMP:=false}
+rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist
+
+# we need common/config
+if ! . ./common/config
+then
+ echo "$iam: failed to source common/config"
+ exit 1
+fi
+
+SUPPORTED_TESTS="[0-9][0-9][0-9] [0-9][0-9][0-9][0-9]"
+SRC_GROUPS="generic shared"
+export SRC_DIR="tests"
+
usage()
{
echo "Usage: $0 [options] [testlist]"'
check options
- -xfs test XFS (default)
- -udf test UDF
-nfs test NFS
+ -cifs test CIFS
+ -tmpfs test TMPFS
-l line mode diff
-udiff show unified diff (default)
-n show me, do not run tests
-T output timestamps
-r randomize test order
--large-fs optimise scratch device for large filesystems
+ -s section run only specified section from config file
testlist options
-g group[,group...] include tests from these groups
-x group[,group...] exclude tests from these groups
+ -X file exclude individual tests
+ -E external_file exclude individual tests
[testlist] include tests matching names in testlist
'
exit 0
}
-_setenvironment()
-{
- MSGVERB="text:action"
- export MSGVERB
-}
-
get_group_list()
{
grp=$1
echo $grpl
}
+# find all tests, excluding files that are test metadata such as group files.
+# This assumes that tests are defined purely by alphanumeric filenames with no
+# ".xyz" extensions in the name.
+get_all_tests()
+{
+ touch $tmp.list
+ for d in $SRC_GROUPS $FSTYP; do
+ ls $SRC_DIR/$d/* | \
+ grep -v "\..*" | \
+ grep -v "group\|Makefile" >> $tmp.list 2>/dev/null
+ done
+}
+
+# takes the list of tests to run in $tmp.list, and removes the tests passed to
+# the function from that list.
+trim_test_list()
+{
+ test_list="$*"
+
+ rm -f $tmp.grep
+ numsed=0
+ for t in $test_list
+ do
+ if [ $numsed -gt 100 ]; then
+ grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
+ mv $tmp.tmp $tmp.list
+ numsed=0
+ rm -f $tmp.grep
+ fi
+ echo "^$t\$" >>$tmp.grep
+ numsed=`expr $numsed + 1`
+ done
+ grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
+ mv $tmp.tmp $tmp.list
+}
+
+
_wallclock()
{
- date "+%H %M %S" | $AWK_PROG '{ print $1*3600 + $2*60 + $3 }'
+ date "+%s"
}
_timestamp()
echo -n " [$now]"
}
-# start the initialisation work now
-_setenvironment
+_prepare_test_list()
+{
+ unset list
+ # Tests specified on the command line
+ if [ -s $tmp.arglist ]; then
+ cat $tmp.arglist > $tmp.list
+ else
+ touch $tmp.list
+ fi
-rm -f $tmp.list $tmp.tmp $tmp.sed $here/$iam.out
+ # Specified groups to include
+ for group in $GROUP_LIST; do
+ list=$(get_group_list $group)
+ if [ -z "$list" ]; then
+ echo "Group \"$group\" is empty or not defined?"
+ exit 1
+ fi
-# Autodetect fs type based on what's on $TEST_DEV
-if [ "$HOSTOS" == "Linux" ]; then
- FSTYP=`blkid -c /dev/null -s TYPE -o value $TEST_DEV`
-fi
-export FSTYP
+ for t in $list; do
+ grep -s "^$t\$" $tmp.list >/dev/null || \
+ echo "$t" >>$tmp.list
+ done
+ done
-# we need common.config
-if ! . ./common.config
-then
- echo "$iam: failed to source common.config"
- exit 1
-fi
+ if ! $have_test_arg && [ -z "$GROUP_LIST" ]; then
+ # no test numbers, do everything
+ get_all_tests
+ fi
+
+ # Specified groups to exclude
+ for xgroup in $XGROUP_LIST; do
+ list=$(get_group_list $xgroup)
+ if [ -z "$list" ]; then
+ echo "Group \"$xgroup\" is empty or not defined?"
+ exit 1
+ fi
+
+ trim_test_list $list
+ done
+
+ # sort the list of tests into numeric order
+ list=`sort -n $tmp.list | uniq`
+ rm -f $tmp.list $tmp.tmp $tmp.grep
+
+ if $randomize
+ then
+ list=`echo $list | awk -f randomize.awk`
+ fi
+}
# Process command arguments first.
while [ $# -gt 0 ]; do
case "$1" in
-\? | -h | --help) usage ;;
- -udf) FSTYP=udf ;;
- -xfs) FSTYP=xfs ;;
-nfs) FSTYP=nfs ;;
+ -cifs) FSTYP=cifs ;;
+ -tmpfs) FSTYP=tmpfs ;;
-g) group=$2 ; shift ;
- group_list=$(get_group_list $group)
- if [ -z "$group_list" ]; then
- echo "Group \"$group\" is empty or not defined?"
- exit 1
- fi
-
- [ ! -s $tmp.list ] && touch $tmp.list
- for t in $group_list; do
- grep -s "^$t\$" $tmp.list >/dev/null || \
- echo "$t" >>$tmp.list
- done
-
+ GROUP_LIST="$GROUP_LIST $group"
;;
-x) xgroup=$2 ; shift ;
- [ ! -s $tmp.list ] && ls $SUPPORTED_TESTS >$tmp.list 2>/dev/null
- group_list=$(get_group_list $xgroup)
- if [ -z "$group_list" ]; then
- echo "Group \"$xgroup\" is empty or not defined?"
- exit 1
- fi
+ XGROUP_LIST="$XGROUP_LIST $xgroup"
+ ;;
- rm -f $tmp.sed
- numsed=0
- for t in $group_list
- do
- if [ $numsed -gt 100 ]; then
- sed -f $tmp.sed <$tmp.list >$tmp.tmp
- mv $tmp.tmp $tmp.list
- numsed=0
- rm -f $tmp.sed
- fi
- echo "/^$t\$/d" >>$tmp.sed
- numsed=`expr $numsed + 1`
+ -X) xfile=$2; shift ;
+ for d in $SRC_GROUPS $FSTYP; do
+ [ -f $SRC_DIR/$d/$xfile ] || continue
+ for f in `cat $SRC_DIR/$d/$xfile`; do
+ echo $d/$f >> $tmp.xlist
+ done
done
- sed -f $tmp.sed <$tmp.list >$tmp.tmp
- mv $tmp.tmp $tmp.list
;;
-
+ -E) xfile=$2; shift ;
+ if [ -f $xfile ]; then
+ cat "$xfile" >> $tmp.xlist
+ fi
+ ;;
+ -s) RUN_SECTION="$RUN_SECTION $2"; shift ;;
-l) diff="diff" ;;
-udiff) diff="$diff -u" ;;
exit $status
;;
*) test_dir=`dirname $1`
+ test_dir=${test_dir#$SRC_DIR/*}
test_name=`basename $1`
group_file=$SRC_DIR/$test_dir/group
- if grep "^$testname" $group_file >/dev/null ; then
+ if egrep "^$test_name" $group_file >/dev/null ; then
# in group file ... OK
- echo $SRC_DIR/$1 >>$tmp.list
+ echo $SRC_DIR/$test_dir/$test_name >>$tmp.arglist
else
# oops
echo "$1 - unknown test, ignored"
done
fi
-if [ -s $tmp.list ]; then
- # found some valid test numbers ... this is good
- :
-elif $have_test_arg; then
- # had test numbers, but none in group file ... do nothing
- touch $tmp.list
-else
- # no test numbers, do everything from group file
- sed -n -e '/^[0-9][0-9][0-9]*/s/[ ].*//p' <group >$tmp.list
-fi
-
-# sort the list of tests into numeric order
-list=`sort -n $tmp.list`
-rm -f $tmp.list $tmp.tmp $tmp.sed
-
-if $randomize
-then
- list=`echo $list | awk -f randomize.awk`
-fi
-
-# we need common.rc
-if ! . ./common.rc
+# we need common/rc
+if ! . ./common/rc
then
- echo "check: failed to source common.rc"
+ echo "check: failed to source common/rc"
exit 1
fi
exit 1
fi
-# Ok, time to start running...
+_wipe_counters()
+{
+ n_try="0"
+ n_bad="0"
+ unset try notrun bad
+}
_wrapup()
{
+ seq="check"
+ check="$RESULT_BASE/check"
+
if $showme
then
:
elif $needwrap
then
- if [ -f check.time -a -f $tmp.time ]
+ if [ -f $check.time -a -f $tmp.time ]
then
- cat check.time $tmp.time \
+ cat $check.time $tmp.time \
| $AWK_PROG '
{ t[$1] = $2 }
END { if (NR > 0) {
}
}' \
| sort -n >$tmp.out
- mv $tmp.out check.time
+ mv $tmp.out $check.time
fi
- echo "" >>check.log
- date >>check.log
- echo $list | fmt | sed -e 's/^/ /' -e "s;$SRC_DIR/;;g" >>check.log
- $interrupt && echo "Interrupted!" >>check.log
-
+ echo "" >>$check.log
+ date >>$check.log
+ echo $list | fmt | sed -e 's/^/ /' -e "s;$SRC_DIR/;;g" >>$check.log
+ $interrupt && echo "Interrupted!" >>$check.log
+
+ echo "SECTION -- $section" >>$tmp.summary
+ echo "=========================" >>$tmp.summary
if [ ! -z "$n_try" -a $n_try != 0 ]
then
echo "Ran:$try"
+ echo "Ran:$try" >>$tmp.summary
fi
if [ ! -z "$notrun" ]
then
echo "Not run:$notrun"
- echo "Not run:$notrun" >>check.log
+ echo "Not run:$notrun" >>$check.log
+ echo "Not run:$notrun" >>$tmp.summary
fi
if [ ! -z "$n_bad" -a $n_bad != 0 ]
then
echo "Failures:$bad"
echo "Failed $n_bad of $n_try tests"
- echo "Failures:$bad" | fmt >>check.log
- echo "Failed $n_bad of $n_try tests" >>check.log
+ echo "Failures:$bad" | fmt >>$check.log
+ echo "Failed $n_bad of $n_try tests" >>$check.log
+ echo "Failures:$bad" >>$tmp.summary
+ echo "Failed $n_bad of $n_try tests" >>$tmp.summary
else
echo "Passed all $n_try tests"
- echo "Passed all $n_try tests" >>check.log
+ echo "Passed all $n_try tests" >>$check.log
+ echo "Passed all $n_try tests" >>$tmp.summary
fi
+ echo "" >>$tmp.summary
needwrap=false
fi
+ sum_bad=`expr $sum_bad + $n_bad`
+ _wipe_counters
rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
- rm -f $tmp.*
+ if ! $OPTIONS_HAVE_SECTIONS; then
+ rm -f $tmp.*
+ fi
}
-trap "_wrapup; exit \$status" 0 1 2 3 15
-
-# don't leave old full output behind on a clean run
-rm -f check.full
+_summary()
+{
+ _wrapup
+ if $showme; then
+ :
+ elif $needsum; then
+ count=`wc -L $tmp.summary | cut -f1 -d" "`
+ cat $tmp.summary
+ needsum=false
+ fi
+ rm -f $tmp.*
+}
-[ -f check.time ] || touch check.time
+_prepare_test_list
-# print out our test configuration
-echo "FSTYP -- `_full_fstyp_details`"
-echo "PLATFORM -- `_full_platform_details`"
-if [ ! -z "$SCRATCH_DEV" ]; then
- echo "MKFS_OPTIONS -- `_scratch_mkfs_options`"
- echo "MOUNT_OPTIONS -- `_scratch_mount_options`"
-fi
-echo
-
-
-if [ ! -z "$SCRATCH_DEV" ]; then
- umount $SCRATCH_DEV 2>/dev/null
- # call the overridden mkfs - make sure the FS is built
- # the same as we'll create it later.
-
- if ! _scratch_mkfs $flag >$tmp.err 2>&1
- then
- echo "our local _scratch_mkfs routine ..."
- cat $tmp.err
- echo "check: failed to mkfs \$SCRATCH_DEV using specified options"
- exit 1
- fi
-
- # call the overridden mount - make sure the FS mounts with
- # the same options that we'll mount with later.
- if ! _scratch_mount >$tmp.err 2>&1
- then
- echo "our local mount routine ..."
- cat $tmp.err
- echo "check: failed to mount \$SCRATCH_DEV using specified options"
- exit 1
- fi
+if $OPTIONS_HAVE_SECTIONS; then
+ trap "_summary; exit \$status" 0 1 2 3 15
+else
+ trap "_wrapup; exit \$status" 0 1 2 3 15
fi
-seq="check"
-_check_test_fs
+for section in $HOST_OPTIONS_SECTIONS; do
+ OLD_FSTYP=$FSTYP
+ OLD_MOUNT_OPTIONS=$MOUNT_OPTIONS
+ get_next_config $section
+
+	# Do we need to run only some sections?
+ if [ ! -z "$RUN_SECTION" ]; then
+ skip=true
+ for s in $RUN_SECTION; do
+ if [ $section == $s ]; then
+ skip=false
+ fi
+ done
+ if $skip; then
+ continue
+ fi
+ fi
+
+ mkdir -p $RESULT_BASE
+ if [ ! -d $RESULT_BASE ]; then
+ echo "failed to create results directory $RESULT_BASE"
+ exit 1;
+ fi
+
+ if $OPTIONS_HAVE_SECTIONS; then
+ echo "SECTION -- $section"
+ fi
+
+ if $RECREATE_TEST_DEV || [ "$OLD_FSTYP" != "$FSTYP" ]; then
+ echo "RECREATING -- $FSTYP on $TEST_DEV"
+ umount $TEST_DEV 2> /dev/null
+ if ! _test_mkfs >$tmp.err 2>&1
+ then
+ echo "our local _test_mkfs routine ..."
+ cat $tmp.err
+ echo "check: failed to mkfs \$TEST_DEV using specified options"
+ exit 1
+ fi
+ out=`_mount_or_remount_rw "$MOUNT_OPTIONS" $TEST_DEV $TEST_DIR`
+ if [ $? -ne 1 ]; then
+ echo $out
+ exit 1
+ fi
+ _prepare_test_list
+ elif [ "$OLD_MOUNT_OPTIONS" != "$MOUNT_OPTIONS" ]; then
+ umount $TEST_DEV 2> /dev/null
+ out=`_mount_or_remount_rw "$MOUNT_OPTIONS" $TEST_DEV $TEST_DIR`
+ if [ $? -ne 1 ]; then
+ echo $out
+ exit 1
+ fi
+ fi
+
+ init_rc
-for seq in $list
-do
- err=false
+ seq="check"
+ check="$RESULT_BASE/check"
- # the filename for the test and the name output are different.
- # we don't include the tests/ directory in the name output.
- seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`
+ # don't leave old full output behind on a clean run
+ rm -f $check.full
- echo -n "$seqnum"
+ [ -f $check.time ] || touch $check.time
- if $showme
- then
+ # print out our test configuration
+ echo "FSTYP -- `_full_fstyp_details`"
+ echo "PLATFORM -- `_full_platform_details`"
+ if [ ! -z "$SCRATCH_DEV" ]; then
+ echo "MKFS_OPTIONS -- `_scratch_mkfs_options`"
+ echo "MOUNT_OPTIONS -- `_scratch_mount_options`"
+ fi
echo
- continue
- elif [ ! -f $seq ]
- then
- echo " - no such test?"
- else
- # really going to try and run this one
- #
- rm -f $seq.out.bad
-
- # slashes now in names, sed barfs on them so use grep
- lasttime=`grep -w ^$seq check.time | awk '// {print $2}'`
- if [ "X$lasttime" != X ]; then
- echo -n " ${lasttime}s ..."
- else
- echo -n " " # prettier output with timestamps.
+ needwrap=true
+
+ if [ ! -z "$SCRATCH_DEV" ]; then
+ umount $SCRATCH_DEV 2>/dev/null
+ # call the overridden mkfs - make sure the FS is built
+ # the same as we'll create it later.
+
+ if ! _scratch_mkfs $flag >$tmp.err 2>&1
+ then
+ echo "our local _scratch_mkfs routine ..."
+ cat $tmp.err
+ echo "check: failed to mkfs \$SCRATCH_DEV using specified options"
+ exit 1
+ fi
+
+ # call the overridden mount - make sure the FS mounts with
+ # the same options that we'll mount with later.
+ if ! _scratch_mount >$tmp.err 2>&1
+ then
+ echo "our local mount routine ..."
+ cat $tmp.err
+ echo "check: failed to mount \$SCRATCH_DEV using specified options"
+ exit 1
+ fi
fi
- rm -f core $seq.notrun
- start=`_wallclock`
- $timestamp && echo -n " ["`date "+%T"`"]"
- [ ! -x $seq ] && chmod u+x $seq # ensure we can run it
- $LOGGER_PROG "run xfstest $seqnum"
- ./$seq >$tmp.rawout 2>&1
- sts=$?
- $timestamp && _timestamp
- stop=`_wallclock`
+ seqres="$check"
+ _check_test_fs
- _fix_malloc <$tmp.rawout >$tmp.out
- rm -f $tmp.rawout
+ for seq in $list
+ do
+ err=false
- if [ -f core ]
- then
- echo -n " [dumped core]"
- mv core $seq.core
- err=true
- fi
+ # the filename for the test and the name output are different.
+ # we don't include the tests/ directory in the name output.
+ seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`
- if [ -f $seq.notrun ]
- then
- $timestamp || echo -n " [not run] "
- $timestamp && echo " [not run]" && echo -n " $seqnum -- "
- cat $seq.notrun
- notrun="$notrun $seqnum"
- else
- if [ $sts -ne 0 ]
- then
- echo -n " [failed, exit status $sts]"
- err=true
+ # Similarly, the result directory needs to replace the tests/
+ # part of the test location.
+ group=`dirname $seq`
+ if $OPTIONS_HAVE_SECTIONS; then
+ export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"`
+ seqres="$RESULT_BASE/$section/$seqnum"
+ else
+ export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
+ seqres="$RESULT_BASE/$seqnum"
fi
- if [ ! -f $seq.out ]
+
+ mkdir -p $RESULT_DIR
+
+ echo -n "$seqnum"
+
+ if $showme
+ then
+ echo
+ continue
+ elif [ ! -f $seq ]
then
- echo " - no qualified output"
- err=true
+ echo " - no such test?"
else
- if diff $seq.out $tmp.out >/dev/null 2>&1
+ # really going to try and run this one
+ #
+ rm -f $seqres.out.bad
+
+ # check if we really should run it
+ if [ -s $tmp.xlist ]; then
+ if grep $seqnum $tmp.xlist > /dev/null 2>&1 ; then
+ echo " [expunged]"
+ continue
+ fi
+ fi
+
+ # slashes now in names, sed barfs on them so use grep
+ lasttime=`grep -w ^$seqnum $check.time | awk '// {print $2}'`
+ if [ "X$lasttime" != X ]; then
+ echo -n " ${lasttime}s ..."
+ else
+ echo -n " " # prettier output with timestamps.
+ fi
+ rm -f core $seqres.notrun
+
+ start=`_wallclock`
+ $timestamp && echo -n " ["`date "+%T"`"]"
+ [ ! -x $seq ] && chmod u+x $seq # ensure we can run it
+ $LOGGER_PROG "run xfstest $seqnum"
+ ./$seq >$tmp.rawout 2>&1
+ sts=$?
+ $timestamp && _timestamp
+ stop=`_wallclock`
+
+ _fix_malloc <$tmp.rawout >$tmp.out
+ rm -f $tmp.rawout
+
+ if [ -f core ]
+ then
+ echo -n " [dumped core]"
+ mv core $RESULT_BASE/$seqnum.core
+ err=true
+ fi
+
+ if [ -f $seqres.notrun ]
then
- if $err
+ $timestamp || echo -n " [not run] "
+ $timestamp && echo " [not run]" && echo -n " $seqnum -- "
+ cat $seqres.notrun
+ notrun="$notrun $seqnum"
+ else
+ if [ $sts -ne 0 ]
then
- :
- else
- echo "$seqnum `expr $stop - $start`" >>$tmp.time
- echo -n " `expr $stop - $start`s"
+ echo -n " [failed, exit status $sts]"
+ err=true
fi
- echo ""
- else
- echo " - output mismatch (see $seq.out.bad)"
- mv $tmp.out $seq.out.bad
- $diff $seq.out $seq.out.bad | {
- if test "$DIFF_LENGTH" -le 0; then
- cat
+ if [ ! -f $seq.out ]
+ then
+ echo " - no qualified output"
+ err=true
+ else
+
+ # coreutils 8.16+ changed quote formats in error messages from
+ # `foo' to 'foo'. Filter old versions to match the new version.
+ sed -i "s/\`/\'/g" $tmp.out
+ if diff $seq.out $tmp.out >/dev/null 2>&1
+ then
+ if $err
+ then
+ :
+ else
+ echo "$seqnum `expr $stop - $start`" >>$tmp.time
+ echo -n " `expr $stop - $start`s"
+ fi
+ echo ""
else
- head -n "$DIFF_LENGTH"
- fi; } | \
- sed -e 's/^\(.\)/ \1/'
- echo " ..."
- echo " (Run '$diff $seq.out $seq.out.bad' to see the" \
- "entire diff)"
- err=true
+ echo " - output mismatch (see $seqres.out.bad)"
+ mv $tmp.out $seqres.out.bad
+ $diff $seq.out $seqres.out.bad | {
+ if test "$DIFF_LENGTH" -le 0; then
+ cat
+ else
+ head -n "$DIFF_LENGTH"
+ echo "..."
+ echo "(Run '$diff $seq.out $seqres.out.bad'" \
+ " to see the entire diff)"
+ fi; } | \
+ sed -e 's/^\(.\)/ \1/'
+ err=true
+ fi
+ fi
fi
+
fi
- fi
- fi
+ # come here for each test, except when $showme is true
+ #
+ if $err
+ then
+ bad="$bad $seqnum"
+ n_bad=`expr $n_bad + 1`
+ quick=false
+ fi
+ if [ ! -f $seqres.notrun ]
+ then
+ try="$try $seqnum"
+ n_try=`expr $n_try + 1`
+ test -f ${RESULT_DIR}/require_test && _check_test_fs
+ rm -f ${RESULT_DIR}/require_test
+ test -f ${RESULT_DIR}/require_scratch && _check_scratch_fs
+ rm -f ${RESULT_DIR}/require_scratch
+ fi
- # come here for each test, except when $showme is true
- #
- if $err
- then
- bad="$bad $seqnum"
- n_bad=`expr $n_bad + 1`
- quick=false
- fi
- if [ ! -f $seq.notrun ]
- then
- try="$try $seqnum"
- n_try=`expr $n_try + 1`
- _check_test_fs
- fi
-
- seq="after_$seqnum"
+ seq="after_$seqnum"
+ done
+ _wrapup
+ echo
+
+ umount $TEST_DEV 2> /dev/null
+ umount $SCRATCH_DEV 2> /dev/null
done
interrupt=false
-status=`expr $n_bad`
+status=`expr $sum_bad`
exit