#!/bin/bash
-#
-# Control script for QA
-#
+# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2000-2002,2006 Silicon Graphics, Inc. All Rights Reserved.
#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it would be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-#
+# Control script for QA
#
-
tmp=/tmp/$$
status=0
needwrap=true
n_bad=0
sum_bad=0
bad=""
+n_notrun=0
notrun=""
interrupt=true
diff="diff -u"
randomize=false
export here=`pwd`
xfile=""
+subdir_xfile=""
brief_test_summary=false
-
+do_report=false
DUMP_OUTPUT=false
+# This is a global variable used to pass test failure text to reporting gunk
+_err_msg=""
+
# start the initialisation work now
iam=check
# by default don't output timestamps
timestamp=${TIMESTAMP:=false}
-rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist
+rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist $tmp.report.*
SRC_GROUPS="generic shared"
export SRC_DIR="tests"
echo "Usage: $0 [options] [testlist]"'
check options
- -nfs test NFS
- -cifs test CIFS
+ -nfs test NFS
+ -glusterfs test GlusterFS
+ -cifs test CIFS
+ -9p test 9p
+ -virtiofs test virtiofs
-overlay test overlay
- -tmpfs test TMPFS
+ -pvfs2 test PVFS2
+ -tmpfs test TMPFS
+ -ubifs test ubifs
-l line mode diff
-udiff show unified diff (default)
-n show me, do not run tests
-r randomize test order
-d dump test output to stdout
-b brief test summary
+ -R fmt[,fmt] generate report in formats specified. Supported format: [xunit]
--large-fs optimise scratch device for large filesystems
-s section run only specified section from config file
-S section exclude the specified section from the config file
testlist options
-g group[,group...] include tests from these groups
-x group[,group...] exclude tests from these groups
- -X file exclude individual tests
+ -X exclude_file exclude individual tests
-E external_file exclude individual tests
[testlist] include tests matching names in testlist
+
+testlist argument is a list of tests in the form of <test dir>/<test name>.
+
+<test dir> is a directory under tests that contains a group file,
+with a list of the names of the tests in that directory.
+
+<test name> may be either a specific test file name (e.g. xfs/001) or
+a test file name match pattern (e.g. xfs/*).
+
+group argument is either a name of a tests group to collect from all
+the test dirs (e.g. quick) or a name of a tests group to collect from
+a specific tests dir in the form of <test dir>/<group name> (e.g. xfs/quick).
+If you want to run all the tests in the test suite, use "-g all" to specify all
+groups.
+
+exclude_file argument refers to a name of a file inside each test directory.
+For every test dir where this file is found, the listed test names are
+excluded from the list of tests to run from that test dir.
+
+external_file argument is a path to a single file containing a list of tests
+to exclude in the form of <test dir>/<test name>.
+
+examples:
+ check xfs/001
+ check -g quick
+ check -g xfs/quick
+ check -x stress xfs/*
+ check -X .exclude -g auto
+ check -E ~/.xfstests.exclude
'
exit 0
}
+get_sub_group_list()
+{
+ local d=$1
+ local grp=$2
+
+ test -s "$SRC_DIR/$d/group" || return 1
+
+ local grpl=$(sed -n < $SRC_DIR/$d/group \
+ -e 's/#.*//' \
+ -e 's/$/ /' \
+ -e "s;^\($VALID_TEST_NAME\).* $grp .*;$SRC_DIR/$d/\1;p")
+ echo $grpl
+}
+
get_group_list()
{
- grp=$1
+ local grp=$1
+ local grpl=""
+ local sub=$(dirname $grp)
+ local fsgroup="$FSTYP"
+
+ if [ -n "$sub" -a "$sub" != "." -a -d "$SRC_DIR/$sub" ]; then
+ # group is given as <subdir>/<group> (e.g. xfs/quick)
+ grp=$(basename $grp)
+ get_sub_group_list $sub $grp
+ return
+ fi
- for d in $SRC_GROUPS $FSTYP; do
+ if [ "$FSTYP" = ext2 -o "$FSTYP" = ext3 ]; then
+ fsgroup=ext4
+ fi
+ for d in $SRC_GROUPS $fsgroup; do
if ! test -d "$SRC_DIR/$d" ; then
continue
fi
- l=$(sed -n < $SRC_DIR/$d/group \
- -e 's/#.*//' \
- -e 's/$/ /' \
- -e "s;^\($VALID_TEST_NAME\).* $grp .*;$SRC_DIR/$d/\1;p")
- grpl="$grpl $l"
+ grpl="$grpl $(get_sub_group_list $d $grp)"
done
echo $grpl
}
done
grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
mv $tmp.tmp $tmp.list
+ rm -f $tmp.grep
}
fi
# Specified groups to include
- for group in $GROUP_LIST; do
- list=$(get_group_list $group)
- if [ -z "$list" ]; then
- echo "Group \"$group\" is empty or not defined?"
- exit 1
- fi
+ # Note that the CLI processing adds a leading space to the first group
+ # parameter, so we have to catch that here checking for "all"
+ if ! $have_test_arg && [ "$GROUP_LIST" == " all" ]; then
+ # no test numbers, do everything
+ get_all_tests
+ else
+ for group in $GROUP_LIST; do
+ list=$(get_group_list $group)
+ if [ -z "$list" ]; then
+ echo "Group \"$group\" is empty or not defined?"
+ exit 1
+ fi
- for t in $list; do
- grep -s "^$t\$" $tmp.list >/dev/null || \
+ for t in $list; do
+ grep -s "^$t\$" $tmp.list >/dev/null || \
echo "$t" >>$tmp.list
+ done
done
- done
-
- if ! $have_test_arg && [ -z "$GROUP_LIST" ]; then
- # no test numbers, do everything
- get_all_tests
fi
# Specified groups to exclude
done
# sort the list of tests into numeric order
- list=`sort -n $tmp.list | uniq`
- rm -f $tmp.list $tmp.tmp $tmp.grep
-
- if $randomize
- then
- list=`echo $list | awk -f randomize.awk`
+ if $randomize; then
+ if type shuf >& /dev/null; then
+ sorter="shuf"
+ else
+ sorter="awk -v seed=$RANDOM -f randomize.awk"
+ fi
+ else
+ sorter="cat"
fi
+ list=`sort -n $tmp.list | uniq | $sorter`
+ rm -f $tmp.list
}
# Process command arguments first.
-\? | -h | --help) usage ;;
-nfs) FSTYP=nfs ;;
+ -glusterfs) FSTYP=glusterfs ;;
-cifs) FSTYP=cifs ;;
- -overlay) FSTYP=overlay ;;
+ -9p) FSTYP=9p ;;
+ -virtiofs) FSTYP=virtiofs ;;
+ -overlay) FSTYP=overlay; export OVERLAY=true ;;
+ -pvfs2) FSTYP=pvfs2 ;;
-tmpfs) FSTYP=tmpfs ;;
+ -ubifs) FSTYP=ubifs ;;
-g) group=$2 ; shift ;
GROUP_LIST="$GROUP_LIST ${group//,/ }"
XGROUP_LIST="$XGROUP_LIST ${xgroup//,/ }"
;;
- -X) xfile=$2; shift ;
- for d in $SRC_GROUPS $FSTYP; do
- [ -f $SRC_DIR/$d/$xfile ] || continue
- for f in `sed "s/#.*$//" $SRC_DIR/$d/$xfile`; do
- echo $d/$f >> $tmp.xlist
- done
- done
+ -X) subdir_xfile=$2; shift ;
;;
-E) xfile=$2; shift ;
if [ -f $xfile ]; then
-T) timestamp=true ;;
-d) DUMP_OUTPUT=true ;;
-b) brief_test_summary=true;;
-
+ -R) report_fmt=$2 ; shift ;
+ REPORT_LIST="$REPORT_LIST ${report_fmt//,/ }"
+ do_report=true
+ ;;
--large-fs) export LARGE_SCRATCH_DEV=yes ;;
--extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${r#*=} ;;
shift
done
-# we need common/config, source it after processing args, overlay needs FSTYP
-# set before sourcing common/config
-if ! . ./common/config; then
- echo "$iam: failed to source common/config"
+# we need common/rc, that also sources common/config. We need to source it
+# after processing args, overlay needs FSTYP set before sourcing common/config
+if ! . ./common/rc; then
+ echo "check: failed to source common/rc"
exit 1
fi
+if [ -n "$subdir_xfile" ]; then
+ for d in $SRC_GROUPS $FSTYP; do
+ [ -f $SRC_DIR/$d/$subdir_xfile ] || continue
+ for f in `sed "s/#.*$//" $SRC_DIR/$d/$subdir_xfile`; do
+ echo $d/$f >> $tmp.xlist
+ done
+ done
+fi
+
# Process tests from command line now.
if $have_test_arg; then
while [ $# -gt 0 ]; do
status=1
exit $status
;;
- *) test_dir=`dirname $1`
- test_dir=${test_dir#$SRC_DIR/*}
- test_name=`basename $1`
- group_file=$SRC_DIR/$test_dir/group
-
- if egrep "^$test_name" $group_file >/dev/null ; then
- # in group file ... OK
- echo $SRC_DIR/$test_dir/$test_name >>$tmp.arglist
- else
- # oops
- echo "$1 - unknown test, ignored"
- fi
+ *) # Expand test pattern (e.g. xfs/???, *fs/001)
+ list=$(cd $SRC_DIR; echo $1)
+ for t in $list; do
+ test_dir=`dirname $t`
+ test_dir=${test_dir#$SRC_DIR/*}
+ test_name=`basename $t`
+ group_file=$SRC_DIR/$test_dir/group
+
+ if egrep -q "^$test_name" $group_file; then
+ # in group file ... OK
+ echo $SRC_DIR/$test_dir/$test_name \
+ >>$tmp.arglist
+ else
+ # oops
+ echo "$t - unknown test, ignored"
+ fi
+ done
;;
esac
shift
done
-fi
-
-# we need common/rc
-if ! . ./common/rc
-then
- echo "check: failed to source common/rc"
- exit 1
+elif [ -z "$GROUP_LIST" ]; then
+ # default group list is the auto group. If any other group or test is
+ # specified, we use that instead.
+ GROUP_LIST="auto"
fi
if [ `id -u` -ne 0 ]
{
n_try="0"
n_bad="0"
+ n_notrun="0"
unset try notrun bad
}
check="$RESULT_BASE/check"
if $showme; then
- :
+ if $needwrap; then
+ if $do_report; then
+ _make_section_report
+ fi
+ needwrap=false
+ fi
elif $needwrap; then
if [ -f $check.time -a -f $tmp.time ]; then
cat $check.time $tmp.time \
echo "Ran:$try" >>$check.log
fi
- $interrupt && echo "Interrupted!" >>$check.log
+ $interrupt && echo "Interrupted!" | tee -a $check.log
if [ ! -z "$notrun" ]; then
if [ $brief_test_summary == "false" ]; then
echo "Passed all $n_try tests" >>$tmp.summary
fi
echo "" >>$tmp.summary
+ if $do_report; then
+ _make_section_report
+ fi
needwrap=false
fi
{
if [ -f ${RESULT_DIR}/require_test ]; then
_check_test_fs || err=true
- rm -f ${RESULT_DIR}/require_test
+ rm -f ${RESULT_DIR}/require_test*
+ else
+ _test_unmount 2> /dev/null
fi
if [ -f ${RESULT_DIR}/require_scratch ]; then
_check_scratch_fs || err=true
- rm -f ${RESULT_DIR}/require_scratch
+ rm -f ${RESULT_DIR}/require_scratch*
+ fi
+ _scratch_unmount 2> /dev/null
+}
+
+_expunge_test()
+{
+ local TEST_ID="$1"
+ if [ -s $tmp.xlist ]; then
+ if grep -q $TEST_ID $tmp.xlist; then
+ echo " [expunged]"
+ return 1
+ fi
fi
+ return 0
}
+# Make the check script unattractive to the OOM killer...
+OOM_SCORE_ADJ="/proc/self/oom_score_adj"
+test -w ${OOM_SCORE_ADJ} && echo -1000 > ${OOM_SCORE_ADJ}
+
+# ...and make the tests themselves somewhat more attractive to it, so that if
+# the system runs out of memory it'll be the test that gets killed and not the
+# test framework.
+_run_seq() {
+ bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq"
+}
+
+_detect_kmemleak
_prepare_test_list
if $OPTIONS_HAVE_SECTIONS; then
for section in $HOST_OPTIONS_SECTIONS; do
OLD_FSTYP=$FSTYP
- OLD_MOUNT_OPTIONS=$MOUNT_OPTIONS
+ OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS
get_next_config $section
# Do we need to run only some sections ?
echo "SECTION -- $section"
fi
+ sect_start=`_wallclock`
if $RECREATE_TEST_DEV || [ "$OLD_FSTYP" != "$FSTYP" ]; then
echo "RECREATING -- $FSTYP on $TEST_DEV"
_test_unmount 2> /dev/null
status=1
exit
fi
- out=`_mount_or_remount_rw "$MOUNT_OPTIONS" $TEST_DEV $TEST_DIR`
- if [ $? -ne 1 ]; then
- echo $out
+ if ! _test_mount
+ then
+ echo "check: failed to mount $TEST_DEV on $TEST_DIR"
status=1
exit
fi
_prepare_test_list
- elif [ "$OLD_MOUNT_OPTIONS" != "$MOUNT_OPTIONS" ]; then
+ elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then
_test_unmount 2> /dev/null
- out=`_mount_or_remount_rw "$MOUNT_OPTIONS" $TEST_DEV $TEST_DIR`
- if [ $? -ne 1 ]; then
- echo $out
+ if ! _test_mount
+ then
+ echo "check: failed to mount $TEST_DEV on $TEST_DIR"
status=1
exit
fi
# call the overridden mount - make sure the FS mounts with
# the same options that we'll mount with later.
- if ! _scratch_mount >$tmp.err 2>&1
+ if ! _try_scratch_mount >$tmp.err 2>&1
then
echo "our local mount routine ..."
cat $tmp.err
echo "check: failed to mount \$SCRATCH_DEV using specified options"
status=1
exit
+ else
+ _scratch_unmount
fi
fi
seqres="$check"
_check_test_fs
- for seq in $list
- do
- err=false
- if [ ! -f $seq ]; then
- # Try to get full name in case the user supplied only seq id
- # and the test has a name. A bit of hassle to find really
- # the test and not its sample output or helping files.
- bname=$(basename $seq)
- full_seq=$(find $(dirname $seq) -name $bname* -executable |
- awk '(NR == 1 || length < length(shortest)) { shortest = $0 }\
- END { print shortest }')
- if [ -f $full_seq ] \
- && [ x$(echo $bname | grep -o "^$VALID_TEST_ID") != x ]; then
- seq=$full_seq
- fi
- fi
+ err=false
+ first_test=true
+ prev_seq=""
+ for seq in $list ; do
+ # Run report for previous test!
+ if $err ; then
+ bad="$bad $seqnum"
+ n_bad=`expr $n_bad + 1`
+ tc_status="fail"
+ fi
+ if $do_report && ! $first_test ; then
+ if [ $tc_status != "expunge" ] ; then
+ _make_testcase_report "$prev_seq" "$tc_status"
+ fi
+ fi
+ first_test=false
- # the filename for the test and the name output are different.
- # we don't include the tests/ directory in the name output.
- export seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`
-
- # Similarly, the result directory needs to replace the tests/
- # part of the test location.
- group=`dirname $seq`
- if $OPTIONS_HAVE_SECTIONS; then
- export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"`
- seqres="$RESULT_BASE/$section/$seqnum"
- else
- export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
- seqres="$RESULT_BASE/$seqnum"
- fi
+ err=false
+ prev_seq="$seq"
+ if [ ! -f $seq ]; then
+ # Try to get full name in case the user supplied only
+ # seq id and the test has a name. A bit of hassle to
+ # find really the test and not its sample output or
+ # helping files.
+ bname=$(basename $seq)
+ full_seq=$(find $(dirname $seq) -name $bname* -executable |
+ awk '(NR == 1 || length < length(shortest)) { shortest = $0 }\
+ END { print shortest }')
+ if [ -f $full_seq ] && \
+ [ x$(echo $bname | grep -o "^$VALID_TEST_ID") != x ]; then
+ seq=$full_seq
+ fi
+ fi
+
+ # the filename for the test and the name output are different.
+ # we don't include the tests/ directory in the name output.
+ export seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`
- mkdir -p $RESULT_DIR
+ # Similarly, the result directory needs to replace the tests/
+ # part of the test location.
+ group=`dirname $seq`
+ if $OPTIONS_HAVE_SECTIONS; then
+ export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"`
+ REPORT_DIR="$RESULT_BASE/$section"
+ else
+ export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
+ REPORT_DIR="$RESULT_BASE"
+ fi
+ seqres="$REPORT_DIR/$seqnum"
- echo -n "$seqnum"
+ mkdir -p $RESULT_DIR
+ echo -n "$seqnum"
if $showme; then
+ _expunge_test $seqnum
+ if [ $? -eq 1 ]; then
+ tc_status="expunge"
+ continue
+ fi
echo
+ start=0
+ stop=0
+ tc_status="list"
+ n_notrun=`expr $n_notrun + 1`
continue
fi
+ tc_status="pass"
if [ ! -f $seq ]; then
echo " - no such test?"
- else
+ continue
+ fi
+
# really going to try and run this one
- #
rm -f $seqres.out.bad
# check if we really should run it
- if [ -s $tmp.xlist ]; then
- if grep $seqnum $tmp.xlist > /dev/null 2>&1 ; then
- echo " [expunged]"
- continue
- fi
+ _expunge_test $seqnum
+ if [ $? -eq 1 ]; then
+ tc_status="expunge"
+ continue
fi
+ # record that we really tried to run this test.
+ try="$try $seqnum"
+ n_try=`expr $n_try + 1`
+
# slashes now in names, sed barfs on them so use grep
lasttime=`grep -w ^$seqnum $check.time | awk '// {print $2}'`
if [ "X$lasttime" != X ]; then
- echo -n " ${lasttime}s ..."
+ echo -n " ${lasttime}s ... "
else
- echo -n " " # prettier output with timestamps.
+ echo -n " " # prettier output with timestamps.
fi
rm -f core $seqres.notrun
# _check_dmesg depends on this log in dmesg
touch ${RESULT_DIR}/check_dmesg
fi
+ _try_wipe_scratch_devs > /dev/null 2>&1
if [ "$DUMP_OUTPUT" = true ]; then
- ./$seq 2>&1 | tee $tmp.rawout
+ _run_seq 2>&1 | tee $tmp.out
# Because $? would get tee's return code
sts=${PIPESTATUS[0]}
else
- ./$seq >$tmp.rawout 2>&1
+ _run_seq >$tmp.out 2>&1
sts=$?
fi
- $timestamp && _timestamp
- stop=`_wallclock`
- _fix_malloc <$tmp.rawout >$tmp.out
- rm -f $tmp.rawout
+ if [ -f core ]; then
+ _dump_err_cont "[dumped core]"
+ mv core $RESULT_BASE/$seqnum.core
+ err=true
+ fi
- if [ -f core ]
- then
- echo -n " [dumped core]"
- mv core $RESULT_BASE/$seqnum.core
- err=true
+ if [ -f $seqres.notrun ]; then
+ $timestamp && _timestamp
+ stop=`_wallclock`
+ $timestamp || echo -n "[not run] "
+ $timestamp && echo " [not run]" && \
+ echo -n " $seqnum -- "
+ cat $seqres.notrun
+ notrun="$notrun $seqnum"
+ n_notrun=`expr $n_notrun + 1`
+ tc_status="notrun"
+ continue;
fi
- if [ -f $seqres.notrun ]
- then
- $timestamp || echo -n " [not run] "
- $timestamp && echo " [not run]" && echo -n " $seqnum -- "
- cat $seqres.notrun
- notrun="$notrun $seqnum"
- else
- if [ $sts -ne 0 ]
- then
- echo -n " [failed, exit status $sts]"
+ if [ $sts -ne 0 ]; then
+ _dump_err_cont "[failed, exit status $sts]"
+ _test_unmount 2> /dev/null
+ _scratch_unmount 2> /dev/null
+ rm -f ${RESULT_DIR}/require_test*
+ rm -f ${RESULT_DIR}/require_scratch*
err=true
- fi
- if [ ! -f $seq.out ]
- then
- echo " - no qualified output"
+ else
+ # the test apparently passed, so check for corruption
+ # and log messages that shouldn't be there.
+ _check_filesystems
+ _check_dmesg || err=true
+ fi
+
+ # Scan for memory leaks after every test so that associating
+ # a leak to a particular test will be as accurate as possible.
+ _check_kmemleak || err=true
+
+ # test ends after all checks are done.
+ $timestamp && _timestamp
+ stop=`_wallclock`
+
+ if [ ! -f $seq.out ]; then
+ _dump_err "no qualified output"
err=true
- else
-
- # coreutils 8.16+ changed quote formats in error messages from
- # `foo' to 'foo'. Filter old versions to match the new version.
- sed -i "s/\`/\'/g" $tmp.out
- if diff $seq.out $tmp.out >/dev/null 2>&1
- then
- if $err
- then
- :
- else
+ continue;
+ fi
+
+ # coreutils 8.16+ changed quote formats in error messages
+ # from `foo' to 'foo'. Filter old versions to match the new
+ # version.
+ sed -i "s/\`/\'/g" $tmp.out
+ if diff $seq.out $tmp.out >/dev/null 2>&1 ; then
+ if ! $err ; then
echo "$seqnum `expr $stop - $start`" >>$tmp.time
echo -n " `expr $stop - $start`s"
- fi
- echo ""
- else
- echo " - output mismatch (see $seqres.out.bad)"
- mv $tmp.out $seqres.out.bad
- $diff $seq.out $seqres.out.bad | {
- if test "$DIFF_LENGTH" -le 0; then
- cat
- else
- head -n "$DIFF_LENGTH"
- echo "..."
- echo "(Run '$diff $seq.out $seqres.out.bad'" \
- " to see the entire diff)"
- fi; } | \
- sed -e 's/^\(.\)/ \1/'
- err=true
fi
- fi
- try="$try $seqnum"
- n_try=`expr $n_try + 1`
- _check_filesystems
- _check_dmesg || err=true
+ echo ""
+ else
+ _dump_err "- output mismatch (see $seqres.out.bad)"
+ mv $tmp.out $seqres.out.bad
+ $diff $seq.out $seqres.out.bad | {
+ if test "$DIFF_LENGTH" -le 0; then
+ cat
+ else
+ head -n "$DIFF_LENGTH"
+ echo "..."
+ echo "(Run '$diff $here/$seq.out $seqres.out.bad'" \
+ " to see the entire diff)"
+ fi; } | sed -e 's/^\(.\)/ \1/'
+ err=true
fi
+ done
- fi
-
- # come here for each test, except when $showme is true
- #
- if $err
- then
+ # make sure we record the status of the last test we ran.
+ if $err ; then
bad="$bad $seqnum"
n_bad=`expr $n_bad + 1`
- quick=false
- fi
+ tc_status="fail"
+ fi
+ if $do_report && ! $first_test ; then
+ if [ $tc_status != "expunge" ] ; then
+ _make_testcase_report "$prev_seq" "$tc_status"
+ fi
+ fi
- seq="after_$seqnum"
- done
+ sect_stop=`_wallclock`
+ interrupt=false
_wrapup
+ interrupt=true
echo
_test_unmount 2> /dev/null
done
interrupt=false
-status=`expr $sum_bad`
+status=`expr $sum_bad != 0`
exit